[SPARK-17153][SQL] Should read partition data when reading new files in filestream without globbing #14803
Changes from 9 commits
@@ -608,6 +608,73 @@ class FileStreamSourceSuite extends FileStreamSourceTest {

  // =============== other tests ================

  test("read new files in partitioned table without globbing, should read partition data") {
Contributor: We should probably also have an explicit test for the case where schema inference is turned on (you implicitly test it some with the code changed below).

Member (Author): Added a test for it.
    withTempDirs { case (dir, tmp) =>
      val partitionFooSubDir = new File(dir, "partition=foo")
      val partitionBarSubDir = new File(dir, "partition=bar")

      val schema = new StructType().add("value", StringType).add("partition", StringType)
      val fileStream = createFileStream("json", s"${dir.getCanonicalPath}", Some(schema))
      val filtered = fileStream.filter($"value" contains "keep")
      testStream(filtered)(
        // Create new partition=foo sub dir and write to it
        AddTextFileData("{'value': 'drop1'}\n{'value': 'keep2'}", partitionFooSubDir, tmp),
        CheckAnswer(("keep2", "foo")),

        // Append to same partition=foo sub dir
        AddTextFileData("{'value': 'keep3'}", partitionFooSubDir, tmp),
        CheckAnswer(("keep2", "foo"), ("keep3", "foo")),

        // Create new partition sub dir and write to it
        AddTextFileData("{'value': 'keep4'}", partitionBarSubDir, tmp),
        CheckAnswer(("keep2", "foo"), ("keep3", "foo"), ("keep4", "bar")),

        // Append to same partition=bar sub dir
        AddTextFileData("{'value': 'keep5'}", partitionBarSubDir, tmp),
        CheckAnswer(("keep2", "foo"), ("keep3", "foo"), ("keep4", "bar"), ("keep5", "bar"))
      )
    }
  }

  test("when schema inference is turned on, should read partition data") {
    def createFile(content: String, src: File, tmp: File): Unit = {
      val tempFile = Utils.tempFileWith(new File(tmp, "text"))
      val finalFile = new File(src, tempFile.getName)
      src.mkdirs()
      require(stringToFile(tempFile, content).renameTo(finalFile))
    }

    withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "true") {
      withTempDirs { case (dir, tmp) =>
        val partitionFooSubDir = new File(dir, "partition=foo")
        val partitionBarSubDir = new File(dir, "partition=bar")

        // Create file in partition, so we can infer the schema.
        createFile("{'value': 'drop0'}", partitionFooSubDir, tmp)

        val fileStream = createFileStream("json", s"${dir.getCanonicalPath}")
        val filtered = fileStream.filter($"value" contains "keep")
        testStream(filtered)(
          // Append to same partition=foo sub dir
          AddTextFileData("{'value': 'drop1'}\n{'value': 'keep2'}", partitionFooSubDir, tmp),
          CheckAnswer(("keep2", "foo")),

          // Append to same partition=foo sub dir
          AddTextFileData("{'value': 'keep3'}", partitionFooSubDir, tmp),
          CheckAnswer(("keep2", "foo"), ("keep3", "foo")),

          // Create new partition sub dir and write to it
          AddTextFileData("{'value': 'keep4'}", partitionBarSubDir, tmp),
          CheckAnswer(("keep2", "foo"), ("keep3", "foo"), ("keep4", "bar")),

          // Append to same partition=bar sub dir
          AddTextFileData("{'value': 'keep5'}", partitionBarSubDir, tmp),
          CheckAnswer(("keep2", "foo"), ("keep3", "foo"), ("keep4", "bar"), ("keep5", "bar"))
        )
      }
    }
  }

  test("fault tolerance") {
    withTempDirs { case (src, tmp) =>
      val fileStream = createFileStream("text", src.getCanonicalPath)

@@ -792,7 +859,7 @@ class FileStreamSourceSuite extends FileStreamSourceTest {
      }
      assert(src.listFiles().size === numFiles)

-     val files = spark.readStream.text(root.getCanonicalPath).as[String]
+     val files = spark.readStream.text(root.getCanonicalPath).as[(String, Int)]

      // Note this query will use constant folding to eliminate the file scan.
      // This is to avoid actually running a Spark job with 10000 tasks
By default, Structured Streaming from file based sources requires you to specify the schema, rather than rely on Spark to infer it automatically. This restriction ensures a consistent schema will be used for the streaming query, even in the case of failures. For ad-hoc use cases, you can re-enable schema inference by setting `spark.sql.streaming.schemaInference` to `true`.

Partition discovery does occur when subdirectories that are named `/key=value/` are present and listing will automatically recurse into these directories. If these columns appear in the user-provided schema, they will be filled in by Spark based on the path of the file being read. The directories that make up the partitioning scheme must be present when the query starts and must remain static. For example, it is okay to add `/data/year=2016/` when `/data/year=2015/` was present, but it is invalid to change the partitioning column (i.e. by creating the directory `/data/date=2016-04-17/`).
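
As a rough illustration of the behavior described above, here is a minimal sketch (not part of this PR) that re-enables schema inference and streams from a partitioned directory; the `/data/events` path, the JSON `value` field, and the console sink are assumptions made for the example:

```scala
import org.apache.spark.sql.SparkSession

object PartitionedFileStreamExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("partitioned-file-stream-example")
      .master("local[*]")
      // Re-enable schema inference for streaming file sources (off by default).
      .config("spark.sql.streaming.schemaInference", "true")
      .getOrCreate()
    import spark.implicits._

    // Assumed (hypothetical) layout:
    //   /data/events/partition=foo/*.json
    //   /data/events/partition=bar/*.json
    // Listing recurses into the partition=... subdirectories, and the
    // "partition" column is filled in from each file's path.
    val events = spark.readStream
      .format("json")
      .load("/data/events")

    // New files appended under an existing partition=... subdirectory, or under a
    // newly created one, should surface with their partition value populated.
    val query = events
      .filter($"value".contains("keep"))
      .writeStream
      .format("console")
      .start()

    query.awaitTermination()
  }
}
```

Under these assumptions, appending JSON files under `partition=foo` or creating a new `partition=bar` subdirectory would produce rows carrying the matching partition value, which is the behavior the new tests above assert.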