[SPARK-25299] Propose a new NIO transfer API for partition writing. #535
Changes from 1 commit
@@ -23,9 +23,6 @@
 import java.nio.channels.FileChannel;
 import java.util.Iterator;

-import org.apache.spark.api.shuffle.DefaultTransferrableWritableByteChannel;
-import org.apache.spark.api.shuffle.SupportsTransferTo;
-import org.apache.spark.api.shuffle.TransferrableWritableByteChannel;
 import scala.Option;
 import scala.Product2;
 import scala.collection.JavaConverters;
@@ -40,9 +37,14 @@
 import org.apache.spark.*;
 import org.apache.spark.annotation.Private;
 import org.apache.spark.api.java.Optional;
+import org.apache.spark.api.shuffle.DefaultTransferrableWritableByteChannel;
 import org.apache.spark.api.shuffle.MapShuffleLocations;
+import org.apache.spark.api.shuffle.TransferrableWritableByteChannel;
 import org.apache.spark.api.shuffle.ShuffleMapOutputWriter;
 import org.apache.spark.api.shuffle.ShufflePartitionWriter;
 import org.apache.spark.api.shuffle.ShuffleWriteSupport;
+import org.apache.spark.api.shuffle.SupportsTransferTo;
 import org.apache.spark.internal.config.package$;
 import org.apache.spark.io.CompressionCodec;
 import org.apache.spark.io.CompressionCodec$;
@@ -286,11 +288,6 @@ private long[] mergeSpills(SpillInfo[] spills,
     long[] partitionLengths = new long[numPartitions];
     try {
       if (spills.length == 0) {
-        // The contract we are working under states that we will open a partition writer for
-        // each partition, regardless of number of spills
-        for (int i = 0; i < numPartitions; i++) {
-          mapWriter.getNextPartitionWriter();
-        }
         return partitionLengths;
       } else {
         // There are multiple spills to merge, so none of these spill files' lengths were counted
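The deleted loop existed only to satisfy the old sequential contract: getNextPartitionWriter() had to be called once per partition, in order, even when there were no spills at all. With a lookup-by-index method the zero-spills branch has nothing to do. A minimal sketch of the interface change this commit relies on, using only the two method names visible in the diff (the exact signatures in the proposal may differ):

```java
import java.io.IOException;
import java.io.OutputStream;

// Minimal sketch, not the actual proposal sources: only members visible in
// this diff are included.
interface ShufflePartitionWriter {
  OutputStream openStream() throws IOException;
}

interface ShuffleMapOutputWriter {
  // Old contract (removed by this commit): writers had to be requested in
  // strict partition order, once per partition, even with nothing to write:
  //   ShufflePartitionWriter getNextPartitionWriter() throws IOException;

  // New contract: look the writer up by reduce partition id, so the
  // zero-spills branch can return the empty lengths array immediately.
  ShufflePartitionWriter getPartitionWriter(int reducePartitionId) throws IOException;
}
```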
@@ -364,7 +361,7 @@ private long[] mergeSpillsWithFileStream(
       }
       for (int partition = 0; partition < numPartitions; partition++) {
         boolean copyThrewExecption = true;
-        ShufflePartitionWriter writer = mapWriter.getNextPartitionWriter();
+        ShufflePartitionWriter writer = mapWriter.getPartitionWriter(partition);
         OutputStream partitionOutput = null;
         try {
           partitionOutput = writer.openStream();
Reviewer comment: I see that you are not wrapping this around a …

Author reply: You aren't, that's the point. The shielding of the close of the underlying file is handled by …
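For readers following this thread: the author's point is that the OutputStream returned by openStream() can itself protect the shared map-output file from being closed by per-partition cleanup. A hypothetical illustration of that idea; the class name and behavior here are assumptions, not the PR's actual implementation:

```java
import java.io.FilterOutputStream;
import java.io.OutputStream;

// Hypothetical shield stream: the writer hands this back from openStream(),
// so callers may close their per-partition stream freely without closing the
// shared underlying file stream.
class CloseShieldedPartitionStream extends FilterOutputStream {
  CloseShieldedPartitionStream(OutputStream sharedFileStream) {
    super(sharedFileStream);
  }

  @Override
  public void close() {
    // Intentionally do not close the shared stream; the ShuffleMapOutputWriter
    // closes it once all partitions have been written.
  }
}
```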
@@ -435,7 +432,7 @@ private long[] mergeSpillsWithTransferTo(
       }
       for (int partition = 0; partition < numPartitions; partition++) {
         boolean copyThrewExecption = true;
-        ShufflePartitionWriter writer = mapWriter.getNextPartitionWriter();
+        ShufflePartitionWriter writer = mapWriter.getPartitionWriter(partition);
         TransferrableWritableByteChannel partitionChannel = null;
         try {
           partitionChannel = writer instanceof SupportsTransferTo ?
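The ternary at the end of this hunk selects between the zero-copy path and a stream-backed fallback. A sketch of the complete selection logic, assuming SupportsTransferTo exposes an openTransferrableChannel() method and that DefaultTransferrableWritableByteChannel wraps a plain WritableByteChannel (both inferred from the imports above, not confirmed signatures):

```java
import java.io.IOException;
import java.nio.channels.Channels;

import org.apache.spark.api.shuffle.DefaultTransferrableWritableByteChannel;
import org.apache.spark.api.shuffle.ShufflePartitionWriter;
import org.apache.spark.api.shuffle.SupportsTransferTo;
import org.apache.spark.api.shuffle.TransferrableWritableByteChannel;

final class PartitionChannels {
  private PartitionChannels() {}

  // Mirrors the ternary in the hunk above; method shapes are assumptions.
  static TransferrableWritableByteChannel openChannel(ShufflePartitionWriter writer)
      throws IOException {
    return writer instanceof SupportsTransferTo
        // Zero-copy path: the writer exposes an NIO channel usable with
        // FileChannel#transferTo.
        ? ((SupportsTransferTo) writer).openTransferrableChannel()
        // Fallback: adapt the writer's OutputStream into a channel.
        : new DefaultTransferrableWritableByteChannel(
            Channels.newChannel(writer.openStream()));
  }
}
```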
Reviewer comment: I think you're missing the case for when file.exists() is false; you still need to set copyThrewException to false.

Author reply: The value of copyThrewException isn't used if the file does not exist.
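To make this exchange concrete, here is a reconstruction of the pattern under discussion, based only on the copyThrewExecption flag visible in the hunks above; the helper names are hypothetical:

```java
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import com.google.common.io.Closeables;

class SpillCopyPattern {
  // Stand-in for the actual byte-copy helper used in the PR.
  static void copyStream(InputStream in, OutputStream out) throws IOException {
    in.transferTo(out);
  }

  static void copyIfExists(File file, OutputStream partitionOutput) throws IOException {
    boolean copyThrewExecption = true; // spelling matches the variable in the diff
    InputStream spillStream = null;
    try {
      if (file.exists()) {
        spillStream = new FileInputStream(file);
        copyStream(spillStream, partitionOutput);
        copyThrewExecption = false; // only reached if the copy succeeded
      }
    } finally {
      // If the file never existed, spillStream is null and the flag is never
      // consulted, which is the author's point in the reply above. Otherwise,
      // close-failures are swallowed only when the copy itself already failed.
      if (spillStream != null) {
        Closeables.close(spillStream, copyThrewExecption);
      }
    }
  }
}
```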