/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.shuffle.sort;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

import scala.Product2;
import scala.Tuple2;
import scala.collection.Iterator;

import com.google.common.io.Closeables;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.spark.Partitioner;
import org.apache.spark.SparkConf;
import org.apache.spark.TaskContext;
import org.apache.spark.executor.ShuffleWriteMetrics;
import org.apache.spark.serializer.Serializer;
import org.apache.spark.serializer.SerializerInstance;
import org.apache.spark.storage.*;
import org.apache.spark.util.Utils;

/**
 * This class implements sort-based shuffle's hash-style shuffle fallback path. This write path
 * writes incoming records to separate files, one file per reduce partition, then concatenates these
 * per-partition files to form a single output file, regions of which are served to reducers.
 * Records are not buffered in memory. This is essentially identical to
 * {@link org.apache.spark.shuffle.hash.HashShuffleWriter}, except that it writes output in a format
 * that can be served / consumed via {@link org.apache.spark.shuffle.IndexShuffleBlockResolver}.
 * <p>
 * This write path is inefficient for shuffles with large numbers of reduce partitions because it
 * simultaneously opens separate serializers and file streams for all partitions. As a result,
 * {@link SortShuffleManager} only selects this write path when
 * <ul>
 *   <li>no Ordering is specified,</li>
 *   <li>no Aggregator is specified, and</li>
 *   <li>the number of partitions is less than
 *     <code>spark.shuffle.sort.bypassMergeThreshold</code>.</li>
 * </ul>
 *
 * This code used to be part of {@link org.apache.spark.util.collection.ExternalSorter} but was
 * refactored into its own class in order to reduce code complexity; see SPARK-7855 for details.
 * <p>
 * There have been proposals to completely remove this code path; see SPARK-6026 for details.
 */
final class BypassMergeSortShuffleWriter<K, V> implements SortShuffleFileWriter<K, V> {

  private final Logger logger = LoggerFactory.getLogger(BypassMergeSortShuffleWriter.class);

  private final int fileBufferSize;
  private final boolean transferToEnabled;
  private final int numPartitions;
  private final BlockManager blockManager;
  private final Partitioner partitioner;
  private final ShuffleWriteMetrics writeMetrics;
  private final Serializer serializer;

  /** Array of file writers, one for each partition */
  private BlockObjectWriter[] partitionWriters;

  public BypassMergeSortShuffleWriter(
      SparkConf conf,
      BlockManager blockManager,
      Partitioner partitioner,
      ShuffleWriteMetrics writeMetrics,
      Serializer serializer) {
    // Use getSizeAsKb (not bytes) to maintain backwards compatibility if no units are provided
    this.fileBufferSize = (int) conf.getSizeAsKb("spark.shuffle.file.buffer", "32k") * 1024;
    this.transferToEnabled = conf.getBoolean("spark.file.transferTo", true);
    this.numPartitions = partitioner.numPartitions();
    this.blockManager = blockManager;
    this.partitioner = partitioner;
    this.writeMetrics = writeMetrics;
    this.serializer = serializer;
  }

  @Override
  public void insertAll(Iterator<Product2<K, V>> records) throws IOException {
    assert (partitionWriters == null);
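    // A no-op for empty input: partitionWriters stays null, which writePartitionedFile()
    // treats as "every partition is empty".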
    if (!records.hasNext()) {
      return;
    }
    final SerializerInstance serInstance = serializer.newInstance();
    final long openStartTime = System.nanoTime();
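    // Open one disk writer per reduce partition, each backed by its own temporary shuffle
    // block and file.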
    partitionWriters = new BlockObjectWriter[numPartitions];
    for (int i = 0; i < numPartitions; i++) {
      final Tuple2<TempShuffleBlockId, File> tempShuffleBlockIdPlusFile =
        blockManager.diskBlockManager().createTempShuffleBlock();
      final File file = tempShuffleBlockIdPlusFile._2();
      final BlockId blockId = tempShuffleBlockIdPlusFile._1();
      partitionWriters[i] =
        blockManager.getDiskWriter(blockId, file, serInstance, fileBufferSize, writeMetrics).open();
    }
    // Creating the file to write to and creating a disk writer both involve interacting with
    // the disk, and can take a long time in aggregate when we open many files, so should be
    // included in the shuffle write time.
    writeMetrics.incShuffleWriteTime(System.nanoTime() - openStartTime);

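    // Route each record to the writer for its key's target partition; no sorting or
    // in-memory buffering happens on this path.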
    while (records.hasNext()) {
      final Product2<K, V> record = records.next();
      final K key = record._1();
      partitionWriters[partitioner.getPartition(key)].write(key, record._2());
    }

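    // Flush and commit every per-partition file; after this, each writer's fileSegment()
    // reflects that partition's final contents.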
    for (BlockObjectWriter writer : partitionWriters) {
      writer.commitAndClose();
    }
  }

  @Override
  public long[] writePartitionedFile(
      BlockId blockId,
      TaskContext context,
      File outputFile) throws IOException {
    // Track location of the partition starts in the output file
    final long[] lengths = new long[numPartitions];
    if (partitionWriters == null) {
      // We were passed an empty iterator
      return lengths;
    }

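    // Concatenate the per-partition files into the single output file, in partition order;
    // the returned lengths let IndexShuffleBlockResolver locate each partition's region.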
    final FileOutputStream out = new FileOutputStream(outputFile, true);
    final long writeStartTime = System.nanoTime();
    boolean threwException = true;
    try {
      for (int i = 0; i < numPartitions; i++) {
        final FileInputStream in = new FileInputStream(partitionWriters[i].fileSegment().file());
        boolean copyThrewException = true;
        try {
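          // copyStream leaves both streams open (closeStreams = false) and can use NIO
          // transferTo for a zero-copy transfer when transferToEnabled is set.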
          lengths[i] = Utils.copyStream(in, out, false, transferToEnabled);
          copyThrewException = false;
        } finally {
          Closeables.close(in, copyThrewException);
        }
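        // This partition's bytes now live in outputFile, so the temporary file can go.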
        if (!blockManager.diskBlockManager().getFile(partitionWriters[i].blockId()).delete()) {
          logger.error("Unable to delete file for partition {}", i);
        }
      }
      threwException = false;
    } finally {
      Closeables.close(out, threwException);
      writeMetrics.incShuffleWriteTime(System.nanoTime() - writeStartTime);
    }
    partitionWriters = null;
    return lengths;
  }

  @Override
  public void stop() throws IOException {
    if (partitionWriters != null) {
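      // partitionWriters is still non-null only if writePartitionedFile() never completed
      // (e.g. the task failed), so revert any partial writes and delete the temporary files.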
      try {
        final DiskBlockManager diskBlockManager = blockManager.diskBlockManager();
        for (BlockObjectWriter writer : partitionWriters) {
          // This method explicitly does _not_ throw exceptions:
          writer.revertPartialWritesAndClose();
          if (!diskBlockManager.getFile(writer.blockId()).delete()) {
            logger.error("Error while deleting file for block {}", writer.blockId());
          }
        }
      } finally {
        partitionWriters = null;
      }
    }
  }
}