
Commit 8b3aca5

Style fixes
Author: Ilya Ganelin (committed)
1 parent 864d603 · commit 8b3aca5

File tree

3 files changed: +32 −32 lines changed

core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala

Lines changed: 18 additions & 18 deletions
@@ -86,9 +86,9 @@ import org.apache.spark.storage.{BlockId, BlockObjectWriter}
  * - Users are expected to call stop() at the end to delete all the intermediate files.
  */
 private[spark] abstract class ExternalSorter[K, V, C](
-    partitioner: Option[Partitioner] = None,
-    ordering: Option[Ordering[K]] = None,
-    serializer: Option[Serializer] = None)
+    partitioner: Option[Partitioner] = None,
+    ordering: Option[Ordering[K]] = None,
+    serializer: Option[Serializer] = None)
   extends Logging
   with Spillable[WritablePartitionedPairCollection[K, C]]
   with SortShuffleFileWriter[K, V] {
@@ -324,10 +324,10 @@ private[spark] abstract class ExternalSorter[K, V, C](
    * they're not), we still merge them by doing equality tests for all keys that compare as equal.
    */
   protected def mergeWithAggregation(
-      iterators: Seq[Iterator[Product2[K, C]]],
-      mergeCombiners: (C, C) => C,
-      comparator: Comparator[K],
-      totalOrder: Boolean): Iterator[Product2[K, C]] =
+      iterators: Seq[Iterator[Product2[K, C]]],
+      mergeCombiners: (C, C) => C,
+      comparator: Comparator[K],
+      totalOrder: Boolean): Iterator[Product2[K, C]] =
   {
     if (!totalOrder) {
       // We only have a partial ordering, e.g. comparing the keys by hash code, which means that
@@ -558,8 +558,8 @@ private[spark] abstract class ExternalSorter[K, V, C](
   @VisibleForTesting
   def partitionedIterator: Iterator[(Int, Iterator[Product2[K, C]])]

-  protected def partitionedIterator(collection: WritablePartitionedPairCollection[K, C]):
-      Iterator[(Int, Iterator[Product2[K, C]])] = {
+  protected def partitionedIterator(collection: WritablePartitionedPairCollection[K, C])
+    : Iterator[(Int, Iterator[Product2[K, C]])] = {
     if (spills.isEmpty) {
       // Special case: if we have only in-memory data, we don't need to merge streams, and perhaps
       // we don't even need to sort by anything other than partition ID
@@ -592,15 +592,15 @@ private[spark] abstract class ExternalSorter[K, V, C](
    * This interface abstracts away aggregator dependence.
    */
   override def writePartitionedFile(
-      blockId: BlockId,
-      context: TaskContext,
-      outputFile: File): Array[Long]
+      blockId: BlockId,
+      context: TaskContext,
+      outputFile: File): Array[Long]

   protected def writePartitionedFile(
-      blockId: BlockId,
-      context: TaskContext,
-      outputFile: File,
-      collection: WritablePartitionedPairCollection[K, C]): Array[Long] = {
+      blockId: BlockId,
+      context: TaskContext,
+      outputFile: File,
+      collection: WritablePartitionedPairCollection[K, C]): Array[Long] = {

     // Track location of each range in the output file
     val lengths = new Array[Long](numPartitions)
@@ -664,8 +664,8 @@ private[spark] abstract class ExternalSorter[K, V, C](
    * stream, assuming this partition is the next one to be read. Used to make it easier to return
    * partitioned iterators from our in-memory collection.
    */
-  protected[this] class IteratorForPartition(partitionId: Int, data: BufferedIterator[((Int, K), C)])
-    extends Iterator[Product2[K, C]]
+  protected[this] class IteratorForPartition(partitionId: Int,
+      data: BufferedIterator[((Int, K), C)]) extends Iterator[Product2[K, C]]
   {
     override def hasNext: Boolean = data.hasNext && data.head._1._1 == partitionId

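Taken together, the ExternalSorter.scala hunks keep the public writePartitionedFile and partitionedIterator abstract while protected overloads that accept the in-memory collection carry the shared logic. The following is a minimal, hypothetical Scala sketch of that shape, with placeholder names and simplified types rather than the actual Spark code:

import java.io.File

// Placeholder sketch of the abstract-base / protected-overload pattern seen above.
// SorterBase, numPartitions, and the Iterator-based "collection" are simplifications.
abstract class SorterBase[K, C] {
  protected def numPartitions: Int

  // Subclasses implement the public entry point with their own backing collection...
  def writePartitionedFile(outputFile: File): Array[Long]

  // ...and delegate to this shared helper, which does the per-partition bookkeeping.
  protected def writePartitionedFile(
      outputFile: File,
      collection: Iterator[((Int, K), C)]): Array[Long] = {
    // One slot per partition, analogous to `val lengths = new Array[Long](numPartitions)` above.
    val lengths = new Array[Long](numPartitions)
    collection.foreach { case ((partition, _), _) =>
      lengths(partition) += 1L // stand-in for the real bytes-written accounting
    }
    lengths
  }
}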
core/src/main/scala/org/apache/spark/util/collection/ExternalSorterAgg.scala

Lines changed: 8 additions & 8 deletions
@@ -82,11 +82,11 @@ import org.apache.spark.storage.BlockId
  * - Users are expected to call stop() at the end to delete all the intermediate files.
  */
 private[spark] class ExternalSorterAgg[K, V, C](
-    aggregator: Aggregator[K, V, C],
-    partitioner: Option[Partitioner] = None,
-    ordering: Option[Ordering[K]] = None,
-    serializer: Option[Serializer] = None)
-  extends ExternalSorter[K,V,C]
+    aggregator: Aggregator[K, V, C],
+    partitioner: Option[Partitioner] = None,
+    ordering: Option[Ordering[K]] = None,
+    serializer: Option[Serializer] = None)
+  extends ExternalSorter[K, V, C]
   with Spillable[WritablePartitionedPairCollection[K, C]]
   with SortShuffleFileWriter[K, V] {

@@ -165,9 +165,9 @@ private[spark] class ExternalSorterAgg[K, V, C](
    * @return array of lengths, in bytes, of each partition of the file (used by map output tracker)
    */
   override def writePartitionedFile(
-      blockId: BlockId,
-      context: TaskContext,
-      outputFile: File): Array[Long] = {
+      blockId: BlockId,
+      context: TaskContext,
+      outputFile: File): Array[Long] = {
     writePartitionedFile(blockId, context, outputFile, map)
   }
 }
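The override above is a thin delegation: the aggregating subclass hands its own in-memory map to the shared helper. Continuing the hypothetical sketch from the previous file (the mutable map and insert method below are stand-ins, not the collection the real class maintains):

import java.io.File
import scala.collection.mutable

// Sketch only: an aggregating variant that combines values per (partition, key) on insert.
class AggSorterSketch[K, C](parts: Int, mergeCombiners: (C, C) => C) extends SorterBase[K, C] {
  private val map = mutable.Map.empty[(Int, K), C]

  protected def numPartitions: Int = parts

  def insert(partition: Int, key: K, value: C): Unit = {
    val k = (partition, key)
    map(k) = map.get(k) match {
      case Some(existing) => mergeCombiners(existing, value) // combine eagerly on insert
      case None           => value
    }
  }

  // Mirrors the diff: delegate to the shared writer with this subclass's collection.
  override def writePartitionedFile(outputFile: File): Array[Long] =
    writePartitionedFile(outputFile, map.iterator)
}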

core/src/main/scala/org/apache/spark/util/collection/ExternalSorterNoAgg.scala

Lines changed: 6 additions & 6 deletions
@@ -81,9 +81,9 @@ import org.apache.spark.storage.BlockId
  * - Users are expected to call stop() at the end to delete all the intermediate files.
  */
 private[spark] class ExternalSorterNoAgg[K, V, C](
-    partitioner: Option[Partitioner] = None,
-    ordering: Option[Ordering[K]] = None,
-    serializer: Option[Serializer] = None)
+    partitioner: Option[Partitioner] = None,
+    ordering: Option[Ordering[K]] = None,
+    serializer: Option[Serializer] = None)
   extends ExternalSorter[K, V, C](partitioner, ordering, serializer)
   with Spillable[WritablePartitionedPairCollection[K, C]]
   with SortShuffleFileWriter[K, V] {
@@ -164,9 +164,9 @@ private[spark] class ExternalSorterNoAgg[K, V, C](
    * @return array of lengths, in bytes, of each partition of the file (used by map output tracker)
    */
   override def writePartitionedFile(
-      blockId: BlockId,
-      context: TaskContext,
-      outputFile: File): Array[Long] = {
+      blockId: BlockId,
+      context: TaskContext,
+      outputFile: File): Array[Long] = {

     writePartitionedFile(blockId, context, outputFile, buffer)
   }
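ExternalSorterNoAgg makes the same delegation with a plain buffer of records instead of a combining map. To round out the hypothetical sketch, the non-aggregating variant and an illustrative (not Spark's actual) selection rule might look like this:

import java.io.File
import scala.collection.mutable

// Sketch only: the non-aggregating variant buffers records as-is and defers any merging.
class NoAggSorterSketch[K, C](parts: Int) extends SorterBase[K, C] {
  private val buffer = mutable.ArrayBuffer.empty[((Int, K), C)]

  protected def numPartitions: Int = parts

  def insert(partition: Int, key: K, value: C): Unit = {
    val record = ((partition, key), value)
    buffer += record // keep every record; nothing is combined
  }

  override def writePartitionedFile(outputFile: File): Array[Long] =
    writePartitionedFile(outputFile, buffer.iterator)
}

object SorterSketchDemo {
  // Illustrative choice: combine while buffering only when a combiner is supplied.
  def chooseSorter[K, C](parts: Int, combiner: Option[(C, C) => C]): SorterBase[K, C] =
    combiner match {
      case Some(mc) => new AggSorterSketch[K, C](parts, mc)
      case None     => new NoAggSorterSketch[K, C](parts)
    }
}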
