Commit 038314c

[SPARK-3850] Trim trailing spaces for core.
1 parent f7fe9e4 commit 038314c

47 files changed: +117 / -117 lines
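
The change itself is mechanical whitespace cleanup. As a hedged illustration only, here is a minimal Scala sketch of how trailing spaces could be stripped in bulk from the core sources (a hypothetical helper, not the tool used to produce this commit):

import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Path, Paths}
import scala.collection.JavaConverters._

object TrimTrailingSpaces {
  // Rewrite a single file with trailing spaces and tabs removed from every line.
  def trimFile(path: Path): Unit = {
    val lines = Files.readAllLines(path, StandardCharsets.UTF_8).asScala
    val trimmed = lines.map(_.replaceAll("""[ \t]+$""", ""))
    Files.write(path, trimmed.asJava, StandardCharsets.UTF_8)
  }

  def main(args: Array[String]): Unit = {
    // Walk core/src and trim every Scala source file in place.
    Files.walk(Paths.get("core/src")).iterator().asScala
      .filter(_.toString.endsWith(".scala"))
      .foreach(trimFile)
  }
}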

core/src/main/scala/org/apache/spark/Aggregator.scala

Lines changed: 2 additions & 2 deletions

@@ -34,8 +34,8 @@ case class Aggregator[K, V, C] (
     mergeValue: (C, V) => C,
     mergeCombiners: (C, C) => C) {
 
-  // When spilling is enabled sorting will happen externally, but not necessarily with an 
-  // ExternalSorter. 
+  // When spilling is enabled sorting will happen externally, but not necessarily with an
+  // ExternalSorter.
   private val isSpillEnabled = SparkEnv.get.conf.getBoolean("spark.shuffle.spill", true)
 
   @deprecated("use combineValuesByKey with TaskContext argument", "0.9.0")

core/src/main/scala/org/apache/spark/FutureAction.scala

Lines changed: 1 addition & 1 deletion

@@ -150,7 +150,7 @@ class SimpleFutureAction[T] private[spark](jobWaiter: JobWaiter[_], resultFunc:
  }

  override def isCompleted: Boolean = jobWaiter.jobFinished
-
+
  override def isCancelled: Boolean = _cancelled

  override def value: Option[Try[T]] = {

core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala

Lines changed: 10 additions & 10 deletions

@@ -29,7 +29,7 @@ import org.apache.spark.util.{ThreadUtils, Utils}
 
 /**
  * A heartbeat from executors to the driver. This is a shared message used by several internal
- * components to convey liveness or execution information for in-progress tasks. It will also 
+ * components to convey liveness or execution information for in-progress tasks. It will also
  * expire the hosts that have not heartbeated for more than spark.network.timeout.
  */
 private[spark] case class Heartbeat(
@@ -43,8 +43,8 @@ private[spark] case class Heartbeat(
  */
 private[spark] case object TaskSchedulerIsSet
 
-private[spark] case object ExpireDeadHosts 
-
+private[spark] case object ExpireDeadHosts
+
 private[spark] case class HeartbeatResponse(reregisterBlockManager: Boolean)
 
 /**
@@ -62,18 +62,18 @@ private[spark] class HeartbeatReceiver(sc: SparkContext)
 
   // "spark.network.timeout" uses "seconds", while `spark.storage.blockManagerSlaveTimeoutMs` uses
   // "milliseconds"
-  private val slaveTimeoutMs = 
+  private val slaveTimeoutMs =
     sc.conf.getTimeAsMs("spark.storage.blockManagerSlaveTimeoutMs", "120s")
-  private val executorTimeoutMs = 
+  private val executorTimeoutMs =
     sc.conf.getTimeAsSeconds("spark.network.timeout", s"${slaveTimeoutMs}ms") * 1000
-
+
   // "spark.network.timeoutInterval" uses "seconds", while
   // "spark.storage.blockManagerTimeoutIntervalMs" uses "milliseconds"
-  private val timeoutIntervalMs = 
+  private val timeoutIntervalMs =
     sc.conf.getTimeAsMs("spark.storage.blockManagerTimeoutIntervalMs", "60s")
-  private val checkTimeoutIntervalMs = 
+  private val checkTimeoutIntervalMs =
     sc.conf.getTimeAsSeconds("spark.network.timeoutInterval", s"${timeoutIntervalMs}ms") * 1000
-
+
   private var timeoutCheckingTask: ScheduledFuture[_] = null
 
   // "eventLoopThread" is used to run some pretty fast actions. The actions running in it should not
@@ -140,7 +140,7 @@ private[spark] class HeartbeatReceiver(sc: SparkContext)
       }
     }
   }
-
+
   override def onStop(): Unit = {
     if (timeoutCheckingTask != null) {
       timeoutCheckingTask.cancel(true)
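
For reference, the two timeout getters touched above parse duration strings with unit suffixes; a small, hedged illustration of how the defaults visible in this hunk resolve (assuming neither key is explicitly set):

val conf = new org.apache.spark.SparkConf()
// "120s" is parsed to 120000 milliseconds when the key is unset.
val slaveTimeoutMs = conf.getTimeAsMs("spark.storage.blockManagerSlaveTimeoutMs", "120s")
// The network timeout falls back to the slave timeout expressed in ms, then is converted back to ms.
val executorTimeoutMs = conf.getTimeAsSeconds("spark.network.timeout", s"${slaveTimeoutMs}ms") * 1000
// Both values end up as 120000L here.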

core/src/main/scala/org/apache/spark/HttpFileServer.scala

Lines changed: 2 additions & 2 deletions

@@ -50,8 +50,8 @@ private[spark] class HttpFileServer(
 
   def stop() {
     httpServer.stop()
-
-    // If we only stop sc, but the driver process still run as a services then we need to delete 
+
+    // If we only stop sc, but the driver process still run as a services then we need to delete
     // the tmp dir, if not, it will create too many tmp dirs
    try {
      Utils.deleteRecursively(baseDir)

core/src/main/scala/org/apache/spark/SparkConf.scala

Lines changed: 6 additions & 6 deletions

@@ -227,7 +227,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging {
   def getSizeAsBytes(key: String, defaultValue: String): Long = {
     Utils.byteStringAsBytes(get(key, defaultValue))
   }
-
+
   /**
    * Get a size parameter as Kibibytes; throws a NoSuchElementException if it's not set. If no
    * suffix is provided then Kibibytes are assumed.
@@ -244,7 +244,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging {
   def getSizeAsKb(key: String, defaultValue: String): Long = {
     Utils.byteStringAsKb(get(key, defaultValue))
   }
-
+
   /**
    * Get a size parameter as Mebibytes; throws a NoSuchElementException if it's not set. If no
    * suffix is provided then Mebibytes are assumed.
@@ -261,7 +261,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging {
   def getSizeAsMb(key: String, defaultValue: String): Long = {
     Utils.byteStringAsMb(get(key, defaultValue))
   }
-
+
   /**
    * Get a size parameter as Gibibytes; throws a NoSuchElementException if it's not set. If no
    * suffix is provided then Gibibytes are assumed.
@@ -278,7 +278,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging {
   def getSizeAsGb(key: String, defaultValue: String): Long = {
     Utils.byteStringAsGb(get(key, defaultValue))
   }
-
+
   /** Get a parameter as an Option */
   def getOption(key: String): Option[String] = {
     Option(settings.get(key)).orElse(getDeprecatedConfig(key, this))
@@ -480,7 +480,7 @@ private[spark] object SparkConf extends Logging {
         "spark.kryoserializer.buffer.mb was previously specified as '0.064'. Fractional values " +
         "are no longer accepted. To specify the equivalent now, one may use '64k'.")
     )
-
+
     Map(configs.map { cfg => (cfg.key -> cfg) } : _*)
   }
 
@@ -508,7 +508,7 @@ private[spark] object SparkConf extends Logging {
     "spark.reducer.maxSizeInFlight" -> Seq(
       AlternateConfig("spark.reducer.maxMbInFlight", "1.4")),
     "spark.kryoserializer.buffer" ->
-      Seq(AlternateConfig("spark.kryoserializer.buffer.mb", "1.4", 
+      Seq(AlternateConfig("spark.kryoserializer.buffer.mb", "1.4",
        translation = s => s"${(s.toDouble * 1000).toInt}k")),
     "spark.kryoserializer.buffer.max" -> Seq(
       AlternateConfig("spark.kryoserializer.buffer.max.mb", "1.4")),

core/src/main/scala/org/apache/spark/TestUtils.scala

Lines changed: 1 addition & 1 deletion

@@ -51,7 +51,7 @@ private[spark] object TestUtils {
       classpathUrls: Seq[URL] = Seq()): URL = {
     val tempDir = Utils.createTempDir()
     val files1 = for (name <- classNames) yield {
-      createCompiledClass(name, tempDir, toStringValue, classpathUrls = classpathUrls) 
+      createCompiledClass(name, tempDir, toStringValue, classpathUrls = classpathUrls)
     }
     val files2 = for ((childName, baseName) <- classNamesWithBase) yield {
       createCompiledClass(childName, tempDir, toStringValue, baseName, classpathUrls)

core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala

Lines changed: 1 addition & 1 deletion

@@ -137,7 +137,7 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double])
    */
   def sample(withReplacement: Boolean, fraction: JDouble): JavaDoubleRDD =
     sample(withReplacement, fraction, Utils.random.nextLong)
-
+
   /**
    * Return a sampled subset of this RDD.
    */

core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala

Lines changed: 3 additions & 3 deletions

@@ -101,18 +101,18 @@ class JavaRDD[T](val rdd: RDD[T])(implicit val classTag: ClassTag[T])
 
   /**
    * Return a sampled subset of this RDD.
-   * 
+   *
    * @param withReplacement can elements be sampled multiple times (replaced when sampled out)
    * @param fraction expected size of the sample as a fraction of this RDD's size
    *  without replacement: probability that each element is chosen; fraction must be [0, 1]
    *  with replacement: expected number of times each element is chosen; fraction must be >= 0
    */
   def sample(withReplacement: Boolean, fraction: Double): JavaRDD[T] =
     sample(withReplacement, fraction, Utils.random.nextLong)
-
+
   /**
    * Return a sampled subset of this RDD.
-   * 
+   *
    * @param withReplacement can elements be sampled multiple times (replaced when sampled out)
    * @param fraction expected size of the sample as a fraction of this RDD's size
    *  without replacement: probability that each element is chosen; fraction must be [0, 1]
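
The two-argument overload shown here simply supplies a random seed; a brief, hedged usage sketch (the rdd value is assumed to exist already):

// Given an existing JavaRDD[String] named rdd:
val sampled = rdd.sample(false, 0.1)       // ~10% sample without replacement, seed chosen via Utils.random.nextLong
val seeded  = rdd.sample(false, 0.1, 42L)  // same sample, reproducible with an explicit seed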

core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala

Lines changed: 2 additions & 2 deletions

@@ -797,10 +797,10 @@ private class PythonAccumulatorParam(@transient serverHost: String, serverPort:
 
   val bufferSize = SparkEnv.get.conf.getInt("spark.buffer.size", 65536)
 
-  /** 
+  /**
    * We try to reuse a single Socket to transfer accumulator updates, as they are all added
    * by the DAGScheduler's single-threaded actor anyway.
-   */ 
+   */
   @transient var socket: Socket = _
 
   def openSocket(): Socket = synchronized {

core/src/main/scala/org/apache/spark/api/r/RBackend.scala

Lines changed: 2 additions & 2 deletions

@@ -44,11 +44,11 @@ private[spark] class RBackend {
     bossGroup = new NioEventLoopGroup(2)
     val workerGroup = bossGroup
     val handler = new RBackendHandler(this)
-
+
     bootstrap = new ServerBootstrap()
       .group(bossGroup, workerGroup)
       .channel(classOf[NioServerSocketChannel])
-
+
     bootstrap.childHandler(new ChannelInitializer[SocketChannel]() {
       def initChannel(ch: SocketChannel): Unit = {
         ch.pipeline()
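
For context, a ServerBootstrap built this way is typically completed by binding to an address and waiting for the channel; a hedged sketch of that remaining step with standard Netty calls (the address and port handling are illustrative, not this file's exact code):

// Illustrative continuation after the childHandler is installed:
val channelFuture = bootstrap.bind(new java.net.InetSocketAddress("localhost", 0))
channelFuture.syncUninterruptibly()
// Recover the ephemeral port the server actually bound to.
val boundPort = channelFuture.channel().localAddress()
  .asInstanceOf[java.net.InetSocketAddress].getPort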
