Skip to content

Commit 3824ce3

Browse files
committed
[SPARK-2808][Streaming][Kafka] naming / comments per tdas
1 parent 61b3464 commit 3824ce3

File tree

2 files changed

+9
-8
lines changed

2 files changed

+9
-8
lines changed

external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -233,9 +233,9 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
233233
def getConsumerOffsets(
234234
groupId: String,
235235
topicAndPartitions: Set[TopicAndPartition],
236-
versionId: Short
236+
consumerApiVersion: Short
237237
): Either[Err, Map[TopicAndPartition, Long]] = {
238-
getConsumerOffsetMetadata(groupId, topicAndPartitions, versionId).right.map { r =>
238+
getConsumerOffsetMetadata(groupId, topicAndPartitions, consumerApiVersion).right.map { r =>
239239
r.map { kv =>
240240
kv._1 -> kv._2.offset
241241
}
@@ -252,10 +252,10 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
252252
def getConsumerOffsetMetadata(
253253
groupId: String,
254254
topicAndPartitions: Set[TopicAndPartition],
255-
versionId: Short
255+
consumerApiVersion: Short
256256
): Either[Err, Map[TopicAndPartition, OffsetMetadataAndError]] = {
257257
var result = Map[TopicAndPartition, OffsetMetadataAndError]()
258-
val req = OffsetFetchRequest(groupId, topicAndPartitions.toSeq, versionId)
258+
val req = OffsetFetchRequest(groupId, topicAndPartitions.toSeq, consumerApiVersion)
259259
val errs = new Err
260260
withBrokers(Random.shuffle(config.seedBrokers), errs) { consumer =>
261261
val resp = consumer.fetchOffsets(req)
@@ -289,12 +289,12 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
289289
def setConsumerOffsets(
290290
groupId: String,
291291
offsets: Map[TopicAndPartition, Long],
292-
versionId: Short
292+
consumerApiVersion: Short
293293
): Either[Err, Map[TopicAndPartition, Short]] = {
294294
val meta = offsets.map { kv =>
295295
kv._1 -> OffsetAndMetadata(kv._2)
296296
}
297-
setConsumerOffsetMetadata(groupId, meta, versionId)
297+
setConsumerOffsetMetadata(groupId, meta, consumerApiVersion)
298298
}
299299

300300
/** Requires Kafka >= 0.8.1.1 */
@@ -307,10 +307,10 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
307307
def setConsumerOffsetMetadata(
308308
groupId: String,
309309
metadata: Map[TopicAndPartition, OffsetAndMetadata],
310-
versionId: Short
310+
consumerApiVersion: Short
311311
): Either[Err, Map[TopicAndPartition, Short]] = {
312312
var result = Map[TopicAndPartition, Short]()
313-
val req = OffsetCommitRequest(groupId, metadata, versionId)
313+
val req = OffsetCommitRequest(groupId, metadata, consumerApiVersion)
314314
val errs = new Err
315315
val topicAndPartitions = metadata.keySet
316316
withBrokers(Random.shuffle(config.seedBrokers), errs) { consumer =>

external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaTestUtils.scala

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -229,6 +229,7 @@ private class KafkaTestUtils extends Logging {
229229
tryAgain(1)
230230
}
231231

232+
/** wait until the leader offset for the given topic / partition equals the specified offset */
232233
def waitUntilLeaderOffset(
233234
kc: KafkaCluster,
234235
topic: String,

0 commit comments

Comments (0)