Commits (22)
e768164  #2808 update kafka to version 0.8.2 (Dec 7, 2014)
d9dc2bc  Merge remote-tracking branch 'upstream/master' into wip-2808-kafka-0.… (Dec 23, 2014)
2e67c66  #SPARK-2808 Update to Kafka 0.8.2.0 GA from beta. (Feb 5, 2015)
6953429  [SPARK-2808][Streaming][Kafka] update kafka to 0.8.2 (koeninger, Feb 11, 2015)
77de6c2  Merge branch 'master' into wip-2808-kafka-0.8.2-upgrade (koeninger, Mar 18, 2015)
407382e  [SPARK-2808][Streaming][Kafka] update kafka to 0.8.2.1 (koeninger, Mar 18, 2015)
ed02d2c  [SPARK-2808][Streaming][Kafka] move default argument for api version … (koeninger, Apr 15, 2015)
1d10751  Merge branch 'master' into wip-2808-kafka-0.8.2-upgrade (koeninger, Apr 27, 2015)
c70ee43  [SPARK-2808][Streaming][Kafka] add more asserts to test, try to figur… (koeninger, Apr 28, 2015)
9edab4c  [SPARK-2808][Streaming][Kafka] more shots in the dark on jenkins fail… (koeninger, Apr 28, 2015)
af6f3ec  [SPARK-2808][Streaming][Kafka] delay test until latest leader offset … (koeninger, Apr 29, 2015)
61b3464  [SPARK-2808][Streaming][Kafka] delay for second send in boundary cond… (koeninger, Apr 29, 2015)
3824ce3  [SPARK-2808][Streaming][Kafka] naming / comments per tdas (koeninger, Apr 29, 2015)
2b92d3f  [SPARK-2808][Streaming][Kafka] wait for leader offsets in the java te… (koeninger, Apr 29, 2015)
2712649  [SPARK-2808][Streaming][Kafka] add more logging to python test, see w… (koeninger, Apr 29, 2015)
115aeee  Merge branch 'master' into wip-2808-kafka-0.8.2-upgrade (koeninger, Apr 29, 2015)
4c4557f  [SPARK-2808][Streaming][Kafka] add even more logging to python test (koeninger, Apr 30, 2015)
1d896e2  [SPARK-2808][Streaming][Kafka] add even even more logging to python test (koeninger, Apr 30, 2015)
bb0cfe2  Changes to debug flaky streaming tests. (tdas, May 1, 2015)
ae12eb2  Enable only kafka streaming test (tdas, May 1, 2015)
4bb7e40  Fix goof up (tdas, May 1, 2015)
9804030  removed sleeps. (tdas, May 1, 2015)
Merge branch 'master' into wip-2808-kafka-0.8.2-upgrade
Conflicts:
	external/kafka/src/test/scala/org/apache/spark/streaming/kafka/KafkaStreamSuite.scala
koeninger committed Apr 27, 2015
commit 1d10751fb39a418cfd11a3d2e2b90f53cb71fdc5
@@ -230,7 +230,13 @@ private class KafkaTestUtils extends Logging {
   private def waitUntilMetadataIsPropagated(topic: String, partition: Int): Unit = {
     eventually(Time(10000), Time(100)) {
       assert(
-        server.apis.metadataCache.containsTopicAndPartition(topic, partition),
+        server.apis.metadataCache.getPartitionInfo(topic, partition) match {
+          case Some(partitionState) =>
+            // is valid broker id
+            partitionState.leaderIsrAndControllerEpoch.leaderAndIsr.leader >= 0
+          case _ =>
+            false
+        },
         s"Partition [$topic, $partition] metadata not propagated after timeout"
       )
     }
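
The hunk above is the crux of the 0.8.2 update to this helper: containsTopicAndPartition only checked that the metadata cache had an entry, while getPartitionInfo lets the test additionally require a non-negative leader id, i.e. an actually elected leader, before proceeding. Below is a minimal, self-contained sketch of the same poll-until-true pattern; the names RetryUtil and waitUntil are hypothetical and not part of the patch, which uses its own private eventually helper.

import scala.annotation.tailrec

object RetryUtil {
  /** Polls `condition` every `intervalMs` until it holds, or fails after `timeoutMs`. */
  def waitUntil(timeoutMs: Long, intervalMs: Long)(condition: => Boolean): Unit = {
    val deadline = System.currentTimeMillis() + timeoutMs

    @tailrec
    def loop(): Unit = {
      if (condition) {
        () // condition met, e.g. the partition now reports a live leader
      } else if (System.currentTimeMillis() >= deadline) {
        throw new AssertionError(s"condition not met after ${timeoutMs}ms")
      } else {
        Thread.sleep(intervalMs) // back off before polling again
        loop()
      }
    }
    loop()
  }
}

A caller would write something like RetryUtil.waitUntil(10000, 100) { leaderElected }, failing with a descriptive error instead of sleeping for a fixed interval and hoping the broker is ready.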
external/kafka/src/test/scala/org/apache/spark/streaming/kafka/KafkaStreamSuite.scala
@@ -39,159 +39,7 @@ class KafkaStreamSuite extends FunSuite with Eventually with BeforeAndAfterAll {
     kafkaTestUtils.setup()
   }

-  def brokerAddress: String = {
-    assert(brokerReady, "Kafka not setup yet or already torn down, cannot get broker address")
-    s"$brokerHost:$brokerPort"
-  }
-
-  def setupKafka() {
-    // Zookeeper server startup
-    zookeeper = new EmbeddedZookeeper(s"$zkHost:$zkPort")
-    // Get the actual zookeeper binding port
-    zkPort = zookeeper.actualPort
-    zkReady = true
-    logInfo("==================== Zookeeper Started ====================")
-
-    zkClient = new ZkClient(zkAddress, zkSessionTimeout, zkConnectionTimeout, ZKStringSerializer)
-    logInfo("==================== Zookeeper Client Created ====================")
-
-    // Kafka broker startup
-    var bindSuccess: Boolean = false
-    while(!bindSuccess) {
-      try {
-        brokerConf = new KafkaConfig(brokerConfig)
-        server = new KafkaServer(brokerConf)
-        server.startup()
-        logInfo("==================== Kafka Broker Started ====================")
-        bindSuccess = true
-      } catch {
-        case e: KafkaException =>
-          if (e.getMessage != null && e.getMessage.contains("Socket server failed to bind to")) {
-            brokerPort += 1
-          }
-        case e: Exception => throw new Exception("Kafka server create failed", e)
-      }
-    }
-
-    Thread.sleep(2000)
-    logInfo("==================== Kafka + Zookeeper Ready ====================")
-    brokerReady = true
-  }
-
-  def tearDownKafka() {
-    brokerReady = false
-    zkReady = false
-    if (producer != null) {
-      producer.close()
-      producer = null
-    }
-
-    if (server != null) {
-      server.shutdown()
-      server = null
-    }
-
-    brokerConf.logDirs.foreach { f => Utils.deleteRecursively(new File(f)) }
-
-    if (zkClient != null) {
-      zkClient.close()
-      zkClient = null
-    }
-
-    if (zookeeper != null) {
-      zookeeper.shutdown()
-      zookeeper = null
-    }
-  }
-
-  def createTopic(topic: String) {
-    AdminUtils.createTopic(zkClient, topic, 1, 1)
-    // wait until metadata is propagated
-    waitUntilMetadataIsPropagated(Seq(server), topic, 0)
-    logInfo(s"==================== Topic $topic Created ====================")
-  }
-
-  def sendMessages(topic: String, messageToFreq: Map[String, Int]) {
-    val messages = messageToFreq.flatMap { case (s, freq) => Seq.fill(freq)(s) }.toArray
-    sendMessages(topic, messages)
-  }
-
-  def sendMessages(topic: String, messages: Array[String]) {
-    producer = new Producer[String, String](new ProducerConfig(producerConfig))
-    producer.send(messages.map { new KeyedMessage[String, String](topic, _) }: _*)
-    producer.close()
-    logInfo(s"==================== Sent Messages: ${messages.mkString(", ")} ====================")
-  }
-
-  private def brokerConfig: Properties = {
-    val props = new Properties()
-    props.put("broker.id", "0")
-    props.put("host.name", "localhost")
-    props.put("port", brokerPort.toString)
-    props.put("log.dir", Utils.createTempDir().getAbsolutePath)
-    props.put("zookeeper.connect", zkAddress)
-    props.put("log.flush.interval.messages", "1")
-    props.put("replica.socket.timeout.ms", "1500")
-    props
-  }
-
-  private def producerConfig: Properties = {
-    val brokerAddr = brokerConf.hostName + ":" + brokerConf.port
-    val props = new Properties()
-    props.put("metadata.broker.list", brokerAddr)
-    props.put("serializer.class", classOf[StringEncoder].getName)
-    props
-  }
-
-  private def waitUntilMetadataIsPropagated(servers: Seq[KafkaServer], topic: String, partition: Int): Int = {
-    var leader: Int = -1
-    eventually(timeout(1000 milliseconds), interval(100 milliseconds)) {
-      assert(servers.forall { server =>
-        val partitionStateOpt = server.apis.metadataCache.getPartitionInfo(topic, partition)
-        partitionStateOpt match {
-          case Some(partitionState) =>
-            leader = partitionState.leaderIsrAndControllerEpoch.leaderAndIsr.leader
-            leader >= 0 // is valid broker id
-          case _ => false
-        }
-      }, s"Partition [$topic, $partition] metadata not propagated after timeout")
-    }
-    leader
-  }
-
-  class EmbeddedZookeeper(val zkConnect: String) {
-    val random = new Random()
-    val snapshotDir = Utils.createTempDir()
-    val logDir = Utils.createTempDir()
-
-    val zookeeper = new ZooKeeperServer(snapshotDir, logDir, 500)
-    val (ip, port) = {
-      val splits = zkConnect.split(":")
-      (splits(0), splits(1).toInt)
-    }
-    val factory = new NIOServerCnxnFactory()
-    factory.configure(new InetSocketAddress(ip, port), 16)
-    factory.startup(zookeeper)
-
-    val actualPort = factory.getLocalPort
-
-    def shutdown() {
-      factory.shutdown()
-      Utils.deleteRecursively(snapshotDir)
-      Utils.deleteRecursively(logDir)
-    }
-  }
-}
-
-
-class KafkaStreamSuite extends KafkaStreamSuiteBase with BeforeAndAfter {
-  var ssc: StreamingContext = _
-
-  before {
-    setupKafka()
-  }
-
-  after {
+  override def afterAll(): Unit = {
     if (ssc != null) {
       ssc.stop()
       ssc = null
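
The long removal above is the substance of the conflict resolution: the embedded ZooKeeper and Kafka broker lifecycle (setupKafka, tearDownKafka, topic creation, the producer helpers, EmbeddedZookeeper) moves out of the suite into the shared KafkaTestUtils, and the per-test before/after pair becomes a per-suite beforeAll/afterAll. Here is a self-contained sketch of that lifecycle pattern, with a hypothetical FakeKafkaFixture standing in for the real broker plumbing; FunSuite and BeforeAndAfterAll are the ScalaTest traits the new suite already mixes in.

import org.scalatest.{BeforeAndAfterAll, FunSuite}

// Hypothetical stand-in for KafkaTestUtils: setup() runs once before any
// test, teardown() once after all of them.
class FakeKafkaFixture {
  @volatile var ready: Boolean = false
  def setup(): Unit = { ready = true }      // would start ZooKeeper + broker
  def teardown(): Unit = { ready = false }  // would stop both and delete log dirs
}

class ExampleStreamSuite extends FunSuite with BeforeAndAfterAll {
  private var fixture: FakeKafkaFixture = _

  override def beforeAll(): Unit = {
    fixture = new FakeKafkaFixture
    fixture.setup()
  }

  override def afterAll(): Unit = {
    // mirror the diff's shutdown discipline: null-check, stop, then null out
    if (fixture != null) {
      fixture.teardown()
      fixture = null
    }
  }

  test("broker fixture is ready") {
    assert(fixture.ready)
  }
}

Starting the fixture once per suite rather than once per test is what makes the shared KafkaTestUtils refactor pay off: the broker boot cost, previously a bind-retry loop plus a two-second sleep, is amortized across every test in the class.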