Update the spark-avro dependency, which Databricks donated to the ASF as of Spark 2.4.0
sungjuly committed Jan 31, 2019
commit e9cacb1b32c12aed28ce15bac605afaec1a628c5
4 changes: 2 additions & 2 deletions project/SparkRedshiftBuild.scala
@@ -64,7 +64,7 @@ object SparkRedshiftBuild extends Build {
"com.eclipsesource.minimal-json" % "minimal-json" % "0.9.4",
// We require spark-avro, but avro-mapred must be provided to match Hadoop version.
// In most cases, avro-mapred will be provided as part of the Spark assembly JAR.
"com.databricks" %% "spark-avro" % "3.0.0",
"org.apache.spark" %% "spark-avro" % sparkVersion.value,
if (testHadoopVersion.value.startsWith("1")) {
"org.apache.avro" % "avro-mapred" % "1.7.7" % "provided" classifier "hadoop1" exclude("org.mortbay.jetty", "servlet-api")
} else {
@@ -118,7 +118,7 @@ object SparkRedshiftBuild extends Build {
"org.apache.spark" %% "spark-core" % testSparkVersion.value % "test" exclude("org.apache.hadoop", "hadoop-client") force(),
"org.apache.spark" %% "spark-sql" % testSparkVersion.value % "test" exclude("org.apache.hadoop", "hadoop-client") force(),
"org.apache.spark" %% "spark-hive" % testSparkVersion.value % "test" exclude("org.apache.hadoop", "hadoop-client") force(),
"com.databricks" %% "spark-avro" % testSparkAvroVersion.value % "test" exclude("org.apache.avro", "avro-mapred") force()
"org.apache.spark" %% "spark-avro" % testSparkVersion.value % "test" exclude("org.apache.avro", "avro-mapred") force()
),
// Although spark-avro declares its avro-mapred dependency as `provided`, its version of the
// dependency can still end up on the classpath during tests, which breaks the tests for
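For downstream builds, the same swap looks roughly like the sketch below. This is a minimal, assumed build.sbt excerpt rather than part of this PR; the hard-coded `sparkVersion` stands in for the `sparkVersion.value` setting the project's build derives.

```scala
// Minimal build.sbt sketch (assumed, not from this PR): spark-avro now
// resolves from Apache Spark coordinates instead of com.databricks.
val sparkVersion = "2.4.0" // assumption; this build uses sparkVersion.value

libraryDependencies ++= Seq(
  // Before this change: "com.databricks" %% "spark-avro" % "3.0.0"
  "org.apache.spark" %% "spark-avro" % sparkVersion,
  // avro-mapred stays "provided" so it matches the Hadoop version;
  // in most cases it ships inside the Spark assembly JAR.
  "org.apache.avro" % "avro-mapred" % "1.7.7" % "provided"
)
```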
@@ -282,7 +282,7 @@ private[redshift] class RedshiftWriter(
val writer = sqlContext.createDataFrame(convertedRows, convertedSchema).write
(tempFormat match {
case "AVRO" =>
- writer.format("com.databricks.spark.avro")
+ writer.format("avro")
case "CSV" =>
writer.format("csv")
.option("escape", "\"")
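Because the Avro source ships with Spark 2.4+ under the short name "avro", callers no longer pass the fully qualified com.databricks.spark.avro class name. A minimal usage sketch, assuming Spark 2.4+ with the spark-avro module on the classpath (app name, master, and paths are hypothetical):

```scala
import org.apache.spark.sql.SparkSession

// Sketch only: app name, master, and paths are hypothetical.
val spark = SparkSession.builder()
  .appName("AvroRoundTrip")
  .master("local[*]") // for local testing
  .getOrCreate()

// The short name "avro" replaces format("com.databricks.spark.avro").
val df = spark.read.format("avro").load("/tmp/events.avro")
df.write.format("avro").save("/tmp/events_copy.avro")
```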