diff --git a/sql/core/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister b/sql/core/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
index c0b8b270bab1..315a4be76d9d 100644
--- a/sql/core/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
+++ b/sql/core/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
@@ -1,5 +1,5 @@
 org.apache.spark.sql.execution.datasources.v2.csv.CSVDataSourceV2
-org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider
+org.apache.spark.sql.execution.datasources.jdbc.JDBCRelationProvider
 org.apache.spark.sql.execution.datasources.v2.json.JsonDataSourceV2
 org.apache.spark.sql.execution.datasources.noop.NoopDataSource
 org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
index 36e5eb33e1ba..04ebb0d6a8a2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
@@ -38,7 +38,7 @@ import org.apache.spark.sql.connector.catalog.TableProvider
 import org.apache.spark.sql.execution.SparkPlan
 import org.apache.spark.sql.execution.command.DataWritingCommand
 import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
-import org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider
+import org.apache.spark.sql.execution.datasources.jdbc.JDBCRelationProvider
 import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
 import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
 import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
@@ -582,7 +582,7 @@ object DataSource extends Logging {
 
   /** A map to maintain backward compatibility in case we move data sources around. */
   private val backwardCompatibilityMap: Map[String, String] = {
-    val jdbc = classOf[JdbcRelationProvider].getCanonicalName
+    val jdbc = classOf[JDBCRelationProvider].getCanonicalName
     val json = classOf[JsonFileFormat].getCanonicalName
     val parquet = classOf[ParquetFileFormat].getCanonicalName
     val csv = classOf[CSVFileFormat].getCanonicalName
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala
index e25ce53941ff..c0982dbaf567 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala
@@ -53,14 +53,14 @@ object JDBCRDD extends Logging {
     val url = options.url
     val table = options.tableOrQuery
     val dialect = JdbcDialects.get(url)
-    val conn: Connection = JdbcUtils.createConnectionFactory(options)()
+    val conn: Connection = JDBCUtils.createConnectionFactory(options)()
     try {
       val statement = conn.prepareStatement(dialect.getSchemaQuery(table))
       try {
         statement.setQueryTimeout(options.queryTimeout)
         val rs = statement.executeQuery()
         try {
-          JdbcUtils.getSchema(rs, dialect, alwaysNullable = true)
+          JDBCUtils.getSchema(rs, dialect, alwaysNullable = true)
         } finally {
           rs.close()
         }
@@ -158,7 +158,7 @@ object JDBCRDD extends Logging {
     val quotedColumns = requiredColumns.map(colName => dialect.quoteIdentifier(colName))
     new JDBCRDD(
       sc,
-      JdbcUtils.createConnectionFactory(options),
+      JDBCUtils.createConnectionFactory(options),
       pruneSchema(schema, requiredColumns),
       quotedColumns,
       filters,
@@ -302,7 +302,7 @@ private[jdbc] class JDBCRDD(
     stmt.setFetchSize(options.fetchSize)
     stmt.setQueryTimeout(options.queryTimeout)
     rs = stmt.executeQuery()
-    val rowsIterator = JdbcUtils.resultSetToSparkInternalRows(rs, schema, inputMetrics)
+    val rowsIterator = JDBCUtils.resultSetToSparkInternalRows(rs, schema, inputMetrics)
 
     CompletionIterator[InternalRow, Iterator[InternalRow]](
       new InterruptibleIterator(context, rowsIterator), close())
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRelation.scala
index 2f1ee0f23d45..d3b4cfd9355b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRelation.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRelation.scala
@@ -225,7 +225,7 @@ private[sql] object JDBCRelation extends Logging {
   def getSchema(resolver: Resolver, jdbcOptions: JDBCOptions): StructType = {
     val tableSchema = JDBCRDD.resolveTable(jdbcOptions)
     jdbcOptions.customSchema match {
-      case Some(customSchema) => JdbcUtils.getCustomSchema(
+      case Some(customSchema) => JDBCUtils.getCustomSchema(
         tableSchema, customSchema, resolver)
       case None => tableSchema
     }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcRelationProvider.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRelationProvider.scala
similarity index 90%
rename from sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcRelationProvider.scala
rename to sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRelationProvider.scala
index 314012feef4c..70153afd78dd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcRelationProvider.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRelationProvider.scala
@@ -18,10 +18,10 @@
 package org.apache.spark.sql.execution.datasources.jdbc
 
 import org.apache.spark.sql.{AnalysisException, DataFrame, SaveMode, SQLContext}
-import org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils._
+import org.apache.spark.sql.execution.datasources.jdbc.JDBCUtils._
 import org.apache.spark.sql.sources.{BaseRelation, CreatableRelationProvider, DataSourceRegister, RelationProvider}
 
-class JdbcRelationProvider extends CreatableRelationProvider
+class JDBCRelationProvider extends CreatableRelationProvider
   with RelationProvider with DataSourceRegister {
 
   override def shortName(): String = "jdbc"
@@ -45,16 +45,16 @@ class JdbcRelationProvider extends CreatableRelationProvider
     val options = new JdbcOptionsInWrite(parameters)
     val isCaseSensitive = sqlContext.conf.caseSensitiveAnalysis
 
-    val conn = JdbcUtils.createConnectionFactory(options)()
+    val conn = JDBCUtils.createConnectionFactory(options)()
     try {
-      val tableExists = JdbcUtils.tableExists(conn, options)
+      val tableExists = JDBCUtils.tableExists(conn, options)
       if (tableExists) {
         mode match {
           case SaveMode.Overwrite =>
             if (options.isTruncate && isCascadingTruncateTable(options.url) == Some(false)) {
               // In this case, we should truncate table and then load.
               truncateTable(conn, options)
-              val tableSchema = JdbcUtils.getSchemaOption(conn, options)
+              val tableSchema = JDBCUtils.getSchemaOption(conn, options)
               saveTable(df, tableSchema, isCaseSensitive, options)
             } else {
               // Otherwise, do not truncate the table, instead drop and recreate it
@@ -64,7 +64,7 @@ class JdbcRelationProvider extends CreatableRelationProvider
             }
 
           case SaveMode.Append =>
-            val tableSchema = JdbcUtils.getSchemaOption(conn, options)
+            val tableSchema = JDBCUtils.getSchemaOption(conn, options)
             saveTable(df, tableSchema, isCaseSensitive, options)
 
           case SaveMode.ErrorIfExists =>
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCUtils.scala
similarity index 99%
rename from sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
rename to sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCUtils.scala
index 3c6649b26ecd..4477c0355a79 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCUtils.scala
@@ -44,7 +44,7 @@ import org.apache.spark.util.NextIterator
 /**
  * Util functions for JDBC tables.
  */
-object JdbcUtils extends Logging {
+object JDBCUtils extends Logging {
   /**
    * Returns a factory for creating connections to the given JDBC URL.
    *
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTableCatalog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTableCatalog.scala
index 5d64cf4ca896..b7941fce5d99 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTableCatalog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTableCatalog.scala
@@ -24,7 +24,7 @@ import org.apache.spark.internal.Logging
 import org.apache.spark.sql.catalyst.analysis.{NoSuchNamespaceException, NoSuchTableException}
 import org.apache.spark.sql.connector.catalog.{Identifier, Table, TableCatalog, TableChange}
 import org.apache.spark.sql.connector.expressions.Transform
-import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcOptionsInWrite, JDBCRDD, JdbcUtils}
+import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcOptionsInWrite, JDBCRDD, JDBCUtils}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects}
 import org.apache.spark.sql.types.StructType
@@ -70,14 +70,14 @@ class JDBCTableCatalog extends TableCatalog with Logging {
     checkNamespace(ident.namespace())
     val writeOptions = new JdbcOptionsInWrite(
       options.parameters + (JDBCOptions.JDBC_TABLE_NAME -> getTableName(ident)))
-    withConnection(JdbcUtils.tableExists(_, writeOptions))
+    withConnection(JDBCUtils.tableExists(_, writeOptions))
   }
 
   override def dropTable(ident: Identifier): Boolean = {
     checkNamespace(ident.namespace())
     withConnection { conn =>
       try {
-        JdbcUtils.dropTable(conn, getTableName(ident), options)
+        JDBCUtils.dropTable(conn, getTableName(ident), options)
         true
       } catch {
         case _: SQLException => false
@@ -88,7 +88,7 @@ class JDBCTableCatalog extends TableCatalog with Logging {
   override def renameTable(oldIdent: Identifier, newIdent: Identifier): Unit = {
     checkNamespace(oldIdent.namespace())
     withConnection { conn =>
-      JdbcUtils.renameTable(conn, getTableName(oldIdent), getTableName(newIdent), options)
+      JDBCUtils.renameTable(conn, getTableName(oldIdent), getTableName(newIdent), options)
     }
   }
 
@@ -123,7 +123,7 @@ class JDBCTableCatalog extends TableCatalog with Logging {
       options.parameters + (JDBCOptions.JDBC_TABLE_NAME -> getTableName(ident)))
     val caseSensitive = SQLConf.get.caseSensitiveAnalysis
     withConnection { conn =>
-      JdbcUtils.createTable(conn, getTableName(ident), schema, caseSensitive, writeOptions)
+      JDBCUtils.createTable(conn, getTableName(ident), schema, caseSensitive, writeOptions)
     }
 
     JDBCTable(ident, schema, writeOptions)
@@ -144,7 +144,7 @@ class JDBCTableCatalog extends TableCatalog with Logging {
   }
 
   private def withConnection[T](f: Connection => T): T = {
-    val conn = JdbcUtils.createConnectionFactory(options)()
+    val conn = JDBCUtils.createConnectionFactory(options)()
     try {
       f(conn)
     } finally {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
index c8d8a3392128..4d6eaf99c9e8 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.jdbc
 import java.sql.{Connection, Types}
 import java.util.Locale
 
-import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcUtils}
+import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JDBCUtils}
 import org.apache.spark.sql.types._
@@ -80,7 +80,7 @@ private object PostgresDialect extends JdbcDialect {
       JdbcType(s"NUMERIC(${t.precision},${t.scale})", java.sql.Types.NUMERIC))
     case ArrayType(et, _) if et.isInstanceOf[AtomicType] =>
       getJDBCType(et).map(_.databaseTypeDefinition)
-        .orElse(JdbcUtils.getCommonJDBCType(et).map(_.databaseTypeDefinition))
+        .orElse(JDBCUtils.getCommonJDBCType(et).map(_.databaseTypeDefinition))
         .map(typeName => JdbcType(s"$typeName[]", java.sql.Types.ARRAY))
     case _ => None
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtilsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCUtilsSuite.scala
similarity index 81%
rename from sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtilsSuite.scala
rename to sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCUtilsSuite.scala
index 7d277c1ffaff..fcf3544710ed 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtilsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCUtilsSuite.scala
@@ -22,7 +22,7 @@ import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.catalyst.parser.ParseException
 import org.apache.spark.sql.types._
 
-class JdbcUtilsSuite extends SparkFunSuite {
+class JDBCUtilsSuite extends SparkFunSuite {
 
   val tableSchema = StructType(Seq(
     StructField("C1", StringType, false), StructField("C2", IntegerType, false)))
@@ -30,23 +30,23 @@ class JdbcUtilsSuite extends SparkFunSuite {
   val caseInsensitive = org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
 
   test("Parse user specified column types") {
-    assert(JdbcUtils.getCustomSchema(tableSchema, null, caseInsensitive) === tableSchema)
-    assert(JdbcUtils.getCustomSchema(tableSchema, "", caseInsensitive) === tableSchema)
+    assert(JDBCUtils.getCustomSchema(tableSchema, null, caseInsensitive) === tableSchema)
+    assert(JDBCUtils.getCustomSchema(tableSchema, "", caseInsensitive) === tableSchema)
 
-    assert(JdbcUtils.getCustomSchema(tableSchema, "c1 DATE", caseInsensitive) ===
+    assert(JDBCUtils.getCustomSchema(tableSchema, "c1 DATE", caseInsensitive) ===
       StructType(Seq(StructField("C1", DateType, false), StructField("C2", IntegerType, false))))
-    assert(JdbcUtils.getCustomSchema(tableSchema, "c1 DATE", caseSensitive) ===
+    assert(JDBCUtils.getCustomSchema(tableSchema, "c1 DATE", caseSensitive) ===
      StructType(Seq(StructField("C1", StringType, false), StructField("C2", IntegerType, false))))
 
     assert(
-      JdbcUtils.getCustomSchema(tableSchema, "c1 DATE, C2 STRING", caseInsensitive) ===
+      JDBCUtils.getCustomSchema(tableSchema, "c1 DATE, C2 STRING", caseInsensitive) ===
      StructType(Seq(StructField("C1", DateType, false), StructField("C2", StringType, false))))
-    assert(JdbcUtils.getCustomSchema(tableSchema, "c1 DATE, C2 STRING", caseSensitive) ===
+    assert(JDBCUtils.getCustomSchema(tableSchema, "c1 DATE, C2 STRING", caseSensitive) ===
      StructType(Seq(StructField("C1", StringType, false), StructField("C2", StringType, false))))
 
     // Throw AnalysisException
     val duplicate = intercept[AnalysisException]{
-      JdbcUtils.getCustomSchema(tableSchema, "c1 DATE, c1 STRING", caseInsensitive) ===
+      JDBCUtils.getCustomSchema(tableSchema, "c1 DATE, c1 STRING", caseInsensitive) ===
      StructType(Seq(StructField("c1", DateType, false), StructField("c1", StringType, false)))
     }
     assert(duplicate.getMessage.contains(
@@ -54,13 +54,13 @@
 
     // Throw ParseException
     val dataTypeNotSupported = intercept[ParseException]{
-      JdbcUtils.getCustomSchema(tableSchema, "c3 DATEE, C2 STRING", caseInsensitive) ===
+      JDBCUtils.getCustomSchema(tableSchema, "c3 DATEE, C2 STRING", caseInsensitive) ===
      StructType(Seq(StructField("c3", DateType, false), StructField("C2", StringType, false)))
     }
     assert(dataTypeNotSupported.getMessage.contains("DataType datee is not supported"))
 
     val mismatchedInput = intercept[ParseException]{
-      JdbcUtils.getCustomSchema(tableSchema, "c3 DATE. C2 STRING", caseInsensitive) ===
+      JDBCUtils.getCustomSchema(tableSchema, "c3 DATE. C2 STRING", caseInsensitive) ===
      StructType(Seq(StructField("c3", DateType, false), StructField("C2", StringType, false)))
     }
     assert(mismatchedInput.getMessage.contains("mismatched input '.' expecting"))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index 06dd6615a817..e59d4e008211 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -34,7 +34,7 @@ import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeTestUtils
 import org.apache.spark.sql.execution.{DataSourceScanExec, ExtendedMode}
 import org.apache.spark.sql.execution.command.{ExplainCommand, ShowCreateTableCommand}
 import org.apache.spark.sql.execution.datasources.LogicalRelation
-import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JDBCPartition, JDBCRDD, JDBCRelation, JdbcUtils}
+import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JDBCPartition, JDBCRDD, JDBCRelation, JDBCUtils}
 import org.apache.spark.sql.execution.metric.InputOutputMetricsHelper
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.sources._
@@ -1138,7 +1138,7 @@ class JDBCSuite extends QueryTest
 
   test("SPARK-16625: General data types to be mapped to Oracle") {
     def getJdbcType(dialect: JdbcDialect, dt: DataType): String = {
-      dialect.getJDBCType(dt).orElse(JdbcUtils.getCommonJDBCType(dt)).
+      dialect.getJDBCType(dt).orElse(JDBCUtils.getCommonJDBCType(dt)).
        map(_.databaseTypeDefinition).get
     }
 
@@ -1184,7 +1184,7 @@ class JDBCSuite extends QueryTest
 
   test("SPARK-16387: Reserved SQL words are not escaped by JDBC writer") {
     val df = spark.createDataset(Seq("a", "b", "c")).toDF("order")
-    val schema = JdbcUtils.schemaString(
+    val schema = JDBCUtils.schemaString(
       df.schema,
       df.sqlContext.conf.caseSensitiveAnalysis,
       "jdbc:mysql://localhost:3306/temp")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
index 3f621e04338a..b72c16531407 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
@@ -29,7 +29,7 @@ import org.apache.spark.SparkException
 import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
 import org.apache.spark.sql.{AnalysisException, DataFrame, Row, SaveMode}
 import org.apache.spark.sql.catalyst.parser.ParseException
-import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcUtils}
+import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JDBCUtils}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SharedSparkSession
 import org.apache.spark.sql.types._
@@ -390,7 +390,7 @@ class JDBCWriteSuite extends SharedSparkSession with BeforeAndAfter {
     val expectedSchemaStr =
      colTypes.map { case (col, dataType) => s""""$col" $dataType """ }.mkString(", ")
 
-    assert(JdbcUtils.schemaString(
+    assert(JDBCUtils.schemaString(
       df.schema,
       df.sqlContext.conf.caseSensitiveAnalysis,
       url1,
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/ResolvedDataSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/ResolvedDataSourceSuite.scala
index 818a66eb436c..cf2228096fb6 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/ResolvedDataSourceSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/ResolvedDataSourceSuite.scala
@@ -35,13 +35,13 @@ class ResolvedDataSourceSuite extends SharedSparkSession {
   test("jdbc") {
     assert(
       getProvidingClass("jdbc") ===
-      classOf[org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider])
+      classOf[org.apache.spark.sql.execution.datasources.jdbc.JDBCRelationProvider])
     assert(
       getProvidingClass("org.apache.spark.sql.execution.datasources.jdbc") ===
-      classOf[org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider])
+      classOf[org.apache.spark.sql.execution.datasources.jdbc.JDBCRelationProvider])
     assert(
       getProvidingClass("org.apache.spark.sql.jdbc") ===
-      classOf[org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider])
+      classOf[org.apache.spark.sql.execution.datasources.jdbc.JDBCRelationProvider])
   }
 
   test("json") {
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/JdbcConnectionUriSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/JDBCConnectionUriSuite.scala
similarity index 97%
rename from sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/JdbcConnectionUriSuite.scala
rename to sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/JDBCConnectionUriSuite.scala
index fb8a7e273ae4..882322bae4fc 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/JdbcConnectionUriSuite.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/JDBCConnectionUriSuite.scala
@@ -23,7 +23,7 @@
 import org.apache.hive.jdbc.HiveDriver
 
 import org.apache.spark.util.Utils
 
-class JdbcConnectionUriSuite extends HiveThriftServer2Test {
+class JDBCConnectionUriSuite extends HiveThriftServer2Test {
   Utils.classForName(classOf[HiveDriver].getCanonicalName)
 
   override def mode: ServerMode.Value = ServerMode.binary