diff --git a/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java b/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java
index a7bd4b3799a25..115e1fbb79a2e 100644
--- a/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java
+++ b/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java
@@ -154,6 +154,7 @@ private void throwOom(final MemoryBlock page, final long required) {
       taskMemoryManager.freePage(page, this);
     }
     taskMemoryManager.showMemoryUsage();
-    throw new SparkOutOfMemoryError("Unable to acquire " + required + " bytes of memory, got " + got);
+    throw new SparkOutOfMemoryError("Unable to acquire " + required + " bytes of memory, got " +
+      got);
   }
 }
diff --git a/external/kinesis-asl/src/main/java/org/apache/spark/streaming/kinesis/KinesisInitialPositions.java b/external/kinesis-asl/src/main/java/org/apache/spark/streaming/kinesis/KinesisInitialPositions.java
index 206e1e4699030..b5f5ab0e90540 100644
--- a/external/kinesis-asl/src/main/java/org/apache/spark/streaming/kinesis/KinesisInitialPositions.java
+++ b/external/kinesis-asl/src/main/java/org/apache/spark/streaming/kinesis/KinesisInitialPositions.java
@@ -67,9 +67,10 @@ public Date getTimestamp() {
     }
 
     /**
-     * Returns instance of [[KinesisInitialPosition]] based on the passed [[InitialPositionInStream]].
-     * This method is used in KinesisUtils for translating the InitialPositionInStream
-     * to InitialPosition. This function would be removed when we deprecate the KinesisUtils.
+     * Returns instance of [[KinesisInitialPosition]] based on the passed
+     * [[InitialPositionInStream]]. This method is used in KinesisUtils for translating the
+     * InitialPositionInStream to InitialPosition. This function would be removed when we deprecate
+     * the KinesisUtils.
      *
      * @return [[InitialPosition]]
      */
@@ -83,9 +84,10 @@ public static KinesisInitialPosition fromKinesisInitialPosition(
             // InitialPositionInStream.AT_TIMESTAMP is not supported.
             // Use InitialPosition.atTimestamp(timestamp) instead.
             throw new UnsupportedOperationException(
-                "Only InitialPositionInStream.LATEST and InitialPositionInStream.TRIM_HORIZON " +
-                    "supported in initialPositionInStream(). Please use the initialPosition() from " +
-                    "builder API in KinesisInputDStream for using InitialPositionInStream.AT_TIMESTAMP");
+                "Only InitialPositionInStream.LATEST and InitialPositionInStream." +
+                    "TRIM_HORIZON supported in initialPositionInStream(). Please use " +
+                    "the initialPosition() from builder API in KinesisInputDStream for " +
+                    "using InitialPositionInStream.AT_TIMESTAMP");
         }
     }
 }
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
index 3ba180860c325..c120863152a96 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
@@ -31,7 +31,6 @@
 import org.apache.parquet.schema.PrimitiveType;
 
 import org.apache.spark.sql.catalyst.util.DateTimeUtils;
-import org.apache.spark.sql.execution.vectorized.ColumnVector;
 import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
 import org.apache.spark.sql.types.DataTypes;
 import org.apache.spark.sql.types.DecimalType;
@@ -96,7 +95,7 @@ public class VectorizedColumnReader {
   private final OriginalType originalType;
   // The timezone conversion to apply to int96 timestamps. Null if no conversion.
   private final TimeZone convertTz;
-  private final static TimeZone UTC = DateTimeUtils.TimeZoneUTC();
+  private static final TimeZone UTC = DateTimeUtils.TimeZoneUTC();
 
   public VectorizedColumnReader(
       ColumnDescriptor descriptor,
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
index 14f2a58d638f0..6c157e85d411f 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
@@ -79,8 +79,8 @@ public class VectorizedParquetRecordReader extends SpecificParquetRecordReaderBa
   private boolean[] missingColumns;
 
   /**
-   * The timezone that timestamp INT96 values should be converted to. Null if no conversion. Here to workaround
-   * incompatibilities between different engines when writing timestamp values.
+   * The timezone that timestamp INT96 values should be converted to. Null if no conversion. Here to
+   * workaround incompatibilities between different engines when writing timestamp values.
    */
   private TimeZone convertTz = null;
 
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarRow.java b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarRow.java
index 95c0d09873d67..8bb33ed5b78c0 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarRow.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarRow.java
@@ -28,7 +28,8 @@
  * to be reused, callers should copy the data out if it needs to be stored.
  */
 public final class ColumnarRow extends InternalRow {
-  // The data for this row. E.g. the value of 3rd int field is `data.getChildColumn(3).getInt(rowId)`.
+  // The data for this row.
+  // E.g. the value of 3rd int field is `data.getChildColumn(3).getInt(rowId)`.
   private final ColumnVector data;
   private final int rowId;
   private final int numFields;
diff --git a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/SessionConfigSupport.java b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/SessionConfigSupport.java
index 0b5b6ac675f2c..3cb020d2e0836 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/SessionConfigSupport.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/SessionConfigSupport.java
@@ -19,9 +19,6 @@
 
 import org.apache.spark.annotation.InterfaceStability;
 
-import java.util.List;
-import java.util.Map;
-
 /**
  * A mix-in interface for {@link DataSourceV2}. Data sources can implement this interface to
  * propagate session configs with the specified key-prefix to all data source operations in this
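Side note on the SessionConfigSupport hunk above: only unused java.util imports are dropped, but the mix-in itself stays a small interface. A minimal sketch of a source opting in, assuming the interface's keyPrefix() method (not shown in this hunk); the class name and prefix are invented for illustration:

import org.apache.spark.sql.sources.v2.DataSourceV2;
import org.apache.spark.sql.sources.v2.SessionConfigSupport;

// Hypothetical data source. With this prefix, session configs of the form
// spark.datasource.my-source.* would be propagated to its operations
// (the exact config naming is an assumption, not shown in this diff).
public class ExampleSource implements DataSourceV2, SessionConfigSupport {
  @Override
  public String keyPrefix() {
    return "my-source";
  }
}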
diff --git a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousReadSupport.java b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousReadSupport.java
index 8837bae6156b1..3136cee1f655f 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousReadSupport.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousReadSupport.java
@@ -39,5 +39,8 @@ public interface ContinuousReadSupport extends DataSourceV2 {
    * @param options the options for the returned data source reader, which is an immutable
    *                case-insensitive string-to-string map.
    */
-  ContinuousReader createContinuousReader(Optional<StructType> schema, String checkpointLocation, DataSourceV2Options options);
+  ContinuousReader createContinuousReader(
+      Optional<StructType> schema,
+      String checkpointLocation,
+      DataSourceV2Options options);
 }
diff --git a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousWriteSupport.java b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousWriteSupport.java
index ec15e436672b4..dee493cadb71e 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousWriteSupport.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/ContinuousWriteSupport.java
@@ -39,9 +39,9 @@ public interface ContinuousWriteSupport extends BaseStreamingSink {
    * Creates an optional {@link ContinuousWriter} to save the data to this data source. Data
    * sources can return None if there is no writing needed to be done.
    *
-   * @param queryId A unique string for the writing query. It's possible that there are many writing
-   *                queries running at the same time, and the returned {@link DataSourceV2Writer}
-   *                can use this id to distinguish itself from others.
+   * @param queryId A unique string for the writing query. It's possible that there are many
+   *                writing queries running at the same time, and the returned
+   *                {@link DataSourceV2Writer} can use this id to distinguish itself from others.
    * @param schema the schema of the data to be written.
    * @param mode the output mode which determines what successive epoch output means to this
    *             sink, please refer to {@link OutputMode} for more details.
diff --git a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/Offset.java b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/Offset.java
index 517fdab16684c..60b87f2ac0756 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/Offset.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/Offset.java
@@ -42,7 +42,8 @@ public abstract class Offset extends org.apache.spark.sql.execution.streaming.Of
   @Override
   public boolean equals(Object obj) {
     if (obj instanceof org.apache.spark.sql.execution.streaming.Offset) {
-      return this.json().equals(((org.apache.spark.sql.execution.streaming.Offset) obj).json());
+      return this.json()
+        .equals(((org.apache.spark.sql.execution.streaming.Offset) obj).json());
     } else {
       return false;
     }
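Worth noting on the Offset hunk: equals() compares offsets purely by their json() serialization, so any concrete subclass gets JSON-based equality for free. A minimal sketch, with an invented class and field, assuming only the abstract json() method that the comparison above relies on:

import org.apache.spark.sql.sources.v2.streaming.reader.Offset;

// Hypothetical offset tracking a single counter. Two instances with the
// same value are equal, because equals() above delegates to json().
public class CounterOffset extends Offset {
  private final long value;

  public CounterOffset(long value) {
    this.value = value;
  }

  @Override
  public String json() {
    return Long.toString(value);
  }
}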
diff --git a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/PartitionOffset.java b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/PartitionOffset.java
index 729a6123034f0..eca0085c8a8ce 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/PartitionOffset.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/sources/v2/streaming/reader/PartitionOffset.java
@@ -26,5 +26,4 @@
  * These offsets must be serializable.
  */
 public interface PartitionOffset extends Serializable {
-
 }
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
index fd9108eb53ca9..70c27948de61b 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -42,7 +42,6 @@
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.VariableSubstitution;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.session.OperationLog;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.SerDe;
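Returning to the PartitionOffset hunk above: the interface is a pure Serializable marker, so an implementation only needs serializable state. A sketch with invented fields, for illustration only:

import org.apache.spark.sql.sources.v2.streaming.reader.PartitionOffset;

// Hypothetical per-partition position for a continuous source. The field
// names are assumptions; the only real contract here is Serializable.
public class SequenceOffset implements PartitionOffset {
  public final int partition;
  public final long sequenceNumber;

  public SequenceOffset(int partition, long sequenceNumber) {
    this.partition = partition;
    this.sequenceNumber = sequenceNumber;
  }
}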