Skip to content
Prev Previous commit
Next Next commit
Clarify reason why SparkSqlSerializer2 supports relocation of serialized objects
  • Loading branch information
JoshRosen committed May 5, 2015
commit 0a7ebd7311c42707d2576cda9a7b28ce96d2973f
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,8 @@ abstract class Serializer {
* serIn.open([obj2bytes] concatenate [obj1bytes]) should return (obj2, obj1)
* }}}
*
 * In general, this property should hold for serializers that are stateless and that do not
 * write special metadata at the beginning or end of the serialization stream.
*
* This API is private to Spark; this method should not be overridden in third-party subclasses
* or called in user code and is subject to removal in future Spark releases.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,10 @@ private[sql] class SparkSqlSerializer2(keySchema: Array[DataType], valueSchema:

/** Builds a new serializer instance bound to this serializer's key and value schemas. */
def newInstance(): SerializerInstance = {
  new ShuffleSerializerInstance(keySchema, valueSchema)
}

override def supportsRelocationOfSerializedObjects: Boolean = true
// SparkSqlSerializer2 is stateless and writes no stream headers, so the bytes of
// individually serialized records may be safely reordered (relocated) within a stream.
override def supportsRelocationOfSerializedObjects: Boolean = true
}

private[sql] object SparkSqlSerializer2 {
Expand Down