14 changes: 14 additions & 0 deletions core/src/main/scala/org/apache/spark/api/python/SerDeUtil.scala
@@ -35,6 +35,16 @@ import org.apache.spark.rdd.RDD

/** Utilities for serialization / deserialization between Python and Java, using Pickle. */
private[spark] object SerDeUtil extends Logging {
  class ByteArrayConstructor extends net.razorvine.pickle.objects.ByteArrayConstructor {
    override def construct(args: Array[Object]): Object = {
      // Deal with an empty byte array pickled by Python 3.
      if (args.length == 0) {
Member:
I see. It looks quite straightforward. I checked in Python 3:

>>> import pickle
>>> import pickletools
>>> print(pickletools.dis(pickle.dumps(bytearray())))
    0: \x80 PROTO      3
    2: c    GLOBAL     'builtins bytearray'
   22: q    BINPUT     0
   24: )    EMPTY_TUPLE
   25: R    REDUCE
   26: q    BINPUT     1
   28: .    STOP

which, to my knowledge, gives new Object[0] for args.

Member:

I also checked pickle.dumps(...) with each of protocols 0 through 4, just in case.
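A minimal way to reproduce that check (a sketch, assuming a Python 3 interpreter where protocols 0 through 4 are available):

    import pickle

    # An empty bytearray reduces to the bytearray class applied to an empty
    # argument tuple, so the unpickler's constructor receives zero args.
    for p in range(5):  # protocols 0 through 4
        data = pickle.dumps(bytearray(), protocol=p)
        assert pickle.loads(data) == bytearray(b"")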

        Array.empty[Byte]
Member:

nit: use Array.emptyByteArray?

Member Author:

Ok.

      } else {
        super.construct(args)
      }
    }
  }
  // Unpickle array.array generated by Python 2.6
  class ArrayConstructor extends net.razorvine.pickle.objects.ArrayConstructor {
    // /* Description of types */
@@ -108,6 +118,10 @@ private[spark] object SerDeUtil extends Logging {
    synchronized{
      if (!initialized) {
        Unpickler.registerConstructor("array", "array", new ArrayConstructor())
        Unpickler.registerConstructor("__builtin__", "bytearray", new ByteArrayConstructor())
        Unpickler.registerConstructor("builtins", "bytearray", new ByteArrayConstructor())
        Unpickler.registerConstructor("__builtin__", "bytes", new ByteArrayConstructor())
        Unpickler.registerConstructor("_codecs", "encode", new ByteArrayConstructor())
        initialized = true
      }
    }
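For context on the four registrations: Python 2 pickles bytearray under the module name __builtin__ while Python 3 uses builtins, and under protocols 0 through 2 CPython has no dedicated opcode for bytes, so it pickles bytes objects through codecs.encode, whose module is _codecs. A quick illustration (a sketch; exact opcodes can vary across CPython versions):

    import pickle
    import pickletools

    # bytes pickled with protocol <= 2 go through _codecs.encode, which is
    # why that constructor is registered above alongside bytearray.
    pickletools.dis(pickle.dumps(b"abc", protocol=2))
    # the disassembly includes a line like: GLOBAL '_codecs encode'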
5 changes: 5 additions & 0 deletions python/pyspark/sql/tests.py
@@ -2480,6 +2480,11 @@ def assertCollectSuccess(typecode, value):
                a = array.array(t)
                self.spark.createDataFrame([Row(myarray=a)]).collect()

    # test for SPARK-21534
    def test_empty_bytearray(self):
        rdd = self.spark.sql("select unhex('') as xx").rdd.map(lambda x: {"abc": x.xx})
        self.spark.createDataFrame(rdd).collect()
Member:

How about reusing the existing test SQLTests.test_BinaryType_serialization by adding bytearray() to it?

Member Author:

Yes. Added.
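For reference, the amended test presumably ends up looking something like this sketch (the schema and existing rows are assumptions based on the test's name, not quoted from this diff; StructType, StructField, and BinaryType come from pyspark.sql.types):

    def test_BinaryType_serialization(self):
        # a sketch: schema and existing rows are assumed, not part of this diff
        schema = StructType([StructField('mybytes', BinaryType())])
        data = [[bytearray(b'here is my data')],
                [bytearray()]]  # the empty case from SPARK-21534
        df = self.spark.createDataFrame(data, schema=schema)
        df.collect()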


    def test_bucketed_write(self):
        data = [
            (1, "foo", 3.0), (2, "foo", 5.0),