Modify tests for unsupported types.
ueshin committed Dec 22, 2017
commit e29f8330e455e6bf66d2c69f2875a70ae2c71cdb
python/pyspark/sql/tests.py (14 changes: 8 additions & 6 deletions)
@@ -3194,10 +3194,11 @@ def create_pandas_data_frame(self):
         return pd.DataFrame(data=data_dict)
 
     def test_unsupported_datatype(self):
-        schema = StructType([StructField("decimal", DecimalType(), True)])
+        schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
         df = self.spark.createDataFrame([(None,)], schema=schema)
         with QuietTest(self.sc):
-            self.assertRaises(Exception, lambda: df.toPandas())
+            with self.assertRaisesRegexp(Exception, 'Unsupported data type'):
+                df.toPandas()
 
     def test_null_conversion(self):
         df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
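
For reference, a minimal standalone sketch of what the updated test asserts: with Arrow-based conversion enabled, toPandas() on a MapType column raises an error mentioning "Unsupported data type". The SparkSession setup and the config key below are assumptions for illustration, not part of this diff (the ArrowTests class enables Arrow conversion in its own setup).

from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, MapType, StringType, IntegerType

spark = SparkSession.builder.master("local[2]").getOrCreate()
# Assumed config key for this era of Spark; the test class enables it in setUpClass.
spark.conf.set("spark.sql.execution.arrow.enabled", "true")

schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = spark.createDataFrame([(None,)], schema=schema)
try:
    df.toPandas()  # Arrow conversion has no mapping for MapType here, so this should raise
except Exception as e:
    assert "Unsupported data type" in str(e)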
@@ -3733,12 +3734,12 @@ def test_vectorized_udf_varargs(self):
 
     def test_vectorized_udf_unsupported_types(self):
         from pyspark.sql.functions import pandas_udf, col
-        schema = StructType([StructField("dt", DecimalType(), True)])
+        schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
         df = self.spark.createDataFrame([(None,)], schema=schema)
-        f = pandas_udf(lambda x: x, DecimalType())
+        f = pandas_udf(lambda x: x, MapType(StringType(), IntegerType()))
         with QuietTest(self.sc):
             with self.assertRaisesRegexp(Exception, 'Unsupported data type'):
-                df.select(f(col('dt'))).collect()
+                df.select(f(col('map'))).collect()
 
     def test_vectorized_udf_null_date(self):
         from pyspark.sql.functions import pandas_udf, col
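
By contrast, the scalar pandas_udf path runs fine for supported return types; the test above only checks that MapType is rejected. A short sketch under the same assumed spark session (LongType is chosen purely for illustration):

from pyspark.sql.functions import pandas_udf, col
from pyspark.sql.types import LongType

# pandas Series in, pandas Series out: a supported return type executes without error.
plus_one = pandas_udf(lambda s: s + 1, LongType())
spark.range(3).select(plus_one(col("id"))).show()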
@@ -4032,7 +4033,8 @@ def test_wrong_args(self):
     def test_unsupported_types(self):
         from pyspark.sql.functions import pandas_udf, col, PandasUDFType
         schema = StructType(
-            [StructField("id", LongType(), True), StructField("dt", DecimalType(), True)])
+            [StructField("id", LongType(), True),
+             StructField("map", MapType(StringType(), IntegerType()), True)])
         df = self.spark.createDataFrame([(1, None,)], schema=schema)
         f = pandas_udf(lambda x: x, df.schema, PandasUDFType.GROUP_MAP)
         with QuietTest(self.sc):
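
For comparison, a group map pandas_udf with a fully supported schema, sketched with the PandasUDFType.GROUP_MAP name as it appears in this diff and the same assumed spark session; the example schema and function are hypothetical:

from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import StructType, StructField, LongType, DoubleType

result_schema = StructType([StructField("id", LongType(), True),
                            StructField("v", DoubleType(), True)])
df = spark.createDataFrame([(1, 1.0), (1, 2.0), (2, 3.0)], schema=result_schema)

@pandas_udf(result_schema, PandasUDFType.GROUP_MAP)
def subtract_mean(pdf):
    # pdf is a pandas DataFrame holding one group's rows
    return pdf.assign(v=pdf.v - pdf.v.mean())

df.groupby("id").apply(subtract_mean).show()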