Commit 9873d57

kiszk authored and hvanhovell committed
[SPARK-17490][SQL] Optimize SerializeFromObject() for a primitive array
Waiting for #13680 to be merged.

This PR optimizes `SerializeFromObject()` for a primitive array. It is derived from #13758 and addresses one of the problems there in a simpler way.

The current implementation always generates `GenericArrayData` from `SerializeFromObject()` for any array type in a logical plan. This boxes every element in the `GenericArrayData` constructor when `SerializeFromObject()` receives a primitive array. This PR generates `UnsafeArrayData` from `SerializeFromObject()` for a primitive array instead, which avoids the boxing when the Catalyst-generated code creates the `ArrayData` instance. This PR also generates `UnsafeArrayData` for `RowEncoder.serializeFor` and `CatalystTypeConverters.createToCatalystConverter`.

The performance improvement of `SerializeFromObject()` is up to 2.0x:

```
OpenJDK 64-Bit Server VM 1.8.0_91-b14 on Linux 4.4.11-200.fc22.x86_64
Intel Xeon E3-12xx v2 (Ivy Bridge)

Without this PR
Write an array in Dataset:        Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
------------------------------------------------------------------------------------------------
Int                                     556 /  608         15.1          66.3       1.0X
Double                                 1668 / 1746          5.0         198.8       0.3X

With this PR
Write an array in Dataset:        Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
------------------------------------------------------------------------------------------------
Int                                     352 /  401         23.8          42.0       1.0X
Double                                  821 /  885         10.2          97.9       0.4X
```

Here is an example program of the kind that occurs in MLlib, as described in [SPARK-16070](https://issues.apache.org/jira/browse/SPARK-16070):

```
sparkContext.parallelize(Seq(Array(1, 2)), 1).toDS.map(e => e).show
```

Generated code before applying this PR:

```java
/* 039 */   protected void processNext() throws java.io.IOException {
/* 040 */     while (inputadapter_input.hasNext()) {
/* 041 */       InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 042 */       int[] inputadapter_value = (int[])inputadapter_row.get(0, null);
/* 043 */
/* 044 */       Object mapelements_obj = ((Expression) references[0]).eval(null);
/* 045 */       scala.Function1 mapelements_value1 = (scala.Function1) mapelements_obj;
/* 046 */
/* 047 */       boolean mapelements_isNull = false || false;
/* 048 */       int[] mapelements_value = null;
/* 049 */       if (!mapelements_isNull) {
/* 050 */         Object mapelements_funcResult = null;
/* 051 */         mapelements_funcResult = mapelements_value1.apply(inputadapter_value);
/* 052 */         if (mapelements_funcResult == null) {
/* 053 */           mapelements_isNull = true;
/* 054 */         } else {
/* 055 */           mapelements_value = (int[]) mapelements_funcResult;
/* 056 */         }
/* 057 */
/* 058 */       }
/* 059 */       mapelements_isNull = mapelements_value == null;
/* 060 */
/* 061 */       serializefromobject_argIsNulls[0] = mapelements_isNull;
/* 062 */       serializefromobject_argValue = mapelements_value;
/* 063 */
/* 064 */       boolean serializefromobject_isNull = false;
/* 065 */       for (int idx = 0; idx < 1; idx++) {
/* 066 */         if (serializefromobject_argIsNulls[idx]) { serializefromobject_isNull = true; break; }
/* 067 */       }
/* 068 */
/* 069 */       final ArrayData serializefromobject_value = serializefromobject_isNull ? null : new org.apache.spark.sql.catalyst.util.GenericArrayData(serializefromobject_argValue);
/* 070 */       serializefromobject_holder.reset();
/* 071 */
/* 072 */       serializefromobject_rowWriter.zeroOutNullBytes();
/* 073 */
/* 074 */       if (serializefromobject_isNull) {
/* 075 */         serializefromobject_rowWriter.setNullAt(0);
/* 076 */       } else {
/* 077 */         // Remember the current cursor so that we can calculate how many bytes are
/* 078 */         // written later.
/* 079 */         final int serializefromobject_tmpCursor = serializefromobject_holder.cursor;
/* 080 */
/* 081 */         if (serializefromobject_value instanceof UnsafeArrayData) {
/* 082 */           final int serializefromobject_sizeInBytes = ((UnsafeArrayData) serializefromobject_value).getSizeInBytes();
/* 083 */           // grow the global buffer before writing data.
/* 084 */           serializefromobject_holder.grow(serializefromobject_sizeInBytes);
/* 085 */           ((UnsafeArrayData) serializefromobject_value).writeToMemory(serializefromobject_holder.buffer, serializefromobject_holder.cursor);
/* 086 */           serializefromobject_holder.cursor += serializefromobject_sizeInBytes;
/* 087 */
/* 088 */         } else {
/* 089 */           final int serializefromobject_numElements = serializefromobject_value.numElements();
/* 090 */           serializefromobject_arrayWriter.initialize(serializefromobject_holder, serializefromobject_numElements, 4);
/* 091 */
/* 092 */           for (int serializefromobject_index = 0; serializefromobject_index < serializefromobject_numElements; serializefromobject_index++) {
/* 093 */             if (serializefromobject_value.isNullAt(serializefromobject_index)) {
/* 094 */               serializefromobject_arrayWriter.setNullInt(serializefromobject_index);
/* 095 */             } else {
/* 096 */               final int serializefromobject_element = serializefromobject_value.getInt(serializefromobject_index);
/* 097 */               serializefromobject_arrayWriter.write(serializefromobject_index, serializefromobject_element);
/* 098 */             }
/* 099 */           }
/* 100 */         }
/* 101 */
/* 102 */         serializefromobject_rowWriter.setOffsetAndSize(0, serializefromobject_tmpCursor, serializefromobject_holder.cursor - serializefromobject_tmpCursor);
/* 103 */       }
/* 104 */       serializefromobject_result.setTotalSize(serializefromobject_holder.totalSize());
/* 105 */       append(serializefromobject_result);
/* 106 */       if (shouldStop()) return;
/* 107 */     }
/* 108 */   }
/* 109 */ }
```

Generated code after applying this PR:

```java
/* 035 */   protected void processNext() throws java.io.IOException {
/* 036 */     while (inputadapter_input.hasNext()) {
/* 037 */       InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 038 */       int[] inputadapter_value = (int[])inputadapter_row.get(0, null);
/* 039 */
/* 040 */       Object mapelements_obj = ((Expression) references[0]).eval(null);
/* 041 */       scala.Function1 mapelements_value1 = (scala.Function1) mapelements_obj;
/* 042 */
/* 043 */       boolean mapelements_isNull = false || false;
/* 044 */       int[] mapelements_value = null;
/* 045 */       if (!mapelements_isNull) {
/* 046 */         Object mapelements_funcResult = null;
/* 047 */         mapelements_funcResult = mapelements_value1.apply(inputadapter_value);
/* 048 */         if (mapelements_funcResult == null) {
/* 049 */           mapelements_isNull = true;
/* 050 */         } else {
/* 051 */           mapelements_value = (int[]) mapelements_funcResult;
/* 052 */         }
/* 053 */
/* 054 */       }
/* 055 */       mapelements_isNull = mapelements_value == null;
/* 056 */
/* 057 */       boolean serializefromobject_isNull = mapelements_isNull;
/* 058 */       final ArrayData serializefromobject_value = serializefromobject_isNull ? null : org.apache.spark.sql.catalyst.expressions.UnsafeArrayData.fromPrimitiveArray(mapelements_value);
/* 059 */       serializefromobject_isNull = serializefromobject_value == null;
/* 060 */       serializefromobject_holder.reset();
/* 061 */
/* 062 */       serializefromobject_rowWriter.zeroOutNullBytes();
/* 063 */
/* 064 */       if (serializefromobject_isNull) {
/* 065 */         serializefromobject_rowWriter.setNullAt(0);
/* 066 */       } else {
/* 067 */         // Remember the current cursor so that we can calculate how many bytes are
/* 068 */         // written later.
/* 069 */         final int serializefromobject_tmpCursor = serializefromobject_holder.cursor;
/* 070 */
/* 071 */         if (serializefromobject_value instanceof UnsafeArrayData) {
/* 072 */           final int serializefromobject_sizeInBytes = ((UnsafeArrayData) serializefromobject_value).getSizeInBytes();
/* 073 */           // grow the global buffer before writing data.
/* 074 */           serializefromobject_holder.grow(serializefromobject_sizeInBytes);
/* 075 */           ((UnsafeArrayData) serializefromobject_value).writeToMemory(serializefromobject_holder.buffer, serializefromobject_holder.cursor);
/* 076 */           serializefromobject_holder.cursor += serializefromobject_sizeInBytes;
/* 077 */
/* 078 */         } else {
/* 079 */           final int serializefromobject_numElements = serializefromobject_value.numElements();
/* 080 */           serializefromobject_arrayWriter.initialize(serializefromobject_holder, serializefromobject_numElements, 4);
/* 081 */
/* 082 */           for (int serializefromobject_index = 0; serializefromobject_index < serializefromobject_numElements; serializefromobject_index++) {
/* 083 */             if (serializefromobject_value.isNullAt(serializefromobject_index)) {
/* 084 */               serializefromobject_arrayWriter.setNullInt(serializefromobject_index);
/* 085 */             } else {
/* 086 */               final int serializefromobject_element = serializefromobject_value.getInt(serializefromobject_index);
/* 087 */               serializefromobject_arrayWriter.write(serializefromobject_index, serializefromobject_element);
/* 088 */             }
/* 089 */           }
/* 090 */         }
/* 091 */
/* 092 */         serializefromobject_rowWriter.setOffsetAndSize(0, serializefromobject_tmpCursor, serializefromobject_holder.cursor - serializefromobject_tmpCursor);
/* 093 */       }
/* 094 */       serializefromobject_result.setTotalSize(serializefromobject_holder.totalSize());
/* 095 */       append(serializefromobject_result);
/* 096 */       if (shouldStop()) return;
/* 097 */     }
/* 098 */   }
/* 099 */ }
```

Added tests in `DatasetSuite`, `RowEncoderSuite`, and `CatalystTypeConvertersSuite`.

Author: Kazuaki Ishizaki <[email protected]>

Closes #15044 from kiszk/SPARK-17490.

(cherry picked from commit 19cf208)
Signed-off-by: Herman van Hovell <[email protected]>
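To inspect the difference locally, here is a minimal sketch (assuming a Spark 2.0.x spark-shell session where `spark` and its implicits are available) that dumps the whole-stage generated code for the example above, so the `GenericArrayData` versus `UnsafeArrayData.fromPrimitiveArray` call can be checked directly:

```scala
// Hypothetical spark-shell session; assumes Spark 2.0.x with whole-stage codegen enabled.
import spark.implicits._

val ds = spark.sparkContext.parallelize(Seq(Array(1, 2)), 1).toDS.map(e => e)

// Print the Java source produced by whole-stage code generation; with this patch the
// SerializeFromObject section should call UnsafeArrayData.fromPrimitiveArray instead of
// constructing a GenericArrayData.
ds.queryExecution.debug.codegen()
```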
1 parent d1eac3e commit 9873d57

File tree

7 files changed: +203 −14 lines changed

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala

Lines changed: 16 additions & 0 deletions
@@ -441,6 +441,22 @@ object ScalaReflection extends ScalaReflection {
         val newPath = s"""- array element class: "$clsName"""" +: walkedTypePath
         MapObjects(serializerFor(_, elementType, newPath), input, dt)
 
+      case dt @ (BooleanType | ByteType | ShortType | IntegerType | LongType |
+                 FloatType | DoubleType) =>
+        val cls = input.dataType.asInstanceOf[ObjectType].cls
+        if (cls.isArray && cls.getComponentType.isPrimitive) {
+          StaticInvoke(
+            classOf[UnsafeArrayData],
+            ArrayType(dt, false),
+            "fromPrimitiveArray",
+            input :: Nil)
+        } else {
+          NewInstance(
+            classOf[GenericArrayData],
+            input :: Nil,
+            dataType = ArrayType(dt, schemaFor(elementType).nullable))
+        }
+
       case dt =>
         NewInstance(
           classOf[GenericArrayData],
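The new branch is taken only when the external object really is a JVM primitive array. A small standalone sketch (plain Scala, no Spark required; the object name is just for illustration) of the `isArray`/`getComponentType.isPrimitive` check it relies on:

```scala
// Standalone illustration of the class check used above: Array[Int] erases to int[],
// whose component type is primitive, while an array of boxed Integers does not.
object PrimitiveArrayCheck {
  def takesUnsafePath(cls: Class[_]): Boolean =
    cls.isArray && cls.getComponentType.isPrimitive

  def main(args: Array[String]): Unit = {
    println(takesUnsafePath(classOf[Array[Int]]))                // true  -> StaticInvoke of UnsafeArrayData.fromPrimitiveArray
    println(takesUnsafePath(classOf[Array[java.lang.Integer]]))  // false -> NewInstance of GenericArrayData
  }
}
```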

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/RowEncoder.scala

Lines changed: 14 additions & 13 deletions
@@ -23,7 +23,7 @@ import scala.reflect.ClassTag
 import org.apache.spark.SparkException
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.expressions._
-import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, DateTimeUtils, GenericArrayData}
+import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, ArrayData, DateTimeUtils, GenericArrayData}
 import org.apache.spark.sql.catalyst.ScalaReflection
 import org.apache.spark.sql.catalyst.analysis.GetColumnByOrdinal
 import org.apache.spark.sql.catalyst.expressions.objects._
@@ -119,18 +119,19 @@ object RowEncoder {
       "fromString",
       inputObject :: Nil)
 
-    case t @ ArrayType(et, _) => et match {
-      case BooleanType | ByteType | ShortType | IntegerType | LongType | FloatType | DoubleType =>
-        // TODO: validate input type for primitive array.
-        NewInstance(
-          classOf[GenericArrayData],
-          inputObject :: Nil,
-          dataType = t)
-      case _ => MapObjects(
-        element => serializerFor(ValidateExternalType(element, et), et),
-        inputObject,
-        ObjectType(classOf[Object]))
-    }
+    case t @ ArrayType(et, cn) =>
+      et match {
+        case BooleanType | ByteType | ShortType | IntegerType | LongType | FloatType | DoubleType =>
+          StaticInvoke(
+            classOf[ArrayData],
+            t,
+            "toArrayData",
+            inputObject :: Nil)
+        case _ => MapObjects(
+          element => serializerFor(ValidateExternalType(element, et), et),
+          inputObject,
+          ObjectType(classOf[Object]))
+      }
 
     case t @ MapType(kt, vt, valueNullable) =>
       val keys =

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/ArrayData.scala

Lines changed: 14 additions & 1 deletion
@@ -19,9 +19,22 @@ package org.apache.spark.sql.catalyst.util
 
 import scala.reflect.ClassTag
 
-import org.apache.spark.sql.catalyst.expressions.SpecializedGetters
+import org.apache.spark.sql.catalyst.expressions.{SpecializedGetters, UnsafeArrayData}
 import org.apache.spark.sql.types.DataType
 
+object ArrayData {
+  def toArrayData(input: Any): ArrayData = input match {
+    case a: Array[Boolean] => UnsafeArrayData.fromPrimitiveArray(a)
+    case a: Array[Byte] => UnsafeArrayData.fromPrimitiveArray(a)
+    case a: Array[Short] => UnsafeArrayData.fromPrimitiveArray(a)
+    case a: Array[Int] => UnsafeArrayData.fromPrimitiveArray(a)
+    case a: Array[Long] => UnsafeArrayData.fromPrimitiveArray(a)
+    case a: Array[Float] => UnsafeArrayData.fromPrimitiveArray(a)
+    case a: Array[Double] => UnsafeArrayData.fromPrimitiveArray(a)
+    case other => new GenericArrayData(other)
+  }
+}
+
 abstract class ArrayData extends SpecializedGetters with Serializable {
   def numElements(): Int
 
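A minimal usage sketch of the helper added above (a hypothetical snippet in Catalyst test scope, since these classes are internal): primitive arrays are wrapped as `UnsafeArrayData` without per-element boxing, while anything else still falls back to `GenericArrayData`.

```scala
import org.apache.spark.sql.catalyst.expressions.UnsafeArrayData
import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData}

// An int[] goes through UnsafeArrayData.fromPrimitiveArray, so no Integer boxing occurs.
val unsafe = ArrayData.toArrayData(Array(1, 2, 3))
assert(unsafe.isInstanceOf[UnsafeArrayData])
assert(unsafe.numElements() == 3 && unsafe.getInt(2) == 3)

// A non-primitive input takes the existing GenericArrayData path.
val generic = ArrayData.toArrayData(Array[Any]("a", "b"))
assert(generic.isInstanceOf[GenericArrayData])
```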

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/CatalystTypeConvertersSuite.scala

Lines changed: 33 additions & 0 deletions
@@ -19,6 +19,8 @@ package org.apache.spark.sql.catalyst
 
 import org.apache.spark.SparkFunSuite
 import org.apache.spark.sql.Row
+import org.apache.spark.sql.catalyst.expressions.UnsafeArrayData
+import org.apache.spark.sql.catalyst.util.GenericArrayData
 import org.apache.spark.sql.types._
 
 class CatalystTypeConvertersSuite extends SparkFunSuite {
@@ -61,4 +63,35 @@ class CatalystTypeConvertersSuite extends SparkFunSuite {
   test("option handling in createToCatalystConverter") {
     assert(CatalystTypeConverters.createToCatalystConverter(IntegerType)(Some(123)) === 123)
   }
+
+  test("primitive array handling") {
+    val intArray = Array(1, 100, 10000)
+    val intUnsafeArray = UnsafeArrayData.fromPrimitiveArray(intArray)
+    val intArrayType = ArrayType(IntegerType, false)
+    assert(CatalystTypeConverters.createToScalaConverter(intArrayType)(intUnsafeArray) === intArray)
+
+    val doubleArray = Array(1.1, 111.1, 11111.1)
+    val doubleUnsafeArray = UnsafeArrayData.fromPrimitiveArray(doubleArray)
+    val doubleArrayType = ArrayType(DoubleType, false)
+    assert(CatalystTypeConverters.createToScalaConverter(doubleArrayType)(doubleUnsafeArray)
+      === doubleArray)
+  }
+
+  test("An array with null handling") {
+    val intArray = Array(1, null, 100, null, 10000)
+    val intGenericArray = new GenericArrayData(intArray)
+    val intArrayType = ArrayType(IntegerType, true)
+    assert(CatalystTypeConverters.createToScalaConverter(intArrayType)(intGenericArray)
+      === intArray)
+    assert(CatalystTypeConverters.createToCatalystConverter(intArrayType)(intArray)
+      == intGenericArray)
+
+    val doubleArray = Array(1.1, null, 111.1, null, 11111.1)
+    val doubleGenericArray = new GenericArrayData(doubleArray)
+    val doubleArrayType = ArrayType(DoubleType, true)
+    assert(CatalystTypeConverters.createToScalaConverter(doubleArrayType)(doubleGenericArray)
+      === doubleArray)
+    assert(CatalystTypeConverters.createToCatalystConverter(doubleArrayType)(doubleArray)
+      == doubleGenericArray)
+  }
 }

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/RowEncoderSuite.scala

Lines changed: 26 additions & 0 deletions
@@ -191,6 +191,32 @@ class RowEncoderSuite extends SparkFunSuite {
     assert(encoder.serializer.head.nullable == false)
   }
 
+  test("RowEncoder should support primitive arrays") {
+    val schema = new StructType()
+      .add("booleanPrimitiveArray", ArrayType(BooleanType, false))
+      .add("bytePrimitiveArray", ArrayType(ByteType, false))
+      .add("shortPrimitiveArray", ArrayType(ShortType, false))
+      .add("intPrimitiveArray", ArrayType(IntegerType, false))
+      .add("longPrimitiveArray", ArrayType(LongType, false))
+      .add("floatPrimitiveArray", ArrayType(FloatType, false))
+      .add("doublePrimitiveArray", ArrayType(DoubleType, false))
+    val encoder = RowEncoder(schema).resolveAndBind()
+    val input = Seq(
+      Array(true, false),
+      Array(1.toByte, 64.toByte, Byte.MaxValue),
+      Array(1.toShort, 255.toShort, Short.MaxValue),
+      Array(1, 10000, Int.MaxValue),
+      Array(1.toLong, 1000000.toLong, Long.MaxValue),
+      Array(1.1.toFloat, 123.456.toFloat, Float.MaxValue),
+      Array(11.1111, 123456.7890123, Double.MaxValue)
+    )
+    val row = encoder.toRow(Row.fromSeq(input))
+    val convertedBack = encoder.fromRow(row)
+    input.zipWithIndex.map { case (array, index) =>
+      assert(convertedBack.getSeq(index) === array)
+    }
+  }
+
   test("RowEncoder should support array as the external type for ArrayType") {
     val schema = new StructType()
       .add("array", ArrayType(IntegerType))

sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala

Lines changed: 18 additions & 0 deletions
@@ -1033,6 +1033,24 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
       checkAnswer(agg, ds.groupBy('id % 2).agg(count('id)))
     }
   }
+
+  test("identity map for primitive arrays") {
+    val arrayByte = Array(1.toByte, 2.toByte, 3.toByte)
+    val arrayInt = Array(1, 2, 3)
+    val arrayLong = Array(1.toLong, 2.toLong, 3.toLong)
+    val arrayDouble = Array(1.1, 2.2, 3.3)
+    val arrayString = Array("a", "b", "c")
+    val dsByte = sparkContext.parallelize(Seq(arrayByte), 1).toDS.map(e => e)
+    val dsInt = sparkContext.parallelize(Seq(arrayInt), 1).toDS.map(e => e)
+    val dsLong = sparkContext.parallelize(Seq(arrayLong), 1).toDS.map(e => e)
+    val dsDouble = sparkContext.parallelize(Seq(arrayDouble), 1).toDS.map(e => e)
+    val dsString = sparkContext.parallelize(Seq(arrayString), 1).toDS.map(e => e)
+    checkDataset(dsByte, arrayByte)
+    checkDataset(dsInt, arrayInt)
+    checkDataset(dsLong, arrayLong)
+    checkDataset(dsDouble, arrayDouble)
+    checkDataset(dsString, arrayString)
+  }
 }
 
 case class Generic[T](id: T, value: Double)
sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala

Lines changed: 82 additions & 0 deletions
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.benchmark
+
+import scala.concurrent.duration._
+
+import org.apache.spark.SparkConf
+import org.apache.spark.sql.catalyst.util._
+import org.apache.spark.util.Benchmark
+
+/**
+ * Benchmark [[PrimitiveArray]] for DataFrame and Dataset program using primitive array
+ * To run this:
+ *  1. replace ignore(...) with test(...)
+ *  2. build/sbt "sql/test-only *benchmark.PrimitiveArrayBenchmark"
+ *
+ * Benchmarks in this file are skipped in normal builds.
+ */
+class PrimitiveArrayBenchmark extends BenchmarkBase {
+
+  def writeDatasetArray(iters: Int): Unit = {
+    import sparkSession.implicits._
+
+    val count = 1024 * 1024 * 2
+
+    val sc = sparkSession.sparkContext
+    val primitiveIntArray = Array.fill[Int](count)(65535)
+    val dsInt = sc.parallelize(Seq(primitiveIntArray), 1).toDS
+    dsInt.count  // force to build dataset
+    val intArray = { i: Int =>
+      var n = 0
+      var len = 0
+      while (n < iters) {
+        len += dsInt.map(e => e).queryExecution.toRdd.collect.length
+        n += 1
+      }
+    }
+    val primitiveDoubleArray = Array.fill[Double](count)(65535.0)
+    val dsDouble = sc.parallelize(Seq(primitiveDoubleArray), 1).toDS
+    dsDouble.count  // force to build dataset
+    val doubleArray = { i: Int =>
+      var n = 0
+      var len = 0
+      while (n < iters) {
+        len += dsDouble.map(e => e).queryExecution.toRdd.collect.length
+        n += 1
+      }
+    }
+
+    val benchmark = new Benchmark("Write an array in Dataset", count * iters)
+    benchmark.addCase("Int   ")(intArray)
+    benchmark.addCase("Double")(doubleArray)
+    benchmark.run
+    /*
+    OpenJDK 64-Bit Server VM 1.8.0_91-b14 on Linux 4.4.11-200.fc22.x86_64
+    Intel Xeon E3-12xx v2 (Ivy Bridge)
+    Write an array in Dataset:        Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+    ------------------------------------------------------------------------------------------------
+    Int                                     352 /  401         23.8          42.0       1.0X
+    Double                                  821 /  885         10.2          97.9       0.4X
+    */
+  }
+
+  ignore("Write an array in Dataset") {
+    writeDatasetArray(4)
+  }
+}
