
Commit dd4385b

Make AppendOnlyMap use the same growth strategy as OpenHashSet and a consistent exception message
Parent commit: e86fbdb

1 file changed (+3, −5 lines)

core/src/main/scala/org/apache/spark/util/collection/AppendOnlyMap.scala

@@ -199,11 +199,8 @@ class AppendOnlyMap[K, V](initialCapacity: Int = 64)
 
   /** Increase table size by 1, rehashing if necessary */
   private def incrementSize() {
-    if (curSize == MAXIMUM_CAPACITY) {
-      throw new IllegalStateException(s"Can't put more that ${MAXIMUM_CAPACITY} elements")
-    }
     curSize += 1
-    if (curSize > growThreshold && capacity < MAXIMUM_CAPACITY) {
+    if (curSize > growThreshold) {
       growTable()
     }
   }
@@ -216,7 +213,8 @@ class AppendOnlyMap[K, V](initialCapacity: Int = 64)
   /** Double the table's size and re-hash everything */
   protected def growTable() {
     // capacity < MAXIMUM_CAPACITY (2 ^ 29) so capacity * 2 won't overflow
-    val newCapacity = (capacity * 2).min(MAXIMUM_CAPACITY)
+    val newCapacity = capacity * 2
+    require(newCapacity <= MAXIMUM_CAPACITY, s"Can't contain more than ${growThreshold} elements")
     val newData = new Array[AnyRef](2 * newCapacity)
     val newMask = newCapacity - 1
     // Insert all our old values into the new array. Note that because our old keys are
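
For context, here is a minimal, self-contained Scala sketch of the growth strategy this commit converges on, mirroring OpenHashSet: the per-insert capacity check is removed from incrementSize(), capacity simply doubles once curSize crosses growThreshold, and a single require guard in growTable() enforces MAXIMUM_CAPACITY. The scaffolding here (the GrowthSketch object, the 0.7 load factor, the elided rehash step) is assumed for illustration and is not part of the patch.

// Sketch of the doubling strategy shown in the diff. Constant and field
// names follow the patch; everything else is simplified scaffolding.
object GrowthSketch {
  private val MAXIMUM_CAPACITY = 1 << 29 // matches the "2 ^ 29" comment in the diff
  private val LOAD_FACTOR = 0.7          // assumed load factor, as in OpenHashSet

  private var capacity = 64
  private var curSize = 0
  private var growThreshold = (LOAD_FACTOR * capacity).toInt

  /** Increase size by 1, growing if necessary (no per-insert capacity check). */
  private def incrementSize(): Unit = {
    curSize += 1
    if (curSize > growThreshold) {
      growTable()
    }
  }

  /** Double the table's size; the require guard is the single point of failure. */
  private def growTable(): Unit = {
    // capacity < MAXIMUM_CAPACITY (2^29), so capacity * 2 won't overflow an Int
    val newCapacity = capacity * 2
    require(newCapacity <= MAXIMUM_CAPACITY,
      s"Can't contain more than ${growThreshold} elements")
    capacity = newCapacity
    growThreshold = (LOAD_FACTOR * capacity).toInt
    // ...a real implementation would rehash existing entries into the larger array here...
  }

  def main(args: Array[String]): Unit = {
    (1 to 100).foreach(_ => incrementSize())
    println(s"capacity=$capacity curSize=$curSize threshold=$growThreshold")
  }
}

The design point of the change: moving the bound check from the hot incrementSize() path into the rarely taken growTable() path removes a branch from every insert while still failing fast at the capacity limit, with the same exception style as OpenHashSet.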
