From 7ec8c6c01e4c75f99ba84237b09a1db732bf7af7 Mon Sep 17 00:00:00 2001
From: CyC2018 <1029579233@qq.com>
Date: Sat, 24 Mar 2018 10:36:31 +0800
Subject: [PATCH 1/2] Change to JDK 1.8
---
README.md | 2 +-
src/ArrayList.java | 494 ++-
src/ConcurrentHashMap.java | 7156 ++++++++++++++++++++++++++++------
src/ConcurrentMap.java | 517 +++
src/HashMap.java | 2578 +++++++++---
src/HashSet.java | 139 +-
src/Hashtable.java | 1402 +++++++
src/Iterator.java | 118 +
src/LinkedBlockingQueue.java | 1044 +++++
src/LinkedHashMap.java | 755 +++-
src/LinkedHashSet.java | 138 +
src/LinkedList.java | 205 +-
src/List.java | 734 ++++
src/Map.java | 1183 ++++++
src/PriorityQueue.java | 237 +-
src/Queue.java | 156 +-
src/Set.java | 413 ++
src/Stack.java | 44 +
src/String.java | 1505 +++----
src/StringBuffer.java | 318 +-
src/StringBuilder.java | 214 +-
src/ThreadLocal.java | 722 ++++
src/TreeMap.java | 883 ++++-
src/TreeSet.java | 127 +-
src/Vector.java | 296 +-
src/WeakHashMap.java | 1331 +++++++
26 files changed, 19837 insertions(+), 2874 deletions(-)
create mode 100644 src/ConcurrentMap.java
create mode 100644 src/Hashtable.java
create mode 100644 src/Iterator.java
create mode 100644 src/LinkedBlockingQueue.java
create mode 100644 src/List.java
create mode 100644 src/Map.java
create mode 100644 src/Set.java
create mode 100644 src/ThreadLocal.java
create mode 100644 src/WeakHashMap.java
diff --git a/README.md b/README.md
index 9a95fbc..f882dd7 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,3 @@
# JDK-Source-Code
-Extract from : [OpenJDK 1.7](http://download.java.net/openjdk/jdk7)
+Extract from : jdk1.8.0_20
diff --git a/src/ArrayList.java b/src/ArrayList.java
index 17179ae..3218f1a 100644
--- a/src/ArrayList.java
+++ b/src/ArrayList.java
@@ -1,16 +1,137 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
package java.util;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+import java.util.function.UnaryOperator;
+
+/**
+ * Resizable-array implementation of the List interface. Implements
+ * all optional list operations, and permits all elements, including
+ * null. In addition to implementing the List interface,
+ * this class provides methods to manipulate the size of the array that is
+ * used internally to store the list. (This class is roughly equivalent to
+ * Vector, except that it is unsynchronized.)
+ *
+ * <p>The size, isEmpty, get, set,
+ * iterator, and listIterator operations run in constant
+ * time. The add operation runs in amortized constant time,
+ * that is, adding n elements requires O(n) time. All of the other operations
+ * run in linear time (roughly speaking). The constant factor is low compared
+ * to that for the LinkedList implementation.
+ *
+ * <p>Each ArrayList instance has a capacity. The capacity is
+ * the size of the array used to store the elements in the list. It is always
+ * at least as large as the list size. As elements are added to an ArrayList,
+ * its capacity grows automatically. The details of the growth policy are not
+ * specified beyond the fact that adding an element has constant amortized
+ * time cost.
+ *
+ * <p>An application can increase the capacity of an ArrayList instance
+ * before adding a large number of elements using the ensureCapacity
+ * operation. This may reduce the amount of incremental reallocation.
+ *
+ * <p>Note that this implementation is not synchronized.
+ * If multiple threads access an ArrayList instance concurrently,
+ * and at least one of the threads modifies the list structurally, it
+ * must be synchronized externally. (A structural modification is
+ * any operation that adds or deletes one or more elements, or explicitly
+ * resizes the backing array; merely setting the value of an element is not
+ * a structural modification.) This is typically accomplished by
+ * synchronizing on some object that naturally encapsulates the list.
+ *
+ * If no such object exists, the list should be "wrapped" using the
+ * {@link Collections#synchronizedList Collections.synchronizedList}
+ * method. This is best done at creation time, to prevent accidental
+ * unsynchronized access to the list:
+ * List list = Collections.synchronizedList(new ArrayList(...));
+ *
+ *
+ * The iterators returned by this class's {@link #iterator() iterator} and
+ * {@link #listIterator(int) listIterator} methods are fail-fast:
+ * if the list is structurally modified at any time after the iterator is
+ * created, in any way except through the iterator's own
+ * {@link ListIterator#remove() remove} or
+ * {@link ListIterator#add(Object) add} methods, the iterator will throw a
+ * {@link ConcurrentModificationException}. Thus, in the face of
+ * concurrent modification, the iterator fails quickly and cleanly, rather
+ * than risking arbitrary, non-deterministic behavior at an undetermined
+ * time in the future.
+ *
+ * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
+ * as it is, generally speaking, impossible to make any hard guarantees in the
+ * presence of unsynchronized concurrent modification. Fail-fast iterators
+ * throw {@code ConcurrentModificationException} on a best-effort basis.
+ * Therefore, it would be wrong to write a program that depended on this
+ * exception for its correctness: the fail-fast behavior of iterators
+ * should be used only to detect bugs.
+ *
+ * <p>This class is a member of the
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ *
+ * @author Josh Bloch
+ * @author Neal Gafter
+ * @see Collection
+ * @see List
+ * @see LinkedList
+ * @see Vector
+ * @since 1.2
+ */
public class ArrayList<E> extends AbstractList<E>
        implements List<E>, RandomAccess, Cloneable, java.io.Serializable
{
private static final long serialVersionUID = 8683452581122892189L;
+ /**
+ * Default initial capacity.
+ */
+ private static final int DEFAULT_CAPACITY = 10;
+
+ /**
+ * Shared empty array instance used for empty instances.
+ */
+ private static final Object[] EMPTY_ELEMENTDATA = {};
+
+ /**
+ * Shared empty array instance used for default sized empty instances. We
+ * distinguish this from EMPTY_ELEMENTDATA to know how much to inflate when
+ * first element is added.
+ */
+ private static final Object[] DEFAULTCAPACITY_EMPTY_ELEMENTDATA = {};
+
/**
* The array buffer into which the elements of the ArrayList are stored.
- * The capacity of the ArrayList is the length of this array buffer.
+ * The capacity of the ArrayList is the length of this array buffer. Any
+ * empty ArrayList with elementData == DEFAULTCAPACITY_EMPTY_ELEMENTDATA
+ * will be expanded to DEFAULT_CAPACITY when the first element is added.
*/
- private transient Object[] elementData;
+ transient Object[] elementData; // non-private to simplify nested class access
/**
* The size of the ArrayList (the number of elements it contains).
@@ -27,18 +148,21 @@ public class ArrayList<E> extends AbstractList<E>
* is negative
*/
public ArrayList(int initialCapacity) {
- super();
- if (initialCapacity < 0)
+ if (initialCapacity > 0) {
+ this.elementData = new Object[initialCapacity];
+ } else if (initialCapacity == 0) {
+ this.elementData = EMPTY_ELEMENTDATA;
+ } else {
throw new IllegalArgumentException("Illegal Capacity: "+
initialCapacity);
- this.elementData = new Object[initialCapacity];
+ }
}
/**
* Constructs an empty list with an initial capacity of ten.
*/
public ArrayList() {
- this(10);
+ this.elementData = DEFAULTCAPACITY_EMPTY_ELEMENTDATA;
}
/**
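A visible consequence of the two empty-array sentinels above: new ArrayList() no longer eagerly allocates ten slots, it shares DEFAULTCAPACITY_EMPTY_ELEMENTDATA and inflates on the first add. A minimal sketch to observe this on a JDK 8 runtime (the reflective peek at elementData is illustrative only; JDK 9+ would need --add-opens):

    import java.lang.reflect.Field;
    import java.util.ArrayList;

    public class LazyCapacityDemo {
        public static void main(String[] args) throws Exception {
            ArrayList<String> list = new ArrayList<>();
            Field f = ArrayList.class.getDeclaredField("elementData");
            f.setAccessible(true);
            // JDK 8 prints 0 (shared empty array); JDK 7 printed 10
            System.out.println(((Object[]) f.get(list)).length);
            list.add("first"); // first add inflates to DEFAULT_CAPACITY
            System.out.println(((Object[]) f.get(list)).length); // 10
        }
    }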
@@ -51,10 +175,14 @@ public ArrayList() {
*/
    public ArrayList(Collection<? extends E> c) {
elementData = c.toArray();
- size = elementData.length;
- // c.toArray might (incorrectly) not return Object[] (see 6260652)
- if (elementData.getClass() != Object[].class)
- elementData = Arrays.copyOf(elementData, size, Object[].class);
+ if ((size = elementData.length) != 0) {
+ // c.toArray might (incorrectly) not return Object[] (see 6260652)
+ if (elementData.getClass() != Object[].class)
+ elementData = Arrays.copyOf(elementData, size, Object[].class);
+ } else {
+ // replace with empty array.
+ this.elementData = EMPTY_ELEMENTDATA;
+ }
}
/**
@@ -64,9 +192,10 @@ public ArrayList(Collection<? extends E> c) {
*/
public void trimToSize() {
modCount++;
- int oldCapacity = elementData.length;
- if (size < oldCapacity) {
- elementData = Arrays.copyOf(elementData, size);
+ if (size < elementData.length) {
+ elementData = (size == 0)
+ ? EMPTY_ELEMENTDATA
+ : Arrays.copyOf(elementData, size);
}
}
@@ -78,12 +207,29 @@ public void trimToSize() {
* @param minCapacity the desired minimum capacity
*/
public void ensureCapacity(int minCapacity) {
- if (minCapacity > 0)
- ensureCapacityInternal(minCapacity);
+ int minExpand = (elementData != DEFAULTCAPACITY_EMPTY_ELEMENTDATA)
+ // any size if not default element table
+ ? 0
+ // larger than default for default empty table. It's already
+ // supposed to be at default size.
+ : DEFAULT_CAPACITY;
+
+ if (minCapacity > minExpand) {
+ ensureExplicitCapacity(minCapacity);
+ }
}
private void ensureCapacityInternal(int minCapacity) {
+ if (elementData == DEFAULTCAPACITY_EMPTY_ELEMENTDATA) {
+ minCapacity = Math.max(DEFAULT_CAPACITY, minCapacity);
+ }
+
+ ensureExplicitCapacity(minCapacity);
+ }
+
+ private void ensureExplicitCapacity(int minCapacity) {
modCount++;
+
// overflow-conscious code
if (minCapacity - elementData.length > 0)
grow(minCapacity);
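Note the new minExpand logic: for a default-constructed empty list, ensureCapacity requests at or below DEFAULT_CAPACITY are ignored, since the first add inflates to ten slots anyway. Pre-sizing before a large bulk insert is still the intended use; a small sketch:

    import java.util.ArrayList;

    public class PreSizeDemo {
        public static void main(String[] args) {
            int n = 1_000_000;
            ArrayList<Integer> list = new ArrayList<>();
            list.ensureCapacity(n); // one allocation instead of repeated 1.5x grow() copies
            for (int i = 0; i < n; i++)
                list.add(i);
            System.out.println(list.size()); // 1000000
        }
    }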
@@ -202,14 +348,13 @@ public int lastIndexOf(Object o) {
*/
public Object clone() {
try {
- @SuppressWarnings("unchecked")
-        ArrayList<E> v = (ArrayList<E>) super.clone();
+        ArrayList<?> v = (ArrayList<?>) super.clone();
v.elementData = Arrays.copyOf(elementData, size);
v.modCount = 0;
return v;
} catch (CloneNotSupportedException e) {
// this shouldn't happen, since we are Cloneable
- throw new InternalError();
+ throw new InternalError(e);
}
}
@@ -353,7 +498,7 @@ public E remove(int index) {
if (numMoved > 0)
System.arraycopy(elementData, index+1, elementData, index,
numMoved);
- elementData[--size] = null; // Let gc do its work
+ elementData[--size] = null; // clear to let GC do its work
return oldValue;
}
@@ -398,7 +543,7 @@ private void fastRemove(int index) {
if (numMoved > 0)
System.arraycopy(elementData, index+1, elementData, index,
numMoved);
- elementData[--size] = null; // Let gc do its work
+ elementData[--size] = null; // clear to let GC do its work
}
/**
@@ -408,7 +553,7 @@ private void fastRemove(int index) {
public void clear() {
modCount++;
- // Let gc do its work
+ // clear to let GC do its work
for (int i = 0; i < size; i++)
elementData[i] = null;
@@ -489,10 +634,12 @@ protected void removeRange(int fromIndex, int toIndex) {
System.arraycopy(elementData, toIndex, elementData, fromIndex,
numMoved);
- // Let gc do its work
+ // clear to let GC do its work
int newSize = size - (toIndex-fromIndex);
- while (size != newSize)
- elementData[--size] = null;
+ for (int i = newSize; i < size; i++) {
+ elementData[i] = null;
+ }
+ size = newSize;
}
/**
@@ -539,6 +686,7 @@ private String outOfBoundsMsg(int index) {
* @see Collection#contains(Object)
*/
    public boolean removeAll(Collection<?> c) {
+ Objects.requireNonNull(c);
return batchRemove(c, false);
}
@@ -559,6 +707,7 @@ public boolean removeAll(Collection<?> c) {
* @see Collection#contains(Object)
*/
    public boolean retainAll(Collection<?> c) {
+ Objects.requireNonNull(c);
return batchRemove(c, true);
}
@@ -580,6 +729,7 @@ private boolean batchRemove(Collection<?> c, boolean complement) {
w += size - r;
}
if (w != size) {
+ // clear to let GC do its work
for (int i = w; i < size; i++)
elementData[i] = null;
modCount += size - w;
@@ -604,17 +754,17 @@ private void writeObject(java.io.ObjectOutputStream s)
int expectedModCount = modCount;
s.defaultWriteObject();
- // Write out array length
- s.writeInt(elementData.length);
+ // Write out size as capacity for behavioural compatibility with clone()
+ s.writeInt(size);
// Write out all elements in the proper order.
-        for (int i=0; i<size; i++)
+        for (int i=0; i<size; i++) {
             s.writeObject(elementData[i]);
+        }

         if (modCount != expectedModCount) {
             throw new ConcurrentModificationException();
         }
     }

@@ ... @@
     private void readObject(java.io.ObjectInputStream s)
         throws java.io.IOException, ClassNotFoundException {
+        elementData = EMPTY_ELEMENTDATA;
+
         // Read in size, and any hidden stuff
         s.defaultReadObject();

-        // Read in array length and allocate array
-        int arrayLength = s.readInt();
-        Object[] a = elementData = new Object[arrayLength];
+        // Read in capacity
+        s.readInt(); // ignored

-        // Read in all elements in the proper order.
-        for (int i=0; i<size; i++)
-            a[i] = s.readObject();
+        if (size > 0) {
+            // be like clone(), allocate array based upon size not capacity
+            ensureCapacityInternal(size);
+
+            Object[] a = elementData;
+            // Read in all elements in the proper order.
+            for (int i=0; i<size; i++) {
+                a[i] = s.readObject();
+            }
+        }
     }

@@ ... @@
+        @Override
+        @SuppressWarnings("unchecked")
+        public void forEachRemaining(Consumer<? super E> consumer) {
+ Objects.requireNonNull(consumer);
+ final int size = ArrayList.this.size;
+ int i = cursor;
+ if (i >= size) {
+ return;
+ }
+ final Object[] elementData = ArrayList.this.elementData;
+ if (i >= elementData.length) {
+ throw new ConcurrentModificationException();
+ }
+ while (i != size && modCount == expectedModCount) {
+ consumer.accept((E) elementData[i++]);
+ }
+ // update once at end of iteration to reduce heap write traffic
+ cursor = i;
+ lastRet = i - 1;
+ checkForComodification();
+ }
+
final void checkForComodification() {
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
@@ -951,6 +1131,26 @@ public E previous() {
return (E) elementData[offset + (lastRet = i)];
}
+ @SuppressWarnings("unchecked")
+            public void forEachRemaining(Consumer<? super E> consumer) {
+ Objects.requireNonNull(consumer);
+ final int size = SubList.this.size;
+ int i = cursor;
+ if (i >= size) {
+ return;
+ }
+ final Object[] elementData = ArrayList.this.elementData;
+ if (offset + i >= elementData.length) {
+ throw new ConcurrentModificationException();
+ }
+ while (i != size && modCount == expectedModCount) {
+ consumer.accept((E) elementData[offset + (i++)]);
+ }
+ // update once at end of iteration to reduce heap write traffic
+ lastRet = cursor = i;
+ checkForComodification();
+ }
+
public int nextIndex() {
return cursor;
}
@@ -1030,5 +1230,231 @@ private void checkForComodification() {
if (ArrayList.this.modCount != this.modCount)
throw new ConcurrentModificationException();
}
+
+        public Spliterator<E> spliterator() {
+ checkForComodification();
+            return new ArrayListSpliterator<E>(ArrayList.this, offset,
+ offset + this.size, this.modCount);
+ }
+ }
+
+ @Override
+    public void forEach(Consumer<? super E> action) {
+ Objects.requireNonNull(action);
+ final int expectedModCount = modCount;
+ @SuppressWarnings("unchecked")
+ final E[] elementData = (E[]) this.elementData;
+ final int size = this.size;
+ for (int i=0; modCount == expectedModCount && i < size; i++) {
+ action.accept(elementData[i]);
+ }
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ }
+
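The override reads expectedModCount once, re-checks it as part of the loop condition, and throws only after the loop, keeping the hot path branch-light while preserving the documented fail-fast behavior. A quick sketch:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.ConcurrentModificationException;
    import java.util.List;

    public class ForEachDemo {
        public static void main(String[] args) {
            List<String> list = new ArrayList<>(Arrays.asList("a", "b", "c"));
            list.forEach(System.out::println); // a b c
            try {
                list.forEach(list::remove); // structural modification mid-iteration
            } catch (ConcurrentModificationException e) {
                System.out.println("fail-fast, as documented");
            }
        }
    }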
+ /**
+ * Creates a late-binding
+ * and fail-fast {@link Spliterator} over the elements in this
+ * list.
+ *
+ * The {@code Spliterator} reports {@link Spliterator#SIZED},
+ * {@link Spliterator#SUBSIZED}, and {@link Spliterator#ORDERED}.
+ * Overriding implementations should document the reporting of additional
+ * characteristic values.
+ *
+ * @return a {@code Spliterator} over the elements in this list
+ * @since 1.8
+ */
+ @Override
+    public Spliterator<E> spliterator() {
+ return new ArrayListSpliterator<>(this, 0, -1, 0);
+ }
+
+ /** Index-based split-by-two, lazily initialized Spliterator */
+    static final class ArrayListSpliterator<E> implements Spliterator<E> {
+
+ /*
+ * If ArrayLists were immutable, or structurally immutable (no
+ * adds, removes, etc), we could implement their spliterators
+ * with Arrays.spliterator. Instead we detect as much
+ * interference during traversal as practical without
+ * sacrificing much performance. We rely primarily on
+ * modCounts. These are not guaranteed to detect concurrency
+ * violations, and are sometimes overly conservative about
+ * within-thread interference, but detect enough problems to
+ * be worthwhile in practice. To carry this out, we (1) lazily
+ * initialize fence and expectedModCount until the latest
+ * point that we need to commit to the state we are checking
+ * against; thus improving precision. (This doesn't apply to
+ * SubLists, that create spliterators with current non-lazy
+ * values). (2) We perform only a single
+ * ConcurrentModificationException check at the end of forEach
+ * (the most performance-sensitive method). When using forEach
+ * (as opposed to iterators), we can normally only detect
+ * interference after actions, not before. Further
+ * CME-triggering checks apply to all other possible
+ * violations of assumptions for example null or too-small
+ * elementData array given its size(), that could only have
+ * occurred due to interference. This allows the inner loop
+ * of forEach to run without any further checks, and
+ * simplifies lambda-resolution. While this does entail a
+ * number of checks, note that in the common case of
+ * list.stream().forEach(a), no checks or other computation
+ * occur anywhere other than inside forEach itself. The other
+ * less-often-used methods cannot take advantage of most of
+ * these streamlinings.
+ */
+
+        private final ArrayList<E> list;
+ private int index; // current index, modified on advance/split
+ private int fence; // -1 until used; then one past last index
+ private int expectedModCount; // initialized when fence set
+
+ /** Create new spliterator covering the given range */
+        ArrayListSpliterator(ArrayList<E> list, int origin, int fence,
+ int expectedModCount) {
+ this.list = list; // OK if null unless traversed
+ this.index = origin;
+ this.fence = fence;
+ this.expectedModCount = expectedModCount;
+ }
+
+ private int getFence() { // initialize fence to size on first use
+ int hi; // (a specialized variant appears in method forEach)
+            ArrayList<E> lst;
+ if ((hi = fence) < 0) {
+ if ((lst = list) == null)
+ hi = fence = 0;
+ else {
+ expectedModCount = lst.modCount;
+ hi = fence = lst.size;
+ }
+ }
+ return hi;
+ }
+
+        public ArrayListSpliterator<E> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid) ? null : // divide range in half unless too small
+                new ArrayListSpliterator<E>(list, lo, index = mid,
+ expectedModCount);
+ }
+
+        public boolean tryAdvance(Consumer<? super E> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int hi = getFence(), i = index;
+ if (i < hi) {
+ index = i + 1;
+ @SuppressWarnings("unchecked") E e = (E)list.elementData[i];
+ action.accept(e);
+ if (list.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ return false;
+ }
+
+        public void forEachRemaining(Consumer<? super E> action) {
+ int i, hi, mc; // hoist accesses and checks from loop
+            ArrayList<E> lst; Object[] a;
+ if (action == null)
+ throw new NullPointerException();
+ if ((lst = list) != null && (a = lst.elementData) != null) {
+ if ((hi = fence) < 0) {
+ mc = lst.modCount;
+ hi = lst.size;
+ }
+ else
+ mc = expectedModCount;
+ if ((i = index) >= 0 && (index = hi) <= a.length) {
+ for (; i < hi; ++i) {
+ @SuppressWarnings("unchecked") E e = (E) a[i];
+ action.accept(e);
+ }
+ if (lst.modCount == mc)
+ return;
+ }
+ }
+ throw new ConcurrentModificationException();
+ }
+
+ public long estimateSize() {
+ return (long) (getFence() - index);
+ }
+
+ public int characteristics() {
+ return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED;
+ }
+ }
+
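The late-binding behavior described in the comment block is directly observable: fence stays -1 until the spliterator first binds, and trySplit halves the remaining range. A sketch:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Spliterator;

    public class SpliteratorDemo {
        public static void main(String[] args) {
            ArrayList<Integer> list =
                    new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8));
            Spliterator<Integer> rest = list.spliterator(); // fence == -1, not yet bound
            Spliterator<Integer> prefix = rest.trySplit();  // binds, splits [0,4) / [4,8)
            System.out.println(prefix.estimateSize() + " + " + rest.estimateSize()); // 4 + 4
            prefix.forEachRemaining(i -> System.out.print(i + " ")); // 1 2 3 4
            rest.forEachRemaining(i -> System.out.print(i + " "));   // 5 6 7 8
        }
    }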
+ @Override
+    public boolean removeIf(Predicate<? super E> filter) {
+ Objects.requireNonNull(filter);
+ // figure out which elements are to be removed
+ // any exception thrown from the filter predicate at this stage
+ // will leave the collection unmodified
+ int removeCount = 0;
+ final BitSet removeSet = new BitSet(size);
+ final int expectedModCount = modCount;
+ final int size = this.size;
+ for (int i=0; modCount == expectedModCount && i < size; i++) {
+ @SuppressWarnings("unchecked")
+ final E element = (E) elementData[i];
+ if (filter.test(element)) {
+ removeSet.set(i);
+ removeCount++;
+ }
+ }
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+
+ // shift surviving elements left over the spaces left by removed elements
+ final boolean anyToRemove = removeCount > 0;
+ if (anyToRemove) {
+ final int newSize = size - removeCount;
+ for (int i=0, j=0; (i < size) && (j < newSize); i++, j++) {
+ i = removeSet.nextClearBit(i);
+ elementData[j] = elementData[i];
+ }
+ for (int k=newSize; k < size; k++) {
+ elementData[k] = null; // Let gc do its work
+ }
+ this.size = newSize;
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ modCount++;
+ }
+
+ return anyToRemove;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+    public void replaceAll(UnaryOperator<E> operator) {
+ Objects.requireNonNull(operator);
+ final int expectedModCount = modCount;
+ final int size = this.size;
+ for (int i=0; modCount == expectedModCount && i < size; i++) {
+ elementData[i] = operator.apply((E) elementData[i]);
+ }
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ modCount++;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+    public void sort(Comparator<? super E> c) {
+ final int expectedModCount = modCount;
+ Arrays.sort((E[]) elementData, 0, size, c);
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ modCount++;
}
}
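Taken together, the removeIf/replaceAll/sort overrides work directly on elementData in a single pass with one modCount bump each, instead of going through the iterator-based default methods. Typical use, as a sketch:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;

    public class BulkOpsDemo {
        public static void main(String[] args) {
            ArrayList<String> words = new ArrayList<>(
                    Arrays.asList("delta", "alpha", "charlie", "bravo", "al"));
            words.removeIf(w -> w.length() < 3);   // one BitSet pass, one compaction
            words.replaceAll(String::toUpperCase); // in-place transform
            words.sort(Comparator.naturalOrder()); // single Arrays.sort on the backing array
            System.out.println(words);             // [ALPHA, BRAVO, CHARLIE, DELTA]
        }
    }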
diff --git a/src/ConcurrentHashMap.java b/src/ConcurrentHashMap.java
index 338f7d6..a5e5880 100644
--- a/src/ConcurrentHashMap.java
+++ b/src/ConcurrentHashMap.java
@@ -1,816 +1,923 @@
+/*
+ * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
+/*
+ *
+ *
+ *
+ *
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
package java.util.concurrent;
-import java.util.concurrent.locks.*;
-import java.util.*;
+
+import java.io.ObjectStreamField;
import java.io.Serializable;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.AbstractMap;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.LockSupport;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.BinaryOperator;
+import java.util.function.Consumer;
+import java.util.function.DoubleBinaryOperator;
+import java.util.function.Function;
+import java.util.function.IntBinaryOperator;
+import java.util.function.LongBinaryOperator;
+import java.util.function.ToDoubleBiFunction;
+import java.util.function.ToDoubleFunction;
+import java.util.function.ToIntBiFunction;
+import java.util.function.ToIntFunction;
+import java.util.function.ToLongBiFunction;
+import java.util.function.ToLongFunction;
+import java.util.stream.Stream;
-public class ConcurrentHashMap<K, V> extends AbstractMap<K, V>
-        implements ConcurrentMap<K, V>, Serializable {
+/**
+ * A hash table supporting full concurrency of retrievals and
+ * high expected concurrency for updates. This class obeys the
+ * same functional specification as {@link java.util.Hashtable}, and
+ * includes versions of methods corresponding to each method of
+ * {@code Hashtable}. However, even though all operations are
+ * thread-safe, retrieval operations do not entail locking,
+ * and there is not any support for locking the entire table
+ * in a way that prevents all access. This class is fully
+ * interoperable with {@code Hashtable} in programs that rely on its
+ * thread safety but not on its synchronization details.
+ *
+ * Retrieval operations (including {@code get}) generally do not
+ * block, so may overlap with update operations (including {@code put}
+ * and {@code remove}). Retrievals reflect the results of the most
+ * recently completed update operations holding upon their
+ * onset. (More formally, an update operation for a given key bears a
+ * happens-before relation with any (non-null) retrieval for
+ * that key reporting the updated value.) For aggregate operations
+ * such as {@code putAll} and {@code clear}, concurrent retrievals may
+ * reflect insertion or removal of only some entries. Similarly,
+ * Iterators, Spliterators and Enumerations return elements reflecting the
+ * state of the hash table at some point at or since the creation of the
+ * iterator/enumeration. They do not throw {@link
+ * java.util.ConcurrentModificationException ConcurrentModificationException}.
+ * However, iterators are designed to be used by only one thread at a time.
+ * Bear in mind that the results of aggregate status methods including
+ * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
+ * useful only when a map is not undergoing concurrent updates in other threads.
+ * Otherwise the results of these methods reflect transient states
+ * that may be adequate for monitoring or estimation purposes, but not
+ * for program control.
+ *
+ * <p>The table is dynamically expanded when there are too many
+ * collisions (i.e., keys that have distinct hash codes but fall into
+ * the same slot modulo the table size), with the expected average
+ * effect of maintaining roughly two bins per mapping (corresponding
+ * to a 0.75 load factor threshold for resizing). There may be much
+ * variance around this average as mappings are added and removed, but
+ * overall, this maintains a commonly accepted time/space tradeoff for
+ * hash tables. However, resizing this or any other kind of hash
+ * table may be a relatively slow operation. When possible, it is a
+ * good idea to provide a size estimate as an optional {@code
+ * initialCapacity} constructor argument. An additional optional
+ * {@code loadFactor} constructor argument provides a further means of
+ * customizing initial table capacity by specifying the table density
+ * to be used in calculating the amount of space to allocate for the
+ * given number of elements. Also, for compatibility with previous
+ * versions of this class, constructors may optionally specify an
+ * expected {@code concurrencyLevel} as an additional hint for
+ * internal sizing. Note that using many keys with exactly the same
+ * {@code hashCode()} is a sure way to slow down performance of any
+ * hash table. To ameliorate impact, when keys are {@link Comparable},
+ * this class may use comparison order among keys to help break ties.
+ *
+ *
+ * <p>A {@link Set} projection of a ConcurrentHashMap may be created
+ * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
+ * (using {@link #keySet(Object)} when only keys are of interest, and the
+ * mapped values are (perhaps transiently) not used or all take the
+ * same mapping value.
+ *
+ *
+ * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
+ * form of histogram or multiset) by using {@link
+ * java.util.concurrent.atomic.LongAdder} values and initializing via
+ * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
+ * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
+ * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
+ *
+ * This class and its views and iterators implement all of the
+ * optional methods of the {@link Map} and {@link Iterator}
+ * interfaces.
+ *
+ *
+ * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
+ * does not allow {@code null} to be used as a key or value.
+ *
+ *
+ * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
+ * operations that, unlike most {@link Stream} methods, are designed
+ * to be safely, and often sensibly, applied even with maps that are
+ * being concurrently updated by other threads; for example, when
+ * computing a snapshot summary of the values in a shared registry.
+ * There are three kinds of operation, each with four forms, accepting
+ * functions with Keys, Values, Entries, and (Key, Value) arguments
+ * and/or return values. Because the elements of a ConcurrentHashMap
+ * are not ordered in any particular way, and may be processed in
+ * different orders in different parallel executions, the correctness
+ * of supplied functions should not depend on any ordering, or on any
+ * other objects or values that may transiently change while
+ * computation is in progress; and except for forEach actions, should
+ * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
+ * objects do not support method {@code setValue}.
+ *
+ *
+ * - forEach: Perform a given action on each element.
+ * A variant form applies a given transformation on each element
+ * before performing the action.
+ *
+ * - search: Return the first available non-null result of
+ * applying a given function on each element; skipping further
+ * search when a result is found.
+ *
+ * - reduce: Accumulate each element. The supplied reduction
+ * function cannot rely on ordering (more formally, it should be
+ * both associative and commutative). There are five variants:
+ *
+ *
+ *
+ * - Plain reductions. (There is not a form of this method for
+ * (key, value) function arguments since there is no corresponding
+ * return type.)
+ *
+ * - Mapped reductions that accumulate the results of a given
+ * function applied to each element.
+ *
+ * - Reductions to scalar doubles, longs, and ints, using a
+ * given basis value.
+ *
+ *
+ *
+ *
+ *
+ * These bulk operations accept a {@code parallelismThreshold}
+ * argument. Methods proceed sequentially if the current map size is
+ * estimated to be less than the given threshold. Using a value of
+ * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
+ * of {@code 1} results in maximal parallelism by partitioning into
+ * enough subtasks to fully utilize the {@link
+ * ForkJoinPool#commonPool()} that is used for all parallel
+ * computations. Normally, you would initially choose one of these
+ * extreme values, and then measure performance of using in-between
+ * values that trade off overhead versus throughput.
+ *
+ *
+ * <p>The concurrency properties of bulk operations follow
+ * from those of ConcurrentHashMap: Any non-null result returned
+ * from {@code get(key)} and related access methods bears a
+ * happens-before relation with the associated insertion or
+ * update. The result of any bulk operation reflects the
+ * composition of these per-element relations (but is not
+ * necessarily atomic with respect to the map as a whole unless it
+ * is somehow known to be quiescent). Conversely, because keys
+ * and values in the map are never null, null serves as a reliable
+ * atomic indicator of the current lack of any result. To
+ * maintain this property, null serves as an implicit basis for
+ * all non-scalar reduction operations. For the double, long, and
+ * int versions, the basis should be one that, when combined with
+ * any other value, returns that other value (more formally, it
+ * should be the identity element for the reduction). Most common
+ * reductions have these properties; for example, computing a sum
+ * with basis 0 or a minimum with basis MAX_VALUE.
+ *
+ *
+ * <p>Search and transformation functions provided as arguments
+ * should similarly return null to indicate the lack of any result
+ * (in which case it is not used). In the case of mapped
+ * reductions, this also enables transformations to serve as
+ * filters, returning null (or, in the case of primitive
+ * specializations, the identity basis) if the element should not
+ * be combined. You can create compound transformations and
+ * filterings by composing them yourself under this "null means
+ * there is nothing there now" rule before using them in search or
+ * reduce operations.
+ *
+ *
+ * <p>Methods accepting and/or returning Entry arguments maintain
+ * key-value associations. They may be useful for example when
+ * finding the key for the greatest value. Note that "plain" Entry
+ * arguments can be supplied using {@code new
+ * AbstractMap.SimpleEntry(k,v)}.
+ *
+ *
+ * <p>Bulk operations may complete abruptly, throwing an
+ * exception encountered in the application of a supplied
+ * function. Bear in mind when handling such exceptions that other
+ * concurrently executing functions could also have thrown
+ * exceptions, or would have done so if the first exception had
+ * not occurred.
+ *
+ *
+ * <p>Speedups for parallel compared to sequential forms are common
+ * but not guaranteed. Parallel operations involving brief functions
+ * on small maps may execute more slowly than sequential forms if the
+ * underlying work to parallelize the computation is more expensive
+ * than the computation itself. Similarly, parallelization may not
+ * lead to much actual parallelism if all processors are busy
+ * performing unrelated tasks.
+ *
+ *
+ * <p>All arguments to all task methods must be non-null.
+ *
+ * <p>This class is a member of the
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ *
+ * @since 1.5
+ * @author Doug Lea
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ */
+public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
+    implements ConcurrentMap<K,V>, Serializable {
private static final long serialVersionUID = 7249069246763182397L;
/*
- * The basic strategy is to subdivide the table among Segments,
- * each of which itself is a concurrently readable hash table. To
- * reduce footprint, all but one segments are constructed only
- * when first needed (see ensureSegment). To maintain visibility
- * in the presence of lazy construction, accesses to segments as
- * well as elements of segment's table must use volatile access,
- * which is done via Unsafe within methods segmentAt etc
- * below. These provide the functionality of AtomicReferenceArrays
- * but reduce the levels of indirection. Additionally,
- * volatile-writes of table elements and entry "next" fields
- * within locked operations use the cheaper "lazySet" forms of
- * writes (via putOrderedObject) because these writes are always
- * followed by lock releases that maintain sequential consistency
- * of table updates.
- *
- * Historical note: The previous version of this class relied
- * heavily on "final" fields, which avoided some volatile reads at
- * the expense of a large initial footprint. Some remnants of
- * that design (including forced construction of segment 0) exist
- * to ensure serialization compatibility.
+ * Overview:
+ *
+ * The primary design goal of this hash table is to maintain
+ * concurrent readability (typically method get(), but also
+ * iterators and related methods) while minimizing update
+ * contention. Secondary goals are to keep space consumption about
+ * the same or better than java.util.HashMap, and to support high
+ * initial insertion rates on an empty table by many threads.
+ *
+ * This map usually acts as a binned (bucketed) hash table. Each
+ * key-value mapping is held in a Node. Most nodes are instances
+ * of the basic Node class with hash, key, value, and next
+ * fields. However, various subclasses exist: TreeNodes are
+ * arranged in balanced trees, not lists. TreeBins hold the roots
+ * of sets of TreeNodes. ForwardingNodes are placed at the heads
+ * of bins during resizing. ReservationNodes are used as
+ * placeholders while establishing values in computeIfAbsent and
+ * related methods. The types TreeBin, ForwardingNode, and
+ * ReservationNode do not hold normal user keys, values, or
+ * hashes, and are readily distinguishable during search etc
+ * because they have negative hash fields and null key and value
+ * fields. (These special nodes are either uncommon or transient,
+ * so the impact of carrying around some unused fields is
+ * insignificant.)
+ *
+ * The table is lazily initialized to a power-of-two size upon the
+ * first insertion. Each bin in the table normally contains a
+ * list of Nodes (most often, the list has only zero or one Node).
+ * Table accesses require volatile/atomic reads, writes, and
+ * CASes. Because there is no other way to arrange this without
+ * adding further indirections, we use intrinsics
+ * (sun.misc.Unsafe) operations.
+ *
+ * We use the top (sign) bit of Node hash fields for control
+ * purposes -- it is available anyway because of addressing
+ * constraints. Nodes with negative hash fields are specially
+ * handled or ignored in map methods.
+ *
+ * Insertion (via put or its variants) of the first node in an
+ * empty bin is performed by just CASing it to the bin. This is
+ * by far the most common case for put operations under most
+ * key/hash distributions. Other update operations (insert,
+ * delete, and replace) require locks. We do not want to waste
+ * the space required to associate a distinct lock object with
+ * each bin, so instead use the first node of a bin list itself as
+ * a lock. Locking support for these locks relies on builtin
+ * "synchronized" monitors.
+ *
+ * Using the first node of a list as a lock does not by itself
+ * suffice though: When a node is locked, any update must first
+ * validate that it is still the first node after locking it, and
+ * retry if not. Because new nodes are always appended to lists,
+ * once a node is first in a bin, it remains first until deleted
+ * or the bin becomes invalidated (upon resizing).
+ *
+ * The main disadvantage of per-bin locks is that other update
+ * operations on other nodes in a bin list protected by the same
+ * lock can stall, for example when user equals() or mapping
+ * functions take a long time. However, statistically, under
+ * random hash codes, this is not a common problem. Ideally, the
+ * frequency of nodes in bins follows a Poisson distribution
+ * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
+ * parameter of about 0.5 on average, given the resizing threshold
+ * of 0.75, although with a large variance because of resizing
+ * granularity. Ignoring variance, the expected occurrences of
+ * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
+ * first values are:
+ *
+ * 0: 0.60653066
+ * 1: 0.30326533
+ * 2: 0.07581633
+ * 3: 0.01263606
+ * 4: 0.00157952
+ * 5: 0.00015795
+ * 6: 0.00001316
+ * 7: 0.00000094
+ * 8: 0.00000006
+ * more: less than 1 in ten million
+ *
+ * Lock contention probability for two threads accessing distinct
+ * elements is roughly 1 / (8 * #elements) under random hashes.
+ *
+ * Actual hash code distributions encountered in practice
+ * sometimes deviate significantly from uniform randomness. This
+ * includes the case when N > (1<<30), so some keys MUST collide.
+ * Similarly for dumb or hostile usages in which multiple keys are
+     * designed to have identical hash codes or ones that differ only
+ * in masked-out high bits. So we use a secondary strategy that
+ * applies when the number of nodes in a bin exceeds a
+ * threshold. These TreeBins use a balanced tree to hold nodes (a
+ * specialized form of red-black trees), bounding search time to
+ * O(log N). Each search step in a TreeBin is at least twice as
+ * slow as in a regular list, but given that N cannot exceed
+ * (1<<64) (before running out of addresses) this bounds search
+ * steps, lock hold times, etc, to reasonable constants (roughly
+ * 100 nodes inspected per operation worst case) so long as keys
+ * are Comparable (which is very common -- String, Long, etc).
+ * TreeBin nodes (TreeNodes) also maintain the same "next"
+ * traversal pointers as regular nodes, so can be traversed in
+ * iterators in the same way.
+ *
+ * The table is resized when occupancy exceeds a percentage
+ * threshold (nominally, 0.75, but see below). Any thread
+ * noticing an overfull bin may assist in resizing after the
+ * initiating thread allocates and sets up the replacement array.
+ * However, rather than stalling, these other threads may proceed
+ * with insertions etc. The use of TreeBins shields us from the
+ * worst case effects of overfilling while resizes are in
+ * progress. Resizing proceeds by transferring bins, one by one,
+ * from the table to the next table. However, threads claim small
+ * blocks of indices to transfer (via field transferIndex) before
+ * doing so, reducing contention. A generation stamp in field
+ * sizeCtl ensures that resizings do not overlap. Because we are
+ * using power-of-two expansion, the elements from each bin must
+ * either stay at same index, or move with a power of two
+ * offset. We eliminate unnecessary node creation by catching
+ * cases where old nodes can be reused because their next fields
+ * won't change. On average, only about one-sixth of them need
+ * cloning when a table doubles. The nodes they replace will be
+ * garbage collectable as soon as they are no longer referenced by
+ * any reader thread that may be in the midst of concurrently
+ * traversing table. Upon transfer, the old table bin contains
+ * only a special forwarding node (with hash field "MOVED") that
+ * contains the next table as its key. On encountering a
+ * forwarding node, access and update operations restart, using
+ * the new table.
+ *
+ * Each bin transfer requires its bin lock, which can stall
+ * waiting for locks while resizing. However, because other
+ * threads can join in and help resize rather than contend for
+ * locks, average aggregate waits become shorter as resizing
+ * progresses. The transfer operation must also ensure that all
+ * accessible bins in both the old and new table are usable by any
+ * traversal. This is arranged in part by proceeding from the
+ * last bin (table.length - 1) up towards the first. Upon seeing
+ * a forwarding node, traversals (see class Traverser) arrange to
+ * move to the new table without revisiting nodes. To ensure that
+ * no intervening nodes are skipped even when moved out of order,
+ * a stack (see class TableStack) is created on first encounter of
+ * a forwarding node during a traversal, to maintain its place if
+ * later processing the current table. The need for these
+ * save/restore mechanics is relatively rare, but when one
+ * forwarding node is encountered, typically many more will be.
+ * So Traversers use a simple caching scheme to avoid creating so
+ * many new TableStack nodes. (Thanks to Peter Levart for
+ * suggesting use of a stack here.)
+ *
+ * The traversal scheme also applies to partial traversals of
+ * ranges of bins (via an alternate Traverser constructor)
+ * to support partitioned aggregate operations. Also, read-only
+ * operations give up if ever forwarded to a null table, which
+ * provides support for shutdown-style clearing, which is also not
+ * currently implemented.
+ *
+ * Lazy table initialization minimizes footprint until first use,
+ * and also avoids resizings when the first operation is from a
+ * putAll, constructor with map argument, or deserialization.
+ * These cases attempt to override the initial capacity settings,
+ * but harmlessly fail to take effect in cases of races.
+ *
+ * The element count is maintained using a specialization of
+ * LongAdder. We need to incorporate a specialization rather than
+ * just use a LongAdder in order to access implicit
+ * contention-sensing that leads to creation of multiple
+ * CounterCells. The counter mechanics avoid contention on
+ * updates but can encounter cache thrashing if read too
+ * frequently during concurrent access. To avoid reading so often,
+ * resizing under contention is attempted only upon adding to a
+ * bin already holding two or more nodes. Under uniform hash
+ * distributions, the probability of this occurring at threshold
+ * is around 13%, meaning that only about 1 in 8 puts check
+ * threshold (and after resizing, many fewer do so).
+ *
+ * TreeBins use a special form of comparison for search and
+ * related operations (which is the main reason we cannot use
+ * existing collections such as TreeMaps). TreeBins contain
+ * Comparable elements, but may contain others, as well as
+ * elements that are Comparable but not necessarily Comparable for
+ * the same T, so we cannot invoke compareTo among them. To handle
+ * this, the tree is ordered primarily by hash value, then by
+ * Comparable.compareTo order if applicable. On lookup at a node,
+ * if elements are not comparable or compare as 0 then both left
+ * and right children may need to be searched in the case of tied
+ * hash values. (This corresponds to the full list search that
+ * would be necessary if all elements were non-Comparable and had
+ * tied hashes.) On insertion, to keep a total ordering (or as
+ * close as is required here) across rebalancings, we compare
+ * classes and identityHashCodes as tie-breakers. The red-black
+ * balancing code is updated from pre-jdk-collections
+ * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
+ * based in turn on Cormen, Leiserson, and Rivest "Introduction to
+ * Algorithms" (CLR).
+ *
+ * TreeBins also require an additional locking mechanism. While
+ * list traversal is always possible by readers even during
+ * updates, tree traversal is not, mainly because of tree-rotations
+ * that may change the root node and/or its linkages. TreeBins
+ * include a simple read-write lock mechanism parasitic on the
+ * main bin-synchronization strategy: Structural adjustments
+ * associated with an insertion or removal are already bin-locked
+ * (and so cannot conflict with other writers) but must wait for
+ * ongoing readers to finish. Since there can be only one such
+ * waiter, we use a simple scheme using a single "waiter" field to
+ * block writers. However, readers need never block. If the root
+ * lock is held, they proceed along the slow traversal path (via
+ * next-pointers) until the lock becomes available or the list is
+ * exhausted, whichever comes first. These cases are not fast, but
+ * maximize aggregate expected throughput.
+ *
+ * Maintaining API and serialization compatibility with previous
+ * versions of this class introduces several oddities. Mainly: We
+     * leave untouched but unused constructor arguments referring to
+ * concurrencyLevel. We accept a loadFactor constructor argument,
+ * but apply it only to initial table capacity (which is the only
+ * time that we can guarantee to honor it.) We also declare an
+ * unused "Segment" class that is instantiated in minimal form
+ * only when serializing.
+ *
+ * Also, solely for compatibility with previous versions of this
+ * class, it extends AbstractMap, even though all of its methods
+ * are overridden, so it is just useless baggage.
+ *
+ * This file is organized to make things a little easier to follow
+ * while reading than they might otherwise: First the main static
+ * declarations and utilities, then fields, then main public
+ * methods (with a few factorings of multiple public methods into
+ * internal ones), then sizing methods, trees, traversers, and
+ * bulk operations.
*/
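The frequency-map idiom from the class javadoc, spelled out as a runnable sketch (computeIfAbsent takes the key plus a mapping function; each count is a single contention-friendly LongAdder update):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;

    public class FreqMapDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, LongAdder> freqs = new ConcurrentHashMap<>();
            for (String w : new String[] {"get", "put", "get", "size", "get"})
                freqs.computeIfAbsent(w, k -> new LongAdder()).increment();
            // get -> 3, put -> 1, size -> 1 (iteration order unspecified)
            freqs.forEach((k, v) -> System.out.println(k + " -> " + v.sum()));
        }
    }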
/* ---------------- Constants -------------- */
/**
- * The default initial capacity for this table,
- * used when not otherwise specified in a constructor.
+ * The largest possible table capacity. This value must be
+ * exactly 1<<30 to stay within Java array allocation and indexing
+ * bounds for power of two table sizes, and is further required
+ * because the top two bits of 32bit hash fields are used for
+ * control purposes.
*/
- static final int DEFAULT_INITIAL_CAPACITY = 16;
+ private static final int MAXIMUM_CAPACITY = 1 << 30;
/**
- * The default load factor for this table, used when not
- * otherwise specified in a constructor.
+ * The default initial table capacity. Must be a power of 2
+ * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
*/
- static final float DEFAULT_LOAD_FACTOR = 0.75f;
+ private static final int DEFAULT_CAPACITY = 16;
/**
- * The default concurrency level for this table, used when not
- * otherwise specified in a constructor.
+ * The largest possible (non-power of two) array size.
+ * Needed by toArray and related methods.
*/
- static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+ static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
/**
- * The maximum capacity, used if a higher value is implicitly
- * specified by either of the constructors with arguments. MUST
- * be a power of two <= 1<<30 to ensure that entries are indexable
- * using ints.
+ * The default concurrency level for this table. Unused but
+ * defined for compatibility with previous versions of this class.
*/
- static final int MAXIMUM_CAPACITY = 1 << 30;
+ private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
/**
- * The minimum capacity for per-segment tables. Must be a power
- * of two, at least two to avoid immediate resizing on next use
- * after lazy construction.
+ * The load factor for this table. Overrides of this value in
+ * constructors affect only the initial table capacity. The
+ * actual floating point value isn't normally used -- it is
+ * simpler to use expressions such as {@code n - (n >>> 2)} for
+ * the associated resizing threshold.
*/
- static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
+ private static final float LOAD_FACTOR = 0.75f;
/**
- * The maximum number of segments to allow; used to bound
- * constructor arguments. Must be power of two less than 1 << 24.
+ * The bin count threshold for using a tree rather than list for a
+ * bin. Bins are converted to trees when adding an element to a
+ * bin with at least this many nodes. The value must be greater
+ * than 2, and should be at least 8 to mesh with assumptions in
+ * tree removal about conversion back to plain bins upon
+ * shrinkage.
*/
- static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
+ static final int TREEIFY_THRESHOLD = 8;
/**
- * Number of unsynchronized retries in size and containsValue
- * methods before resorting to locking. This is used to avoid
- * unbounded retries if tables undergo continuous modification
- * which would make it impossible to obtain an accurate result.
+ * The bin count threshold for untreeifying a (split) bin during a
+ * resize operation. Should be less than TREEIFY_THRESHOLD, and at
+ * most 6 to mesh with shrinkage detection under removal.
*/
- static final int RETRIES_BEFORE_LOCK = 2;
+ static final int UNTREEIFY_THRESHOLD = 6;
- /* ---------------- Fields -------------- */
+ /**
+ * The smallest table capacity for which bins may be treeified.
+ * (Otherwise the table is resized if too many nodes in a bin.)
+ * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
+ * conflicts between resizing and treeification thresholds.
+ */
+ static final int MIN_TREEIFY_CAPACITY = 64;
+
+ /**
+ * Minimum number of rebinnings per transfer step. Ranges are
+ * subdivided to allow multiple resizer threads. This value
+ * serves as a lower bound to avoid resizers encountering
+ * excessive memory contention. The value should be at least
+ * DEFAULT_CAPACITY.
+ */
+ private static final int MIN_TRANSFER_STRIDE = 16;
/**
- * Mask value for indexing into segments. The upper bits of a
- * key's hash code are used to choose the segment.
+ * The number of bits used for generation stamp in sizeCtl.
+ * Must be at least 6 for 32bit arrays.
*/
- final int segmentMask;
+ private static int RESIZE_STAMP_BITS = 16;
/**
- * Shift value for indexing within segments.
+ * The maximum number of threads that can help resize.
+ * Must fit in 32 - RESIZE_STAMP_BITS bits.
*/
- final int segmentShift;
+ private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
/**
- * The segments, each of which is a specialized hash table.
+ * The bit shift for recording size stamp in sizeCtl.
*/
-    final Segment<K,V>[] segments;
+ private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
+
+ /*
+ * Encodings for Node hash fields. See above for explanation.
+ */
+ static final int MOVED = -1; // hash for forwarding nodes
+ static final int TREEBIN = -2; // hash for roots of trees
+ static final int RESERVED = -3; // hash for transient reservations
+ static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
+
+ /** Number of CPUS, to place bounds on some sizings */
+ static final int NCPU = Runtime.getRuntime().availableProcessors();
-    transient Set<K> keySet;
-    transient Set<Map.Entry<K,V>> entrySet;
-    transient Collection<V> values;
+ /** For serialization compatibility. */
+ private static final ObjectStreamField[] serialPersistentFields = {
+ new ObjectStreamField("segments", Segment[].class),
+ new ObjectStreamField("segmentMask", Integer.TYPE),
+ new ObjectStreamField("segmentShift", Integer.TYPE)
+ };
+
+ /* ---------------- Nodes -------------- */
/**
- * ConcurrentHashMap list entry. Note that this is never exported
- * out as a user-visible Map.Entry.
+ * Key-value entry. This class is never exported out as a
+ * user-mutable Map.Entry (i.e., one supporting setValue; see
+ * MapEntry below), but can be used for read-only traversals used
+ * in bulk tasks. Subclasses of Node with a negative hash field
+ * are special, and contain null keys and values (but are never
+ * exported). Otherwise, keys and vals are never null.
*/
-    static final class HashEntry<K,V> {
+    static class Node<K,V> implements Map.Entry<K,V> {
final int hash;
final K key;
- volatile V value;
-        volatile HashEntry<K,V> next;
+ volatile V val;
+        volatile Node<K,V> next;
-        HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
+        Node(int hash, K key, V val, Node<K,V> next) {
this.hash = hash;
this.key = key;
- this.value = value;
+ this.val = val;
this.next = next;
}
- /**
- * Sets next field with volatile write semantics. (See above
- * about use of putOrderedObject.)
- */
-        final void setNext(HashEntry<K,V> n) {
- UNSAFE.putOrderedObject(this, nextOffset, n);
+ public final K getKey() { return key; }
+ public final V getValue() { return val; }
+ public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
+ public final String toString(){ return key + "=" + val; }
+ public final V setValue(V value) {
+ throw new UnsupportedOperationException();
}
- // Unsafe mechanics
- static final sun.misc.Unsafe UNSAFE;
- static final long nextOffset;
- static {
- try {
- UNSAFE = sun.misc.Unsafe.getUnsafe();
- Class k = HashEntry.class;
- nextOffset = UNSAFE.objectFieldOffset
- (k.getDeclaredField("next"));
- } catch (Exception e) {
- throw new Error(e);
+ public final boolean equals(Object o) {
+            Object k, v, u; Map.Entry<?,?> e;
+ return ((o instanceof Map.Entry) &&
+                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
+ (v = e.getValue()) != null &&
+ (k == key || k.equals(key)) &&
+ (v == (u = val) || v.equals(u)));
+ }
+
+ /**
+ * Virtualized support for map.get(); overridden in subclasses.
+ */
+        Node<K,V> find(int h, Object k) {
+            Node<K,V> e = this;
+ if (k != null) {
+ do {
+ K ek;
+ if (e.hash == h &&
+ ((ek = e.key) == k || (ek != null && k.equals(ek))))
+ return e;
+ } while ((e = e.next) != null);
}
+ return null;
}
}
+ /* ---------------- Static utilities -------------- */
+
/**
- * Gets the ith element of given table (if nonnull) with volatile
- * read semantics. Note: This is manually integrated into a few
- * performance-sensitive methods to reduce call overhead.
+ * Spreads (XORs) higher bits of hash to lower and also forces top
+ * bit to 0. Because the table uses power-of-two masking, sets of
+ * hashes that vary only in bits above the current mask will
+ * always collide. (Among known examples are sets of Float keys
+ * holding consecutive whole numbers in small tables.) So we
+ * apply a transform that spreads the impact of higher bits
+ * downward. There is a tradeoff between speed, utility, and
+ * quality of bit-spreading. Because many common sets of hashes
+ * are already reasonably distributed (so don't benefit from
+ * spreading), and because we use trees to handle large sets of
+ * collisions in bins, we just XOR some shifted bits in the
+ * cheapest possible way to reduce systematic lossage, as well as
+ * to incorporate impact of the highest bits that would otherwise
+ * never be used in index calculations because of table bounds.
*/
- @SuppressWarnings("unchecked")
-    static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
- return (tab == null) ? null :
-            (HashEntry<K,V>) UNSAFE.getObjectVolatile
- (tab, ((long)i << TSHIFT) + TBASE);
+ static final int spread(int h) {
+ return (h ^ (h >>> 16)) & HASH_BITS;
}
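To see why the spreading matters under power-of-two masking, take two hashes that differ only above the bits used for indexing: without the XOR they land in the same bin. The constants below are arbitrary illustrations:

    public class SpreadDemo {
        static final int HASH_BITS = 0x7fffffff;

        static int spread(int h) { // same transform as above
            return (h ^ (h >>> 16)) & HASH_BITS;
        }

        public static void main(String[] args) {
            int mask = 16 - 1;                    // 16-slot table
            int h1 = 0x00010001, h2 = 0x00020001; // differ only in high bits
            System.out.println((h1 & mask) + ", " + (h2 & mask));                 // 1, 1: collide
            System.out.println((spread(h1) & mask) + ", " + (spread(h2) & mask)); // 0, 3: spread
        }
    }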
/**
- * Sets the ith element of given table, with volatile write
- * semantics. (See above about use of putOrderedObject.)
+ * Returns a power of two table size for the given desired capacity.
+ * See Hackers Delight, sec 3.2
*/
-    static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i,
-                                       HashEntry<K,V> e) {
- UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e);
+ private static final int tableSizeFor(int c) {
+ int n = c - 1;
+ n |= n >>> 1;
+ n |= n >>> 2;
+ n |= n >>> 4;
+ n |= n >>> 8;
+ n |= n >>> 16;
+ return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
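/*
 * Editor's note (illustrative sketch, not part of this patch): the shifted
 * ORs smear the highest set bit of n = c - 1 into every lower position, so
 * n + 1 is the smallest power of two >= c. For c = 17:
 *
 *   n = 16       -> 0b10000
 *   n |= n >>> 1 -> 0b11000
 *   n |= n >>> 2 -> 0b11110
 *   n |= n >>> 4 -> 0b11111   (the remaining shifts change nothing)
 *   return 31 + 1 == 32
 *
 * For c = 16, n starts at 15 == 0b01111 and the result stays 16.
 */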
/**
- * Applies a supplemental hash function to a given hashCode, which
- * defends against poor quality hash functions. This is critical
- * because ConcurrentHashMap uses power-of-two length hash tables,
- * that otherwise encounter collisions for hashCodes that do not
- * differ in lower or upper bits.
+ * Returns x's Class if it is of the form "class C implements
+ * Comparable<C>", else null.
*/
- private static int hash(int h) {
- // Spread bits to regularize both segment and index locations,
- // using variant of single-word Wang/Jenkins hash.
- h += (h << 15) ^ 0xffffcd7d;
- h ^= (h >>> 10);
- h += (h << 3);
- h ^= (h >>> 6);
- h += (h << 2) + (h << 14);
- return h ^ (h >>> 16);
+ static Class<?> comparableClassFor(Object x) {
+ if (x instanceof Comparable) {
+ Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
+ if ((c = x.getClass()) == String.class) // bypass checks
+ return c;
+ if ((ts = c.getGenericInterfaces()) != null) {
+ for (int i = 0; i < ts.length; ++i) {
+ if (((t = ts[i]) instanceof ParameterizedType) &&
+ ((p = (ParameterizedType)t).getRawType() ==
+ Comparable.class) &&
+ (as = p.getActualTypeArguments()) != null &&
+ as.length == 1 && as[0] == c) // type arg is c
+ return c;
+ }
+ }
+ }
+ return null;
}
/**
- * Segments are specialized versions of hash tables. This
- * subclasses from ReentrantLock opportunistically, just to
- * simplify some locking and avoid separate construction.
+ * Returns k.compareTo(x) if x matches kc (k's screened comparable
+ * class), else 0.
*/
- static final class Segment<K,V> extends ReentrantLock implements Serializable {
- /*
- * Segments maintain a table of entry lists that are always
- * kept in a consistent state, so can be read (via volatile
- * reads of segments and tables) without locking. This
- * requires replicating nodes when necessary during table
- * resizing, so the old lists can be traversed by readers
- * still using old version of table.
- *
- * This class defines only mutative methods requiring locking.
- * Except as noted, the methods of this class perform the
- * per-segment versions of ConcurrentHashMap methods. (Other
- * methods are integrated directly into ConcurrentHashMap
- * methods.) These mutative methods use a form of controlled
- * spinning on contention via methods scanAndLock and
- * scanAndLockForPut. These intersperse tryLocks with
- * traversals to locate nodes. The main benefit is to absorb
- * cache misses (which are very common for hash tables) while
- * obtaining locks so that traversal is faster once
- * acquired. We do not actually use the found nodes since they
- * must be re-acquired under lock anyway to ensure sequential
- * consistency of updates (and in any case may be undetectably
- * stale), but they will normally be much faster to re-locate.
- * Also, scanAndLockForPut speculatively creates a fresh node
- * to use in put if no node is found.
- */
-
- private static final long serialVersionUID = 2249069246763182397L;
+ @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
+ static int compareComparables(Class<?> kc, Object k, Object x) {
+ return (x == null || x.getClass() != kc ? 0 :
+ ((Comparable)k).compareTo(x));
+ }
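/*
 * Editor's note (illustrative sketch, not part of this patch): how the two
 * helpers above cooperate when ordering keys in a tree bin. For a String key:
 *
 *   Class<?> kc = comparableClassFor("a");            // String.class (fast path)
 *   compareComparables(kc, "a", "b");                 // < 0, via "a".compareTo("b")
 *   compareComparables(kc, "a", Integer.valueOf(1));  // 0: class mismatch, so the
 *                                                     // caller falls back to a
 *                                                     // tie-breaking order
 */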
- /**
- * The maximum number of times to tryLock in a prescan before
- * possibly blocking on acquire in preparation for a locked
- * segment operation. On multiprocessors, using a bounded
- * number of retries maintains cache acquired while locating
- * nodes.
- */
- static final int MAX_SCAN_RETRIES =
- Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
+ /* ---------------- Table element access -------------- */
- /**
- * The per-segment table. Elements are accessed via
- * entryAt/setEntryAt providing volatile semantics.
- */
- transient volatile HashEntry<K,V>[] table;
+ /*
+ * Volatile access methods are used for table elements as well as
+ * elements of in-progress next table while resizing. All uses of
+ * the tab arguments must be null checked by callers. All callers
+ * also paranoically precheck that tab's length is not zero (or an
+ * equivalent check), thus ensuring that any index argument taking
+ * the form of a hash value anded with (length - 1) is a valid
+ * index. Note that, to be correct wrt arbitrary concurrency
+ * errors by users, these checks must operate on local variables,
+ * which accounts for some odd-looking inline assignments below.
+ * Note that calls to setTabAt always occur within locked regions,
+ * and so in principle require only release ordering, not
+ * full volatile semantics, but are currently coded as volatile
+ * writes to be conservative.
+ */
- /**
- * The number of elements. Accessed only either within locks
- * or among other volatile reads that maintain visibility.
- */
- transient int count;
+ @SuppressWarnings("unchecked")
+ static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
+ return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
+ }
- /**
- * The total number of mutative operations in this segment.
- * Even though this may overflow 32 bits, it provides
- * sufficient accuracy for stability checks in CHM isEmpty()
- * and size() methods. Accessed only either within locks or
- * among other volatile reads that maintain visibility.
- */
- transient int modCount;
+ static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
+ Node<K,V> c, Node<K,V> v) {
+ return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
+ }
- /**
- * The table is rehashed when its size exceeds this threshold.
- * (The value of this field is always (int)(capacity *
- * loadFactor).)
- */
- transient int threshold;
+ static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
+ U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
+ }
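/*
 * Editor's note (illustrative sketch, not part of this patch): ABASE and
 * ASHIFT turn an index into a raw byte offset for Unsafe. They are computed
 * in this class's static initializer roughly as:
 *
 *   ABASE = U.arrayBaseOffset(Node[].class);
 *   int scale = U.arrayIndexScale(Node[].class);
 *   ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
 *
 * so slot i lives at ((long)i << ASHIFT) + ABASE, accessed above with
 * volatile-read, CAS, and volatile-write semantics respectively.
 */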
- /**
- * The load factor for the hash table. Even though this value
- * is same for all segments, it is replicated to avoid needing
- * links to outer object.
- * @serial
- */
- final float loadFactor;
+ /* ---------------- Fields -------------- */
- Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
- this.loadFactor = lf;
- this.threshold = threshold;
- this.table = tab;
- }
+ /**
+ * The array of bins. Lazily initialized upon first insertion.
+ * Size is always a power of two. Accessed directly by iterators.
+ */
+ transient volatile Node<K,V>[] table;
- final V put(K key, int hash, V value, boolean onlyIfAbsent) {
- HashEntry<K,V> node = tryLock() ? null :
- scanAndLockForPut(key, hash, value);
- V oldValue;
- try {
- HashEntry<K,V>[] tab = table;
- int index = (tab.length - 1) & hash;
- HashEntry<K,V> first = entryAt(tab, index);
- for (HashEntry<K,V> e = first;;) {
- if (e != null) {
- K k;
- if ((k = e.key) == key ||
- (e.hash == hash && key.equals(k))) {
- oldValue = e.value;
- if (!onlyIfAbsent) {
- e.value = value;
- ++modCount;
- }
- break;
- }
- e = e.next;
- }
- else {
- if (node != null)
- node.setNext(first);
- else
- node = new HashEntry<K,V>(hash, key, value, first);
- int c = count + 1;
- if (c > threshold && tab.length < MAXIMUM_CAPACITY)
- rehash(node);
- else
- setEntryAt(tab, index, node);
- ++modCount;
- count = c;
- oldValue = null;
- break;
- }
- }
- } finally {
- unlock();
- }
- return oldValue;
- }
+ /**
+ * The next table to use; non-null only while resizing.
+ */
+ private transient volatile Node<K,V>[] nextTable;
- /**
- * Doubles size of table and repacks entries, also adding the
- * given node to new table
- */
- @SuppressWarnings("unchecked")
- private void rehash(HashEntry<K,V> node) {
- /*
- * Reclassify nodes in each list to new table. Because we
- * are using power-of-two expansion, the elements from
- * each bin must either stay at same index, or move with a
- * power of two offset. We eliminate unnecessary node
- * creation by catching cases where old nodes can be
- * reused because their next fields won't change.
- * Statistically, at the default threshold, only about
- * one-sixth of them need cloning when a table
- * doubles. The nodes they replace will be garbage
- * collectable as soon as they are no longer referenced by
- * any reader thread that may be in the midst of
- * concurrently traversing table. Entry accesses use plain
- * array indexing because they are followed by volatile
- * table write.
- */
- HashEntry<K,V>[] oldTable = table;
- int oldCapacity = oldTable.length;
- int newCapacity = oldCapacity << 1;
- threshold = (int)(newCapacity * loadFactor);
- HashEntry<K,V>[] newTable =
- (HashEntry<K,V>[]) new HashEntry[newCapacity];
- int sizeMask = newCapacity - 1;
- for (int i = 0; i < oldCapacity ; i++) {
- HashEntry<K,V> e = oldTable[i];
- if (e != null) {
- HashEntry<K,V> next = e.next;
- int idx = e.hash & sizeMask;
- if (next == null) // Single node on list
- newTable[idx] = e;
- else { // Reuse consecutive sequence at same slot
- HashEntry<K,V> lastRun = e;
- int lastIdx = idx;
- for (HashEntry<K,V> last = next;
- last != null;
- last = last.next) {
- int k = last.hash & sizeMask;
- if (k != lastIdx) {
- lastIdx = k;
- lastRun = last;
- }
- }
- newTable[lastIdx] = lastRun;
- // Clone remaining nodes
- for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
- V v = p.value;
- int h = p.hash;
- int k = h & sizeMask;
- HashEntry<K,V> n = newTable[k];
- newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
- }
- }
- }
- }
- int nodeIndex = node.hash & sizeMask; // add the new node
- node.setNext(newTable[nodeIndex]);
- newTable[nodeIndex] = node;
- table = newTable;
- }
+ /**
+ * Base counter value, used mainly when there is no contention,
+ * but also as a fallback during table initialization
+ * races. Updated via CAS.
+ */
+ private transient volatile long baseCount;
- /**
- * Scans for a node containing given key while trying to
- * acquire lock, creating and returning one if not found. Upon
- * return, guarantees that lock is held. Unlike in most
- * methods, calls to method equals are not screened: Since
- * traversal speed doesn't matter, we might as well help warm
- * up the associated code and accesses as well.
- *
- * @return a new node if key not found, else null
- */
- private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
- HashEntry<K,V> first = entryForHash(this, hash);
- HashEntry<K,V> e = first;
- HashEntry<K,V> node = null;
- int retries = -1; // negative while locating node
- while (!tryLock()) {
- HashEntry<K,V> f; // to recheck first below
- if (retries < 0) {
- if (e == null) {
- if (node == null) // speculatively create node
- node = new HashEntry<K,V>(hash, key, value, null);
- retries = 0;
- }
- else if (key.equals(e.key))
- retries = 0;
- else
- e = e.next;
- }
- else if (++retries > MAX_SCAN_RETRIES) {
- lock();
- break;
- }
- else if ((retries & 1) == 0 &&
- (f = entryForHash(this, hash)) != first) {
- e = first = f; // re-traverse if entry changed
- retries = -1;
- }
- }
- return node;
- }
+ /**
+ * Table initialization and resizing control. When negative, the
+ * table is being initialized or resized: -1 for initialization,
+ * else -(1 + the number of active resizing threads). Otherwise,
+ * when table is null, holds the initial table size to use upon
+ * creation, or 0 for default. After initialization, holds the
+ * next element count value upon which to resize the table.
+ */
+ private transient volatile int sizeCtl;
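/*
 * Editor's note (illustrative sketch, not part of this patch): sizeCtl
 * states for a map built as new ConcurrentHashMap<K,V>(32), following the
 * sizing arithmetic in the int constructor below:
 *
 *   after construction:    sizeCtl == 64   (table size to create)
 *   while initializing:    sizeCtl == -1   (one thread owns initTable)
 *   after initialization:  sizeCtl == 48   (next resize at 64 - 64/4)
 */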
- /**
- * Scans for a node containing the given key while trying to
- * acquire lock for a remove or replace operation. Upon
- * return, guarantees that lock is held. Note that we must
- * lock even if the key is not found, to ensure sequential
- * consistency of updates.
- */
- private void scanAndLock(Object key, int hash) {
- // similar to but simpler than scanAndLockForPut
- HashEntry<K,V> first = entryForHash(this, hash);
- HashEntry<K,V> e = first;
- int retries = -1;
- while (!tryLock()) {
- HashEntry<K,V> f;
- if (retries < 0) {
- if (e == null || key.equals(e.key))
- retries = 0;
- else
- e = e.next;
- }
- else if (++retries > MAX_SCAN_RETRIES) {
- lock();
- break;
- }
- else if ((retries & 1) == 0 &&
- (f = entryForHash(this, hash)) != first) {
- e = first = f;
- retries = -1;
- }
- }
- }
+ /**
+ * The next table index (plus one) to split while resizing.
+ */
+ private transient volatile int transferIndex;
- /**
- * Remove; match on key only if value null, else match both.
- */
- final V remove(Object key, int hash, Object value) {
- if (!tryLock())
- scanAndLock(key, hash);
- V oldValue = null;
- try {
- HashEntry<K,V>[] tab = table;
- int index = (tab.length - 1) & hash;
- HashEntry<K,V> e = entryAt(tab, index);
- HashEntry<K,V> pred = null;
- while (e != null) {
- K k;
- HashEntry<K,V> next = e.next;
- if ((k = e.key) == key ||
- (e.hash == hash && key.equals(k))) {
- V v = e.value;
- if (value == null || value == v || value.equals(v)) {
- if (pred == null)
- setEntryAt(tab, index, next);
- else
- pred.setNext(next);
- ++modCount;
- --count;
- oldValue = v;
- }
- break;
- }
- pred = e;
- e = next;
- }
- } finally {
- unlock();
- }
- return oldValue;
- }
+ /**
+ * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
+ */
+ private transient volatile int cellsBusy;
- final boolean replace(K key, int hash, V oldValue, V newValue) {
- if (!tryLock())
- scanAndLock(key, hash);
- boolean replaced = false;
- try {
- HashEntry<K,V> e;
- for (e = entryForHash(this, hash); e != null; e = e.next) {
- K k;
- if ((k = e.key) == key ||
- (e.hash == hash && key.equals(k))) {
- if (oldValue.equals(e.value)) {
- e.value = newValue;
- ++modCount;
- replaced = true;
- }
- break;
- }
- }
- } finally {
- unlock();
- }
- return replaced;
- }
+ /**
+ * Table of counter cells. When non-null, size is a power of 2.
+ */
+ private transient volatile CounterCell[] counterCells;
- final V replace(K key, int hash, V value) {
- if (!tryLock())
- scanAndLock(key, hash);
- V oldValue = null;
- try {
- HashEntry<K,V> e;
- for (e = entryForHash(this, hash); e != null; e = e.next) {
- K k;
- if ((k = e.key) == key ||
- (e.hash == hash && key.equals(k))) {
- oldValue = e.value;
- e.value = value;
- ++modCount;
- break;
- }
- }
- } finally {
- unlock();
- }
- return oldValue;
- }
+ // views
+ private transient KeySetView<K,V> keySet;
+ private transient ValuesView<K,V> values;
+ private transient EntrySetView<K,V> entrySet;
- final void clear() {
- lock();
- try {
- HashEntry<K,V>[] tab = table;
- for (int i = 0; i < tab.length ; i++)
- setEntryAt(tab, i, null);
- ++modCount;
- count = 0;
- } finally {
- unlock();
- }
- }
- }
- // Accessing segments
+ /* ---------------- Public operations -------------- */
/**
- * Gets the jth element of given segment array (if nonnull) with
- * volatile element access semantics via Unsafe. (The null check
- * can trigger harmlessly only during deserialization.) Note:
- * because each element of segments array is set only once (using
- * fully ordered writes), some performance-sensitive methods rely
- * on this method only as a recheck upon null reads.
+ * Creates a new, empty map with the default initial table size (16).
*/
- @SuppressWarnings("unchecked")
- static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) {
- long u = (j << SSHIFT) + SBASE;
- return ss == null ? null :
- (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u);
+ public ConcurrentHashMap() {
}
/**
- * Returns the segment for the given index, creating it and
- * recording in segment table (via CAS) if not already present.
+ * Creates a new, empty map with an initial table size
+ * accommodating the specified number of elements without the need
+ * to dynamically resize.
*
- * @param k the index
- * @return the segment
- */
- @SuppressWarnings("unchecked")
- private Segment<K,V> ensureSegment(int k) {
- final Segment<K,V>[] ss = this.segments;
- long u = (k << SSHIFT) + SBASE; // raw offset
- Segment<K,V> seg;
- if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
- Segment<K,V> proto = ss[0]; // use segment 0 as prototype
- int cap = proto.table.length;
- float lf = proto.loadFactor;
- int threshold = (int)(cap * lf);
- HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
- if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
- == null) { // recheck
- Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
- while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
- == null) {
- if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
- break;
- }
- }
- }
- return seg;
- }
-
- // Hash-based segment and entry accesses
-
- /**
- * Get the segment for the given hash
+ * @param initialCapacity The implementation performs internal
+ * sizing to accommodate this many elements.
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative
*/
- @SuppressWarnings("unchecked")
- private Segment<K,V> segmentForHash(int h) {
- long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
- return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u);
+ public ConcurrentHashMap(int initialCapacity) {
+ if (initialCapacity < 0)
+ throw new IllegalArgumentException();
+ int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
+ MAXIMUM_CAPACITY :
+ tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
+ this.sizeCtl = cap;
}
/**
- * Gets the table entry for the given segment and hash
+ * Creates a new map with the same mappings as the given map.
+ *
+ * @param m the map
*/
- @SuppressWarnings("unchecked")
- static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) {
- HashEntry<K,V>[] tab;
- return (seg == null || (tab = seg.table) == null) ? null :
- (HashEntry<K,V>) UNSAFE.getObjectVolatile
- (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
+ public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
+ this.sizeCtl = DEFAULT_CAPACITY;
+ putAll(m);
}
- /* ---------------- Public operations -------------- */
-
/**
- * Creates a new, empty map with the specified initial
- * capacity, load factor and concurrency level.
+ * Creates a new, empty map with an initial table size based on
+ * the given number of elements ({@code initialCapacity}) and
+ * initial table density ({@code loadFactor}).
*
* @param initialCapacity the initial capacity. The implementation
- * performs internal sizing to accommodate this many elements.
- * @param loadFactor the load factor threshold, used to control resizing.
- * Resizing may be performed when the average number of elements per
- * bin exceeds this threshold.
- * @param concurrencyLevel the estimated number of concurrently
- * updating threads. The implementation performs internal sizing
- * to try to accommodate this many threads.
- * @throws IllegalArgumentException if the initial capacity is
- * negative or the load factor or concurrencyLevel are
- * nonpositive.
- */
- @SuppressWarnings("unchecked")
- public ConcurrentHashMap(int initialCapacity,
- float loadFactor, int concurrencyLevel) {
- if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
- throw new IllegalArgumentException();
- if (concurrencyLevel > MAX_SEGMENTS)
- concurrencyLevel = MAX_SEGMENTS;
- // Find power-of-two sizes best matching arguments
- int sshift = 0;
- int ssize = 1;
- while (ssize < concurrencyLevel) {
- ++sshift;
- ssize <<= 1;
- }
- this.segmentShift = 32 - sshift;
- this.segmentMask = ssize - 1;
- if (initialCapacity > MAXIMUM_CAPACITY)
- initialCapacity = MAXIMUM_CAPACITY;
- int c = initialCapacity / ssize;
- if (c * ssize < initialCapacity)
- ++c;
- int cap = MIN_SEGMENT_TABLE_CAPACITY;
- while (cap < c)
- cap <<= 1;
- // create segments and segments[0]
- Segment<K,V> s0 =
- new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
- (HashEntry<K,V>[])new HashEntry[cap]);
- Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
- UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
- this.segments = ss;
- }
-
- /**
- * Creates a new, empty map with the specified initial capacity
- * and load factor and with the default concurrencyLevel (16).
- *
- * @param initialCapacity The implementation performs internal
- * sizing to accommodate this many elements.
- * @param loadFactor the load factor threshold, used to control resizing.
- * Resizing may be performed when the average number of elements per
- * bin exceeds this threshold.
+ * performs internal sizing to accommodate this many elements,
+ * given the specified load factor.
+ * @param loadFactor the load factor (table density) for
+ * establishing the initial table size
* @throws IllegalArgumentException if the initial capacity of
* elements is negative or the load factor is nonpositive
*
* @since 1.6
*/
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
- this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
+ this(initialCapacity, loadFactor, 1);
}
/**
- * Creates a new, empty map with the specified initial capacity,
- * and with default load factor (0.75) and concurrencyLevel (16).
+ * Creates a new, empty map with an initial table size based on
+ * the given number of elements ({@code initialCapacity}), table
+ * density ({@code loadFactor}), and number of concurrently
+ * updating threads ({@code concurrencyLevel}).
*
* @param initialCapacity the initial capacity. The implementation
- * performs internal sizing to accommodate this many elements.
- * @throws IllegalArgumentException if the initial capacity of
- * elements is negative.
+ * performs internal sizing to accommodate this many elements,
+ * given the specified load factor.
+ * @param loadFactor the load factor (table density) for
+ * establishing the initial table size
+ * @param concurrencyLevel the estimated number of concurrently
+ * updating threads. The implementation may use this value as
+ * a sizing hint.
+ * @throws IllegalArgumentException if the initial capacity is
+ * negative or the load factor or concurrencyLevel are
+ * nonpositive
*/
- public ConcurrentHashMap(int initialCapacity) {
- this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
+ public ConcurrentHashMap(int initialCapacity,
+ float loadFactor, int concurrencyLevel) {
+ if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
+ throw new IllegalArgumentException();
+ if (initialCapacity < concurrencyLevel) // Use at least as many bins
+ initialCapacity = concurrencyLevel; // as estimated threads
+ long size = (long)(1.0 + (long)initialCapacity / loadFactor);
+ int cap = (size >= (long)MAXIMUM_CAPACITY) ?
+ MAXIMUM_CAPACITY : tableSizeFor((int)size);
+ this.sizeCtl = cap;
}
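/*
 * Editor's note (illustrative sketch, not part of this patch): with the
 * 1.8 scheme above, concurrencyLevel only lower-bounds the bin count.
 * For example:
 *
 *   new ConcurrentHashMap<String,Long>(16, 0.75f, 4);
 *   // size = (long)(1.0 + 16 / 0.75) == 22, cap = tableSizeFor(22) == 32
 */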
- /**
- * Creates a new, empty map with a default initial capacity (16),
- * load factor (0.75) and concurrencyLevel (16).
- */
- public ConcurrentHashMap() {
- this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
- }
+ // Original (since JDK1.2) Map methods
/**
- * Creates a new map with the same mappings as the given map.
- * The map is created with a capacity of 1.5 times the number
- * of mappings in the given map or 16 (whichever is greater),
- * and a default load factor (0.75) and concurrencyLevel (16).
- *
- * @param m the map
+ * {@inheritDoc}
*/
- public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
- this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
- DEFAULT_INITIAL_CAPACITY),
- DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
- putAll(m);
+ public int size() {
+ long n = sumCount();
+ return ((n < 0L) ? 0 :
+ (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
+ (int)n);
}
/**
- * Returns true if this map contains no key-value mappings.
- *
- * @return true if this map contains no key-value mappings
+ * {@inheritDoc}
*/
public boolean isEmpty() {
- /*
- * Sum per-segment modCounts to avoid mis-reporting when
- * elements are concurrently added and removed in one segment
- * while checking another, in which case the table was never
- * actually empty at any point. (The sum ensures accuracy up
- * through at least 1<<31 per-segment modifications before
- * recheck.) Methods size() and containsValue() use similar
- * constructions for stability checks.
- */
- long sum = 0L;
- final Segment<K,V>[] segments = this.segments;
- for (int j = 0; j < segments.length; ++j) {
- Segment<K,V> seg = segmentAt(segments, j);
- if (seg != null) {
- if (seg.count != 0)
- return false;
- sum += seg.modCount;
- }
- }
- if (sum != 0L) { // recheck unless no modifications
- for (int j = 0; j < segments.length; ++j) {
- Segment<K,V> seg = segmentAt(segments, j);
- if (seg != null) {
- if (seg.count != 0)
- return false;
- sum -= seg.modCount;
- }
- }
- if (sum != 0L)
- return false;
- }
- return true;
- }
-
- /**
- * Returns the number of key-value mappings in this map. If the
- * map contains more than Integer.MAX_VALUE elements, returns
- * Integer.MAX_VALUE.
- *
- * @return the number of key-value mappings in this map
- */
- public int size() {
- // Try a few times to get accurate count. On failure due to
- // continuous async changes in table, resort to locking.
- final Segment<K,V>[] segments = this.segments;
- int size;
- boolean overflow; // true if size overflows 32 bits
- long sum; // sum of modCounts
- long last = 0L; // previous sum
- int retries = -1; // first iteration isn't retry
- try {
- for (;;) {
- if (retries++ == RETRIES_BEFORE_LOCK) {
- for (int j = 0; j < segments.length; ++j)
- ensureSegment(j).lock(); // force creation
- }
- sum = 0L;
- size = 0;
- overflow = false;
- for (int j = 0; j < segments.length; ++j) {
- Segment<K,V> seg = segmentAt(segments, j);
- if (seg != null) {
- sum += seg.modCount;
- int c = seg.count;
- if (c < 0 || (size += c) < 0)
- overflow = true;
- }
- }
- if (sum == last)
- break;
- last = sum;
- }
- } finally {
- if (retries > RETRIES_BEFORE_LOCK) {
- for (int j = 0; j < segments.length; ++j)
- segmentAt(segments, j).unlock();
- }
- }
- return overflow ? Integer.MAX_VALUE : size;
+ return sumCount() <= 0L; // ignore transient negative values
}
/**
@@ -825,18 +932,20 @@ public int size() {
* @throws NullPointerException if the specified key is null
*/
public V get(Object key) {
- Segment<K,V> s; // manually integrate access methods to reduce overhead
- HashEntry<K,V>[] tab;
- int h = hash(key.hashCode());
- long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
- if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
- (tab = s.table) != null) {
- for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
- (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
- e != null; e = e.next) {
- K k;
- if ((k = e.key) == key || (e.hash == h && key.equals(k)))
- return e.value;
+ Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
+ int h = spread(key.hashCode());
+ if ((tab = table) != null && (n = tab.length) > 0 &&
+ (e = tabAt(tab, (n - 1) & h)) != null) {
+ if ((eh = e.hash) == h) {
+ if ((ek = e.key) == key || (ek != null && key.equals(ek)))
+ return e.val;
+ }
+ else if (eh < 0)
+ return (p = e.find(h, key)) != null ? p.val : null;
+ while ((e = e.next) != null) {
+ if (e.hash == h &&
+ ((ek = e.key) == key || (ek != null && key.equals(ek))))
+ return e.val;
}
}
return null;
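/*
 * Editor's note (illustrative sketch, not part of this patch): the 1.8
 * get() above replaces the 1.7 segment probe with three lock-free paths:
 * (1) the first node in the bin matches; (2) e.hash < 0, meaning a special
 * node (e.g. a ForwardingNode during resize, or a TreeBin) whose overridden
 * find() continues the search in the next table or red-black tree; (3) a
 * plain traversal of the bin's linked list.
 */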
@@ -845,151 +954,121 @@ public V get(Object key) {
/**
* Tests if the specified object is a key in this table.
*
- * @param key possible key
- * @return true if and only if the specified object
+ * @param key possible key
+ * @return {@code true} if and only if the specified object
* is a key in this table, as determined by the
- * equals method; false otherwise.
+ * {@code equals} method; {@code false} otherwise
* @throws NullPointerException if the specified key is null
*/
- @SuppressWarnings("unchecked")
public boolean containsKey(Object key) {
- Segment<K,V> s; // same as get() except no need for volatile value read
- HashEntry<K,V>[] tab;
- int h = hash(key.hashCode());
- long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
- if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
- (tab = s.table) != null) {
- for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
- (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
- e != null; e = e.next) {
- K k;
- if ((k = e.key) == key || (e.hash == h && key.equals(k)))
- return true;
- }
- }
- return false;
+ return get(key) != null;
}
/**
- * Returns true if this map maps one or more keys to the
- * specified value. Note: This method requires a full internal
- * traversal of the hash table, and so is much slower than
- * method containsKey.
+ * Returns {@code true} if this map maps one or more keys to the
+ * specified value. Note: This method may require a full traversal
+ * of the map, and is much slower than method {@code containsKey}.
*
* @param value value whose presence in this map is to be tested
- * @return true if this map maps one or more keys to the
+ * @return {@code true} if this map maps one or more keys to the
* specified value
* @throws NullPointerException if the specified value is null
*/
public boolean containsValue(Object value) {
- // Same idea as size()
if (value == null)
throw new NullPointerException();
- final Segment<K,V>[] segments = this.segments;
- boolean found = false;
- long last = 0;
- int retries = -1;
- try {
- outer: for (;;) {
- if (retries++ == RETRIES_BEFORE_LOCK) {
- for (int j = 0; j < segments.length; ++j)
- ensureSegment(j).lock(); // force creation
- }
- long hashSum = 0L;
- int sum = 0;
- for (int j = 0; j < segments.length; ++j) {
- HashEntry<K,V>[] tab;
- Segment<K,V> seg = segmentAt(segments, j);
- if (seg != null && (tab = seg.table) != null) {
- for (int i = 0 ; i < tab.length; i++) {
- HashEntry<K,V> e;
- for (e = entryAt(tab, i); e != null; e = e.next) {
- V v = e.value;
- if (v != null && value.equals(v)) {
- found = true;
- break outer;
- }
- }
- }
- sum += seg.modCount;
- }
- }
- if (retries > 0 && sum == last)
- break;
- last = sum;
- }
- } finally {
- if (retries > RETRIES_BEFORE_LOCK) {
- for (int j = 0; j < segments.length; ++j)
- segmentAt(segments, j).unlock();
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ V v;
+ if ((v = p.val) == value || (v != null && value.equals(v)))
+ return true;
}
}
- return found;
- }
-
- /**
- * Legacy method testing if some key maps into the specified value
- * in this table. This method is identical in functionality to
- * {@link #containsValue}, and exists solely to ensure
- * full compatibility with class {@link java.util.Hashtable},
- * which supported this method prior to introduction of the
- * Java Collections framework.
- *
- * @param value a value to search for
- * @return true if and only if some key maps to the
- * value argument in this table as
- * determined by the equals method;
- * false otherwise
- * @throws NullPointerException if the specified value is null
- */
- public boolean contains(Object value) {
- return containsValue(value);
+ return false;
}
/**
* Maps the specified key to the specified value in this table.
* Neither the key nor the value can be null.
*
- * The value can be retrieved by calling the get method
+ * <p>The value can be retrieved by calling the {@code get} method
* with a key that is equal to the original key.
*
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
- * @return the previous value associated with key, or
- * null if there was no mapping for key
+ * @return the previous value associated with {@code key}, or
+ * {@code null} if there was no mapping for {@code key}
* @throws NullPointerException if the specified key or value is null
*/
- @SuppressWarnings("unchecked")
public V put(K key, V value) {
- Segment<K,V> s;
- if (value == null)
- throw new NullPointerException();
- int hash = hash(key.hashCode());
- int j = (hash >>> segmentShift) & segmentMask;
- if ((s = (Segment<K,V>)UNSAFE.getObject // nonvolatile; recheck
- (segments, (j << SSHIFT) + SBASE)) == null) // in ensureSegment
- s = ensureSegment(j);
- return s.put(key, hash, value, false);
+ return putVal(key, value, false);
}
- /**
- * {@inheritDoc}
- *
- * @return the previous value associated with the specified key,
- * or null if there was no mapping for the key
- * @throws NullPointerException if the specified key or value is null
- */
- @SuppressWarnings("unchecked")
- public V putIfAbsent(K key, V value) {
- Segment<K,V> s;
- if (value == null)
- throw new NullPointerException();
- int hash = hash(key.hashCode());
- int j = (hash >>> segmentShift) & segmentMask;
- if ((s = (Segment<K,V>)UNSAFE.getObject
- (segments, (j << SSHIFT) + SBASE)) == null)
- s = ensureSegment(j);
- return s.put(key, hash, value, true);
+ /** Implementation for put and putIfAbsent */
+ final V putVal(K key, V value, boolean onlyIfAbsent) {
+ if (key == null || value == null) throw new NullPointerException();
+ int hash = spread(key.hashCode());
+ int binCount = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
+ if (casTabAt(tab, i, null,
+ new Node<K,V>(hash, key, value, null)))
+ break; // no lock when adding to empty bin
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ V oldVal = null;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ binCount = 1;
+ for (Node<K,V> e = f;; ++binCount) {
+ K ek;
+ if (e.hash == hash &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ oldVal = e.val;
+ if (!onlyIfAbsent)
+ e.val = value;
+ break;
+ }
+ Node<K,V> pred = e;
+ if ((e = e.next) == null) {
+ pred.next = new Node<K,V>(hash, key,
+ value, null);
+ break;
+ }
+ }
+ }
+ else if (f instanceof TreeBin) {
+ Node<K,V> p;
+ binCount = 2;
+ if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
+ value)) != null) {
+ oldVal = p.val;
+ if (!onlyIfAbsent)
+ p.val = value;
+ }
+ }
+ }
+ }
+ if (binCount != 0) {
+ if (binCount >= TREEIFY_THRESHOLD)
+ treeifyBin(tab, i);
+ if (oldVal != null)
+ return oldVal;
+ break;
+ }
+ }
+ }
+ addCount(1L, binCount);
+ return null;
}
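/*
 * Editor's note (illustrative sketch, not part of this patch): resulting
 * user-visible semantics, shared by put() and putIfAbsent():
 *
 *   ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
 *   m.put("k", 1);           // returns null (no prior mapping)
 *   m.put("k", 2);           // returns 1, value replaced
 *   m.putIfAbsent("k", 3);   // returns 2, mapping left at 2
 */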
/**
@@ -1000,8 +1079,9 @@ public V putIfAbsent(K key, V value) {
* @param m mappings to be stored in this map
*/
public void putAll(Map<? extends K, ? extends V> m) {
+ tryPresize(m.size());
for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
- put(e.getKey(), e.getValue());
+ putVal(e.getKey(), e.getValue(), false);
}
/**
@@ -1009,87 +1089,146 @@ public void putAll(Map extends K, ? extends V> m) {
* This method does nothing if the key is not in the map.
*
* @param key the key that needs to be removed
- * @return the previous value associated with key, or
- * null if there was no mapping for key
+ * @return the previous value associated with {@code key}, or
+ * {@code null} if there was no mapping for {@code key}
* @throws NullPointerException if the specified key is null
*/
public V remove(Object key) {
- int hash = hash(key.hashCode());
- Segment<K,V> s = segmentForHash(hash);
- return s == null ? null : s.remove(key, hash, null);
- }
-
- /**
- * {@inheritDoc}
- *
- * @throws NullPointerException if the specified key is null
- */
- public boolean remove(Object key, Object value) {
- int hash = hash(key.hashCode());
- Segment<K,V> s;
- return value != null && (s = segmentForHash(hash)) != null &&
- s.remove(key, hash, value) != null;
+ return replaceNode(key, null, null);
}
/**
- * {@inheritDoc}
- *
- * @throws NullPointerException if any of the arguments are null
- */
- public boolean replace(K key, V oldValue, V newValue) {
- int hash = hash(key.hashCode());
- if (oldValue == null || newValue == null)
- throw new NullPointerException();
- Segment<K,V> s = segmentForHash(hash);
- return s != null && s.replace(key, hash, oldValue, newValue);
- }
-
- /**
- * {@inheritDoc}
- *
- * @return the previous value associated with the specified key,
- * or null if there was no mapping for the key
- * @throws NullPointerException if the specified key or value is null
+ * Implementation for the four public remove/replace methods:
+ * Replaces node value with v, conditional upon match of cv if
+ * non-null. If resulting value is null, delete.
*/
- public V replace(K key, V value) {
- int hash = hash(key.hashCode());
- if (value == null)
- throw new NullPointerException();
- Segment<K,V> s = segmentForHash(hash);
- return s == null ? null : s.replace(key, hash, value);
+ final V replaceNode(Object key, V value, Object cv) {
+ int hash = spread(key.hashCode());
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0 ||
+ (f = tabAt(tab, i = (n - 1) & hash)) == null)
+ break;
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ V oldVal = null;
+ boolean validated = false;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ validated = true;
+ for (Node<K,V> e = f, pred = null;;) {
+ K ek;
+ if (e.hash == hash &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ V ev = e.val;
+ if (cv == null || cv == ev ||
+ (ev != null && cv.equals(ev))) {
+ oldVal = ev;
+ if (value != null)
+ e.val = value;
+ else if (pred != null)
+ pred.next = e.next;
+ else
+ setTabAt(tab, i, e.next);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null)
+ break;
+ }
+ }
+ else if (f instanceof TreeBin) {
+ validated = true;
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> r, p;
+ if ((r = t.root) != null &&
+ (p = r.findTreeNode(hash, key, null)) != null) {
+ V pv = p.val;
+ if (cv == null || cv == pv ||
+ (pv != null && cv.equals(pv))) {
+ oldVal = pv;
+ if (value != null)
+ p.val = value;
+ else if (t.removeTreeNode(p))
+ setTabAt(tab, i, untreeify(t.first));
+ }
+ }
+ }
+ }
+ }
+ if (validated) {
+ if (oldVal != null) {
+ if (value == null)
+ addCount(-1L, -1);
+ return oldVal;
+ }
+ break;
+ }
+ }
+ }
+ return null;
}
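/*
 * Editor's note (illustrative sketch, not part of this patch): the four
 * public remove/replace methods reduce to replaceNode(key, value, cv):
 *
 *   remove(k)              -> replaceNode(k, null, null)
 *   remove(k, v)           -> replaceNode(k, null, v) != null
 *   replace(k, v)          -> replaceNode(k, v, null)
 *   replace(k, oldV, newV) -> replaceNode(k, newV, oldV) != null
 */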
/**
* Removes all of the mappings from this map.
*/
public void clear() {
- final Segment<K,V>[] segments = this.segments;
- for (int j = 0; j < segments.length; ++j) {
- Segment<K,V> s = segmentAt(segments, j);
- if (s != null)
- s.clear();
+ long delta = 0L; // negative number of deletions
+ int i = 0;
+ Node<K,V>[] tab = table;
+ while (tab != null && i < tab.length) {
+ int fh;
+ Node<K,V> f = tabAt(tab, i);
+ if (f == null)
+ ++i;
+ else if ((fh = f.hash) == MOVED) {
+ tab = helpTransfer(tab, f);
+ i = 0; // restart
+ }
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ Node<K,V> p = (fh >= 0 ? f :
+ (f instanceof TreeBin) ?
+ ((TreeBin<K,V>)f).first : null);
+ while (p != null) {
+ --delta;
+ p = p.next;
+ }
+ setTabAt(tab, i++, null);
+ }
+ }
+ }
}
+ if (delta != 0L)
+ addCount(delta, -1);
}
/**
* Returns a {@link Set} view of the keys contained in this map.
* The set is backed by the map, so changes to the map are
- * reflected in the set, and vice-versa. The set supports element
+ * reflected in the set, and vice-versa. The set supports element
* removal, which removes the corresponding mapping from this map,
- * via the Iterator.remove, Set.remove,
- * removeAll, retainAll, and clear
- * operations. It does not support the add or
- * addAll operations.
+ * via the {@code Iterator.remove}, {@code Set.remove},
+ * {@code removeAll}, {@code retainAll}, and {@code clear}
+ * operations. It does not support the {@code add} or
+ * {@code addAll} operations.
+ *
+ * <p>The view's iterators and spliterators are
+ * weakly consistent.
+ *
+ * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
+ * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
*
- * <p>The view's iterator is a "weakly consistent" iterator
- * that will never throw {@link ConcurrentModificationException},
- * and guarantees to traverse elements as they existed upon
- * construction of the iterator, and may (but is not guaranteed to)
- * reflect any modifications subsequent to construction.
+ * @return the set view
*/
- public Set<K> keySet() {
- Set<K> ks = keySet;
- return (ks != null) ? ks : (keySet = new KeySet());
+ public KeySetView<K,V> keySet() {
+ KeySetView<K,V> ks;
+ return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
}
/**
@@ -1097,20 +1236,22 @@ public Set keySet() {
* The collection is backed by the map, so changes to the map are
* reflected in the collection, and vice-versa. The collection
* supports element removal, which removes the corresponding
- * mapping from this map, via the Iterator.remove,
- * Collection.remove, removeAll,
- * retainAll, and clear operations. It does not
- * support the add or addAll operations.
+ * mapping from this map, via the {@code Iterator.remove},
+ * {@code Collection.remove}, {@code removeAll},
+ * {@code retainAll}, and {@code clear} operations. It does not
+ * support the {@code add} or {@code addAll} operations.
*
- * <p>The view's iterator is a "weakly consistent" iterator
- * that will never throw {@link ConcurrentModificationException},
- * and guarantees to traverse elements as they existed upon
- * construction of the iterator, and may (but is not guaranteed to)
- * reflect any modifications subsequent to construction.
+ * <p>The view's iterators and spliterators are
+ * weakly consistent.
+ *
+ * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
+ * and {@link Spliterator#NONNULL}.
+ *
+ * @return the collection view
*/
public Collection<V> values() {
- Collection<V> vs = values;
- return (vs != null) ? vs : (values = new Values());
+ ValuesView<K,V> vs;
+ return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
}
/**
@@ -1118,313 +1259,5054 @@ public Collection values() {
* The set is backed by the map, so changes to the map are
* reflected in the set, and vice-versa. The set supports element
* removal, which removes the corresponding mapping from the map,
- * via the Iterator.remove, Set.remove,
- * removeAll, retainAll, and clear
- * operations. It does not support the add or
- * addAll operations.
+ * via the {@code Iterator.remove}, {@code Set.remove},
+ * {@code removeAll}, {@code retainAll}, and {@code clear}
+ * operations.
+ *
+ * <p>The view's iterators and spliterators are
+ * weakly consistent.
+ *
+ * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
+ * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
*
- * <p>The view's iterator is a "weakly consistent" iterator
- * that will never throw {@link ConcurrentModificationException},
- * and guarantees to traverse elements as they existed upon
- * construction of the iterator, and may (but is not guaranteed to)
- * reflect any modifications subsequent to construction.
+ * @return the set view
*/
public Set<Map.Entry<K,V>> entrySet() {
- Set<Map.Entry<K,V>> es = entrySet;
- return (es != null) ? es : (entrySet = new EntrySet());
+ EntrySetView<K,V> es;
+ return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
}
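/*
 * Editor's note (illustrative sketch, not part of this patch): because all
 * three views are weakly consistent, the map may be mutated mid-iteration
 * without a ConcurrentModificationException:
 *
 *   for (Map.Entry<String,Integer> e : map.entrySet())
 *       if (e.getValue() == 0)
 *           map.remove(e.getKey());   // safe while iterating
 */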
/**
- * Returns an enumeration of the keys in this table.
+ * Returns the hash code value for this {@link Map}, i.e.,
+ * the sum of, for each key-value pair in the map,
+ * {@code key.hashCode() ^ value.hashCode()}.
*
- * @return an enumeration of the keys in this table
- * @see #keySet()
+ * @return the hash code value for this map
*/
- public Enumeration<K> keys() {
- return new KeyIterator();
+ public int hashCode() {
+ int h = 0;
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; )
+ h += p.key.hashCode() ^ p.val.hashCode();
+ }
+ return h;
}
/**
- * Returns an enumeration of the values in this table.
+ * Returns a string representation of this map. The string
+ * representation consists of a list of key-value mappings (in no
+ * particular order) enclosed in braces ("{@code {}}"). Adjacent
+ * mappings are separated by the characters {@code ", "} (comma
+ * and space). Each key-value mapping is rendered as the key
+ * followed by an equals sign ("{@code =}") followed by the
+ * associated value.
*
- * @return an enumeration of the values in this table
- * @see #values()
+ * @return a string representation of this map
*/
- public Enumeration<V> elements() {
- return new ValueIterator();
- }
-
- /* ---------------- Iterator Support -------------- */
-
- abstract class HashIterator {
- int nextSegmentIndex;
- int nextTableIndex;
- HashEntry<K,V>[] currentTable;
- HashEntry<K,V> nextEntry;
- HashEntry<K,V> lastReturned;
-
- HashIterator() {
- nextSegmentIndex = segments.length - 1;
- nextTableIndex = -1;
- advance();
- }
-
- /**
- * Set nextEntry to first node of next non-empty table
- * (in backwards order, to simplify checks).
- */
- final void advance() {
+ public String toString() {
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
+ StringBuilder sb = new StringBuilder();
+ sb.append('{');
+ Node<K,V> p;
+ if ((p = it.advance()) != null) {
for (;;) {
- if (nextTableIndex >= 0) {
- if ((nextEntry = entryAt(currentTable,
- nextTableIndex--)) != null)
- break;
- }
- else if (nextSegmentIndex >= 0) {
- Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--);
- if (seg != null && (currentTable = seg.table) != null)
- nextTableIndex = currentTable.length - 1;
- }
- else
+ K k = p.key;
+ V v = p.val;
+ sb.append(k == this ? "(this Map)" : k);
+ sb.append('=');
+ sb.append(v == this ? "(this Map)" : v);
+ if ((p = it.advance()) == null)
break;
+ sb.append(',').append(' ');
}
}
-
- final HashEntry<K,V> nextEntry() {
- HashEntry<K,V> e = nextEntry;
- if (e == null)
- throw new NoSuchElementException();
- lastReturned = e; // cannot assign until after null check
- if ((nextEntry = e.next) == null)
- advance();
- return e;
- }
-
- public final boolean hasNext() { return nextEntry != null; }
- public final boolean hasMoreElements() { return nextEntry != null; }
-
- public final void remove() {
- if (lastReturned == null)
- throw new IllegalStateException();
- ConcurrentHashMap.this.remove(lastReturned.key);
- lastReturned = null;
- }
- }
-
- final class KeyIterator
- extends HashIterator
- implements Iterator<K>, Enumeration<K>
- {
- public final K next() { return super.nextEntry().key; }
- public final K nextElement() { return super.nextEntry().key; }
- }
-
- final class ValueIterator
- extends HashIterator
- implements Iterator<V>, Enumeration<V>
- {
- public final V next() { return super.nextEntry().value; }
- public final V nextElement() { return super.nextEntry().value; }
+ return sb.append('}').toString();
}
/**
- * Custom Entry class used by EntryIterator.next(), that relays
- * setValue changes to the underlying map.
+ * Compares the specified object with this map for equality.
+ * Returns {@code true} if the given object is a map with the same
+ * mappings as this map. This operation may return misleading
+ * results if either map is concurrently modified during execution
+ * of this method.
+ *
+ * @param o object to be compared for equality with this map
+ * @return {@code true} if the specified object is equal to this map
*/
- final class WriteThroughEntry
- extends AbstractMap.SimpleEntry<K,V>
- {
- WriteThroughEntry(K k, V v) {
- super(k,v);
- }
-
- /**
- * Set our entry's value and write through to the map. The
- * value to return is somewhat arbitrary here. Since a
- * WriteThroughEntry does not necessarily track asynchronous
- * changes, the most recent "previous" value could be
- * different from what we return (or could even have been
- * removed in which case the put will re-establish). We do not
- * and cannot guarantee more.
- */
- public V setValue(V value) {
- if (value == null) throw new NullPointerException();
- V v = super.setValue(value);
- ConcurrentHashMap.this.put(getKey(), value);
- return v;
- }
- }
-
- final class EntryIterator
- extends HashIterator
- implements Iterator<Map.Entry<K,V>>
- {
- public Map.Entry<K,V> next() {
- HashEntry<K,V> e = super.nextEntry();
- return new WriteThroughEntry(e.key, e.value);
- }
- }
-
- final class KeySet extends AbstractSet<K> {
- public Iterator<K> iterator() {
- return new KeyIterator();
- }
- public int size() {
- return ConcurrentHashMap.this.size();
- }
- public boolean isEmpty() {
- return ConcurrentHashMap.this.isEmpty();
- }
- public boolean contains(Object o) {
- return ConcurrentHashMap.this.containsKey(o);
- }
- public boolean remove(Object o) {
- return ConcurrentHashMap.this.remove(o) != null;
- }
- public void clear() {
- ConcurrentHashMap.this.clear();
- }
- }
-
- final class Values extends AbstractCollection<V> {
- public Iterator<V> iterator() {
- return new ValueIterator();
- }
- public int size() {
- return ConcurrentHashMap.this.size();
- }
- public boolean isEmpty() {
- return ConcurrentHashMap.this.isEmpty();
- }
- public boolean contains(Object o) {
- return ConcurrentHashMap.this.containsValue(o);
- }
- public void clear() {
- ConcurrentHashMap.this.clear();
- }
- }
-
- final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
- public Iterator<Map.Entry<K,V>> iterator() {
- return new EntryIterator();
- }
- public boolean contains(Object o) {
- if (!(o instanceof Map.Entry))
- return false;
- Map.Entry<?,?> e = (Map.Entry<?,?>)o;
- V v = ConcurrentHashMap.this.get(e.getKey());
- return v != null && v.equals(e.getValue());
- }
- public boolean remove(Object o) {
- if (!(o instanceof Map.Entry))
+ public boolean equals(Object o) {
+ if (o != this) {
+ if (!(o instanceof Map))
return false;
- Map.Entry<?,?> e = (Map.Entry<?,?>)o;
- return ConcurrentHashMap.this.remove(e.getKey(), e.getValue());
- }
- public int size() {
- return ConcurrentHashMap.this.size();
- }
- public boolean isEmpty() {
- return ConcurrentHashMap.this.isEmpty();
- }
- public void clear() {
- ConcurrentHashMap.this.clear();
+ Map<?,?> m = (Map<?,?>) o;
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ V val = p.val;
+ Object v = m.get(p.key);
+ if (v == null || (v != val && !v.equals(val)))
+ return false;
+ }
+ for (Map.Entry<?,?> e : m.entrySet()) {
+ Object mk, mv, v;
+ if ((mk = e.getKey()) == null ||
+ (mv = e.getValue()) == null ||
+ (v = get(mk)) == null ||
+ (mv != v && !mv.equals(v)))
+ return false;
+ }
}
+ return true;
}
- /* ---------------- Serialization Support -------------- */
+ /**
+ * Stripped-down version of helper class used in previous version,
+ * declared for the sake of serialization compatibility
+ */
static class Segment<K,V> extends ReentrantLock implements Serializable {
+ private static final long serialVersionUID = 2249069246763182397L;
+ final float loadFactor;
+ Segment(float lf) { this.loadFactor = lf; }
+ }
/**
- * Save the state of the ConcurrentHashMap instance to a
- * stream (i.e., serialize it).
+ * Saves the state of the {@code ConcurrentHashMap} instance to a
+ * stream (i.e., serializes it).
* @param s the stream
+ * @throws java.io.IOException if an I/O error occurs
* @serialData
* the key (Object) and value (Object)
* for each key-value mapping, followed by a null pair.
* The key-value mappings are emitted in no particular order.
*/
- private void writeObject(java.io.ObjectOutputStream s) throws IOException {
- // force all segments for serialization compatibility
- for (int k = 0; k < segments.length; ++k)
- ensureSegment(k);
- s.defaultWriteObject();
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ // For serialization compatibility
+ // Emulate segment calculation from previous version of this class
+ int sshift = 0;
+ int ssize = 1;
+ while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
+ ++sshift;
+ ssize <<= 1;
+ }
+ int segmentShift = 32 - sshift;
+ int segmentMask = ssize - 1;
+ @SuppressWarnings("unchecked")
+ Segment<K,V>[] segments = (Segment<K,V>[])
+ new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
+ for (int i = 0; i < segments.length; ++i)
+ segments[i] = new Segment<K,V>(LOAD_FACTOR);
+ s.putFields().put("segments", segments);
+ s.putFields().put("segmentShift", segmentShift);
+ s.putFields().put("segmentMask", segmentMask);
+ s.writeFields();
- final Segment<K,V>[] segments = this.segments;
- for (int k = 0; k < segments.length; ++k) {
- Segment<K,V> seg = segmentAt(segments, k);
- seg.lock();
- try {
- HashEntry<K,V>[] tab = seg.table;
- for (int i = 0; i < tab.length; ++i) {
- HashEntry<K,V> e;
- for (e = entryAt(tab, i); e != null; e = e.next) {
- s.writeObject(e.key);
- s.writeObject(e.value);
- }
- }
- } finally {
- seg.unlock();
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ s.writeObject(p.key);
+ s.writeObject(p.val);
}
}
s.writeObject(null);
s.writeObject(null);
+ segments = null; // throw away
}
/**
- * Reconstitute the ConcurrentHashMap instance from a
- * stream (i.e., deserialize it).
+ * Reconstitutes the instance from a stream (that is, deserializes it).
* @param s the stream
+ * @throws ClassNotFoundException if the class of a serialized object
+ * could not be found
+ * @throws java.io.IOException if an I/O error occurs
*/
- @SuppressWarnings("unchecked")
private void readObject(java.io.ObjectInputStream s)
- throws IOException, ClassNotFoundException {
+ throws java.io.IOException, ClassNotFoundException {
+ /*
+ * To improve performance in typical cases, we create nodes
+ * while reading, then place in table once size is known.
+ * However, we must also validate uniqueness and deal with
+ * overpopulated bins while doing so, which requires
+ * specialized versions of putVal mechanics.
+ */
+ sizeCtl = -1; // force exclusion for table construction
s.defaultReadObject();
-
- // Re-initialize segments to be minimally sized, and let grow.
- int cap = MIN_SEGMENT_TABLE_CAPACITY;
- final Segment<K,V>[] segments = this.segments;
- for (int k = 0; k < segments.length; ++k) {
- Segment