From 7ec8c6c01e4c75f99ba84237b09a1db732bf7af7 Mon Sep 17 00:00:00 2001 From: CyC2018 <1029579233@qq.com> Date: Sat, 24 Mar 2018 10:36:31 +0800 Subject: [PATCH 1/2] Change to JDK 1.8 --- README.md | 2 +- src/ArrayList.java | 494 ++- src/ConcurrentHashMap.java | 7156 ++++++++++++++++++++++++++++------ src/ConcurrentMap.java | 517 +++ src/HashMap.java | 2578 +++++++++--- src/HashSet.java | 139 +- src/Hashtable.java | 1402 +++++++ src/Iterator.java | 118 + src/LinkedBlockingQueue.java | 1044 +++++ src/LinkedHashMap.java | 755 +++- src/LinkedHashSet.java | 138 + src/LinkedList.java | 205 +- src/List.java | 734 ++++ src/Map.java | 1183 ++++++ src/PriorityQueue.java | 237 +- src/Queue.java | 156 +- src/Set.java | 413 ++ src/Stack.java | 44 + src/String.java | 1505 +++---- src/StringBuffer.java | 318 +- src/StringBuilder.java | 214 +- src/ThreadLocal.java | 722 ++++ src/TreeMap.java | 883 ++++- src/TreeSet.java | 127 +- src/Vector.java | 296 +- src/WeakHashMap.java | 1331 +++++++ 26 files changed, 19837 insertions(+), 2874 deletions(-) create mode 100644 src/ConcurrentMap.java create mode 100644 src/Hashtable.java create mode 100644 src/Iterator.java create mode 100644 src/LinkedBlockingQueue.java create mode 100644 src/List.java create mode 100644 src/Map.java create mode 100644 src/Set.java create mode 100644 src/ThreadLocal.java create mode 100644 src/WeakHashMap.java diff --git a/README.md b/README.md index 9a95fbc..f882dd7 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,3 @@ # JDK-Source-Code -Extract from : [OpenJDK 1.7](http://download.java.net/openjdk/jdk7) +Extract from : jdk1.8.0_20 diff --git a/src/ArrayList.java b/src/ArrayList.java index 17179ae..3218f1a 100644 --- a/src/ArrayList.java +++ b/src/ArrayList.java @@ -1,16 +1,137 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + package java.util; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.function.UnaryOperator; + +/** + * Resizable-array implementation of the List interface. Implements + * all optional list operations, and permits all elements, including + * null. In addition to implementing the List interface, + * this class provides methods to manipulate the size of the array that is + * used internally to store the list. (This class is roughly equivalent to + * Vector, except that it is unsynchronized.) + * + *

The size, isEmpty, get, set, + * iterator, and listIterator operations run in constant + * time. The add operation runs in amortized constant time, + * that is, adding n elements requires O(n) time. All of the other operations + * run in linear time (roughly speaking). The constant factor is low compared + * to that for the LinkedList implementation. + * + *

Each ArrayList instance has a capacity. The capacity is + * the size of the array used to store the elements in the list. It is always + * at least as large as the list size. As elements are added to an ArrayList, + * its capacity grows automatically. The details of the growth policy are not + * specified beyond the fact that adding an element has constant amortized + * time cost. + * + *

An application can increase the capacity of an ArrayList instance + * before adding a large number of elements using the ensureCapacity + * operation. This may reduce the amount of incremental reallocation. + * + *
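As a concrete illustration of the paragraph above — a minimal sketch, with an invented element count — pre-sizing via ensureCapacity performs one allocation up front instead of repeated grow-and-copy cycles:

```java
import java.util.ArrayList;

public class EnsureCapacityDemo {
    public static void main(String[] args) {
        int expected = 1_000_000; // assumed workload size, purely illustrative
        ArrayList<Integer> list = new ArrayList<>();
        list.ensureCapacity(expected); // one allocation instead of repeated regrowths
        for (int i = 0; i < expected; i++) {
            list.add(i);
        }
        System.out.println(list.size()); // 1000000
    }
}
```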

Note that this implementation is not synchronized. + * If multiple threads access an ArrayList instance concurrently, + * and at least one of the threads modifies the list structurally, it + * must be synchronized externally. (A structural modification is + * any operation that adds or deletes one or more elements, or explicitly + * resizes the backing array; merely setting the value of an element is not + * a structural modification.) This is typically accomplished by + * synchronizing on some object that naturally encapsulates the list. + * + * If no such object exists, the list should be "wrapped" using the + * {@link Collections#synchronizedList Collections.synchronizedList} + * method. This is best done at creation time, to prevent accidental + * unsynchronized access to the list:

+ *   List list = Collections.synchronizedList(new ArrayList(...));
+ * + *
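A slightly fuller sketch of the wrapping idiom shown above (names and values invented). Note that while individual calls such as add() are synchronized by the wrapper, iteration must still be guarded manually, per the Collections.synchronizedList contract:

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SynchronizedListDemo {
    public static void main(String[] args) {
        List<String> list = Collections.synchronizedList(new ArrayList<>());
        list.add("a"); // individual operations are synchronized by the wrapper
        list.add("b");
        // Iteration is a compound operation and needs an external lock:
        synchronized (list) {
            for (String s : list) {
                System.out.println(s);
            }
        }
    }
}
```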

+ * The iterators returned by this class's {@link #iterator() iterator} and + * {@link #listIterator(int) listIterator} methods are fail-fast: + * if the list is structurally modified at any time after the iterator is + * created, in any way except through the iterator's own + * {@link ListIterator#remove() remove} or + * {@link ListIterator#add(Object) add} methods, the iterator will throw a + * {@link ConcurrentModificationException}. Thus, in the face of + * concurrent modification, the iterator fails quickly and cleanly, rather + * than risking arbitrary, non-deterministic behavior at an undetermined + * time in the future. + * + *

Note that the fail-fast behavior of an iterator cannot be guaranteed + * as it is, generally speaking, impossible to make any hard guarantees in the + * presence of unsynchronized concurrent modification. Fail-fast iterators + * throw {@code ConcurrentModificationException} on a best-effort basis. + * Therefore, it would be wrong to write a program that depended on this + * exception for its correctness: the fail-fast behavior of iterators + * should be used only to detect bugs. + * + *
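A minimal sketch of the fail-fast behavior just described (values arbitrary): modifying the list directly mid-iteration trips the check, while the iterator's own remove() does not:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.List;

public class FailFastDemo {
    public static void main(String[] args) {
        List<String> list = new ArrayList<>(Arrays.asList("a", "b", "c"));
        try {
            for (String s : list) {
                if (s.equals("a")) {
                    list.remove(s); // structural change behind the iterator's back
                }
            }
        } catch (ConcurrentModificationException expected) {
            System.out.println("fail-fast iterator detected the modification");
        }
        // The iterator's own remove() keeps modCount and the cursor in sync:
        for (Iterator<String> it = list.iterator(); it.hasNext(); ) {
            if (it.next().equals("b")) {
                it.remove();
            }
        }
        System.out.println(list); // [c]
    }
}
```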

This class is a member of the + * + * Java Collections Framework. + * + * @author Josh Bloch + * @author Neal Gafter + * @see Collection + * @see List + * @see LinkedList + * @see Vector + * @since 1.2 + */ public class ArrayList extends AbstractList implements List, RandomAccess, Cloneable, java.io.Serializable { private static final long serialVersionUID = 8683452581122892189L; + /** + * Default initial capacity. + */ + private static final int DEFAULT_CAPACITY = 10; + + /** + * Shared empty array instance used for empty instances. + */ + private static final Object[] EMPTY_ELEMENTDATA = {}; + + /** + * Shared empty array instance used for default sized empty instances. We + * distinguish this from EMPTY_ELEMENTDATA to know how much to inflate when + * first element is added. + */ + private static final Object[] DEFAULTCAPACITY_EMPTY_ELEMENTDATA = {}; + /** * The array buffer into which the elements of the ArrayList are stored. - * The capacity of the ArrayList is the length of this array buffer. + * The capacity of the ArrayList is the length of this array buffer. Any + * empty ArrayList with elementData == DEFAULTCAPACITY_EMPTY_ELEMENTDATA + * will be expanded to DEFAULT_CAPACITY when the first element is added. */ - private transient Object[] elementData; + transient Object[] elementData; // non-private to simplify nested class access /** * The size of the ArrayList (the number of elements it contains). @@ -27,18 +148,21 @@ public class ArrayList extends AbstractList * is negative */ public ArrayList(int initialCapacity) { - super(); - if (initialCapacity < 0) + if (initialCapacity > 0) { + this.elementData = new Object[initialCapacity]; + } else if (initialCapacity == 0) { + this.elementData = EMPTY_ELEMENTDATA; + } else { throw new IllegalArgumentException("Illegal Capacity: "+ initialCapacity); - this.elementData = new Object[initialCapacity]; + } } /** * Constructs an empty list with an initial capacity of ten. */ public ArrayList() { - this(10); + this.elementData = DEFAULTCAPACITY_EMPTY_ELEMENTDATA; } /** @@ -51,10 +175,14 @@ public ArrayList() { */ public ArrayList(Collection c) { elementData = c.toArray(); - size = elementData.length; - // c.toArray might (incorrectly) not return Object[] (see 6260652) - if (elementData.getClass() != Object[].class) - elementData = Arrays.copyOf(elementData, size, Object[].class); + if ((size = elementData.length) != 0) { + // c.toArray might (incorrectly) not return Object[] (see 6260652) + if (elementData.getClass() != Object[].class) + elementData = Arrays.copyOf(elementData, size, Object[].class); + } else { + // replace with empty array. + this.elementData = EMPTY_ELEMENTDATA; + } } /** @@ -64,9 +192,10 @@ public ArrayList(Collection c) { */ public void trimToSize() { modCount++; - int oldCapacity = elementData.length; - if (size < oldCapacity) { - elementData = Arrays.copyOf(elementData, size); + if (size < elementData.length) { + elementData = (size == 0) + ? EMPTY_ELEMENTDATA + : Arrays.copyOf(elementData, size); } } @@ -78,12 +207,29 @@ public void trimToSize() { * @param minCapacity the desired minimum capacity */ public void ensureCapacity(int minCapacity) { - if (minCapacity > 0) - ensureCapacityInternal(minCapacity); + int minExpand = (elementData != DEFAULTCAPACITY_EMPTY_ELEMENTDATA) + // any size if not default element table + ? 0 + // larger than default for default empty table. It's already + // supposed to be at default size. 
+ : DEFAULT_CAPACITY; + + if (minCapacity > minExpand) { + ensureExplicitCapacity(minCapacity); + } } private void ensureCapacityInternal(int minCapacity) { + if (elementData == DEFAULTCAPACITY_EMPTY_ELEMENTDATA) { + minCapacity = Math.max(DEFAULT_CAPACITY, minCapacity); + } + + ensureExplicitCapacity(minCapacity); + } + + private void ensureExplicitCapacity(int minCapacity) { modCount++; + // overflow-conscious code if (minCapacity - elementData.length > 0) grow(minCapacity); @@ -202,14 +348,13 @@ public int lastIndexOf(Object o) { */ public Object clone() { try { - @SuppressWarnings("unchecked") - ArrayList v = (ArrayList) super.clone(); + ArrayList v = (ArrayList) super.clone(); v.elementData = Arrays.copyOf(elementData, size); v.modCount = 0; return v; } catch (CloneNotSupportedException e) { // this shouldn't happen, since we are Cloneable - throw new InternalError(); + throw new InternalError(e); } } @@ -353,7 +498,7 @@ public E remove(int index) { if (numMoved > 0) System.arraycopy(elementData, index+1, elementData, index, numMoved); - elementData[--size] = null; // Let gc do its work + elementData[--size] = null; // clear to let GC do its work return oldValue; } @@ -398,7 +543,7 @@ private void fastRemove(int index) { if (numMoved > 0) System.arraycopy(elementData, index+1, elementData, index, numMoved); - elementData[--size] = null; // Let gc do its work + elementData[--size] = null; // clear to let GC do its work } /** @@ -408,7 +553,7 @@ private void fastRemove(int index) { public void clear() { modCount++; - // Let gc do its work + // clear to let GC do its work for (int i = 0; i < size; i++) elementData[i] = null; @@ -489,10 +634,12 @@ protected void removeRange(int fromIndex, int toIndex) { System.arraycopy(elementData, toIndex, elementData, fromIndex, numMoved); - // Let gc do its work + // clear to let GC do its work int newSize = size - (toIndex-fromIndex); - while (size != newSize) - elementData[--size] = null; + for (int i = newSize; i < size; i++) { + elementData[i] = null; + } + size = newSize; } /** @@ -539,6 +686,7 @@ private String outOfBoundsMsg(int index) { * @see Collection#contains(Object) */ public boolean removeAll(Collection c) { + Objects.requireNonNull(c); return batchRemove(c, false); } @@ -559,6 +707,7 @@ public boolean removeAll(Collection c) { * @see Collection#contains(Object) */ public boolean retainAll(Collection c) { + Objects.requireNonNull(c); return batchRemove(c, true); } @@ -580,6 +729,7 @@ private boolean batchRemove(Collection c, boolean complement) { w += size - r; } if (w != size) { + // clear to let GC do its work for (int i = w; i < size; i++) elementData[i] = null; modCount += size - w; @@ -604,17 +754,17 @@ private void writeObject(java.io.ObjectOutputStream s) int expectedModCount = modCount; s.defaultWriteObject(); - // Write out array length - s.writeInt(elementData.length); + // Write out size as capacity for behavioural compatibility with clone() + s.writeInt(size); // Write out all elements in the proper order. - for (int i=0; i
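A small usage sketch of the spliterator added in this patch (values arbitrary): trySplit() hands back a spliterator over the first half of the remaining range, and the same machinery underlies the list's stream view:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Spliterator;

public class SpliteratorDemo {
    public static void main(String[] args) {
        List<Integer> list = new ArrayList<>(Arrays.asList(1, 2, 3, 4));
        Spliterator<Integer> rest = list.spliterator();
        Spliterator<Integer> firstHalf = rest.trySplit(); // covers indices [0, 2)
        firstHalf.forEachRemaining(i -> System.out.println("first half: " + i));
        rest.forEachRemaining(i -> System.out.println("second half: " + i));
        // The stream view is built on the same spliterator:
        int sum = list.stream().mapToInt(Integer::intValue).sum();
        System.out.println(sum); // 10
    }
}
```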

The {@code Spliterator} reports {@link Spliterator#SIZED}, + * {@link Spliterator#SUBSIZED}, and {@link Spliterator#ORDERED}. + * Overriding implementations should document the reporting of additional + * characteristic values. + * + * @return a {@code Spliterator} over the elements in this list + * @since 1.8 + */ + @Override + public Spliterator spliterator() { + return new ArrayListSpliterator<>(this, 0, -1, 0); + } + + /** Index-based split-by-two, lazily initialized Spliterator */ + static final class ArrayListSpliterator implements Spliterator { + + /* + * If ArrayLists were immutable, or structurally immutable (no + * adds, removes, etc), we could implement their spliterators + * with Arrays.spliterator. Instead we detect as much + * interference during traversal as practical without + * sacrificing much performance. We rely primarily on + * modCounts. These are not guaranteed to detect concurrency + * violations, and are sometimes overly conservative about + * within-thread interference, but detect enough problems to + * be worthwhile in practice. To carry this out, we (1) lazily + * initialize fence and expectedModCount until the latest + * point that we need to commit to the state we are checking + * against; thus improving precision. (This doesn't apply to + * SubLists, that create spliterators with current non-lazy + * values). (2) We perform only a single + * ConcurrentModificationException check at the end of forEach + * (the most performance-sensitive method). When using forEach + * (as opposed to iterators), we can normally only detect + * interference after actions, not before. Further + * CME-triggering checks apply to all other possible + * violations of assumptions for example null or too-small + * elementData array given its size(), that could only have + * occurred due to interference. This allows the inner loop + * of forEach to run without any further checks, and + * simplifies lambda-resolution. While this does entail a + * number of checks, note that in the common case of + * list.stream().forEach(a), no checks or other computation + * occur anywhere other than inside forEach itself. The other + * less-often-used methods cannot take advantage of most of + * these streamlinings. + */ + + private final ArrayList list; + private int index; // current index, modified on advance/split + private int fence; // -1 until used; then one past last index + private int expectedModCount; // initialized when fence set + + /** Create new spliterator covering the given range */ + ArrayListSpliterator(ArrayList list, int origin, int fence, + int expectedModCount) { + this.list = list; // OK if null unless traversed + this.index = origin; + this.fence = fence; + this.expectedModCount = expectedModCount; + } + + private int getFence() { // initialize fence to size on first use + int hi; // (a specialized variant appears in method forEach) + ArrayList lst; + if ((hi = fence) < 0) { + if ((lst = list) == null) + hi = fence = 0; + else { + expectedModCount = lst.modCount; + hi = fence = lst.size; + } + } + return hi; + } + + public ArrayListSpliterator trySplit() { + int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; + return (lo >= mid) ? 
null : // divide range in half unless too small + new ArrayListSpliterator(list, lo, index = mid, + expectedModCount); + } + + public boolean tryAdvance(Consumer action) { + if (action == null) + throw new NullPointerException(); + int hi = getFence(), i = index; + if (i < hi) { + index = i + 1; + @SuppressWarnings("unchecked") E e = (E)list.elementData[i]; + action.accept(e); + if (list.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + return false; + } + + public void forEachRemaining(Consumer action) { + int i, hi, mc; // hoist accesses and checks from loop + ArrayList lst; Object[] a; + if (action == null) + throw new NullPointerException(); + if ((lst = list) != null && (a = lst.elementData) != null) { + if ((hi = fence) < 0) { + mc = lst.modCount; + hi = lst.size; + } + else + mc = expectedModCount; + if ((i = index) >= 0 && (index = hi) <= a.length) { + for (; i < hi; ++i) { + @SuppressWarnings("unchecked") E e = (E) a[i]; + action.accept(e); + } + if (lst.modCount == mc) + return; + } + } + throw new ConcurrentModificationException(); + } + + public long estimateSize() { + return (long) (getFence() - index); + } + + public int characteristics() { + return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED; + } + } + + @Override + public boolean removeIf(Predicate filter) { + Objects.requireNonNull(filter); + // figure out which elements are to be removed + // any exception thrown from the filter predicate at this stage + // will leave the collection unmodified + int removeCount = 0; + final BitSet removeSet = new BitSet(size); + final int expectedModCount = modCount; + final int size = this.size; + for (int i=0; modCount == expectedModCount && i < size; i++) { + @SuppressWarnings("unchecked") + final E element = (E) elementData[i]; + if (filter.test(element)) { + removeSet.set(i); + removeCount++; + } + } + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + + // shift surviving elements left over the spaces left by removed elements + final boolean anyToRemove = removeCount > 0; + if (anyToRemove) { + final int newSize = size - removeCount; + for (int i=0, j=0; (i < size) && (j < newSize); i++, j++) { + i = removeSet.nextClearBit(i); + elementData[j] = elementData[i]; + } + for (int k=newSize; k < size; k++) { + elementData[k] = null; // Let gc do its work + } + this.size = newSize; + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + modCount++; + } + + return anyToRemove; + } + + @Override + @SuppressWarnings("unchecked") + public void replaceAll(UnaryOperator operator) { + Objects.requireNonNull(operator); + final int expectedModCount = modCount; + final int size = this.size; + for (int i=0; modCount == expectedModCount && i < size; i++) { + elementData[i] = operator.apply((E) elementData[i]); + } + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + modCount++; + } + + @Override + @SuppressWarnings("unchecked") + public void sort(Comparator c) { + final int expectedModCount = modCount; + Arrays.sort((E[]) elementData, 0, size, c); + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + modCount++; } } diff --git a/src/ConcurrentHashMap.java b/src/ConcurrentHashMap.java index 338f7d6..a5e5880 100644 --- a/src/ConcurrentHashMap.java +++ b/src/ConcurrentHashMap.java @@ -1,816 +1,923 @@ +/* + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. 
+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +/* + * + * + * + * + * + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ package java.util.concurrent; -import java.util.concurrent.locks.*; -import java.util.*; + +import java.io.ObjectStreamField; import java.io.Serializable; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.util.AbstractMap; +import java.util.Arrays; +import java.util.Collection; +import java.util.Comparator; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.Spliterator; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.LockSupport; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.BinaryOperator; +import java.util.function.Consumer; +import java.util.function.DoubleBinaryOperator; +import java.util.function.Function; +import java.util.function.IntBinaryOperator; +import java.util.function.LongBinaryOperator; +import java.util.function.ToDoubleBiFunction; +import java.util.function.ToDoubleFunction; +import java.util.function.ToIntBiFunction; +import java.util.function.ToIntFunction; +import java.util.function.ToLongBiFunction; +import java.util.function.ToLongFunction; +import java.util.stream.Stream; -public class ConcurrentHashMap extends AbstractMap - implements ConcurrentMap, Serializable { +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + *

Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators, Spliterators and Enumerations return elements reflecting the + * state of the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * java.util.ConcurrentModificationException ConcurrentModificationException}. + * However, iterators are designed to be used by only one thread at a time. + * Bear in mind that the results of aggregate status methods including + * {@code size}, {@code isEmpty}, and {@code containsValue} are typically + * useful only when a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + *
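A minimal sketch of the weakly consistent iteration described above (keys and values invented): unlike HashMap, mutating the map mid-iteration does not throw:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class WeaklyConsistentDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        map.put("b", 2);
        // No ConcurrentModificationException: the iterator reflects some
        // state of the table at or since its creation.
        for (String key : map.keySet()) {
            map.remove(key);
        }
        System.out.println(map.isEmpty()); // true
    }
}
```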

The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. To ameliorate impact, when keys are {@link Comparable}, + * this class may use comparison order among keys to help break ties. + * + *
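A hypothetical sketch of the sizing hints mentioned above (the 10,000 estimate is invented). Passing the expected size up front avoids intermediate resizes; loadFactor and concurrencyLevel are accepted but, per this version's overview below, affect only initial sizing:

```java
import java.util.concurrent.ConcurrentHashMap;

public class SizingDemo {
    public static void main(String[] args) {
        // Expecting roughly 10,000 mappings: one up-front table allocation.
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>(10_000);
        // loadFactor/concurrencyLevel hints kept for compatibility with 1.7:
        ConcurrentHashMap<String, Integer> tuned =
            new ConcurrentHashMap<>(10_000, 0.75f, 16);
        map.put("k", 1);
        System.out.println(map.get("k") + ", " + tuned.size());
    }
}
```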

A {@link Set} projection of a ConcurrentHashMap may be created
+ * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
+ * (using {@link #keySet(Object)}) when only keys are of interest, and the
+ * mapped values are (perhaps transiently) not used or all take the
+ * same mapping value.
+ *
+ *
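A short sketch of both projections named in this paragraph (keys invented):

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class KeySetDemo {
    public static void main(String[] args) {
        // A concurrent Set backed by a ConcurrentHashMap, for when only
        // the keys matter:
        Set<String> seen = ConcurrentHashMap.newKeySet();
        seen.add("a");
        seen.add("a"); // duplicate, ignored
        System.out.println(seen.size()); // 1

        // keySet(mappedValue) view: adding through the set maps every new
        // key to the given value in the backing map.
        ConcurrentHashMap<String, Boolean> map = new ConcurrentHashMap<>();
        Set<String> view = map.keySet(Boolean.TRUE);
        view.add("b");
        System.out.println(map.get("b")); // true
    }
}
```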

A ConcurrentHashMap can be used as a scalable frequency map (a
+ * form of histogram or multiset) by using {@link
+ * java.util.concurrent.atomic.LongAdder} values and initializing via
+ * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
+ * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
+ * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
+ *
+ *
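Expanding the one-liner above into a runnable sketch (the word list is invented):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

public class FrequencyMapDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, LongAdder> freqs = new ConcurrentHashMap<>();
        for (String word : new String[] {"to", "be", "or", "not", "to", "be"}) {
            // Atomically installs a LongAdder on first sight of the key,
            // then increments it without locking the whole map.
            freqs.computeIfAbsent(word, k -> new LongAdder()).increment();
        }
        System.out.println(freqs.get("to").sum()); // 2
    }
}
```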

This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *

Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow {@code null} to be used as a key or value. + * + *

ConcurrentHashMaps support a set of sequential and parallel bulk + * operations that, unlike most {@link Stream} methods, are designed + * to be safely, and often sensibly, applied even with maps that are + * being concurrently updated by other threads; for example, when + * computing a snapshot summary of the values in a shared registry. + * There are three kinds of operation, each with four forms, accepting + * functions with Keys, Values, Entries, and (Key, Value) arguments + * and/or return values. Because the elements of a ConcurrentHashMap + * are not ordered in any particular way, and may be processed in + * different orders in different parallel executions, the correctness + * of supplied functions should not depend on any ordering, or on any + * other objects or values that may transiently change while + * computation is in progress; and except for forEach actions, should + * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry} + * objects do not support method {@code setValue}. + * + *

+ * + *

These bulk operations accept a {@code parallelismThreshold} + * argument. Methods proceed sequentially if the current map size is + * estimated to be less than the given threshold. Using a value of + * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value + * of {@code 1} results in maximal parallelism by partitioning into + * enough subtasks to fully utilize the {@link + * ForkJoinPool#commonPool()} that is used for all parallel + * computations. Normally, you would initially choose one of these + * extreme values, and then measure performance of using in-between + * values that trade off overhead versus throughput. + * + *
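A minimal sketch of the two threshold extremes described above (contents invented):

```java
import java.util.concurrent.ConcurrentHashMap;

public class BulkOpsDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        map.put("b", 2);
        map.put("c", 3);

        // Threshold Long.MAX_VALUE: always proceeds sequentially.
        map.forEach(Long.MAX_VALUE, (k, v) -> System.out.println(k + "=" + v));

        // Threshold 1: maximal parallelism via the common ForkJoinPool.
        Integer sum = map.reduceValues(1L, Integer::sum);
        System.out.println(sum); // 6
    }
}
```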

The concurrency properties of bulk operations follow + * from those of ConcurrentHashMap: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *

Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *
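A sketch of the "null means no result" convention (values invented): the search function returns null for non-matches, and the scan stops at the first non-null result:

```java
import java.util.concurrent.ConcurrentHashMap;

public class SearchDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        map.put("b", 42);
        // null from the function means "nothing here"; the first non-null
        // result (if any) is returned.
        String hit = map.search(Long.MAX_VALUE, (k, v) -> v > 10 ? k : null);
        System.out.println(hit); // b
    }
}
```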

Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *
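A sketch of the "key for the greatest value" use case mentioned above (contents invented); an entry-based reduction keeps key and value together, so the winning key falls out naturally:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class MaxEntryDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        map.put("b", 3);
        map.put("c", 2);
        Map.Entry<String, Integer> max = map.reduceEntries(Long.MAX_VALUE,
            (e1, e2) -> e1.getValue() >= e2.getValue() ? e1 : e2);
        System.out.println(max.getKey()); // b
    }
}
```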

Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *

Speedups for parallel compared to sequential forms are common + * but not guaranteed. Parallel operations involving brief functions + * on small maps may execute more slowly than sequential forms if the + * underlying work to parallelize the computation is more expensive + * than the computation itself. Similarly, parallelization may not + * lead to much actual parallelism if all processors are busy + * performing unrelated tasks. + * + *

All arguments to all task methods must be non-null. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class ConcurrentHashMap extends AbstractMap + implements ConcurrentMap, Serializable { private static final long serialVersionUID = 7249069246763182397L; /* - * The basic strategy is to subdivide the table among Segments, - * each of which itself is a concurrently readable hash table. To - * reduce footprint, all but one segments are constructed only - * when first needed (see ensureSegment). To maintain visibility - * in the presence of lazy construction, accesses to segments as - * well as elements of segment's table must use volatile access, - * which is done via Unsafe within methods segmentAt etc - * below. These provide the functionality of AtomicReferenceArrays - * but reduce the levels of indirection. Additionally, - * volatile-writes of table elements and entry "next" fields - * within locked operations use the cheaper "lazySet" forms of - * writes (via putOrderedObject) because these writes are always - * followed by lock releases that maintain sequential consistency - * of table updates. - * - * Historical note: The previous version of this class relied - * heavily on "final" fields, which avoided some volatile reads at - * the expense of a large initial footprint. Some remnants of - * that design (including forced construction of segment 0) exist - * to ensure serialization compatibility. + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * This map usually acts as a binned (bucketed) hash table. Each + * key-value mapping is held in a Node. Most nodes are instances + * of the basic Node class with hash, key, value, and next + * fields. However, various subclasses exist: TreeNodes are + * arranged in balanced trees, not lists. TreeBins hold the roots + * of sets of TreeNodes. ForwardingNodes are placed at the heads + * of bins during resizing. ReservationNodes are used as + * placeholders while establishing values in computeIfAbsent and + * related methods. The types TreeBin, ForwardingNode, and + * ReservationNode do not hold normal user keys, values, or + * hashes, and are readily distinguishable during search etc + * because they have negative hash fields and null key and value + * fields. (These special nodes are either uncommon or transient, + * so the impact of carrying around some unused fields is + * insignificant.) + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. + * + * We use the top (sign) bit of Node hash fields for control + * purposes -- it is available anyway because of addressing + * constraints. Nodes with negative hash fields are specially + * handled or ignored in map methods. 
+ * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Locking support for these locks relies on builtin + * "synchronized" monitors. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes or ones that differs only + * in masked-out high bits. So we use a secondary strategy that + * applies when the number of nodes in a bin exceeds a + * threshold. These TreeBins use a balanced tree to hold nodes (a + * specialized form of red-black trees), bounding search time to + * O(log N). Each search step in a TreeBin is at least twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). + * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Any thread + * noticing an overfull bin may assist in resizing after the + * initiating thread allocates and sets up the replacement array. + * However, rather than stalling, these other threads may proceed + * with insertions etc. 
The use of TreeBins shields us from the + * worst case effects of overfilling while resizes are in + * progress. Resizing proceeds by transferring bins, one by one, + * from the table to the next table. However, threads claim small + * blocks of indices to transfer (via field transferIndex) before + * doing so, reducing contention. A generation stamp in field + * sizeCtl ensures that resizings do not overlap. Because we are + * using power-of-two expansion, the elements from each bin must + * either stay at same index, or move with a power of two + * offset. We eliminate unnecessary node creation by catching + * cases where old nodes can be reused because their next fields + * won't change. On average, only about one-sixth of them need + * cloning when a table doubles. The nodes they replace will be + * garbage collectable as soon as they are no longer referenced by + * any reader thread that may be in the midst of concurrently + * traversing table. Upon transfer, the old table bin contains + * only a special forwarding node (with hash field "MOVED") that + * contains the next table as its key. On encountering a + * forwarding node, access and update operations restart, using + * the new table. + * + * Each bin transfer requires its bin lock, which can stall + * waiting for locks while resizing. However, because other + * threads can join in and help resize rather than contend for + * locks, average aggregate waits become shorter as resizing + * progresses. The transfer operation must also ensure that all + * accessible bins in both the old and new table are usable by any + * traversal. This is arranged in part by proceeding from the + * last bin (table.length - 1) up towards the first. Upon seeing + * a forwarding node, traversals (see class Traverser) arrange to + * move to the new table without revisiting nodes. To ensure that + * no intervening nodes are skipped even when moved out of order, + * a stack (see class TableStack) is created on first encounter of + * a forwarding node during a traversal, to maintain its place if + * later processing the current table. The need for these + * save/restore mechanics is relatively rare, but when one + * forwarding node is encountered, typically many more will be. + * So Traversers use a simple caching scheme to avoid creating so + * many new TableStack nodes. (Thanks to Peter Levart for + * suggesting use of a stack here.) + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a specialization of + * LongAdder. We need to incorporate a specialization rather than + * just use a LongAdder in order to access implicit + * contention-sensing that leads to creation of multiple + * CounterCells. The counter mechanics avoid contention on + * updates but can encounter cache thrashing if read too + * frequently during concurrent access. 
To avoid reading so often, + * resizing under contention is attempted only upon adding to a + * bin already holding two or more nodes. Under uniform hash + * distributions, the probability of this occurring at threshold + * is around 13%, meaning that only about 1 in 8 puts check + * threshold (and after resizing, many fewer do so). + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable for + * the same T, so we cannot invoke compareTo among them. To handle + * this, the tree is ordered primarily by hash value, then by + * Comparable.compareTo order if applicable. On lookup at a node, + * if elements are not comparable or compare as 0 then both left + * and right children may need to be searched in the case of tied + * hash values. (This corresponds to the full list search that + * would be necessary if all elements were non-Comparable and had + * tied hashes.) On insertion, to keep a total ordering (or as + * close as is required here) across rebalancings, we compare + * classes and identityHashCodes as tie-breakers. The red-black + * balancing code is updated from pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also require an additional locking mechanism. While + * list traversal is always possible by readers even during + * updates, tree traversal is not, mainly because of tree-rotations + * that may change the root node and/or its linkages. TreeBins + * include a simple read-write lock mechanism parasitic on the + * main bin-synchronization strategy: Structural adjustments + * associated with an insertion or removal are already bin-locked + * (and so cannot conflict with other writers) but must wait for + * ongoing readers to finish. Since there can be only one such + * waiter, we use a simple scheme using a single "waiter" field to + * block writers. However, readers need never block. If the root + * lock is held, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. These cases are not fast, but + * maximize aggregate expected throughput. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + * + * Also, solely for compatibility with previous versions of this + * class, it extends AbstractMap, even though all of its methods + * are overridden, so it is just useless baggage. + * + * This file is organized to make things a little easier to follow + * while reading than they might otherwise: First the main static + * declarations and utilities, then fields, then main public + * methods (with a few factorings of multiple public methods into + * internal ones), then sizing methods, trees, traversers, and + * bulk operations. 
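To make the hashing and sizing machinery referenced throughout this overview concrete, here is a standalone sketch that copies the spread() and tableSizeFor() helpers defined later in this file and exercises them on sample inputs (the demo class and inputs are invented):

```java
public class HashSpreadDemo {
    // Copies of the two private helpers defined below, reproduced here
    // only to make their behavior easy to observe.
    static int spread(int h) {
        return (h ^ (h >>> 16)) & 0x7fffffff; // XOR high bits down, clear sign bit (HASH_BITS)
    }

    static int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return (n < 0) ? 1 : (n >= (1 << 30)) ? (1 << 30) : n + 1;
    }

    public static void main(String[] args) {
        // Bin index is (table.length - 1) & spread(hash), so high hash bits
        // still influence placement even in small power-of-two tables.
        int h = "example".hashCode();
        System.out.println(spread(h) & (16 - 1)); // index in a 16-bin table
        System.out.println(tableSizeFor(17));     // 32: next power of two
    }
}
```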
*/ /* ---------------- Constants -------------- */ /** - * The default initial capacity for this table, - * used when not otherwise specified in a constructor. + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. */ - static final int DEFAULT_INITIAL_CAPACITY = 16; + private static final int MAXIMUM_CAPACITY = 1 << 30; /** - * The default load factor for this table, used when not - * otherwise specified in a constructor. + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. */ - static final float DEFAULT_LOAD_FACTOR = 0.75f; + private static final int DEFAULT_CAPACITY = 16; /** - * The default concurrency level for this table, used when not - * otherwise specified in a constructor. + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. */ - static final int DEFAULT_CONCURRENCY_LEVEL = 16; + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; /** - * The maximum capacity, used if a higher value is implicitly - * specified by either of the constructors with arguments. MUST - * be a power of two <= 1<<30 to ensure that entries are indexable - * using ints. + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. */ - static final int MAXIMUM_CAPACITY = 1 << 30; + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; /** - * The minimum capacity for per-segment tables. Must be a power - * of two, at least two to avoid immediate resizing on next use - * after lazy construction. + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. */ - static final int MIN_SEGMENT_TABLE_CAPACITY = 2; + private static final float LOAD_FACTOR = 0.75f; /** - * The maximum number of segments to allow; used to bound - * constructor arguments. Must be power of two less than 1 << 24. + * The bin count threshold for using a tree rather than list for a + * bin. Bins are converted to trees when adding an element to a + * bin with at least this many nodes. The value must be greater + * than 2, and should be at least 8 to mesh with assumptions in + * tree removal about conversion back to plain bins upon + * shrinkage. */ - static final int MAX_SEGMENTS = 1 << 16; // slightly conservative + static final int TREEIFY_THRESHOLD = 8; /** - * Number of unsynchronized retries in size and containsValue - * methods before resorting to locking. This is used to avoid - * unbounded retries if tables undergo continuous modification - * which would make it impossible to obtain an accurate result. + * The bin count threshold for untreeifying a (split) bin during a + * resize operation. Should be less than TREEIFY_THRESHOLD, and at + * most 6 to mesh with shrinkage detection under removal. */ - static final int RETRIES_BEFORE_LOCK = 2; + static final int UNTREEIFY_THRESHOLD = 6; - /* ---------------- Fields -------------- */ + /** + * The smallest table capacity for which bins may be treeified. + * (Otherwise the table is resized if too many nodes in a bin.) 
+ * The value should be at least 4 * TREEIFY_THRESHOLD to avoid + * conflicts between resizing and treeification thresholds. + */ + static final int MIN_TREEIFY_CAPACITY = 64; + + /** + * Minimum number of rebinnings per transfer step. Ranges are + * subdivided to allow multiple resizer threads. This value + * serves as a lower bound to avoid resizers encountering + * excessive memory contention. The value should be at least + * DEFAULT_CAPACITY. + */ + private static final int MIN_TRANSFER_STRIDE = 16; /** - * Mask value for indexing into segments. The upper bits of a - * key's hash code are used to choose the segment. + * The number of bits used for generation stamp in sizeCtl. + * Must be at least 6 for 32bit arrays. */ - final int segmentMask; + private static int RESIZE_STAMP_BITS = 16; /** - * Shift value for indexing within segments. + * The maximum number of threads that can help resize. + * Must fit in 32 - RESIZE_STAMP_BITS bits. */ - final int segmentShift; + private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1; /** - * The segments, each of which is a specialized hash table. + * The bit shift for recording size stamp in sizeCtl. */ - final Segment[] segments; + private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS; + + /* + * Encodings for Node hash fields. See above for explanation. + */ + static final int MOVED = -1; // hash for forwarding nodes + static final int TREEBIN = -2; // hash for roots of trees + static final int RESERVED = -3; // hash for transient reservations + static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash + + /** Number of CPUS, to place bounds on some sizings */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); - transient Set keySet; - transient Set> entrySet; - transient Collection values; + /** For serialization compatibility. */ + private static final ObjectStreamField[] serialPersistentFields = { + new ObjectStreamField("segments", Segment[].class), + new ObjectStreamField("segmentMask", Integer.TYPE), + new ObjectStreamField("segmentShift", Integer.TYPE) + }; + + /* ---------------- Nodes -------------- */ /** - * ConcurrentHashMap list entry. Note that this is never exported - * out as a user-visible Map.Entry. + * Key-value entry. This class is never exported out as a + * user-mutable Map.Entry (i.e., one supporting setValue; see + * MapEntry below), but can be used for read-only traversals used + * in bulk tasks. Subclasses of Node with a negative hash field + * are special, and contain null keys and values (but are never + * exported). Otherwise, keys and vals are never null. */ - static final class HashEntry { + static class Node implements Map.Entry { final int hash; final K key; - volatile V value; - volatile HashEntry next; + volatile V val; + volatile Node next; - HashEntry(int hash, K key, V value, HashEntry next) { + Node(int hash, K key, V val, Node next) { this.hash = hash; this.key = key; - this.value = value; + this.val = val; this.next = next; } - /** - * Sets next field with volatile write semantics. (See above - * about use of putOrderedObject.) 
- */ - final void setNext(HashEntry n) { - UNSAFE.putOrderedObject(this, nextOffset, n); + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + public final V setValue(V value) { + throw new UnsupportedOperationException(); } - // Unsafe mechanics - static final sun.misc.Unsafe UNSAFE; - static final long nextOffset; - static { - try { - UNSAFE = sun.misc.Unsafe.getUnsafe(); - Class k = HashEntry.class; - nextOffset = UNSAFE.objectFieldOffset - (k.getDeclaredField("next")); - } catch (Exception e) { - throw new Error(e); + public final boolean equals(Object o) { + Object k, v, u; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == (u = val) || v.equals(u))); + } + + /** + * Virtualized support for map.get(); overridden in subclasses. + */ + Node find(int h, Object k) { + Node e = this; + if (k != null) { + do { + K ek; + if (e.hash == h && + ((ek = e.key) == k || (ek != null && k.equals(ek)))) + return e; + } while ((e = e.next) != null); } + return null; } } + /* ---------------- Static utilities -------------- */ + /** - * Gets the ith element of given table (if nonnull) with volatile - * read semantics. Note: This is manually integrated into a few - * performance-sensitive methods to reduce call overhead. + * Spreads (XORs) higher bits of hash to lower and also forces top + * bit to 0. Because the table uses power-of-two masking, sets of + * hashes that vary only in bits above the current mask will + * always collide. (Among known examples are sets of Float keys + * holding consecutive whole numbers in small tables.) So we + * apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed (so don't benefit from + * spreading), and because we use trees to handle large sets of + * collisions in bins, we just XOR some shifted bits in the + * cheapest possible way to reduce systematic lossage, as well as + * to incorporate impact of the highest bits that would otherwise + * never be used in index calculations because of table bounds. */ - @SuppressWarnings("unchecked") - static final HashEntry entryAt(HashEntry[] tab, int i) { - return (tab == null) ? null : - (HashEntry) UNSAFE.getObjectVolatile - (tab, ((long)i << TSHIFT) + TBASE); + static final int spread(int h) { + return (h ^ (h >>> 16)) & HASH_BITS; } /** - * Sets the ith element of given table, with volatile write - * semantics. (See above about use of putOrderedObject.) + * Returns a power of two table size for the given desired capacity. + * See Hackers Delight, sec 3.2 */ - static final void setEntryAt(HashEntry[] tab, int i, - HashEntry e) { - UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e); + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; } /** - * Applies a supplemental hash function to a given hashCode, which - * defends against poor quality hash functions. 
This is critical - * because ConcurrentHashMap uses power-of-two length hash tables, - * that otherwise encounter collisions for hashCodes that do not - * differ in lower or upper bits. + * Returns x's Class if it is of the form "class C implements + * Comparable", else null. */ - private static int hash(int h) { - // Spread bits to regularize both segment and index locations, - // using variant of single-word Wang/Jenkins hash. - h += (h << 15) ^ 0xffffcd7d; - h ^= (h >>> 10); - h += (h << 3); - h ^= (h >>> 6); - h += (h << 2) + (h << 14); - return h ^ (h >>> 16); + static Class comparableClassFor(Object x) { + if (x instanceof Comparable) { + Class c; Type[] ts, as; Type t; ParameterizedType p; + if ((c = x.getClass()) == String.class) // bypass checks + return c; + if ((ts = c.getGenericInterfaces()) != null) { + for (int i = 0; i < ts.length; ++i) { + if (((t = ts[i]) instanceof ParameterizedType) && + ((p = (ParameterizedType)t).getRawType() == + Comparable.class) && + (as = p.getActualTypeArguments()) != null && + as.length == 1 && as[0] == c) // type arg is c + return c; + } + } + } + return null; } /** - * Segments are specialized versions of hash tables. This - * subclasses from ReentrantLock opportunistically, just to - * simplify some locking and avoid separate construction. + * Returns k.compareTo(x) if x matches kc (k's screened comparable + * class), else 0. */ - static final class Segment extends ReentrantLock implements Serializable { - /* - * Segments maintain a table of entry lists that are always - * kept in a consistent state, so can be read (via volatile - * reads of segments and tables) without locking. This - * requires replicating nodes when necessary during table - * resizing, so the old lists can be traversed by readers - * still using old version of table. - * - * This class defines only mutative methods requiring locking. - * Except as noted, the methods of this class perform the - * per-segment versions of ConcurrentHashMap methods. (Other - * methods are integrated directly into ConcurrentHashMap - * methods.) These mutative methods use a form of controlled - * spinning on contention via methods scanAndLock and - * scanAndLockForPut. These intersperse tryLocks with - * traversals to locate nodes. The main benefit is to absorb - * cache misses (which are very common for hash tables) while - * obtaining locks so that traversal is faster once - * acquired. We do not actually use the found nodes since they - * must be re-acquired under lock anyway to ensure sequential - * consistency of updates (and in any case may be undetectably - * stale), but they will normally be much faster to re-locate. - * Also, scanAndLockForPut speculatively creates a fresh node - * to use in put if no node is found. - */ - - private static final long serialVersionUID = 2249069246763182397L; + @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable + static int compareComparables(Class kc, Object k, Object x) { + return (x == null || x.getClass() != kc ? 0 : + ((Comparable)k).compareTo(x)); + } - /** - * The maximum number of times to tryLock in a prescan before - * possibly blocking on acquire in preparation for a locked - * segment operation. On multiprocessors, using a bounded - * number of retries maintains cache acquired while locating - * nodes. - */ - static final int MAX_SCAN_RETRIES = - Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; + /* ---------------- Table element access -------------- */ - /** - * The per-segment table. 
Elements are accessed via - * entryAt/setEntryAt providing volatile semantics. - */ - transient volatile HashEntry[] table; + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. All uses of + * the tab arguments must be null checked by callers. All callers + * also paranoically precheck that tab's length is not zero (or an + * equivalent check), thus ensuring that any index argument taking + * the form of a hash value anded with (length - 1) is a valid + * index. Note that, to be correct wrt arbitrary concurrency + * errors by users, these checks must operate on local variables, + * which accounts for some odd-looking inline assignments below. + * Note that calls to setTabAt always occur within locked regions, + * and so in principle require only release ordering, not + * full volatile semantics, but are currently coded as volatile + * writes to be conservative. + */ - /** - * The number of elements. Accessed only either within locks - * or among other volatile reads that maintain visibility. - */ - transient int count; + @SuppressWarnings("unchecked") + static final Node tabAt(Node[] tab, int i) { + return (Node)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE); + } - /** - * The total number of mutative operations in this segment. - * Even though this may overflows 32 bits, it provides - * sufficient accuracy for stability checks in CHM isEmpty() - * and size() methods. Accessed only either within locks or - * among other volatile reads that maintain visibility. - */ - transient int modCount; + static final boolean casTabAt(Node[] tab, int i, + Node c, Node v) { + return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v); + } - /** - * The table is rehashed when its size exceeds this threshold. - * (The value of this field is always (int)(capacity * - * loadFactor).) - */ - transient int threshold; + static final void setTabAt(Node[] tab, int i, Node v) { + U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v); + } - /** - * The load factor for the hash table. Even though this value - * is same for all segments, it is replicated to avoid needing - * links to outer object. - * @serial - */ - final float loadFactor; + /* ---------------- Fields -------------- */ - Segment(float lf, int threshold, HashEntry[] tab) { - this.loadFactor = lf; - this.threshold = threshold; - this.table = tab; - } + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile Node[] table; - final V put(K key, int hash, V value, boolean onlyIfAbsent) { - HashEntry node = tryLock() ? null : - scanAndLockForPut(key, hash, value); - V oldValue; - try { - HashEntry[] tab = table; - int index = (tab.length - 1) & hash; - HashEntry first = entryAt(tab, index); - for (HashEntry e = first;;) { - if (e != null) { - K k; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - oldValue = e.value; - if (!onlyIfAbsent) { - e.value = value; - ++modCount; - } - break; - } - e = e.next; - } - else { - if (node != null) - node.setNext(first); - else - node = new HashEntry(hash, key, value, first); - int c = count + 1; - if (c > threshold && tab.length < MAXIMUM_CAPACITY) - rehash(node); - else - setEntryAt(tab, index, node); - ++modCount; - count = c; - oldValue = null; - break; - } - } - } finally { - unlock(); - } - return oldValue; - } + /** + * The next table to use; non-null only while resizing. 
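
A standalone sketch of the bin-access pattern above (tabAt/casTabAt/setTabAt), with java.util.concurrent.atomic.AtomicReferenceArray standing in for the Unsafe intrinsics; the class and method names here are illustrative only, not from this file. It shows the three moves the map relies on: a volatile read of a bin head, a CAS to install the first node of an empty bin, and a volatile write for updates made under the bin lock.

    import java.util.concurrent.atomic.AtomicReferenceArray;

    class BinAccessSketch<K, V> {
        static final class Node<K, V> {
            final int hash; final K key; volatile V val; volatile Node<K, V> next;
            Node(int hash, K key, V val, Node<K, V> next) {
                this.hash = hash; this.key = key; this.val = val; this.next = next;
            }
        }

        final AtomicReferenceArray<Node<K, V>> tab = new AtomicReferenceArray<>(16);

        Node<K, V> tabAt(int i) { return tab.get(i); }                    // volatile read
        boolean casTabAt(int i, Node<K, V> c, Node<K, V> v) { return tab.compareAndSet(i, c, v); }
        void setTabAt(int i, Node<K, V> v) { tab.set(i, v); }             // volatile write

        // First insertion into an empty bin needs no lock, as in putVal.
        boolean putFirst(int hash, K key, V val) {
            int i = (tab.length() - 1) & hash;                            // power-of-two masking
            return tabAt(i) == null && casTabAt(i, null, new Node<>(hash, key, val, null));
        }
    }
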
+ */ + private transient volatile Node[] nextTable; - /** - * Doubles size of table and repacks entries, also adding the - * given node to new table - */ - @SuppressWarnings("unchecked") - private void rehash(HashEntry node) { - /* - * Reclassify nodes in each list to new table. Because we - * are using power-of-two expansion, the elements from - * each bin must either stay at same index, or move with a - * power of two offset. We eliminate unnecessary node - * creation by catching cases where old nodes can be - * reused because their next fields won't change. - * Statistically, at the default threshold, only about - * one-sixth of them need cloning when a table - * doubles. The nodes they replace will be garbage - * collectable as soon as they are no longer referenced by - * any reader thread that may be in the midst of - * concurrently traversing table. Entry accesses use plain - * array indexing because they are followed by volatile - * table write. - */ - HashEntry[] oldTable = table; - int oldCapacity = oldTable.length; - int newCapacity = oldCapacity << 1; - threshold = (int)(newCapacity * loadFactor); - HashEntry[] newTable = - (HashEntry[]) new HashEntry[newCapacity]; - int sizeMask = newCapacity - 1; - for (int i = 0; i < oldCapacity ; i++) { - HashEntry e = oldTable[i]; - if (e != null) { - HashEntry next = e.next; - int idx = e.hash & sizeMask; - if (next == null) // Single node on list - newTable[idx] = e; - else { // Reuse consecutive sequence at same slot - HashEntry lastRun = e; - int lastIdx = idx; - for (HashEntry last = next; - last != null; - last = last.next) { - int k = last.hash & sizeMask; - if (k != lastIdx) { - lastIdx = k; - lastRun = last; - } - } - newTable[lastIdx] = lastRun; - // Clone remaining nodes - for (HashEntry p = e; p != lastRun; p = p.next) { - V v = p.value; - int h = p.hash; - int k = h & sizeMask; - HashEntry n = newTable[k]; - newTable[k] = new HashEntry(h, p.key, v, n); - } - } - } - } - int nodeIndex = node.hash & sizeMask; // add the new node - node.setNext(newTable[nodeIndex]); - newTable[nodeIndex] = node; - table = newTable; - } + /** + * Base counter value, used mainly when there is no contention, + * but also as a fallback during table initialization + * races. Updated via CAS. + */ + private transient volatile long baseCount; - /** - * Scans for a node containing given key while trying to - * acquire lock, creating and returning one if not found. Upon - * return, guarantees that lock is held. UNlike in most - * methods, calls to method equals are not screened: Since - * traversal speed doesn't matter, we might as well help warm - * up the associated code and accesses as well. - * - * @return a new node if key not found, else null - */ - private HashEntry scanAndLockForPut(K key, int hash, V value) { - HashEntry first = entryForHash(this, hash); - HashEntry e = first; - HashEntry node = null; - int retries = -1; // negative while locating node - while (!tryLock()) { - HashEntry f; // to recheck first below - if (retries < 0) { - if (e == null) { - if (node == null) // speculatively create node - node = new HashEntry(hash, key, value, null); - retries = 0; - } - else if (key.equals(e.key)) - retries = 0; - else - e = e.next; - } - else if (++retries > MAX_SCAN_RETRIES) { - lock(); - break; - } - else if ((retries & 1) == 0 && - (f = entryForHash(this, hash)) != first) { - e = first = f; // re-traverse if entry changed - retries = -1; - } - } - return node; - } + /** + * Table initialization and resizing control. 
When negative, the + * table is being initialized or resized: -1 for initialization, + * else -(1 + the number of active resizing threads). Otherwise, + * when table is null, holds the initial table size to use upon + * creation, or 0 for default. After initialization, holds the + * next element count value upon which to resize the table. + */ + private transient volatile int sizeCtl; - /** - * Scans for a node containing the given key while trying to - * acquire lock for a remove or replace operation. Upon - * return, guarantees that lock is held. Note that we must - * lock even if the key is not found, to ensure sequential - * consistency of updates. - */ - private void scanAndLock(Object key, int hash) { - // similar to but simpler than scanAndLockForPut - HashEntry first = entryForHash(this, hash); - HashEntry e = first; - int retries = -1; - while (!tryLock()) { - HashEntry f; - if (retries < 0) { - if (e == null || key.equals(e.key)) - retries = 0; - else - e = e.next; - } - else if (++retries > MAX_SCAN_RETRIES) { - lock(); - break; - } - else if ((retries & 1) == 0 && - (f = entryForHash(this, hash)) != first) { - e = first = f; - retries = -1; - } - } - } + /** + * The next table index (plus one) to split while resizing. + */ + private transient volatile int transferIndex; - /** - * Remove; match on key only if value null, else match both. - */ - final V remove(Object key, int hash, Object value) { - if (!tryLock()) - scanAndLock(key, hash); - V oldValue = null; - try { - HashEntry[] tab = table; - int index = (tab.length - 1) & hash; - HashEntry e = entryAt(tab, index); - HashEntry pred = null; - while (e != null) { - K k; - HashEntry next = e.next; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - V v = e.value; - if (value == null || value == v || value.equals(v)) { - if (pred == null) - setEntryAt(tab, index, next); - else - pred.setNext(next); - ++modCount; - --count; - oldValue = v; - } - break; - } - pred = e; - e = next; - } - } finally { - unlock(); - } - return oldValue; - } + /** + * Spinlock (locked via CAS) used when resizing and/or creating CounterCells. + */ + private transient volatile int cellsBusy; - final boolean replace(K key, int hash, V oldValue, V newValue) { - if (!tryLock()) - scanAndLock(key, hash); - boolean replaced = false; - try { - HashEntry e; - for (e = entryForHash(this, hash); e != null; e = e.next) { - K k; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - if (oldValue.equals(e.value)) { - e.value = newValue; - ++modCount; - replaced = true; - } - break; - } - } - } finally { - unlock(); - } - return replaced; - } + /** + * Table of counter cells. When non-null, size is a power of 2. 
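
The counter design above (baseCount plus counter cells) can be sketched with two fixed stripes in place of the dynamically grown CounterCell[]; the names here are illustrative, and the real map probes cells by a per-thread hash and grows the array under the cellsBusy spinlock rather than using a fixed pair.

    import java.util.concurrent.atomic.AtomicLong;

    class StripedCountSketch {
        final AtomicLong baseCount = new AtomicLong();
        final AtomicLong[] cells = { new AtomicLong(), new AtomicLong() };

        void add(long x) {
            // Cheap uncontended path first; fall back to a stripe on CAS failure.
            long b = baseCount.get();
            if (!baseCount.compareAndSet(b, b + x))
                cells[(int) Thread.currentThread().getId() & 1].addAndGet(x);
        }

        long sum() {  // analogous to the map's sumCount(): base plus all cells
            long sum = baseCount.get();
            for (AtomicLong c : cells)
                sum += c.get();
            return sum;
        }
    }
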
+ */ + private transient volatile CounterCell[] counterCells; - final V replace(K key, int hash, V value) { - if (!tryLock()) - scanAndLock(key, hash); - V oldValue = null; - try { - HashEntry e; - for (e = entryForHash(this, hash); e != null; e = e.next) { - K k; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - oldValue = e.value; - e.value = value; - ++modCount; - break; - } - } - } finally { - unlock(); - } - return oldValue; - } + // views + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; - final void clear() { - lock(); - try { - HashEntry[] tab = table; - for (int i = 0; i < tab.length ; i++) - setEntryAt(tab, i, null); - ++modCount; - count = 0; - } finally { - unlock(); - } - } - } - // Accessing segments + /* ---------------- Public operations -------------- */ /** - * Gets the jth element of given segment array (if nonnull) with - * volatile element access semantics via Unsafe. (The null check - * can trigger harmlessly only during deserialization.) Note: - * because each element of segments array is set only once (using - * fully ordered writes), some performance-sensitive methods rely - * on this method only as a recheck upon null reads. + * Creates a new, empty map with the default initial table size (16). */ - @SuppressWarnings("unchecked") - static final Segment segmentAt(Segment[] ss, int j) { - long u = (j << SSHIFT) + SBASE; - return ss == null ? null : - (Segment) UNSAFE.getObjectVolatile(ss, u); + public ConcurrentHashMap() { } /** - * Returns the segment for the given index, creating it and - * recording in segment table (via CAS) if not already present. + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. * - * @param k the index - * @return the segment - */ - @SuppressWarnings("unchecked") - private Segment ensureSegment(int k) { - final Segment[] ss = this.segments; - long u = (k << SSHIFT) + SBASE; // raw offset - Segment seg; - if ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) == null) { - Segment proto = ss[0]; // use segment 0 as prototype - int cap = proto.table.length; - float lf = proto.loadFactor; - int threshold = (int)(cap * lf); - HashEntry[] tab = (HashEntry[])new HashEntry[cap]; - if ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) - == null) { // recheck - Segment s = new Segment(lf, threshold, tab); - while ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) - == null) { - if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s)) - break; - } - } - } - return seg; - } - - // Hash-based segment and entry accesses - - /** - * Get the segment for the given hash + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative */ - @SuppressWarnings("unchecked") - private Segment segmentForHash(int h) { - long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; - return (Segment) UNSAFE.getObjectVolatile(segments, u); + public ConcurrentHashMap(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.sizeCtl = cap; } /** - * Gets the table entry for the given segment and hash + * Creates a new map with the same mappings as the given map. 
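
The sizing arithmetic in the int constructor above, worked as a standalone helper; the method name is hypothetical, and tableSizeFor is reproduced via Integer.highestOneBit (ignoring the MAXIMUM_CAPACITY clamp).

    class SizingMath {
        static int initialTableSize(int initialCapacity) {
            // 1.5x slack plus one, then round up to a power of two, as in the constructor
            int c = initialCapacity + (initialCapacity >>> 1) + 1;
            return Integer.highestOneBit(c - 1) << 1;  // equals tableSizeFor(c) for c > 1
        }
        public static void main(String[] args) {
            System.out.println(initialTableSize(22));  // c = 34, rounded up to 64
        }
    }

For initialCapacity = 22 this yields a 64-bin table, so all 22 mappings fit below the default 0.75 resize threshold (48).
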
+ * + * @param m the map */ - @SuppressWarnings("unchecked") - static final HashEntry entryForHash(Segment seg, int h) { - HashEntry[] tab; - return (seg == null || (tab = seg.table) == null) ? null : - (HashEntry) UNSAFE.getObjectVolatile - (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); + public ConcurrentHashMap(Map m) { + this.sizeCtl = DEFAULT_CAPACITY; + putAll(m); } - /* ---------------- Public operations -------------- */ - /** - * Creates a new, empty map with the specified initial - * capacity, load factor and concurrency level. + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). * * @param initialCapacity the initial capacity. The implementation - * performs internal sizing to accommodate this many elements. - * @param loadFactor the load factor threshold, used to control resizing. - * Resizing may be performed when the average number of elements per - * bin exceeds this threshold. - * @param concurrencyLevel the estimated number of concurrently - * updating threads. The implementation performs internal sizing - * to try to accommodate this many threads. - * @throws IllegalArgumentException if the initial capacity is - * negative or the load factor or concurrencyLevel are - * nonpositive. - */ - @SuppressWarnings("unchecked") - public ConcurrentHashMap(int initialCapacity, - float loadFactor, int concurrencyLevel) { - if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0) - throw new IllegalArgumentException(); - if (concurrencyLevel > MAX_SEGMENTS) - concurrencyLevel = MAX_SEGMENTS; - // Find power-of-two sizes best matching arguments - int sshift = 0; - int ssize = 1; - while (ssize < concurrencyLevel) { - ++sshift; - ssize <<= 1; - } - this.segmentShift = 32 - sshift; - this.segmentMask = ssize - 1; - if (initialCapacity > MAXIMUM_CAPACITY) - initialCapacity = MAXIMUM_CAPACITY; - int c = initialCapacity / ssize; - if (c * ssize < initialCapacity) - ++c; - int cap = MIN_SEGMENT_TABLE_CAPACITY; - while (cap < c) - cap <<= 1; - // create segments and segments[0] - Segment s0 = - new Segment(loadFactor, (int)(cap * loadFactor), - (HashEntry[])new HashEntry[cap]); - Segment[] ss = (Segment[])new Segment[ssize]; - UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0] - this.segments = ss; - } - - /** - * Creates a new, empty map with the specified initial capacity - * and load factor and with the default concurrencyLevel (16). - * - * @param initialCapacity The implementation performs internal - * sizing to accommodate this many elements. - * @param loadFactor the load factor threshold, used to control resizing. - * Resizing may be performed when the average number of elements per - * bin exceeds this threshold. + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size * @throws IllegalArgumentException if the initial capacity of * elements is negative or the load factor is nonpositive * * @since 1.6 */ public ConcurrentHashMap(int initialCapacity, float loadFactor) { - this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL); + this(initialCapacity, loadFactor, 1); } /** - * Creates a new, empty map with the specified initial capacity, - * and with default load factor (0.75) and concurrencyLevel (16). 
+ * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). * * @param initialCapacity the initial capacity. The implementation - * performs internal sizing to accommodate this many elements. - * @throws IllegalArgumentException if the initial capacity of - * elements is negative. + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive */ - public ConcurrentHashMap(int initialCapacity) { - this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); + public ConcurrentHashMap(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.sizeCtl = cap; } - /** - * Creates a new, empty map with a default initial capacity (16), - * load factor (0.75) and concurrencyLevel (16). - */ - public ConcurrentHashMap() { - this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); - } + // Original (since JDK1.2) Map methods /** - * Creates a new map with the same mappings as the given map. - * The map is created with a capacity of 1.5 times the number - * of mappings in the given map or 16 (whichever is greater), - * and a default load factor (0.75) and concurrencyLevel (16). - * - * @param m the map + * {@inheritDoc} */ - public ConcurrentHashMap(Map m) { - this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, - DEFAULT_INITIAL_CAPACITY), - DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); - putAll(m); + public int size() { + long n = sumCount(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); } /** - * Returns true if this map contains no key-value mappings. - * - * @return true if this map contains no key-value mappings + * {@inheritDoc} */ public boolean isEmpty() { - /* - * Sum per-segment modCounts to avoid mis-reporting when - * elements are concurrently added and removed in one segment - * while checking another, in which case the table was never - * actually empty at any point. (The sum ensures accuracy up - * through at least 1<<31 per-segment modifications before - * recheck.) Methods size() and containsValue() use similar - * constructions for stability checks. 
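
The clamp used by size() above, shown standalone: sumCount() returns a long that may be transiently negative or may exceed the int range that Map.size() must return, so both ends are pinned.

    class SizeClamp {
        static int clampToIntSize(long n) {
            return (n < 0L) ? 0 :
                   (n > (long) Integer.MAX_VALUE) ? Integer.MAX_VALUE :
                   (int) n;
        }
        public static void main(String[] args) {
            System.out.println(clampToIntSize(-3L));             // 0
            System.out.println(clampToIntSize(5_000_000_000L));  // 2147483647
        }
    }
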
- */ - long sum = 0L; - final Segment[] segments = this.segments; - for (int j = 0; j < segments.length; ++j) { - Segment seg = segmentAt(segments, j); - if (seg != null) { - if (seg.count != 0) - return false; - sum += seg.modCount; - } - } - if (sum != 0L) { // recheck unless no modifications - for (int j = 0; j < segments.length; ++j) { - Segment seg = segmentAt(segments, j); - if (seg != null) { - if (seg.count != 0) - return false; - sum -= seg.modCount; - } - } - if (sum != 0L) - return false; - } - return true; - } - - /** - * Returns the number of key-value mappings in this map. If the - * map contains more than Integer.MAX_VALUE elements, returns - * Integer.MAX_VALUE. - * - * @return the number of key-value mappings in this map - */ - public int size() { - // Try a few times to get accurate count. On failure due to - // continuous async changes in table, resort to locking. - final Segment[] segments = this.segments; - int size; - boolean overflow; // true if size overflows 32 bits - long sum; // sum of modCounts - long last = 0L; // previous sum - int retries = -1; // first iteration isn't retry - try { - for (;;) { - if (retries++ == RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) - ensureSegment(j).lock(); // force creation - } - sum = 0L; - size = 0; - overflow = false; - for (int j = 0; j < segments.length; ++j) { - Segment seg = segmentAt(segments, j); - if (seg != null) { - sum += seg.modCount; - int c = seg.count; - if (c < 0 || (size += c) < 0) - overflow = true; - } - } - if (sum == last) - break; - last = sum; - } - } finally { - if (retries > RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) - segmentAt(segments, j).unlock(); - } - } - return overflow ? Integer.MAX_VALUE : size; + return sumCount() <= 0L; // ignore transient negative values } /** @@ -825,18 +932,20 @@ public int size() { * @throws NullPointerException if the specified key is null */ public V get(Object key) { - Segment s; // manually integrate access methods to reduce overhead - HashEntry[] tab; - int h = hash(key.hashCode()); - long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; - if ((s = (Segment)UNSAFE.getObjectVolatile(segments, u)) != null && - (tab = s.table) != null) { - for (HashEntry e = (HashEntry) UNSAFE.getObjectVolatile - (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); - e != null; e = e.next) { - K k; - if ((k = e.key) == key || (e.hash == h && key.equals(k))) - return e.value; + Node[] tab; Node e, p; int n, eh; K ek; + int h = spread(key.hashCode()); + if ((tab = table) != null && (n = tab.length) > 0 && + (e = tabAt(tab, (n - 1) & h)) != null) { + if ((eh = e.hash) == h) { + if ((ek = e.key) == key || (ek != null && key.equals(ek))) + return e.val; + } + else if (eh < 0) + return (p = e.find(h, key)) != null ? p.val : null; + while ((e = e.next) != null) { + if (e.hash == h && + ((ek = e.key) == key || (ek != null && key.equals(ek)))) + return e.val; } } return null; @@ -845,151 +954,121 @@ public V get(Object key) { /** * Tests if the specified object is a key in this table. * - * @param key possible key - * @return true if and only if the specified object + * @param key possible key + * @return {@code true} if and only if the specified object * is a key in this table, as determined by the - * equals method; false otherwise. 
+ * {@code equals} method; {@code false} otherwise * @throws NullPointerException if the specified key is null */ - @SuppressWarnings("unchecked") public boolean containsKey(Object key) { - Segment s; // same as get() except no need for volatile value read - HashEntry[] tab; - int h = hash(key.hashCode()); - long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; - if ((s = (Segment)UNSAFE.getObjectVolatile(segments, u)) != null && - (tab = s.table) != null) { - for (HashEntry e = (HashEntry) UNSAFE.getObjectVolatile - (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); - e != null; e = e.next) { - K k; - if ((k = e.key) == key || (e.hash == h && key.equals(k))) - return true; - } - } - return false; + return get(key) != null; } /** - * Returns true if this map maps one or more keys to the - * specified value. Note: This method requires a full internal - * traversal of the hash table, and so is much slower than - * method containsKey. + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. * * @param value value whose presence in this map is to be tested - * @return true if this map maps one or more keys to the + * @return {@code true} if this map maps one or more keys to the * specified value * @throws NullPointerException if the specified value is null */ public boolean containsValue(Object value) { - // Same idea as size() if (value == null) throw new NullPointerException(); - final Segment[] segments = this.segments; - boolean found = false; - long last = 0; - int retries = -1; - try { - outer: for (;;) { - if (retries++ == RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) - ensureSegment(j).lock(); // force creation - } - long hashSum = 0L; - int sum = 0; - for (int j = 0; j < segments.length; ++j) { - HashEntry[] tab; - Segment seg = segmentAt(segments, j); - if (seg != null && (tab = seg.table) != null) { - for (int i = 0 ; i < tab.length; i++) { - HashEntry e; - for (e = entryAt(tab, i); e != null; e = e.next) { - V v = e.value; - if (v != null && value.equals(v)) { - found = true; - break outer; - } - } - } - sum += seg.modCount; - } - } - if (retries > 0 && sum == last) - break; - last = sum; - } - } finally { - if (retries > RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) - segmentAt(segments, j).unlock(); + Node[] t; + if ((t = table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) { + V v; + if ((v = p.val) == value || (v != null && value.equals(v))) + return true; } } - return found; - } - - /** - * Legacy method testing if some key maps into the specified value - * in this table. This method is identical in functionality to - * {@link #containsValue}, and exists solely to ensure - * full compatibility with class {@link java.util.Hashtable}, - * which supported this method prior to introduction of the - * Java Collections framework. - - * @param value a value to search for - * @return true if and only if some key maps to the - * value argument in this table as - * determined by the equals method; - * false otherwise - * @throws NullPointerException if the specified value is null - */ - public boolean contains(Object value) { - return containsValue(value); + return false; } /** * Maps the specified key to the specified value in this table. * Neither the key nor the value can be null. * - *

The value can be retrieved by calling the get method + *

The value can be retrieved by calling the {@code get} method * with a key that is equal to the original key. * * @param key key with which the specified value is to be associated * @param value value to be associated with the specified key - * @return the previous value associated with key, or - * null if there was no mapping for key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} * @throws NullPointerException if the specified key or value is null */ - @SuppressWarnings("unchecked") public V put(K key, V value) { - Segment s; - if (value == null) - throw new NullPointerException(); - int hash = hash(key.hashCode()); - int j = (hash >>> segmentShift) & segmentMask; - if ((s = (Segment)UNSAFE.getObject // nonvolatile; recheck - (segments, (j << SSHIFT) + SBASE)) == null) // in ensureSegment - s = ensureSegment(j); - return s.put(key, hash, value, false); + return putVal(key, value, false); } - /** - * {@inheritDoc} - * - * @return the previous value associated with the specified key, - * or null if there was no mapping for the key - * @throws NullPointerException if the specified key or value is null - */ - @SuppressWarnings("unchecked") - public V putIfAbsent(K key, V value) { - Segment s; - if (value == null) - throw new NullPointerException(); - int hash = hash(key.hashCode()); - int j = (hash >>> segmentShift) & segmentMask; - if ((s = (Segment)UNSAFE.getObject - (segments, (j << SSHIFT) + SBASE)) == null) - s = ensureSegment(j); - return s.put(key, hash, value, true); + /** Implementation for put and putIfAbsent */ + final V putVal(K key, V value, boolean onlyIfAbsent) { + if (key == null || value == null) throw new NullPointerException(); + int hash = spread(key.hashCode()); + int binCount = 0; + for (Node[] tab = table;;) { + Node f; int n, i, fh; + if (tab == null || (n = tab.length) == 0) + tab = initTable(); + else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) { + if (casTabAt(tab, i, null, + new Node(hash, key, value, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) + tab = helpTransfer(tab, f); + else { + V oldVal = null; + synchronized (f) { + if (tabAt(tab, i) == f) { + if (fh >= 0) { + binCount = 1; + for (Node e = f;; ++binCount) { + K ek; + if (e.hash == hash && + ((ek = e.key) == key || + (ek != null && key.equals(ek)))) { + oldVal = e.val; + if (!onlyIfAbsent) + e.val = value; + break; + } + Node pred = e; + if ((e = e.next) == null) { + pred.next = new Node(hash, key, + value, null); + break; + } + } + } + else if (f instanceof TreeBin) { + Node p; + binCount = 2; + if ((p = ((TreeBin)f).putTreeVal(hash, key, + value)) != null) { + oldVal = p.val; + if (!onlyIfAbsent) + p.val = value; + } + } + } + } + if (binCount != 0) { + if (binCount >= TREEIFY_THRESHOLD) + treeifyBin(tab, i); + if (oldVal != null) + return oldVal; + break; + } + } + } + addCount(1L, binCount); + return null; } /** @@ -1000,8 +1079,9 @@ public V putIfAbsent(K key, V value) { * @param m mappings to be stored in this map */ public void putAll(Map m) { + tryPresize(m.size()); for (Map.Entry e : m.entrySet()) - put(e.getKey(), e.getValue()); + putVal(e.getKey(), e.getValue(), false); } /** @@ -1009,87 +1089,146 @@ public void putAll(Map m) { * This method does nothing if the key is not in the map. 
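
A short demonstration of the return-value contract that putVal above implements for both put and putIfAbsent:

    import java.util.concurrent.ConcurrentHashMap;

    class PutDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            System.out.println(map.put("a", 1));          // null: no previous mapping
            System.out.println(map.put("a", 2));          // 1: previous value returned
            System.out.println(map.putIfAbsent("a", 9));  // 2: onlyIfAbsent keeps the old value
            System.out.println(map.putIfAbsent("b", 3));  // null: new mapping created
        }
    }
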
* * @param key the key that needs to be removed - * @return the previous value associated with key, or - * null if there was no mapping for key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} * @throws NullPointerException if the specified key is null */ public V remove(Object key) { - int hash = hash(key.hashCode()); - Segment s = segmentForHash(hash); - return s == null ? null : s.remove(key, hash, null); - } - - /** - * {@inheritDoc} - * - * @throws NullPointerException if the specified key is null - */ - public boolean remove(Object key, Object value) { - int hash = hash(key.hashCode()); - Segment s; - return value != null && (s = segmentForHash(hash)) != null && - s.remove(key, hash, value) != null; + return replaceNode(key, null, null); } /** - * {@inheritDoc} - * - * @throws NullPointerException if any of the arguments are null - */ - public boolean replace(K key, V oldValue, V newValue) { - int hash = hash(key.hashCode()); - if (oldValue == null || newValue == null) - throw new NullPointerException(); - Segment s = segmentForHash(hash); - return s != null && s.replace(key, hash, oldValue, newValue); - } - - /** - * {@inheritDoc} - * - * @return the previous value associated with the specified key, - * or null if there was no mapping for the key - * @throws NullPointerException if the specified key or value is null + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. */ - public V replace(K key, V value) { - int hash = hash(key.hashCode()); - if (value == null) - throw new NullPointerException(); - Segment s = segmentForHash(hash); - return s == null ? null : s.replace(key, hash, value); + final V replaceNode(Object key, V value, Object cv) { + int hash = spread(key.hashCode()); + for (Node[] tab = table;;) { + Node f; int n, i, fh; + if (tab == null || (n = tab.length) == 0 || + (f = tabAt(tab, i = (n - 1) & hash)) == null) + break; + else if ((fh = f.hash) == MOVED) + tab = helpTransfer(tab, f); + else { + V oldVal = null; + boolean validated = false; + synchronized (f) { + if (tabAt(tab, i) == f) { + if (fh >= 0) { + validated = true; + for (Node e = f, pred = null;;) { + K ek; + if (e.hash == hash && + ((ek = e.key) == key || + (ek != null && key.equals(ek)))) { + V ev = e.val; + if (cv == null || cv == ev || + (ev != null && cv.equals(ev))) { + oldVal = ev; + if (value != null) + e.val = value; + else if (pred != null) + pred.next = e.next; + else + setTabAt(tab, i, e.next); + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + else if (f instanceof TreeBin) { + validated = true; + TreeBin t = (TreeBin)f; + TreeNode r, p; + if ((r = t.root) != null && + (p = r.findTreeNode(hash, key, null)) != null) { + V pv = p.val; + if (cv == null || cv == pv || + (pv != null && cv.equals(pv))) { + oldVal = pv; + if (value != null) + p.val = value; + else if (t.removeTreeNode(p)) + setTabAt(tab, i, untreeify(t.first)); + } + } + } + } + } + if (validated) { + if (oldVal != null) { + if (value == null) + addCount(-1L, -1); + return oldVal; + } + break; + } + } + } + return null; } /** * Removes all of the mappings from this map. 
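
The conditional semantics that replaceNode above gives the public remove and replace methods, demonstrated (value null deletes; the cv argument conditions the match):

    import java.util.concurrent.ConcurrentHashMap;

    class RemoveReplaceDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("k", 1);
            System.out.println(map.replace("k", 1, 2));  // true: expected value matched
            System.out.println(map.remove("k", 1));      // false: 1 no longer matches
            System.out.println(map.remove("k"));         // 2: unconditional remove returns old value
        }
    }
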
*/ public void clear() { - final Segment[] segments = this.segments; - for (int j = 0; j < segments.length; ++j) { - Segment s = segmentAt(segments, j); - if (s != null) - s.clear(); + long delta = 0L; // negative number of deletions + int i = 0; + Node[] tab = table; + while (tab != null && i < tab.length) { + int fh; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + tab = helpTransfer(tab, f); + i = 0; // restart + } + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + Node p = (fh >= 0 ? f : + (f instanceof TreeBin) ? + ((TreeBin)f).first : null); + while (p != null) { + --delta; + p = p.next; + } + setTabAt(tab, i++, null); + } + } + } } + if (delta != 0L) + addCount(delta, -1); } /** * Returns a {@link Set} view of the keys contained in this map. * The set is backed by the map, so changes to the map are - * reflected in the set, and vice-versa. The set supports element + * reflected in the set, and vice-versa. The set supports element * removal, which removes the corresponding mapping from this map, - * via the Iterator.remove, Set.remove, - * removeAll, retainAll, and clear - * operations. It does not support the add or - * addAll operations. + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + *

The view's iterators and spliterators are + * weakly consistent. + * + *

The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}, + * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}. * - *

The view's iterator is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. + * @return the set view */ - public Set keySet() { - Set ks = keySet; - return (ks != null) ? ks : (keySet = new KeySet()); + public KeySetView keySet() { + KeySetView ks; + return (ks = keySet) != null ? ks : (keySet = new KeySetView(this, null)); } /** @@ -1097,20 +1236,22 @@ public Set keySet() { * The collection is backed by the map, so changes to the map are * reflected in the collection, and vice-versa. The collection * supports element removal, which removes the corresponding - * mapping from this map, via the Iterator.remove, - * Collection.remove, removeAll, - * retainAll, and clear operations. It does not - * support the add or addAll operations. + * mapping from this map, via the {@code Iterator.remove}, + * {@code Collection.remove}, {@code removeAll}, + * {@code retainAll}, and {@code clear} operations. It does not + * support the {@code add} or {@code addAll} operations. * - *

The view's iterator is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. + *

The view's iterators and spliterators are + * weakly consistent. + * + *

The view's {@code spliterator} reports {@link Spliterator#CONCURRENT} + * and {@link Spliterator#NONNULL}. + * + * @return the collection view */ public Collection values() { - Collection vs = values; - return (vs != null) ? vs : (values = new Values()); + ValuesView vs; + return (vs = values) != null ? vs : (values = new ValuesView(this)); } /** @@ -1118,313 +1259,5054 @@ public Collection values() { * The set is backed by the map, so changes to the map are * reflected in the set, and vice-versa. The set supports element * removal, which removes the corresponding mapping from the map, - * via the Iterator.remove, Set.remove, - * removeAll, retainAll, and clear - * operations. It does not support the add or - * addAll operations. + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. + * + *

The view's iterators and spliterators are + * weakly consistent. + * + *

The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}, + * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}. * - *

The view's iterator is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. + * @return the set view */ public Set> entrySet() { - Set> es = entrySet; - return (es != null) ? es : (entrySet = new EntrySet()); + EntrySetView es; + return (es = entrySet) != null ? es : (entrySet = new EntrySetView(this)); } /** - * Returns an enumeration of the keys in this table. + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. * - * @return an enumeration of the keys in this table - * @see #keySet() + * @return the hash code value for this map */ - public Enumeration keys() { - return new KeyIterator(); + public int hashCode() { + int h = 0; + Node[] t; + if ((t = table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) + h += p.key.hashCode() ^ p.val.hashCode(); + } + return h; } /** - * Returns an enumeration of the values in this table. + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. * - * @return an enumeration of the values in this table - * @see #values() + * @return a string representation of this map */ - public Enumeration elements() { - return new ValueIterator(); - } - - /* ---------------- Iterator Support -------------- */ - - abstract class HashIterator { - int nextSegmentIndex; - int nextTableIndex; - HashEntry[] currentTable; - HashEntry nextEntry; - HashEntry lastReturned; - - HashIterator() { - nextSegmentIndex = segments.length - 1; - nextTableIndex = -1; - advance(); - } - - /** - * Set nextEntry to first node of next non-empty table - * (in backwards order, to simplify checks). - */ - final void advance() { + public String toString() { + Node[] t; + int f = (t = table) == null ? 0 : t.length; + Traverser it = new Traverser(t, f, 0, f); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Node p; + if ((p = it.advance()) != null) { for (;;) { - if (nextTableIndex >= 0) { - if ((nextEntry = entryAt(currentTable, - nextTableIndex--)) != null) - break; - } - else if (nextSegmentIndex >= 0) { - Segment seg = segmentAt(segments, nextSegmentIndex--); - if (seg != null && (currentTable = seg.table) != null) - nextTableIndex = currentTable.length - 1; - } - else + K k = p.key; + V v = p.val; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? 
"(this Map)" : v); + if ((p = it.advance()) == null) break; + sb.append(',').append(' '); } } - - final HashEntry nextEntry() { - HashEntry e = nextEntry; - if (e == null) - throw new NoSuchElementException(); - lastReturned = e; // cannot assign until after null check - if ((nextEntry = e.next) == null) - advance(); - return e; - } - - public final boolean hasNext() { return nextEntry != null; } - public final boolean hasMoreElements() { return nextEntry != null; } - - public final void remove() { - if (lastReturned == null) - throw new IllegalStateException(); - ConcurrentHashMap.this.remove(lastReturned.key); - lastReturned = null; - } - } - - final class KeyIterator - extends HashIterator - implements Iterator, Enumeration - { - public final K next() { return super.nextEntry().key; } - public final K nextElement() { return super.nextEntry().key; } - } - - final class ValueIterator - extends HashIterator - implements Iterator, Enumeration - { - public final V next() { return super.nextEntry().value; } - public final V nextElement() { return super.nextEntry().value; } + return sb.append('}').toString(); } /** - * Custom Entry class used by EntryIterator.next(), that relays - * setValue changes to the underlying map. + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. + * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map */ - final class WriteThroughEntry - extends AbstractMap.SimpleEntry - { - WriteThroughEntry(K k, V v) { - super(k,v); - } - - /** - * Set our entry's value and write through to the map. The - * value to return is somewhat arbitrary here. Since a - * WriteThroughEntry does not necessarily track asynchronous - * changes, the most recent "previous" value could be - * different from what we return (or could even have been - * removed in which case the put will re-establish). We do not - * and cannot guarantee more. 
- */ - public V setValue(V value) { - if (value == null) throw new NullPointerException(); - V v = super.setValue(value); - ConcurrentHashMap.this.put(getKey(), value); - return v; - } - } - - final class EntryIterator - extends HashIterator - implements Iterator> - { - public Map.Entry next() { - HashEntry e = super.nextEntry(); - return new WriteThroughEntry(e.key, e.value); - } - } - - final class KeySet extends AbstractSet { - public Iterator iterator() { - return new KeyIterator(); - } - public int size() { - return ConcurrentHashMap.this.size(); - } - public boolean isEmpty() { - return ConcurrentHashMap.this.isEmpty(); - } - public boolean contains(Object o) { - return ConcurrentHashMap.this.containsKey(o); - } - public boolean remove(Object o) { - return ConcurrentHashMap.this.remove(o) != null; - } - public void clear() { - ConcurrentHashMap.this.clear(); - } - } - - final class Values extends AbstractCollection { - public Iterator iterator() { - return new ValueIterator(); - } - public int size() { - return ConcurrentHashMap.this.size(); - } - public boolean isEmpty() { - return ConcurrentHashMap.this.isEmpty(); - } - public boolean contains(Object o) { - return ConcurrentHashMap.this.containsValue(o); - } - public void clear() { - ConcurrentHashMap.this.clear(); - } - } - - final class EntrySet extends AbstractSet> { - public Iterator> iterator() { - return new EntryIterator(); - } - public boolean contains(Object o) { - if (!(o instanceof Map.Entry)) - return false; - Map.Entry e = (Map.Entry)o; - V v = ConcurrentHashMap.this.get(e.getKey()); - return v != null && v.equals(e.getValue()); - } - public boolean remove(Object o) { - if (!(o instanceof Map.Entry)) + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) return false; - Map.Entry e = (Map.Entry)o; - return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()); - } - public int size() { - return ConcurrentHashMap.this.size(); - } - public boolean isEmpty() { - return ConcurrentHashMap.this.isEmpty(); - } - public void clear() { - ConcurrentHashMap.this.clear(); + Map m = (Map) o; + Node[] t; + int f = (t = table) == null ? 0 : t.length; + Traverser it = new Traverser(t, f, 0, f); + for (Node p; (p = it.advance()) != null; ) { + V val = p.val; + Object v = m.get(p.key); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = get(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } } + return true; } - /* ---------------- Serialization Support -------------- */ + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment extends ReentrantLock implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } /** - * Save the state of the ConcurrentHashMap instance to a - * stream (i.e., serialize it). + * Saves the state of the {@code ConcurrentHashMap} instance to a + * stream (i.e., serializes it). * @param s the stream + * @throws java.io.IOException if an I/O error occurs * @serialData * the key (Object) and value (Object) * for each key-value mapping, followed by a null pair. * The key-value mappings are emitted in no particular order. 
*/ - private void writeObject(java.io.ObjectOutputStream s) throws IOException { - // force all segments for serialization compatibility - for (int k = 0; k < segments.length; ++k) - ensureSegment(k); - s.defaultWriteObject(); + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + // For serialization compatibility + // Emulate segment calculation from previous version of this class + int sshift = 0; + int ssize = 1; + while (ssize < DEFAULT_CONCURRENCY_LEVEL) { + ++sshift; + ssize <<= 1; + } + int segmentShift = 32 - sshift; + int segmentMask = ssize - 1; + @SuppressWarnings("unchecked") + Segment[] segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + s.putFields().put("segments", segments); + s.putFields().put("segmentShift", segmentShift); + s.putFields().put("segmentMask", segmentMask); + s.writeFields(); - final Segment[] segments = this.segments; - for (int k = 0; k < segments.length; ++k) { - Segment seg = segmentAt(segments, k); - seg.lock(); - try { - HashEntry[] tab = seg.table; - for (int i = 0; i < tab.length; ++i) { - HashEntry e; - for (e = entryAt(tab, i); e != null; e = e.next) { - s.writeObject(e.key); - s.writeObject(e.value); - } - } - } finally { - seg.unlock(); + Node[] t; + if ((t = table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) { + s.writeObject(p.key); + s.writeObject(p.val); } } s.writeObject(null); s.writeObject(null); + segments = null; // throw away } /** - * Reconstitute the ConcurrentHashMap instance from a - * stream (i.e., deserialize it). + * Reconstitutes the instance from a stream (that is, deserializes it). * @param s the stream + * @throws ClassNotFoundException if the class of a serialized object + * could not be found + * @throws java.io.IOException if an I/O error occurs */ - @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) - throws IOException, ClassNotFoundException { + throws java.io.IOException, ClassNotFoundException { + /* + * To improve performance in typical cases, we create nodes + * while reading, then place in table once size is known. + * However, we must also validate uniqueness and deal with + * overpopulated bins while doing so, which requires + * specialized versions of putVal mechanics. + */ + sizeCtl = -1; // force exclusion for table construction s.defaultReadObject(); - - // Re-initialize segments to be minimally sized, and let grow. 
- int cap = MIN_SEGMENT_TABLE_CAPACITY; - final Segment[] segments = this.segments; - for (int k = 0; k < segments.length; ++k) { - Segment seg = segments[k]; - if (seg != null) { - seg.threshold = (int)(cap * seg.loadFactor); - seg.table = (HashEntry[]) new HashEntry[cap]; - } - } - - // Read the keys and values, and put the mappings in the table + long size = 0L; + Node p = null; for (;;) { - K key = (K) s.readObject(); - V value = (V) s.readObject(); - if (key == null) + @SuppressWarnings("unchecked") + K k = (K) s.readObject(); + @SuppressWarnings("unchecked") + V v = (V) s.readObject(); + if (k != null && v != null) { + p = new Node(spread(k.hashCode()), k, v, p); + ++size; + } + else break; - put(key, value); + } + if (size == 0L) + sizeCtl = 0; + else { + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + @SuppressWarnings("unchecked") + Node[] tab = (Node[])new Node[n]; + int mask = n - 1; + long added = 0L; + while (p != null) { + boolean insertAtFront; + Node next = p.next, first; + int h = p.hash, j = h & mask; + if ((first = tabAt(tab, j)) == null) + insertAtFront = true; + else { + K k = p.key; + if (first.hash < 0) { + TreeBin t = (TreeBin)first; + if (t.putTreeVal(h, k, p.val) == null) + ++added; + insertAtFront = false; + } + else { + int binCount = 0; + insertAtFront = true; + Node q; K qk; + for (q = first; q != null; q = q.next) { + if (q.hash == h && + ((qk = q.key) == k || + (qk != null && k.equals(qk)))) { + insertAtFront = false; + break; + } + ++binCount; + } + if (insertAtFront && binCount >= TREEIFY_THRESHOLD) { + insertAtFront = false; + ++added; + p.next = first; + TreeNode hd = null, tl = null; + for (q = p; q != null; q = q.next) { + TreeNode t = new TreeNode + (q.hash, q.key, q.val, null, null); + if ((t.prev = tl) == null) + hd = t; + else + tl.next = t; + tl = t; + } + setTabAt(tab, j, new TreeBin(hd)); + } + } + } + if (insertAtFront) { + ++added; + p.next = first; + setTabAt(tab, j, p); + } + p = next; + } + table = tab; + sizeCtl = n - (n >>> 2); + baseCount = added; } } - // Unsafe mechanics - private static final sun.misc.Unsafe UNSAFE; - private static final long SBASE; - private static final int SSHIFT; - private static final long TBASE; - private static final int TSHIFT; + // ConcurrentMap methods - static { - int ss, ts; - try { - UNSAFE = sun.misc.Unsafe.getUnsafe(); - Class tc = HashEntry[].class; - Class sc = Segment[].class; - TBASE = UNSAFE.arrayBaseOffset(tc); - SBASE = UNSAFE.arrayBaseOffset(sc); - ts = UNSAFE.arrayIndexScale(tc); - ss = UNSAFE.arrayIndexScale(sc); - } catch (Exception e) { - throw new Error(e); - } - if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0) - throw new Error("data type scale not a power of two"); - SSHIFT = 31 - Integer.numberOfLeadingZeros(ss); - TSHIFT = 31 - Integer.numberOfLeadingZeros(ts); + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + public V putIfAbsent(K key, V value) { + return putVal(key, value, true); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + return value != null && replaceNode(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws 
NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return replaceNode(key, newValue, oldValue) != null; } + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return replaceNode(key, value, null); + } + + // Overrides of JDK8+ Map extension method defaults + + /** + * Returns the value to which the specified key is mapped, or the + * given default value if this map contains no mapping for the + * key. + * + * @param key the key whose associated value is to be returned + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the default value + * @throws NullPointerException if the specified key is null + */ + public V getOrDefault(Object key, V defaultValue) { + V v; + return (v = get(key)) == null ? defaultValue : v; + } + + public void forEach(BiConsumer action) { + if (action == null) throw new NullPointerException(); + Node[] t; + if ((t = table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) { + action.accept(p.key, p.val); + } + } + } + + public void replaceAll(BiFunction function) { + if (function == null) throw new NullPointerException(); + Node[] t; + if ((t = table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) { + V oldValue = p.val; + for (K key = p.key;;) { + V newValue = function.apply(key, oldValue); + if (newValue == null) + throw new NullPointerException(); + if (replaceNode(key, newValue, oldValue) != null || + (oldValue = get(key)) == null) + break; + } + } + } + } + + /** + * If the specified key is not already associated with a value, + * attempts to compute its value using the given mapping function + * and enters it into this map unless {@code null}. The entire + * method invocation is performed atomically, so the function is + * applied at most once per key. Some attempted update operations + * on this map by other threads may be blocked while computation + * is in progress, so the computation should be short and simple, + * and must not attempt to update any other mappings of this map. 
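
Typical use of computeIfAbsent as specified above: atomic, at-most-once initialization per key, here building a simple concurrent multimap.

    import java.util.List;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.CopyOnWriteArrayList;

    class ComputeIfAbsentDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, List<Integer>> multimap = new ConcurrentHashMap<>();
            // The mapping function runs at most once per key, even under contention.
            multimap.computeIfAbsent("evens", k -> new CopyOnWriteArrayList<>()).add(2);
            multimap.computeIfAbsent("evens", k -> new CopyOnWriteArrayList<>()).add(4);
            System.out.println(multimap);  // {evens=[2, 4]}
        }
    }
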
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + public V computeIfAbsent(K key, Function mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + int h = spread(key.hashCode()); + V val = null; + int binCount = 0; + for (Node[] tab = table;;) { + Node f; int n, i, fh; + if (tab == null || (n = tab.length) == 0) + tab = initTable(); + else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { + Node r = new ReservationNode(); + synchronized (r) { + if (casTabAt(tab, i, null, r)) { + binCount = 1; + Node node = null; + try { + if ((val = mappingFunction.apply(key)) != null) + node = new Node(h, key, val, null); + } finally { + setTabAt(tab, i, node); + } + } + } + if (binCount != 0) + break; + } + else if ((fh = f.hash) == MOVED) + tab = helpTransfer(tab, f); + else { + boolean added = false; + synchronized (f) { + if (tabAt(tab, i) == f) { + if (fh >= 0) { + binCount = 1; + for (Node e = f;; ++binCount) { + K ek; V ev; + if (e.hash == h && + ((ek = e.key) == key || + (ek != null && key.equals(ek)))) { + val = e.val; + break; + } + Node pred = e; + if ((e = e.next) == null) { + if ((val = mappingFunction.apply(key)) != null) { + added = true; + pred.next = new Node(h, key, val, null); + } + break; + } + } + } + else if (f instanceof TreeBin) { + binCount = 2; + TreeBin t = (TreeBin)f; + TreeNode r, p; + if ((r = t.root) != null && + (p = r.findTreeNode(h, key, null)) != null) + val = p.val; + else if ((val = mappingFunction.apply(key)) != null) { + added = true; + t.putTreeVal(h, key, val); + } + } + } + } + if (binCount != 0) { + if (binCount >= TREEIFY_THRESHOLD) + treeifyBin(tab, i); + if (!added) + return val; + break; + } + } + } + if (val != null) + addCount(1L, binCount); + return val; + } + + /** + * If the value for the specified key is present, attempts to + * compute a new mapping given the key and its current mapped + * value. The entire method invocation is performed atomically. + * Some attempted update operations on this map by other threads + * may be blocked while computation is in progress, so the + * computation should be short and simple, and must not attempt to + * update any other mappings of this map. 
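
Typical use of computeIfPresent as specified above: returning null from the remapping function removes the entry atomically (the delta = -1 path in the implementation below).

    import java.util.concurrent.ConcurrentHashMap;

    class ComputeIfPresentDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> refCounts = new ConcurrentHashMap<>();
            refCounts.put("res", 2);
            refCounts.computeIfPresent("res", (k, v) -> v > 1 ? v - 1 : null);  // 2 -> 1
            refCounts.computeIfPresent("res", (k, v) -> v > 1 ? v - 1 : null);  // 1 -> removed
            System.out.println(refCounts.containsKey("res"));                   // false
        }
    }
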
+ * + * @param key key with which a value may be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + public V computeIfPresent(K key, BiFunction remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + int h = spread(key.hashCode()); + V val = null; + int delta = 0; + int binCount = 0; + for (Node[] tab = table;;) { + Node f; int n, i, fh; + if (tab == null || (n = tab.length) == 0) + tab = initTable(); + else if ((f = tabAt(tab, i = (n - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) + tab = helpTransfer(tab, f); + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + if (fh >= 0) { + binCount = 1; + for (Node e = f, pred = null;; ++binCount) { + K ek; + if (e.hash == h && + ((ek = e.key) == key || + (ek != null && key.equals(ek)))) { + val = remappingFunction.apply(key, e.val); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + else if (f instanceof TreeBin) { + binCount = 2; + TreeBin t = (TreeBin)f; + TreeNode r, p; + if ((r = t.root) != null && + (p = r.findTreeNode(h, key, null)) != null) { + val = remappingFunction.apply(key, p.val); + if (val != null) + p.val = val; + else { + delta = -1; + if (t.removeTreeNode(p)) + setTabAt(tab, i, untreeify(t.first)); + } + } + } + } + } + if (binCount != 0) + break; + } + } + if (delta != 0) + addCount((long)delta, binCount); + return val; + } + + /** + * Attempts to compute a mapping for the specified key and its + * current mapped value (or {@code null} if there is no current + * mapping). The entire method invocation is performed atomically. + * Some attempted update operations on this map by other threads + * may be blocked while computation is in progress, so the + * computation should be short and simple, and must not attempt to + * update any other mappings of this Map. 
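+     *
+     * <p>A minimal usage sketch, assuming a hypothetical
+     * {@code ConcurrentHashMap<String, Integer> counts}:
+     * <pre> {@code
+     * // create the counter on first use, increment it afterwards
+     * counts.compute("key", (k, v) -> (v == null) ? 1 : v + 1);
+     * }</pre>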
+ * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + public V compute(K key, + BiFunction remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + int h = spread(key.hashCode()); + V val = null; + int delta = 0; + int binCount = 0; + for (Node[] tab = table;;) { + Node f; int n, i, fh; + if (tab == null || (n = tab.length) == 0) + tab = initTable(); + else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { + Node r = new ReservationNode(); + synchronized (r) { + if (casTabAt(tab, i, null, r)) { + binCount = 1; + Node node = null; + try { + if ((val = remappingFunction.apply(key, null)) != null) { + delta = 1; + node = new Node(h, key, val, null); + } + } finally { + setTabAt(tab, i, node); + } + } + } + if (binCount != 0) + break; + } + else if ((fh = f.hash) == MOVED) + tab = helpTransfer(tab, f); + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + if (fh >= 0) { + binCount = 1; + for (Node e = f, pred = null;; ++binCount) { + K ek; + if (e.hash == h && + ((ek = e.key) == key || + (ek != null && key.equals(ek)))) { + val = remappingFunction.apply(key, e.val); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = remappingFunction.apply(key, null); + if (val != null) { + delta = 1; + pred.next = + new Node(h, key, val, null); + } + break; + } + } + } + else if (f instanceof TreeBin) { + binCount = 1; + TreeBin t = (TreeBin)f; + TreeNode r, p; + if ((r = t.root) != null) + p = r.findTreeNode(h, key, null); + else + p = null; + V pv = (p == null) ? null : p.val; + val = remappingFunction.apply(key, pv); + if (val != null) { + if (p != null) + p.val = val; + else { + delta = 1; + t.putTreeVal(h, key, val); + } + } + else if (p != null) { + delta = -1; + if (t.removeTreeNode(p)) + setTabAt(tab, i, untreeify(t.first)); + } + } + } + } + if (binCount != 0) { + if (binCount >= TREEIFY_THRESHOLD) + treeifyBin(tab, i); + break; + } + } + } + if (delta != 0) + addCount((long)delta, binCount); + return val; + } + + /** + * If the specified key is not already associated with a + * (non-null) value, associates it with the given value. + * Otherwise, replaces the value with the results of the given + * remapping function, or removes if {@code null}. The entire + * method invocation is performed atomically. Some attempted + * update operations on this map by other threads may be blocked + * while computation is in progress, so the computation should be + * short and simple, and must not attempt to update any other + * mappings of this Map. 
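+     *
+     * <p>A minimal word-count sketch, assuming a hypothetical
+     * {@code ConcurrentHashMap<String, Integer> freq} and an iterable
+     * {@code words}:
+     * <pre> {@code
+     * for (String w : words)
+     *     freq.merge(w, 1, Integer::sum); // insert 1, or add 1 to the old count
+     * }</pre>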
+ * + * @param key key with which the specified value is to be associated + * @param value the value to use if absent + * @param remappingFunction the function to recompute a value if present + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or the + * remappingFunction is null + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + public V merge(K key, V value, BiFunction remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + int h = spread(key.hashCode()); + V val = null; + int delta = 0; + int binCount = 0; + for (Node[] tab = table;;) { + Node f; int n, i, fh; + if (tab == null || (n = tab.length) == 0) + tab = initTable(); + else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, key, value, null))) { + delta = 1; + val = value; + break; + } + } + else if ((fh = f.hash) == MOVED) + tab = helpTransfer(tab, f); + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + if (fh >= 0) { + binCount = 1; + for (Node e = f, pred = null;; ++binCount) { + K ek; + if (e.hash == h && + ((ek = e.key) == key || + (ek != null && key.equals(ek)))) { + val = remappingFunction.apply(e.val, value); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + delta = 1; + val = value; + pred.next = + new Node(h, key, val, null); + break; + } + } + } + else if (f instanceof TreeBin) { + binCount = 2; + TreeBin t = (TreeBin)f; + TreeNode r = t.root; + TreeNode p = (r == null) ? null : + r.findTreeNode(h, key, null); + val = (p == null) ? value : + remappingFunction.apply(p.val, value); + if (val != null) { + if (p != null) + p.val = val; + else { + delta = 1; + t.putTreeVal(h, key, val); + } + } + else if (p != null) { + delta = -1; + if (t.removeTreeNode(p)) + setTabAt(tab, i, untreeify(t.first)); + } + } + } + } + if (binCount != 0) { + if (binCount >= TREEIFY_THRESHOLD) + treeifyBin(tab, i); + break; + } + } + } + if (delta != 0) + addCount((long)delta, binCount); + return val; + } + + // Hashtable legacy methods + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue(Object)}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + Node[] t; + int f = (t = table) == null ? 0 : t.length; + return new KeyIterator(t, f, 0, f, this); + } + + /** + * Returns an enumeration of the values in this table. 
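+     *
+     * <p>A minimal legacy-style traversal sketch, assuming a
+     * hypothetical {@code ConcurrentHashMap<String, Integer> map}:
+     * <pre> {@code
+     * for (Enumeration<Integer> e = map.elements(); e.hasMoreElements();)
+     *     System.out.println(e.nextElement());
+     * }</pre>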
+ * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + Node[] t; + int f = (t = table) == null ? 0 : t.length; + return new ValueIterator(t, f, 0, f, this); + } + + // ConcurrentHashMap-only methods + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMap may + * contain more mappings than can be represented as an int. The + * value returned is an estimate; the actual count may differ if + * there are concurrent insertions or removals. + * + * @return the number of mappings + * @since 1.8 + */ + public long mappingCount() { + long n = sumCount(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMap + * from the given type to {@code Boolean.TRUE}. + * + * @param the element type of the returned set + * @return the new set + * @since 1.8 + */ + public static KeySetView newKeySet() { + return new KeySetView + (new ConcurrentHashMap(), Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMap + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @param the element type of the returned set + * @return the new set + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @since 1.8 + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView + (new ConcurrentHashMap(initialCapacity), Boolean.TRUE); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll(Collection)}). + * This is of course only appropriate if it is acceptable to use + * the same value for all additions from this view. + * + * @param mappedValue the mapped value to use for any additions + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); + } + + /* ---------------- Special Nodes -------------- */ + + /** + * A node inserted at head of bins during transfer operations. 
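+     * Lookups that land on such a node are re-routed to the new table
+     * through {@code nextTable} by its {@code find} method, so reads
+     * proceed while bins are being moved.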
+ */ + static final class ForwardingNode extends Node { + final Node[] nextTable; + ForwardingNode(Node[] tab) { + super(MOVED, null, null, null); + this.nextTable = tab; + } + + Node find(int h, Object k) { + // loop to avoid arbitrarily deep recursion on forwarding nodes + outer: for (Node[] tab = nextTable;;) { + Node e; int n; + if (k == null || tab == null || (n = tab.length) == 0 || + (e = tabAt(tab, (n - 1) & h)) == null) + return null; + for (;;) { + int eh; K ek; + if ((eh = e.hash) == h && + ((ek = e.key) == k || (ek != null && k.equals(ek)))) + return e; + if (eh < 0) { + if (e instanceof ForwardingNode) { + tab = ((ForwardingNode)e).nextTable; + continue outer; + } + else + return e.find(h, k); + } + if ((e = e.next) == null) + return null; + } + } + } + } + + /** + * A place-holder node used in computeIfAbsent and compute + */ + static final class ReservationNode extends Node { + ReservationNode() { + super(RESERVED, null, null, null); + } + + Node find(int h, Object k) { + return null; + } + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + /** + * Returns the stamp bits for resizing a table of size n. + * Must be negative when shifted left by RESIZE_STAMP_SHIFT. + */ + static final int resizeStamp(int n) { + return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1)); + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final Node[] initTable() { + Node[] tab; int sc; + while ((tab = table) == null || tab.length == 0) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { + try { + if ((tab = table) == null || tab.length == 0) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + @SuppressWarnings("unchecked") + Node[] nt = (Node[])new Node[n]; + table = tab = nt; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * Adds to count, and if table is too small and not already + * resizing, initiates transfer. If already resizing, helps + * perform transfer if work is available. Rechecks occupancy + * after a transfer to see if another resize is already needed + * because resizings are lagging additions. + * + * @param x the count to add + * @param check if <0, don't check resize, if <= 1 only check if uncontended + */ + private final void addCount(long x, int check) { + CounterCell[] as; long b, s; + if ((as = counterCells) != null || + !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) { + CounterCell a; long v; int m; + boolean uncontended = true; + if (as == null || (m = as.length - 1) < 0 || + (a = as[ThreadLocalRandom.getProbe() & m]) == null || + !(uncontended = + U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) { + fullAddCount(x, uncontended); + return; + } + if (check <= 1) + return; + s = sumCount(); + } + if (check >= 0) { + Node[] tab, nt; int n, sc; + while (s >= (long)(sc = sizeCtl) && (tab = table) != null && + (n = tab.length) < MAXIMUM_CAPACITY) { + int rs = resizeStamp(n); + if (sc < 0) { + if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || + sc == rs + MAX_RESIZERS || (nt = nextTable) == null || + transferIndex <= 0) + break; + if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) + transfer(tab, nt); + } + else if (U.compareAndSwapInt(this, SIZECTL, sc, + (rs << RESIZE_STAMP_SHIFT) + 2)) + transfer(tab, null); + s = sumCount(); + } + } + } + + /** + * Helps transfer if a resize is in progress. 
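+     * Writers that encounter a bin whose head node has hash
+     * {@code MOVED} call this to join the resize instead of blocking
+     * until it completes.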
+ */ + final Node[] helpTransfer(Node[] tab, Node f) { + Node[] nextTab; int sc; + if (tab != null && (f instanceof ForwardingNode) && + (nextTab = ((ForwardingNode)f).nextTable) != null) { + int rs = resizeStamp(tab.length); + while (nextTab == nextTable && table == tab && + (sc = sizeCtl) < 0) { + if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || + sc == rs + MAX_RESIZERS || transferIndex <= 0) + break; + if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) { + transfer(tab, nextTab); + break; + } + } + return nextTab; + } + return table; + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + Node[] tab = table; int n; + if (tab == null || (n = tab.length) == 0) { + n = (sc > c) ? sc : c; + if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { + try { + if (table == tab) { + @SuppressWarnings("unchecked") + Node[] nt = (Node[])new Node[n]; + table = nt; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (tab == table) { + int rs = resizeStamp(n); + if (sc < 0) { + Node[] nt; + if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || + sc == rs + MAX_RESIZERS || (nt = nextTable) == null || + transferIndex <= 0) + break; + if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) + transfer(tab, nt); + } + else if (U.compareAndSwapInt(this, SIZECTL, sc, + (rs << RESIZE_STAMP_SHIFT) + 2)) + transfer(tab, null); + } + } + } + + /** + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. + */ + private final void transfer(Node[] tab, Node[] nextTab) { + int n = tab.length, stride; + if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) + stride = MIN_TRANSFER_STRIDE; // subdivide range + if (nextTab == null) { // initiating + try { + @SuppressWarnings("unchecked") + Node[] nt = (Node[])new Node[n << 1]; + nextTab = nt; + } catch (Throwable ex) { // try to cope with OOME + sizeCtl = Integer.MAX_VALUE; + return; + } + nextTable = nextTab; + transferIndex = n; + } + int nextn = nextTab.length; + ForwardingNode fwd = new ForwardingNode(nextTab); + boolean advance = true; + boolean finishing = false; // to ensure sweep before committing nextTab + for (int i = 0, bound = 0;;) { + Node f; int fh; + while (advance) { + int nextIndex, nextBound; + if (--i >= bound || finishing) + advance = false; + else if ((nextIndex = transferIndex) <= 0) { + i = -1; + advance = false; + } + else if (U.compareAndSwapInt + (this, TRANSFERINDEX, nextIndex, + nextBound = (nextIndex > stride ? 
+ nextIndex - stride : 0))) { + bound = nextBound; + i = nextIndex - 1; + advance = false; + } + } + if (i < 0 || i >= n || i + n >= nextn) { + int sc; + if (finishing) { + nextTable = null; + table = nextTab; + sizeCtl = (n << 1) - (n >>> 1); + return; + } + if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) { + if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT) + return; + finishing = advance = true; + i = n; // recheck before commit + } + } + else if ((f = tabAt(tab, i)) == null) + advance = casTabAt(tab, i, null, fwd); + else if ((fh = f.hash) == MOVED) + advance = true; // already processed + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + Node ln, hn; + if (fh >= 0) { + int runBit = fh & n; + Node lastRun = f; + for (Node p = f.next; p != null; p = p.next) { + int b = p.hash & n; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) { + ln = lastRun; + hn = null; + } + else { + hn = lastRun; + ln = null; + } + for (Node p = f; p != lastRun; p = p.next) { + int ph = p.hash; K pk = p.key; V pv = p.val; + if ((ph & n) == 0) + ln = new Node(ph, pk, pv, ln); + else + hn = new Node(ph, pk, pv, hn); + } + setTabAt(nextTab, i, ln); + setTabAt(nextTab, i + n, hn); + setTabAt(tab, i, fwd); + advance = true; + } + else if (f instanceof TreeBin) { + TreeBin t = (TreeBin)f; + TreeNode lo = null, loTail = null; + TreeNode hi = null, hiTail = null; + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash; + TreeNode p = new TreeNode + (h, e.key, e.val, null, null); + if ((h & n) == 0) { + if ((p.prev = loTail) == null) + lo = p; + else + loTail.next = p; + loTail = p; + ++lc; + } + else { + if ((p.prev = hiTail) == null) + hi = p; + else + hiTail.next = p; + hiTail = p; + ++hc; + } + } + ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) : + (hc != 0) ? new TreeBin(lo) : t; + hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) : + (lc != 0) ? new TreeBin(hi) : t; + setTabAt(nextTab, i, ln); + setTabAt(nextTab, i + n, hn); + setTabAt(tab, i, fwd); + advance = true; + } + } + } + } + } + } + + /* ---------------- Counter support -------------- */ + + /** + * A padded cell for distributing counts. Adapted from LongAdder + * and Striped64. See their internal docs for explanation. 
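+     * The {@code @sun.misc.Contended} annotation pads each cell onto
+     * its own cache line, avoiding false sharing between cells that
+     * different threads update concurrently.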
+ */ + @sun.misc.Contended static final class CounterCell { + volatile long value; + CounterCell(long x) { value = x; } + } + + final long sumCount() { + CounterCell[] as = counterCells; CounterCell a; + long sum = baseCount; + if (as != null) { + for (int i = 0; i < as.length; ++i) { + if ((a = as[i]) != null) + sum += a.value; + } + } + return sum; + } + + // See LongAdder version for explanation + private final void fullAddCount(long x, boolean wasUncontended) { + int h; + if ((h = ThreadLocalRandom.getProbe()) == 0) { + ThreadLocalRandom.localInit(); // force initialization + h = ThreadLocalRandom.getProbe(); + wasUncontended = true; + } + boolean collide = false; // True if last slot nonempty + for (;;) { + CounterCell[] as; CounterCell a; int n; long v; + if ((as = counterCells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (cellsBusy == 0) { // Try to attach new Cell + CounterCell r = new CounterCell(x); // Optimistic create + if (cellsBusy == 0 && + U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { + boolean created = false; + try { // Recheck under lock + CounterCell[] rs; int m, j; + if ((rs = counterCells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + cellsBusy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x)) + break; + else if (counterCells != as || n >= NCPU) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (cellsBusy == 0 && + U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { + try { + if (counterCells == as) {// Expand table unless stale + CounterCell[] rs = new CounterCell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + counterCells = rs; + } + } finally { + cellsBusy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h = ThreadLocalRandom.advanceProbe(h); + } + else if (cellsBusy == 0 && counterCells == as && + U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { + boolean init = false; + try { // Initialize table + if (counterCells == as) { + CounterCell[] rs = new CounterCell[2]; + rs[h & 1] = new CounterCell(x); + counterCells = rs; + init = true; + } + } finally { + cellsBusy = 0; + } + if (init) + break; + } + else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x)) + break; // Fall back on using base + } + } + + /* ---------------- Conversion from/to TreeBins -------------- */ + + /** + * Replaces all linked nodes in bin at given index unless table is + * too small, in which case resizes instead. + */ + private final void treeifyBin(Node[] tab, int index) { + Node b; int n, sc; + if (tab != null) { + if ((n = tab.length) < MIN_TREEIFY_CAPACITY) + tryPresize(n << 1); + else if ((b = tabAt(tab, index)) != null && b.hash >= 0) { + synchronized (b) { + if (tabAt(tab, index) == b) { + TreeNode hd = null, tl = null; + for (Node e = b; e != null; e = e.next) { + TreeNode p = + new TreeNode(e.hash, e.key, e.val, + null, null); + if ((p.prev = tl) == null) + hd = p; + else + tl.next = p; + tl = p; + } + setTabAt(tab, index, new TreeBin(hd)); + } + } + } + } + } + + /** + * Returns a list on non-TreeNodes replacing those in given list. 
+ */ + static Node untreeify(Node b) { + Node hd = null, tl = null; + for (Node q = b; q != null; q = q.next) { + Node p = new Node(q.hash, q.key, q.val, null); + if (tl == null) + hd = p; + else + tl.next = p; + tl = p; + } + return hd; + } + + /* ---------------- TreeNodes -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, K key, V val, Node next, + TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + + Node find(int h, Object k) { + return findTreeNode(h, k, null); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + final TreeNode findTreeNode(int h, Object k, Class kc) { + if (k != null) { + TreeNode p = this; + do { + int ph, dir; K pk; TreeNode q; + TreeNode pl = p.left, pr = p.right; + if ((ph = p.hash) > h) + p = pl; + else if (ph < h) + p = pr; + else if ((pk = p.key) == k || (pk != null && k.equals(pk))) + return p; + else if (pl == null) + p = pr; + else if (pr == null) + p = pl; + else if ((kc != null || + (kc = comparableClassFor(k)) != null) && + (dir = compareComparables(kc, k, pk)) != 0) + p = (dir < 0) ? pl : pr; + else if ((q = pr.findTreeNode(h, k, kc)) != null) + return q; + else + p = pl; + } while (p != null); + } + return null; + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * TreeNodes used at the heads of bins. TreeBins do not hold user + * keys or values, but instead point to list of TreeNodes and + * their root. They also maintain a parasitic read-write lock + * forcing writers (who hold bin lock) to wait for readers (who do + * not) to complete before tree restructuring operations. + */ + static final class TreeBin extends Node { + TreeNode root; + volatile TreeNode first; + volatile Thread waiter; + volatile int lockState; + // values for lockState + static final int WRITER = 1; // set while holding write lock + static final int WAITER = 2; // set when waiting for write lock + static final int READER = 4; // increment value for setting read lock + + /** + * Tie-breaking utility for ordering insertions when equal + * hashCodes and non-comparable. We don't require a total + * order, just a consistent insertion rule to maintain + * equivalence across rebalancings. Tie-breaking further than + * necessary simplifies testing a bit. + */ + static int tieBreakOrder(Object a, Object b) { + int d; + if (a == null || b == null || + (d = a.getClass().getName(). + compareTo(b.getClass().getName())) == 0) + d = (System.identityHashCode(a) <= System.identityHashCode(b) ? + -1 : 1); + return d; + } + + /** + * Creates bin with initial set of nodes headed by b. + */ + TreeBin(TreeNode b) { + super(TREEBIN, null, null, null); + this.first = b; + TreeNode r = null; + for (TreeNode x = b, next; x != null; x = next) { + next = (TreeNode)x.next; + x.left = x.right = null; + if (r == null) { + x.parent = null; + x.red = false; + r = x; + } + else { + K k = x.key; + int h = x.hash; + Class kc = null; + for (TreeNode p = r;;) { + int dir, ph; + K pk = p.key; + if ((ph = p.hash) > h) + dir = -1; + else if (ph < h) + dir = 1; + else if ((kc == null && + (kc = comparableClassFor(k)) == null) || + (dir = compareComparables(kc, k, pk)) == 0) + dir = tieBreakOrder(k, pk); + TreeNode xp = p; + if ((p = (dir <= 0) ? 
p.left : p.right) == null) { + x.parent = xp; + if (dir <= 0) + xp.left = x; + else + xp.right = x; + r = balanceInsertion(r, x); + break; + } + } + } + } + this.root = r; + assert checkInvariants(root); + } + + /** + * Acquires write lock for tree restructuring. + */ + private final void lockRoot() { + if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER)) + contendedLock(); // offload to separate method + } + + /** + * Releases write lock for tree restructuring. + */ + private final void unlockRoot() { + lockState = 0; + } + + /** + * Possibly blocks awaiting root lock. + */ + private final void contendedLock() { + boolean waiting = false; + for (int s;;) { + if (((s = lockState) & ~WAITER) == 0) { + if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) { + if (waiting) + waiter = null; + return; + } + } + else if ((s & WAITER) == 0) { + if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) { + waiting = true; + waiter = Thread.currentThread(); + } + } + else if (waiting) + LockSupport.park(this); + } + } + + /** + * Returns matching node or null if none. Tries to search + * using tree comparisons from root, but continues linear + * search when lock not available. + */ + final Node find(int h, Object k) { + if (k != null) { + for (Node e = first; e != null; ) { + int s; K ek; + if (((s = lockState) & (WAITER|WRITER)) != 0) { + if (e.hash == h && + ((ek = e.key) == k || (ek != null && k.equals(ek)))) + return e; + e = e.next; + } + else if (U.compareAndSwapInt(this, LOCKSTATE, s, + s + READER)) { + TreeNode r, p; + try { + p = ((r = root) == null ? null : + r.findTreeNode(h, k, null)); + } finally { + Thread w; + if (U.getAndAddInt(this, LOCKSTATE, -READER) == + (READER|WAITER) && (w = waiter) != null) + LockSupport.unpark(w); + } + return p; + } + } + } + return null; + } + + /** + * Finds or adds a node. + * @return null if added + */ + final TreeNode putTreeVal(int h, K k, V v) { + Class kc = null; + boolean searched = false; + for (TreeNode p = root;;) { + int dir, ph; K pk; + if (p == null) { + first = root = new TreeNode(h, k, v, null, null); + break; + } + else if ((ph = p.hash) > h) + dir = -1; + else if (ph < h) + dir = 1; + else if ((pk = p.key) == k || (pk != null && k.equals(pk))) + return p; + else if ((kc == null && + (kc = comparableClassFor(k)) == null) || + (dir = compareComparables(kc, k, pk)) == 0) { + if (!searched) { + TreeNode q, ch; + searched = true; + if (((ch = p.left) != null && + (q = ch.findTreeNode(h, k, kc)) != null) || + ((ch = p.right) != null && + (q = ch.findTreeNode(h, k, kc)) != null)) + return q; + } + dir = tieBreakOrder(k, pk); + } + + TreeNode xp = p; + if ((p = (dir <= 0) ? p.left : p.right) == null) { + TreeNode x, f = first; + first = x = new TreeNode(h, k, v, f, xp); + if (f != null) + f.prev = x; + if (dir <= 0) + xp.left = x; + else + xp.right = x; + if (!xp.red) + x.red = true; + else { + lockRoot(); + try { + root = balanceInsertion(root, x); + } finally { + unlockRoot(); + } + } + break; + } + } + assert checkInvariants(root); + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. 
+ * + * @return true if now too small, so should be untreeified + */ + final boolean removeTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; + TreeNode pred = p.prev; // unlink traversal pointers + TreeNode r, rl; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + if (first == null) { + root = null; + return true; + } + if ((r = root) == null || r.right == null || // too small + (rl = r.left) == null || rl.left == null) + return true; + lockRoot(); + try { + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + r = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + if (sr != null) + replacement = sr; + else + replacement = p; + } + else if (pl != null) + replacement = pl; + else if (pr != null) + replacement = pr; + else + replacement = p; + if (replacement != p) { + TreeNode pp = replacement.parent = p.parent; + if (pp == null) + r = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + + root = (p.red) ? r : balanceDeletion(r, replacement); + + if (p == replacement) { // detach pointers + TreeNode pp; + if ((pp = p.parent) != null) { + if (p == pp.left) + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } finally { + unlockRoot(); + } + assert checkInvariants(root); + return false; + } + + /* ------------------------------------------------------------ */ + // Red-black tree methods, all adapted from CLR + + static TreeNode rotateLeft(TreeNode root, + TreeNode p) { + TreeNode r, pp, rl; + if (p != null && (r = p.right) != null) { + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + (root = r).red = false; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + return root; + } + + static TreeNode rotateRight(TreeNode root, + TreeNode p) { + TreeNode l, pp, lr; + if (p != null && (l = p.left) != null) { + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + (root = l).red = false; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + return root; + } + + static TreeNode balanceInsertion(TreeNode root, + TreeNode x) { + x.red = true; + for (TreeNode xp, xpp, xppl, xppr;;) { + if ((xp = x.parent) == null) { + x.red = false; + return x; + } + else if (!xp.red || (xpp = xp.parent) == null) + return root; + if (xp == (xppl = xpp.left)) { + if ((xppr = xpp.right) != null && xppr.red) { + xppr.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + root = rotateLeft(root, x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + root = rotateRight(root, xpp); + } + } + } + } + else { + if (xppl != null && xppl.red) { + xppl.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + root = rotateRight(root, x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + root = rotateLeft(root, xpp); + } + } + } + } + } + } + + static TreeNode balanceDeletion(TreeNode root, + TreeNode x) { + for (TreeNode xp, xpl, xpr;;) { + if (x == null || x == root) + return root; + else if ((xp = x.parent) == null) { + x.red = false; + return x; + } + else if (x.red) { + x.red = false; + return root; + } + else if ((xpl = xp.left) == x) { + if ((xpr = xp.right) != null && xpr.red) { + xpr.red = false; + xp.red = true; + root = rotateLeft(root, xp); + xpr = (xp = x.parent) == null ? null : xp.right; + } + if (xpr == null) + x = xp; + else { + TreeNode sl = xpr.left, sr = xpr.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + xpr.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + xpr.red = true; + root = rotateRight(root, xpr); + xpr = (xp = x.parent) == null ? + null : xp.right; + } + if (xpr != null) { + xpr.red = (xp == null) ? false : xp.red; + if ((sr = xpr.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + root = rotateLeft(root, xp); + } + x = root; + } + } + } + else { // symmetric + if (xpl != null && xpl.red) { + xpl.red = false; + xp.red = true; + root = rotateRight(root, xp); + xpl = (xp = x.parent) == null ? null : xp.left; + } + if (xpl == null) + x = xp; + else { + TreeNode sl = xpl.left, sr = xpl.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + xpl.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + xpl.red = true; + root = rotateLeft(root, xpl); + xpl = (xp = x.parent) == null ? + null : xp.left; + } + if (xpl != null) { + xpl.red = (xp == null) ? 
false : xp.red; + if ((sl = xpl.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + root = rotateRight(root, xp); + } + x = root; + } + } + } + } + } + + /** + * Recursive invariant check + */ + static boolean checkInvariants(TreeNode t) { + TreeNode tp = t.parent, tl = t.left, tr = t.right, + tb = t.prev, tn = (TreeNode)t.next; + if (tb != null && tb.next != t) + return false; + if (tn != null && tn.prev != t) + return false; + if (tp != null && t != tp.left && t != tp.right) + return false; + if (tl != null && (tl.parent != t || tl.hash > t.hash)) + return false; + if (tr != null && (tr.parent != t || tr.hash < t.hash)) + return false; + if (t.red && tl != null && tl.red && tr != null && tr.red) + return false; + if (tl != null && !checkInvariants(tl)) + return false; + if (tr != null && !checkInvariants(tr)) + return false; + return true; + } + + private static final sun.misc.Unsafe U; + private static final long LOCKSTATE; + static { + try { + U = sun.misc.Unsafe.getUnsafe(); + Class k = TreeBin.class; + LOCKSTATE = U.objectFieldOffset + (k.getDeclaredField("lockState")); + } catch (Exception e) { + throw new Error(e); + } + } + } + + /* ----------------Table Traversal -------------- */ + + /** + * Records the table, its length, and current traversal index for a + * traverser that must process a region of a forwarded table before + * proceeding with current table. + */ + static final class TableStack { + int length; + int index; + Node[] tab; + TableStack next; + } + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and spliterators. + * + * Method advance visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + */ + static class Traverser { + Node[] tab; // current table; updated if resized + Node next; // the next entry to use + TableStack stack, spare; // to save/restore on ForwardingNodes + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + final int baseSize; // initial table size + + Traverser(Node[] tab, int size, int index, int limit) { + this.tab = tab; + this.baseSize = size; + this.baseIndex = this.index = index; + this.baseLimit = limit; + this.next = null; + } + + /** + * Advances if possible, returning next valid node, or null if none. 
+ */ + final Node advance() { + Node e; + if ((e = next) != null) + e = e.next; + for (;;) { + Node[] t; int i, n; // must use locals in checks + if (e != null) + return next = e; + if (baseIndex >= baseLimit || (t = tab) == null || + (n = t.length) <= (i = index) || i < 0) + return next = null; + if ((e = tabAt(t, i)) != null && e.hash < 0) { + if (e instanceof ForwardingNode) { + tab = ((ForwardingNode)e).nextTable; + e = null; + pushState(t, i, n); + continue; + } + else if (e instanceof TreeBin) + e = ((TreeBin)e).first; + else + e = null; + } + if (stack != null) + recoverState(n); + else if ((index = i + baseSize) >= n) + index = ++baseIndex; // visit upper slots if present + } + } + + /** + * Saves traversal state upon encountering a forwarding node. + */ + private void pushState(Node[] t, int i, int n) { + TableStack s = spare; // reuse if possible + if (s != null) + spare = s.next; + else + s = new TableStack(); + s.tab = t; + s.length = n; + s.index = i; + s.next = stack; + stack = s; + } + + /** + * Possibly pops traversal state. + * + * @param n length of current table + */ + private void recoverState(int n) { + TableStack s; int len; + while ((s = stack) != null && (index += (len = s.length)) >= n) { + n = len; + index = s.index; + tab = s.tab; + s.tab = null; + TableStack next = s.next; + s.next = spare; // save for reuse + stack = next; + spare = s; + } + if (s == null && (index += baseSize) >= n) + index = ++baseIndex; + } + } + + /** + * Base of key, value, and entry Iterators. Adds fields to + * Traverser to support iterator.remove. + */ + static class BaseIterator extends Traverser { + final ConcurrentHashMap map; + Node lastReturned; + BaseIterator(Node[] tab, int size, int index, int limit, + ConcurrentHashMap map) { + super(tab, size, index, limit); + this.map = map; + advance(); + } + + public final boolean hasNext() { return next != null; } + public final boolean hasMoreElements() { return next != null; } + + public final void remove() { + Node p; + if ((p = lastReturned) == null) + throw new IllegalStateException(); + lastReturned = null; + map.replaceNode(p.key, null, null); + } + } + + static final class KeyIterator extends BaseIterator + implements Iterator, Enumeration { + KeyIterator(Node[] tab, int index, int size, int limit, + ConcurrentHashMap map) { + super(tab, index, size, limit, map); + } + + public final K next() { + Node p; + if ((p = next) == null) + throw new NoSuchElementException(); + K k = p.key; + lastReturned = p; + advance(); + return k; + } + + public final K nextElement() { return next(); } + } + + static final class ValueIterator extends BaseIterator + implements Iterator, Enumeration { + ValueIterator(Node[] tab, int index, int size, int limit, + ConcurrentHashMap map) { + super(tab, index, size, limit, map); + } + + public final V next() { + Node p; + if ((p = next) == null) + throw new NoSuchElementException(); + V v = p.val; + lastReturned = p; + advance(); + return v; + } + + public final V nextElement() { return next(); } + } + + static final class EntryIterator extends BaseIterator + implements Iterator> { + EntryIterator(Node[] tab, int index, int size, int limit, + ConcurrentHashMap map) { + super(tab, index, size, limit, map); + } + + public final Map.Entry next() { + Node p; + if ((p = next) == null) + throw new NoSuchElementException(); + K k = p.key; + V v = p.val; + lastReturned = p; + advance(); + return new MapEntry(k, v, map); + } + } + + /** + * Exported Entry for EntryIterator + */ + static final class MapEntry implements 
Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMap map; + MapEntry(K key, V val, ConcurrentHashMap map) { + this.key = key; + this.val = val; + this.map = map; + } + public K getKey() { return key; } + public V getValue() { return val; } + public int hashCode() { return key.hashCode() ^ val.hashCode(); } + public String toString() { return key + "=" + val; } + + public boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed, in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + static final class KeySpliterator extends Traverser + implements Spliterator { + long est; // size estimate + KeySpliterator(Node[] tab, int size, int index, int limit, + long est) { + super(tab, size, index, limit); + this.est = est; + } + + public Spliterator trySplit() { + int i, f, h; + return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : + new KeySpliterator(tab, baseSize, baseLimit = h, + f, est >>>= 1); + } + + public void forEachRemaining(Consumer action) { + if (action == null) throw new NullPointerException(); + for (Node p; (p = advance()) != null;) + action.accept(p.key); + } + + public boolean tryAdvance(Consumer action) { + if (action == null) throw new NullPointerException(); + Node p; + if ((p = advance()) == null) + return false; + action.accept(p.key); + return true; + } + + public long estimateSize() { return est; } + + public int characteristics() { + return Spliterator.DISTINCT | Spliterator.CONCURRENT | + Spliterator.NONNULL; + } + } + + static final class ValueSpliterator extends Traverser + implements Spliterator { + long est; // size estimate + ValueSpliterator(Node[] tab, int size, int index, int limit, + long est) { + super(tab, size, index, limit); + this.est = est; + } + + public Spliterator trySplit() { + int i, f, h; + return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? 
null : + new ValueSpliterator(tab, baseSize, baseLimit = h, + f, est >>>= 1); + } + + public void forEachRemaining(Consumer action) { + if (action == null) throw new NullPointerException(); + for (Node p; (p = advance()) != null;) + action.accept(p.val); + } + + public boolean tryAdvance(Consumer action) { + if (action == null) throw new NullPointerException(); + Node p; + if ((p = advance()) == null) + return false; + action.accept(p.val); + return true; + } + + public long estimateSize() { return est; } + + public int characteristics() { + return Spliterator.CONCURRENT | Spliterator.NONNULL; + } + } + + static final class EntrySpliterator extends Traverser + implements Spliterator> { + final ConcurrentHashMap map; // To export MapEntry + long est; // size estimate + EntrySpliterator(Node[] tab, int size, int index, int limit, + long est, ConcurrentHashMap map) { + super(tab, size, index, limit); + this.map = map; + this.est = est; + } + + public Spliterator> trySplit() { + int i, f, h; + return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : + new EntrySpliterator(tab, baseSize, baseLimit = h, + f, est >>>= 1, map); + } + + public void forEachRemaining(Consumer> action) { + if (action == null) throw new NullPointerException(); + for (Node p; (p = advance()) != null; ) + action.accept(new MapEntry(p.key, p.val, map)); + } + + public boolean tryAdvance(Consumer> action) { + if (action == null) throw new NullPointerException(); + Node p; + if ((p = advance()) == null) + return false; + action.accept(new MapEntry(p.key, p.val, map)); + return true; + } + + public long estimateSize() { return est; } + + public int characteristics() { + return Spliterator.DISTINCT | Spliterator.CONCURRENT | + Spliterator.NONNULL; + } + } + + // Parallel bulk operations + + /** + * Computes initial batch value for bulk tasks. The returned value + * is approximately exp2 of the number of times (minus one) to + * split task by two before executing leaf action. This value is + * faster to compute and more convenient to use as a guide to + * splitting than is the depth, since it is used while dividing by + * two anyway. + */ + final int batchFor(long b) { + long n; + if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b) + return 0; + int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4 + return (b <= 0L || (n /= b) >= sp) ? sp : (int)n; + } + + /** + * Performs the given action for each (key, value). + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param action the action + * @since 1.8 + */ + public void forEach(long parallelismThreshold, + BiConsumer action) { + if (action == null) throw new NullPointerException(); + new ForEachMappingTask + (null, batchFor(parallelismThreshold), 0, 0, table, + action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each (key, value). 
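+     *
+     * <p>A minimal usage sketch, assuming a hypothetical
+     * {@code ConcurrentHashMap<String, Integer> map}; a threshold of
+     * {@code 1L} requests maximal parallelism, while
+     * {@code Long.MAX_VALUE} forces sequential execution:
+     * <pre> {@code
+     * map.forEach(1L,
+     *             (k, v) -> v > 0 ? k + "=" + v : null, // null -> entry skipped
+     *             System.out::println);
+     * }</pre>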
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @param the return type of the transformer + * @since 1.8 + */ + public void forEach(long parallelismThreshold, + BiFunction transformer, + Consumer action) { + if (transformer == null || action == null) + throw new NullPointerException(); + new ForEachTransformedMappingTask + (null, batchFor(parallelismThreshold), 0, 0, table, + transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each (key, value), or null if none. Upon + * success, further element processing is suppressed and the + * results of any other parallel invocations of the search + * function are ignored. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param searchFunction a function returning a non-null + * result on success, else null + * @param the return type of the search function + * @return a non-null result from applying the given search + * function on each (key, value), or null if none + * @since 1.8 + */ + public U search(long parallelismThreshold, + BiFunction searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchMappingsTask + (null, batchFor(parallelismThreshold), 0, 0, table, + searchFunction, new AtomicReference()).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined) + * @param reducer a commutative associative combining function + * @param the return type of the transformer + * @return the result of accumulating the given transformation + * of all (key, value) pairs + * @since 1.8 + */ + public U reduce(long parallelismThreshold, + BiFunction transformer, + BiFunction reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, and the given basis as an identity value. 
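+     *
+     * <p>A minimal usage sketch, assuming a hypothetical
+     * {@code ConcurrentHashMap<String, Integer> map}:
+     * <pre> {@code
+     * double sum = map.reduceToDouble(1L, (k, v) -> v.doubleValue(),
+     *                                 0.0, Double::sum);
+     * }</pre>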
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all (key, value) pairs + * @since 1.8 + */ + public double reduceToDouble(long parallelismThreshold, + ToDoubleBiFunction transformer, + double basis, + DoubleBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsToDoubleTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all (key, value) pairs + * @since 1.8 + */ + public long reduceToLong(long parallelismThreshold, + ToLongBiFunction transformer, + long basis, + LongBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsToLongTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all (key, value) pairs + * @since 1.8 + */ + public int reduceToInt(long parallelismThreshold, + ToIntBiFunction transformer, + int basis, + IntBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsToIntTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Performs the given action for each key. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param action the action + * @since 1.8 + */ + public void forEachKey(long parallelismThreshold, + Consumer action) { + if (action == null) throw new NullPointerException(); + new ForEachKeyTask + (null, batchFor(parallelismThreshold), 0, 0, table, + action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each key. 
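+     *
+     * <p>A minimal usage sketch, assuming a hypothetical
+     * {@code ConcurrentHashMap<String, Integer> map}:
+     * <pre> {@code
+     * map.forEachKey(1L, String::toUpperCase, System.out::println);
+     * }</pre>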
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @param the return type of the transformer + * @since 1.8 + */ + public void forEachKey(long parallelismThreshold, + Function transformer, + Consumer action) { + if (transformer == null || action == null) + throw new NullPointerException(); + new ForEachTransformedKeyTask + (null, batchFor(parallelismThreshold), 0, 0, table, + transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each key, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param searchFunction a function returning a non-null + * result on success, else null + * @param the return type of the search function + * @return a non-null result from applying the given search + * function on each key, or null if none + * @since 1.8 + */ + public U searchKeys(long parallelismThreshold, + Function searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchKeysTask + (null, batchFor(parallelismThreshold), 0, 0, table, + searchFunction, new AtomicReference()).invoke(); + } + + /** + * Returns the result of accumulating all keys using the given + * reducer to combine values, or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param reducer a commutative associative combining function + * @return the result of accumulating all keys using the given + * reducer to combine values, or null if none + * @since 1.8 + */ + public K reduceKeys(long parallelismThreshold, + BiFunction reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceKeysTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, or + * null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined) + * @param reducer a commutative associative combining function + * @param the return type of the transformer + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public U reduceKeys(long parallelismThreshold, + Function transformer, + BiFunction reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. 
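+     *
+     * <p>A minimal usage sketch, assuming a hypothetical
+     * {@code ConcurrentHashMap<String, Integer> map}; sums the key
+     * lengths:
+     * <pre> {@code
+     * double total = map.reduceKeysToDouble(1L, k -> (double) k.length(),
+     *                                       0.0, Double::sum);
+     * }</pre>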
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public double reduceKeysToDouble(long parallelismThreshold, + ToDoubleFunction transformer, + double basis, + DoubleBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToDoubleTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public long reduceKeysToLong(long parallelismThreshold, + ToLongFunction transformer, + long basis, + LongBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToLongTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public int reduceKeysToInt(long parallelismThreshold, + ToIntFunction transformer, + int basis, + IntBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToIntTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Performs the given action for each value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param action the action + * @since 1.8 + */ + public void forEachValue(long parallelismThreshold, + Consumer action) { + if (action == null) + throw new NullPointerException(); + new ForEachValueTask + (null, batchFor(parallelismThreshold), 0, 0, table, + action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each value. 
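+     *
+     * <p>For example (names are illustrative only), non-positive
+     * values can be ignored by mapping them to null:
+     * <pre> {@code
+     * counts.forEachValue(100L,
+     *     v -> v > 0 ? v.toString() : null,  // null => skipped
+     *     s -> System.out.println(s));
+     * }</pre>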
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @param the return type of the transformer + * @since 1.8 + */ + public void forEachValue(long parallelismThreshold, + Function transformer, + Consumer action) { + if (transformer == null || action == null) + throw new NullPointerException(); + new ForEachTransformedValueTask + (null, batchFor(parallelismThreshold), 0, 0, table, + transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each value, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param searchFunction a function returning a non-null + * result on success, else null + * @param the return type of the search function + * @return a non-null result from applying the given search + * function on each value, or null if none + * @since 1.8 + */ + public U searchValues(long parallelismThreshold, + Function searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchValuesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + searchFunction, new AtomicReference()).invoke(); + } + + /** + * Returns the result of accumulating all values using the + * given reducer to combine values, or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param reducer a commutative associative combining function + * @return the result of accumulating all values + * @since 1.8 + */ + public V reduceValues(long parallelismThreshold, + BiFunction reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceValuesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, or + * null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined) + * @param reducer a commutative associative combining function + * @param the return type of the transformer + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public U reduceValues(long parallelismThreshold, + Function transformer, + BiFunction reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. 
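+     *
+     * <p>For example (assuming a hypothetical map {@code prices} with
+     * {@code Double} values), the values can be summed in parallel:
+     * <pre> {@code
+     * double sum = prices.reduceValuesToDouble(100L,
+     *     Double::doubleValue, 0.0, Double::sum);
+     * }</pre>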
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public double reduceValuesToDouble(long parallelismThreshold, + ToDoubleFunction transformer, + double basis, + DoubleBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToDoubleTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public long reduceValuesToLong(long parallelismThreshold, + ToLongFunction transformer, + long basis, + LongBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToLongTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public int reduceValuesToInt(long parallelismThreshold, + ToIntFunction transformer, + int basis, + IntBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToIntTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Performs the given action for each entry. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param action the action + * @since 1.8 + */ + public void forEachEntry(long parallelismThreshold, + Consumer> action) { + if (action == null) throw new NullPointerException(); + new ForEachEntryTask(null, batchFor(parallelismThreshold), 0, 0, table, + action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each entry. 
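+     *
+     * <p>For example (an illustrative sketch over the hypothetical
+     * {@code counts} map), entries can be screened by the transformer
+     * before the action runs:
+     * <pre> {@code
+     * counts.forEachEntry(100L,
+     *     e -> e.getValue() > 9 ? e.getKey() : null,  // "hot" keys only
+     *     k -> System.out.println(k));
+     * }</pre>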
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @param the return type of the transformer + * @since 1.8 + */ + public void forEachEntry(long parallelismThreshold, + Function, ? extends U> transformer, + Consumer action) { + if (transformer == null || action == null) + throw new NullPointerException(); + new ForEachTransformedEntryTask + (null, batchFor(parallelismThreshold), 0, 0, table, + transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each entry, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param searchFunction a function returning a non-null + * result on success, else null + * @param the return type of the search function + * @return a non-null result from applying the given search + * function on each entry, or null if none + * @since 1.8 + */ + public U searchEntries(long parallelismThreshold, + Function, ? extends U> searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchEntriesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + searchFunction, new AtomicReference()).invoke(); + } + + /** + * Returns the result of accumulating all entries using the + * given reducer to combine values, or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param reducer a commutative associative combining function + * @return the result of accumulating all entries + * @since 1.8 + */ + public Map.Entry reduceEntries(long parallelismThreshold, + BiFunction, Map.Entry, ? extends Map.Entry> reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceEntriesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined) + * @param reducer a commutative associative combining function + * @param the return type of the transformer + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public U reduceEntries(long parallelismThreshold, + Function, ? extends U> transformer, + BiFunction reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. 
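+     *
+     * <p>For example (names hypothetical), a weighted total over all
+     * entries:
+     * <pre> {@code
+     * double weighted = counts.reduceEntriesToDouble(100L,
+     *     e -> e.getKey().length() * e.getValue(),
+     *     0.0, Double::sum);
+     * }</pre>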
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public double reduceEntriesToDouble(long parallelismThreshold, + ToDoubleFunction> transformer, + double basis, + DoubleBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToDoubleTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public long reduceEntriesToLong(long parallelismThreshold, + ToLongFunction> transformer, + long basis, + LongBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToLongTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public int reduceEntriesToInt(long parallelismThreshold, + ToIntFunction> transformer, + int basis, + IntBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToIntTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + abstract static class CollectionView + implements Collection, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + final ConcurrentHashMap map; + CollectionView(ConcurrentHashMap map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMap getMap() { return map; } + + /** + * Removes all of the elements from this view, by removing all + * the mappings from the map backing this view. 
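+     *
+     * <p>For example (map name hypothetical), clearing any view
+     * empties the backing map as well:
+     * <pre> {@code
+     * counts.keySet().clear();  // counts.isEmpty() is now true
+     * }</pre>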
+ */ + public final void clear() { map.clear(); } + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + + // implementations below rely on concrete classes supplying these + // abstract methods + /** + * Returns an iterator over the elements in this collection. + * + *
The returned iterator is + * weakly consistent. + * + * @return an iterator over the elements in this collection + */ + public abstract Iterator iterator(); + public abstract boolean contains(Object o); + public abstract boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + for (E e : this) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = e; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") + public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + for (E e : this) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)e; + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + /** + * Returns a string representation of this collection. + * The string representation consists of the string representations + * of the collection's elements in the order they are returned by + * its iterator, enclosed in square brackets ({@code "[]"}). + * Adjacent elements are separated by the characters {@code ", "} + * (comma and space). Elements are converted to strings as by + * {@link String#valueOf(Object)}. + * + * @return a string representation of this collection + */ + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Object e : c) { + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + if (c == null) throw new NullPointerException(); + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + if (c == null) throw new NullPointerException(); + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMap as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. + * See {@link #keySet() keySet()}, + * {@link #keySet(Object) keySet(V)}, + * {@link #newKeySet() newKeySet()}, + * {@link #newKeySet(int) newKeySet(int)}. 
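+     *
+     * <p>For example (an illustrative sketch), {@link #newKeySet()}
+     * yields a concurrent {@link Set} backed by a map in which every
+     * key is bound to a common value:
+     * <pre> {@code
+     * Set<String> seen = ConcurrentHashMap.newKeySet();
+     * boolean first = seen.add("x");   // true on first insertion
+     * }</pre>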
+ * + * @since 1.8 + */ + public static class KeySetView extends CollectionView + implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMap map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported + */ + public V getMappedValue() { return value; } + + /** + * {@inheritDoc} + * @throws NullPointerException if the specified key is null + */ + public boolean contains(Object o) { return map.containsKey(o); } + + /** + * Removes the key from this map view, by removing the key (and its + * corresponding value) from the backing map. This method does + * nothing if the key is not in the map. + * + * @param o the key to be removed from the backing map + * @return {@code true} if the backing map contained the specified key + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * @return an iterator over the keys of the backing map + */ + public Iterator iterator() { + Node[] t; + ConcurrentHashMap m = map; + int f = (t = m.table) == null ? 0 : t.length; + return new KeyIterator(t, f, 0, f, m); + } + + /** + * Adds the specified key to this set view by mapping the key to + * the default mapped value in the backing map, if defined. + * + * @param e key to be added + * @return {@code true} if this set changed as a result of the call + * @throws NullPointerException if the specified key is null + * @throws UnsupportedOperationException if no default mapped value + * for additions was provided + */ + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + return map.putVal(e, v, true) == null; + } + + /** + * Adds all of the elements in the specified collection to this set, + * as if by calling {@link #add} on each one. + * + * @param c the elements to be inserted into this set + * @return {@code true} if this set changed as a result of the call + * @throws NullPointerException if the collection or any of its + * elements are {@code null} + * @throws UnsupportedOperationException if no default mapped value + * for additions was provided + */ + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (map.putVal(e, v, true) == null) + added = true; + } + return added; + } + + public int hashCode() { + int h = 0; + for (K e : this) + h += e.hashCode(); + return h; + } + + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + + public Spliterator spliterator() { + Node[] t; + ConcurrentHashMap m = map; + long n = m.sumCount(); + int f = (t = m.table) == null ? 0 : t.length; + return new KeySpliterator(t, f, 0, f, n < 0L ? 0L : n); + } + + public void forEach(Consumer action) { + if (action == null) throw new NullPointerException(); + Node[] t; + if ((t = map.table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) + action.accept(p.key); + } + } + } + + /** + * A view of a ConcurrentHashMap as a {@link Collection} of + * values, in which additions are disabled. 
This class cannot be + * directly instantiated. See {@link #values()}. + */ + static final class ValuesView extends CollectionView + implements Collection, java.io.Serializable { + private static final long serialVersionUID = 2249069246763182397L; + ValuesView(ConcurrentHashMap map) { super(map); } + public final boolean contains(Object o) { + return map.containsValue(o); + } + + public final boolean remove(Object o) { + if (o != null) { + for (Iterator it = iterator(); it.hasNext();) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + public final Iterator iterator() { + ConcurrentHashMap m = map; + Node[] t; + int f = (t = m.table) == null ? 0 : t.length; + return new ValueIterator(t, f, 0, f, m); + } + + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public Spliterator spliterator() { + Node[] t; + ConcurrentHashMap m = map; + long n = m.sumCount(); + int f = (t = m.table) == null ? 0 : t.length; + return new ValueSpliterator(t, f, 0, f, n < 0L ? 0L : n); + } + + public void forEach(Consumer action) { + if (action == null) throw new NullPointerException(); + Node[] t; + if ((t = map.table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) + action.accept(p.val); + } + } + } + + /** + * A view of a ConcurrentHashMap as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet()}. + */ + static final class EntrySetView extends CollectionView> + implements Set>, java.io.Serializable { + private static final long serialVersionUID = 2249069246763182397L; + EntrySetView(ConcurrentHashMap map) { super(map); } + + public boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + + public boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * @return an iterator over the entries of the backing map + */ + public Iterator> iterator() { + ConcurrentHashMap m = map; + Node[] t; + int f = (t = m.table) == null ? 0 : t.length; + return new EntryIterator(t, f, 0, f, m); + } + + public boolean add(Entry e) { + return map.putVal(e.getKey(), e.getValue(), false) == null; + } + + public boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + + public final int hashCode() { + int h = 0; + Node[] t; + if ((t = map.table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) { + h += p.hashCode(); + } + } + return h; + } + + public final boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + + public Spliterator> spliterator() { + Node[] t; + ConcurrentHashMap m = map; + long n = m.sumCount(); + int f = (t = m.table) == null ? 0 : t.length; + return new EntrySpliterator(t, f, 0, f, n < 0L ? 
0L : n, m); + } + + public void forEach(Consumer> action) { + if (action == null) throw new NullPointerException(); + Node[] t; + if ((t = map.table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) + action.accept(new MapEntry(p.key, p.val, map)); + } + } + + } + + // ------------------------------------------------------- + + /** + * Base class for bulk tasks. Repeats some fields and code from + * class Traverser, because we need to subclass CountedCompleter. + */ + @SuppressWarnings("serial") + abstract static class BulkTask extends CountedCompleter { + Node[] tab; // same as Traverser + Node next; + TableStack stack, spare; + int index; + int baseIndex; + int baseLimit; + final int baseSize; + int batch; // split control + + BulkTask(BulkTask par, int b, int i, int f, Node[] t) { + super(par); + this.batch = b; + this.index = this.baseIndex = i; + if ((this.tab = t) == null) + this.baseSize = this.baseLimit = 0; + else if (par == null) + this.baseSize = this.baseLimit = t.length; + else { + this.baseLimit = f; + this.baseSize = par.baseSize; + } + } + + /** + * Same as Traverser version + */ + final Node advance() { + Node e; + if ((e = next) != null) + e = e.next; + for (;;) { + Node[] t; int i, n; + if (e != null) + return next = e; + if (baseIndex >= baseLimit || (t = tab) == null || + (n = t.length) <= (i = index) || i < 0) + return next = null; + if ((e = tabAt(t, i)) != null && e.hash < 0) { + if (e instanceof ForwardingNode) { + tab = ((ForwardingNode)e).nextTable; + e = null; + pushState(t, i, n); + continue; + } + else if (e instanceof TreeBin) + e = ((TreeBin)e).first; + else + e = null; + } + if (stack != null) + recoverState(n); + else if ((index = i + baseSize) >= n) + index = ++baseIndex; + } + } + + private void pushState(Node[] t, int i, int n) { + TableStack s = spare; + if (s != null) + spare = s.next; + else + s = new TableStack(); + s.tab = t; + s.length = n; + s.index = i; + s.next = stack; + stack = s; + } + + private void recoverState(int n) { + TableStack s; int len; + while ((s = stack) != null && (index += (len = s.length)) >= n) { + n = len; + index = s.index; + tab = s.tab; + s.tab = null; + TableStack next = s.next; + s.next = spare; // save for reuse + stack = next; + spare = s; + } + if (s == null && (index += baseSize) >= n) + index = ++baseIndex; + } + } + + /* + * Task classes. Coded in a regular but ugly format/style to + * simplify checks that each variant differs in the right way from + * others. The null screenings exist because compilers cannot tell + * that we've already null-checked task arguments, so we force + * simplest hoisted bypass to help avoid convoluted traps. 
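+     *
+     * As a reading aid: every compute() below follows the same split
+     * pattern. While the batch budget allows, it halves the remaining
+     * [baseIndex, baseLimit) range, forks a sibling task for the upper
+     * half, and keeps the lower half; it then scans its own slice with
+     * advance(), and either propagates completion (forEach/search
+     * tasks) or merges per-task results up the completer tree (reduce
+     * tasks).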
+ */ + @SuppressWarnings("serial") + static final class ForEachKeyTask + extends BulkTask { + final Consumer action; + ForEachKeyTask + (BulkTask p, int b, int i, int f, Node[] t, + Consumer action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final Consumer action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachKeyTask + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node p; (p = advance()) != null;) + action.accept(p.key); + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") + static final class ForEachValueTask + extends BulkTask { + final Consumer action; + ForEachValueTask + (BulkTask p, int b, int i, int f, Node[] t, + Consumer action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final Consumer action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachValueTask + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node p; (p = advance()) != null;) + action.accept(p.val); + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") + static final class ForEachEntryTask + extends BulkTask { + final Consumer> action; + ForEachEntryTask + (BulkTask p, int b, int i, int f, Node[] t, + Consumer> action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final Consumer> action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachEntryTask + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node p; (p = advance()) != null; ) + action.accept(p); + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") + static final class ForEachMappingTask + extends BulkTask { + final BiConsumer action; + ForEachMappingTask + (BulkTask p, int b, int i, int f, Node[] t, + BiConsumer action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final BiConsumer action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachMappingTask + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node p; (p = advance()) != null; ) + action.accept(p.key, p.val); + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") + static final class ForEachTransformedKeyTask + extends BulkTask { + final Function transformer; + final Consumer action; + ForEachTransformedKeyTask + (BulkTask p, int b, int i, int f, Node[] t, + Function transformer, Consumer action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final Function transformer; + final Consumer action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedKeyTask + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p.key)) != null) + action.accept(u); + } + propagateCompletion(); + } + } 
+ } + + @SuppressWarnings("serial") + static final class ForEachTransformedValueTask + extends BulkTask { + final Function transformer; + final Consumer action; + ForEachTransformedValueTask + (BulkTask p, int b, int i, int f, Node[] t, + Function transformer, Consumer action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final Function transformer; + final Consumer action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedValueTask + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p.val)) != null) + action.accept(u); + } + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") + static final class ForEachTransformedEntryTask + extends BulkTask { + final Function, ? extends U> transformer; + final Consumer action; + ForEachTransformedEntryTask + (BulkTask p, int b, int i, int f, Node[] t, + Function, ? extends U> transformer, Consumer action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final Function, ? extends U> transformer; + final Consumer action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedEntryTask + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p)) != null) + action.accept(u); + } + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") + static final class ForEachTransformedMappingTask + extends BulkTask { + final BiFunction transformer; + final Consumer action; + ForEachTransformedMappingTask + (BulkTask p, int b, int i, int f, Node[] t, + BiFunction transformer, + Consumer action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final BiFunction transformer; + final Consumer action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedMappingTask + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p.key, p.val)) != null) + action.accept(u); + } + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") + static final class SearchKeysTask + extends BulkTask { + final Function searchFunction; + final AtomicReference result; + SearchKeysTask + (BulkTask p, int b, int i, int f, Node[] t, + Function searchFunction, + AtomicReference result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final Function searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; 
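+                     // no result yet: fork a search over the upper half
+                     // of the remaining range and keep the lower half here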
+ addToPendingCount(1); + new SearchKeysTask + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply(p.key)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class SearchValuesTask + extends BulkTask { + final Function searchFunction; + final AtomicReference result; + SearchValuesTask + (BulkTask p, int b, int i, int f, Node[] t, + Function searchFunction, + AtomicReference result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final Function searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; + addToPendingCount(1); + new SearchValuesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply(p.val)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class SearchEntriesTask + extends BulkTask { + final Function, ? extends U> searchFunction; + final AtomicReference result; + SearchEntriesTask + (BulkTask p, int b, int i, int f, Node[] t, + Function, ? extends U> searchFunction, + AtomicReference result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final Function, ? 
extends U> searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; + addToPendingCount(1); + new SearchEntriesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply(p)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + return; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class SearchMappingsTask + extends BulkTask { + final BiFunction searchFunction; + final AtomicReference result; + SearchMappingsTask + (BulkTask p, int b, int i, int f, Node[] t, + BiFunction searchFunction, + AtomicReference result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final BiFunction searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; + addToPendingCount(1); + new SearchMappingsTask + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply(p.key, p.val)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class ReduceKeysTask + extends BulkTask { + final BiFunction reducer; + K result; + ReduceKeysTask rights, nextRight; + ReduceKeysTask + (BulkTask p, int b, int i, int f, Node[] t, + ReduceKeysTask nextRight, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.reducer = reducer; + } + public final K getRawResult() { return result; } + public final void compute() { + final BiFunction reducer; + if ((reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new ReduceKeysTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, reducer)).fork(); + } + K r = null; + for (Node p; (p = advance()) != null; ) { + K u = p.key; + r = (r == null) ? u : u == null ? r : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + ReduceKeysTask + t = (ReduceKeysTask)c, + s = t.rights; + while (s != null) { + K tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? 
sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class ReduceValuesTask + extends BulkTask { + final BiFunction reducer; + V result; + ReduceValuesTask rights, nextRight; + ReduceValuesTask + (BulkTask p, int b, int i, int f, Node[] t, + ReduceValuesTask nextRight, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.reducer = reducer; + } + public final V getRawResult() { return result; } + public final void compute() { + final BiFunction reducer; + if ((reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new ReduceValuesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, reducer)).fork(); + } + V r = null; + for (Node p; (p = advance()) != null; ) { + V v = p.val; + r = (r == null) ? v : reducer.apply(r, v); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + ReduceValuesTask + t = (ReduceValuesTask)c, + s = t.rights; + while (s != null) { + V tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class ReduceEntriesTask + extends BulkTask> { + final BiFunction, Map.Entry, ? extends Map.Entry> reducer; + Map.Entry result; + ReduceEntriesTask rights, nextRight; + ReduceEntriesTask + (BulkTask p, int b, int i, int f, Node[] t, + ReduceEntriesTask nextRight, + BiFunction, Map.Entry, ? extends Map.Entry> reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.reducer = reducer; + } + public final Map.Entry getRawResult() { return result; } + public final void compute() { + final BiFunction, Map.Entry, ? extends Map.Entry> reducer; + if ((reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new ReduceEntriesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, reducer)).fork(); + } + Map.Entry r = null; + for (Node p; (p = advance()) != null; ) + r = (r == null) ? p : reducer.apply(r, p); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + ReduceEntriesTask + t = (ReduceEntriesTask)c, + s = t.rights; + while (s != null) { + Map.Entry tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? 
sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceKeysTask + extends BulkTask { + final Function transformer; + final BiFunction reducer; + U result; + MapReduceKeysTask rights, nextRight; + MapReduceKeysTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceKeysTask nextRight, + Function transformer, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final Function transformer; + final BiFunction reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceKeysTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p.key)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceKeysTask + t = (MapReduceKeysTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceValuesTask + extends BulkTask { + final Function transformer; + final BiFunction reducer; + U result; + MapReduceValuesTask rights, nextRight; + MapReduceValuesTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceValuesTask nextRight, + Function transformer, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final Function transformer; + final BiFunction reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p.val)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceValuesTask + t = (MapReduceValuesTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceEntriesTask + extends BulkTask { + final Function, ? extends U> transformer; + final BiFunction reducer; + U result; + MapReduceEntriesTask rights, nextRight; + MapReduceEntriesTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceEntriesTask nextRight, + Function, ? 
extends U> transformer, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final Function, ? extends U> transformer; + final BiFunction reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceEntriesTask + t = (MapReduceEntriesTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceMappingsTask + extends BulkTask { + final BiFunction transformer; + final BiFunction reducer; + U result; + MapReduceMappingsTask rights, nextRight; + MapReduceMappingsTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceMappingsTask nextRight, + BiFunction transformer, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final BiFunction transformer; + final BiFunction reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p.key, p.val)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceMappingsTask + t = (MapReduceMappingsTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? 
sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceKeysToDoubleTask + extends BulkTask { + final ToDoubleFunction transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceKeysToDoubleTask rights, nextRight; + MapReduceKeysToDoubleTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceKeysToDoubleTask nextRight, + ToDoubleFunction transformer, + double basis, + DoubleBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleFunction transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceKeysToDoubleTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceKeysToDoubleTask + t = (MapReduceKeysToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceValuesToDoubleTask + extends BulkTask { + final ToDoubleFunction transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceValuesToDoubleTask rights, nextRight; + MapReduceValuesToDoubleTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceValuesToDoubleTask nextRight, + ToDoubleFunction transformer, + double basis, + DoubleBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleFunction transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesToDoubleTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceValuesToDoubleTask + t = (MapReduceValuesToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceEntriesToDoubleTask + extends BulkTask { + final ToDoubleFunction> transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceEntriesToDoubleTask rights, nextRight; + MapReduceEntriesToDoubleTask + (BulkTask p, int b, int 
i, int f, Node[] t, + MapReduceEntriesToDoubleTask nextRight, + ToDoubleFunction> transformer, + double basis, + DoubleBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleFunction> transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesToDoubleTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble(p)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceEntriesToDoubleTask + t = (MapReduceEntriesToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceMappingsToDoubleTask + extends BulkTask { + final ToDoubleBiFunction transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceMappingsToDoubleTask rights, nextRight; + MapReduceMappingsToDoubleTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceMappingsToDoubleTask nextRight, + ToDoubleBiFunction transformer, + double basis, + DoubleBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleBiFunction transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsToDoubleTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceMappingsToDoubleTask + t = (MapReduceMappingsToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceKeysToLongTask + extends BulkTask { + final ToLongFunction transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceKeysToLongTask rights, nextRight; + MapReduceKeysToLongTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceKeysToLongTask nextRight, + ToLongFunction transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongFunction 
transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceKeysToLongTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsLong(r, transformer.applyAsLong(p.key)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceKeysToLongTask + t = (MapReduceKeysToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceValuesToLongTask + extends BulkTask { + final ToLongFunction transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceValuesToLongTask rights, nextRight; + MapReduceValuesToLongTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceValuesToLongTask nextRight, + ToLongFunction transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongFunction transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesToLongTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsLong(r, transformer.applyAsLong(p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceValuesToLongTask + t = (MapReduceValuesToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceEntriesToLongTask + extends BulkTask { + final ToLongFunction> transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceEntriesToLongTask rights, nextRight; + MapReduceEntriesToLongTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceEntriesToLongTask nextRight, + ToLongFunction> transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongFunction> transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesToLongTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = 
reducer.applyAsLong(r, transformer.applyAsLong(p)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceEntriesToLongTask + t = (MapReduceEntriesToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceMappingsToLongTask + extends BulkTask { + final ToLongBiFunction transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceMappingsToLongTask rights, nextRight; + MapReduceMappingsToLongTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceMappingsToLongTask nextRight, + ToLongBiFunction transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongBiFunction transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsToLongTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceMappingsToLongTask + t = (MapReduceMappingsToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceKeysToIntTask + extends BulkTask { + final ToIntFunction transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceKeysToIntTask rights, nextRight; + MapReduceKeysToIntTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceKeysToIntTask nextRight, + ToIntFunction transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + public final void compute() { + final ToIntFunction transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceKeysToIntTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt(p.key)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceKeysToIntTask + t = (MapReduceKeysToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceValuesToIntTask + extends 
BulkTask { + final ToIntFunction transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceValuesToIntTask rights, nextRight; + MapReduceValuesToIntTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceValuesToIntTask nextRight, + ToIntFunction transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + public final void compute() { + final ToIntFunction transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesToIntTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt(p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceValuesToIntTask + t = (MapReduceValuesToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceEntriesToIntTask + extends BulkTask { + final ToIntFunction> transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceEntriesToIntTask rights, nextRight; + MapReduceEntriesToIntTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceEntriesToIntTask nextRight, + ToIntFunction> transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + public final void compute() { + final ToIntFunction> transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesToIntTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt(p)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceEntriesToIntTask + t = (MapReduceEntriesToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") + static final class MapReduceMappingsToIntTask + extends BulkTask { + final ToIntBiFunction transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceMappingsToIntTask rights, nextRight; + MapReduceMappingsToIntTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceMappingsToIntTask nextRight, + ToIntBiFunction transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public 
final Integer getRawResult() { return result; } + public final void compute() { + final ToIntBiFunction transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsToIntTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + @SuppressWarnings("unchecked") + MapReduceMappingsToIntTask + t = (MapReduceMappingsToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe U; + private static final long SIZECTL; + private static final long TRANSFERINDEX; + private static final long BASECOUNT; + private static final long CELLSBUSY; + private static final long CELLVALUE; + private static final long ABASE; + private static final int ASHIFT; + + static { + try { + U = sun.misc.Unsafe.getUnsafe(); + Class k = ConcurrentHashMap.class; + SIZECTL = U.objectFieldOffset + (k.getDeclaredField("sizeCtl")); + TRANSFERINDEX = U.objectFieldOffset + (k.getDeclaredField("transferIndex")); + BASECOUNT = U.objectFieldOffset + (k.getDeclaredField("baseCount")); + CELLSBUSY = U.objectFieldOffset + (k.getDeclaredField("cellsBusy")); + Class ck = CounterCell.class; + CELLVALUE = U.objectFieldOffset + (ck.getDeclaredField("value")); + Class ak = Node[].class; + ABASE = U.arrayBaseOffset(ak); + int scale = U.arrayIndexScale(ak); + if ((scale & (scale - 1)) != 0) + throw new Error("data type scale not a power of two"); + ASHIFT = 31 - Integer.numberOfLeadingZeros(scale); + } catch (Exception e) { + throw new Error(e); + } + } } diff --git a/src/ConcurrentMap.java b/src/ConcurrentMap.java new file mode 100644 index 0000000..a56e000 --- /dev/null +++ b/src/ConcurrentMap.java @@ -0,0 +1,517 @@ +/* + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +/* + * + * + * + * + * + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package java.util.concurrent; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Function; + +/** + * A {@link java.util.Map} providing thread safety and atomicity + * guarantees. + * + *
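+ * <p>For illustration, a minimal sketch of the kind of check-then-act
+ * update this interface makes atomic (assuming a {@code ConcurrentHashMap}
+ * instance; names are illustrative):
+ * <pre> {@code
+ * ConcurrentMap<String, Integer> hits = new ConcurrentHashMap<>();
+ * // atomic under the contract of merge; a plain get-then-put would race
+ * hits.merge("page", 1, Integer::sum);
+ * }</pre>
+ *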

+ * <p>Memory consistency effects: As with other concurrent
+ * collections, actions in a thread prior to placing an object into a
+ * {@code ConcurrentMap} as a key or value
+ * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
+ * actions subsequent to the access or removal of that object from
+ * the {@code ConcurrentMap} in another thread.
+ *
+ * <p>This interface is a member of the
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ *
+ * @since 1.5
+ * @author Doug Lea
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ */
+public interface ConcurrentMap<K, V> extends Map<K, V> {
+
+    /**
+     * {@inheritDoc}
+     *
+     * @implNote This implementation assumes that the ConcurrentMap cannot
+     * contain null values and {@code get()} returning null unambiguously means
+     * the key is absent. Implementations which support null values
+     * must override this default implementation.
+     *
+     * @throws ClassCastException {@inheritDoc}
+     * @throws NullPointerException {@inheritDoc}
+     * @since 1.8
+     */
+    @Override
+    default V getOrDefault(Object key, V defaultValue) {
+        V v;
+        return ((v = get(key)) != null) ? v : defaultValue;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @implSpec The default implementation is equivalent to, for this
+     * {@code map}:

+     * <pre> {@code
+     * for (Map.Entry<K, V> entry : map.entrySet())
+     *     action.accept(entry.getKey(), entry.getValue());
+     * }</pre>
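+     * <p>A usage sketch (illustrative only, assuming a
+     * {@code ConcurrentHashMap} instance):
+     * <pre> {@code
+     * ConcurrentMap<String, Integer> scores = new ConcurrentHashMap<>();
+     * scores.put("alice", 3);
+     * // prints each live entry; entries removed mid-iteration are skipped
+     * scores.forEach((name, score) -> System.out.println(name + "=" + score));
+     * }</pre>
+     *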
+ * + * @implNote The default implementation assumes that + * {@code IllegalStateException} thrown by {@code getKey()} or + * {@code getValue()} indicates that the entry has been removed and cannot + * be processed. Operation continues for subsequent entries. + * + * @throws NullPointerException {@inheritDoc} + * @since 1.8 + */ + @Override + default void forEach(BiConsumer action) { + Objects.requireNonNull(action); + for (Map.Entry entry : entrySet()) { + K k; + V v; + try { + k = entry.getKey(); + v = entry.getValue(); + } catch(IllegalStateException ise) { + // this usually means the entry is no longer in the map. + continue; + } + action.accept(k, v); + } + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * This is equivalent to + *
+     * <pre> {@code
+     * if (!map.containsKey(key))
+     *   return map.put(key, value);
+     * else
+     *   return map.get(key);
+     * }</pre>
+ * + * except that the action is performed atomically. + * + * @implNote This implementation intentionally re-abstracts the + * inappropriate default provided in {@code Map}. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with the specified key, or + * {@code null} if there was no mapping for the key. + * (A {@code null} return can also indicate that the map + * previously associated {@code null} with the key, + * if the implementation supports null values.) + * @throws UnsupportedOperationException if the {@code put} operation + * is not supported by this map + * @throws ClassCastException if the class of the specified key or value + * prevents it from being stored in this map + * @throws NullPointerException if the specified key or value is null, + * and this map does not permit null keys or values + * @throws IllegalArgumentException if some property of the specified key + * or value prevents it from being stored in this map + */ + V putIfAbsent(K key, V value); + + /** + * Removes the entry for a key only if currently mapped to a given value. + * This is equivalent to + *
+     * <pre> {@code
+     * if (map.containsKey(key) && Objects.equals(map.get(key), value)) {
+     *   map.remove(key);
+     *   return true;
+     * } else
+     *   return false;
+     * }</pre>
+ * + * except that the action is performed atomically. + * + * @implNote This implementation intentionally re-abstracts the + * inappropriate default provided in {@code Map}. + * + * @param key key with which the specified value is associated + * @param value value expected to be associated with the specified key + * @return {@code true} if the value was removed + * @throws UnsupportedOperationException if the {@code remove} operation + * is not supported by this map + * @throws ClassCastException if the key or value is of an inappropriate + * type for this map + * (optional) + * @throws NullPointerException if the specified key or value is null, + * and this map does not permit null keys or values + * (optional) + */ + boolean remove(Object key, Object value); + + /** + * Replaces the entry for a key only if currently mapped to a given value. + * This is equivalent to + *
+     * <pre> {@code
+     * if (map.containsKey(key) && Objects.equals(map.get(key), oldValue)) {
+     *   map.put(key, newValue);
+     *   return true;
+     * } else
+     *   return false;
+     * }</pre>
+ * + * except that the action is performed atomically. + * + * @implNote This implementation intentionally re-abstracts the + * inappropriate default provided in {@code Map}. + * + * @param key key with which the specified value is associated + * @param oldValue value expected to be associated with the specified key + * @param newValue value to be associated with the specified key + * @return {@code true} if the value was replaced + * @throws UnsupportedOperationException if the {@code put} operation + * is not supported by this map + * @throws ClassCastException if the class of a specified key or value + * prevents it from being stored in this map + * @throws NullPointerException if a specified key or value is null, + * and this map does not permit null keys or values + * @throws IllegalArgumentException if some property of a specified key + * or value prevents it from being stored in this map + */ + boolean replace(K key, V oldValue, V newValue); + + /** + * Replaces the entry for a key only if currently mapped to some value. + * This is equivalent to + *
+     * <pre> {@code
+     * if (map.containsKey(key)) {
+     *   return map.put(key, value);
+     * } else
+     *   return null;
+     * }</pre>
+ * + * except that the action is performed atomically. + * + * @implNote This implementation intentionally re-abstracts the + * inappropriate default provided in {@code Map}. + * + * @param key key with which the specified value is associated + * @param value value to be associated with the specified key + * @return the previous value associated with the specified key, or + * {@code null} if there was no mapping for the key. + * (A {@code null} return can also indicate that the map + * previously associated {@code null} with the key, + * if the implementation supports null values.) + * @throws UnsupportedOperationException if the {@code put} operation + * is not supported by this map + * @throws ClassCastException if the class of the specified key or value + * prevents it from being stored in this map + * @throws NullPointerException if the specified key or value is null, + * and this map does not permit null keys or values + * @throws IllegalArgumentException if some property of the specified key + * or value prevents it from being stored in this map + */ + V replace(K key, V value); + + /** + * {@inheritDoc} + * + * @implSpec + *

+     * <p>The default implementation is equivalent to, for this {@code map}:

+     * <pre> {@code
+     * for (Map.Entry<K, V> entry : map.entrySet())
+     *     do {
+     *        K k = entry.getKey();
+     *        V v = entry.getValue();
+     *     } while(!replace(k, v, function.apply(k, v)));
+     * }</pre>
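+     * <p>A usage sketch (illustrative values, assuming a
+     * {@code ConcurrentHashMap} instance):
+     * <pre> {@code
+     * ConcurrentMap<String, Integer> prices = new ConcurrentHashMap<>();
+     * prices.put("widget", 100);
+     * prices.replaceAll((item, price) -> price + price / 10); // raise by 10%
+     * }</pre>
+     *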
+ * + * The default implementation may retry these steps when multiple + * threads attempt updates including potentially calling the function + * repeatedly for a given key. + * + *

This implementation assumes that the ConcurrentMap cannot contain null + * values and {@code get()} returning null unambiguously means the key is + * absent. Implementations which support null values must + * override this default implementation. + * + * @throws UnsupportedOperationException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + * @throws ClassCastException {@inheritDoc} + * @throws IllegalArgumentException {@inheritDoc} + * @since 1.8 + */ + @Override + default void replaceAll(BiFunction function) { + Objects.requireNonNull(function); + forEach((k,v) -> { + while(!replace(k, v, function.apply(k, v))) { + // v changed or k is gone + if ( (v = get(k)) == null) { + // k is no longer in the map. + break; + } + } + }); + } + + /** + * {@inheritDoc} + * + * @implSpec + * The default implementation is equivalent to the following steps for this + * {@code map}, then returning the current value or {@code null} if now + * absent: + * + *

+     * <pre> {@code
+     * if (map.get(key) == null) {
+     *     V newValue = mappingFunction.apply(key);
+     *     if (newValue != null)
+     *         return map.putIfAbsent(key, newValue);
+     * }
+     * }</pre>
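+     * <p>A usage sketch (illustrative, assuming a
+     * {@code ConcurrentHashMap} instance):
+     * <pre> {@code
+     * ConcurrentMap<String, List<String>> index = new ConcurrentHashMap<>();
+     * // at most one list is ever installed per key, even under contention
+     * index.computeIfAbsent("k", key -> new CopyOnWriteArrayList<>()).add("v");
+     * }</pre>
+     *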
+ * + * The default implementation may retry these steps when multiple + * threads attempt updates including potentially calling the mapping + * function multiple times. + * + *

This implementation assumes that the ConcurrentMap cannot contain null + * values and {@code get()} returning null unambiguously means the key is + * absent. Implementations which support null values must + * override this default implementation. + * + * @throws UnsupportedOperationException {@inheritDoc} + * @throws ClassCastException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + * @since 1.8 + */ + @Override + default V computeIfAbsent(K key, + Function mappingFunction) { + Objects.requireNonNull(mappingFunction); + V v, newValue; + return ((v = get(key)) == null && + (newValue = mappingFunction.apply(key)) != null && + (v = putIfAbsent(key, newValue)) == null) ? newValue : v; + } + + /** + * {@inheritDoc} + * + * @implSpec + * The default implementation is equivalent to performing the following + * steps for this {@code map}, then returning the current value or + * {@code null} if now absent. : + * + *

+     * <pre> {@code
+     * if (map.get(key) != null) {
+     *     V oldValue = map.get(key);
+     *     V newValue = remappingFunction.apply(key, oldValue);
+     *     if (newValue != null)
+     *         map.replace(key, oldValue, newValue);
+     *     else
+     *         map.remove(key, oldValue);
+     * }
+     * }</pre>
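+     * <p>A usage sketch (illustrative, assuming a
+     * {@code ConcurrentHashMap} instance):
+     * <pre> {@code
+     * ConcurrentMap<String, Integer> stock = new ConcurrentHashMap<>();
+     * stock.put("widget", 1);
+     * // decrement, removing the entry once it would reach zero
+     * stock.computeIfPresent("widget", (item, n) -> n > 1 ? n - 1 : null);
+     * }</pre>
+     *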
+ * + * The default implementation may retry these steps when multiple threads + * attempt updates including potentially calling the remapping function + * multiple times. + * + *

This implementation assumes that the ConcurrentMap cannot contain null + * values and {@code get()} returning null unambiguously means the key is + * absent. Implementations which support null values must + * override this default implementation. + * + * @throws UnsupportedOperationException {@inheritDoc} + * @throws ClassCastException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + * @since 1.8 + */ + @Override + default V computeIfPresent(K key, + BiFunction remappingFunction) { + Objects.requireNonNull(remappingFunction); + V oldValue; + while((oldValue = get(key)) != null) { + V newValue = remappingFunction.apply(key, oldValue); + if (newValue != null) { + if (replace(key, oldValue, newValue)) + return newValue; + } else if (remove(key, oldValue)) + return null; + } + return oldValue; + } + + /** + * {@inheritDoc} + * + * @implSpec + * The default implementation is equivalent to performing the following + * steps for this {@code map}, then returning the current value or + * {@code null} if absent: + * + *

+     * <pre> {@code
+     * V oldValue = map.get(key);
+     * V newValue = remappingFunction.apply(key, oldValue);
+     * if (oldValue != null) {
+     *    if (newValue != null)
+     *       map.replace(key, oldValue, newValue);
+     *    else
+     *       map.remove(key, oldValue);
+     * } else {
+     *    if (newValue != null)
+     *       map.putIfAbsent(key, newValue);
+     *    else
+     *       return null;
+     * }
+     * }</pre>
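+     * <p>A usage sketch (illustrative, assuming a
+     * {@code ConcurrentHashMap} instance):
+     * <pre> {@code
+     * ConcurrentMap<String, Integer> counts = new ConcurrentHashMap<>();
+     * // increments whether or not the key is already present
+     * counts.compute("hits", (k, n) -> (n == null) ? 1 : n + 1);
+     * }</pre>
+     *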
+ * + * The default implementation may retry these steps when multiple + * threads attempt updates including potentially calling the remapping + * function multiple times. + * + *

This implementation assumes that the ConcurrentMap cannot contain null + * values and {@code get()} returning null unambiguously means the key is + * absent. Implementations which support null values must + * override this default implementation. + * + * @throws UnsupportedOperationException {@inheritDoc} + * @throws ClassCastException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + * @since 1.8 + */ + @Override + default V compute(K key, + BiFunction remappingFunction) { + Objects.requireNonNull(remappingFunction); + V oldValue = get(key); + for(;;) { + V newValue = remappingFunction.apply(key, oldValue); + if (newValue == null) { + // delete mapping + if (oldValue != null || containsKey(key)) { + // something to remove + if (remove(key, oldValue)) { + // removed the old value as expected + return null; + } + + // some other value replaced old value. try again. + oldValue = get(key); + } else { + // nothing to do. Leave things as they were. + return null; + } + } else { + // add or replace old mapping + if (oldValue != null) { + // replace + if (replace(key, oldValue, newValue)) { + // replaced as expected. + return newValue; + } + + // some other value replaced old value. try again. + oldValue = get(key); + } else { + // add (replace if oldValue was null) + if ((oldValue = putIfAbsent(key, newValue)) == null) { + // replaced + return newValue; + } + + // some other value replaced old value. try again. + } + } + } + } + + + /** + * {@inheritDoc} + * + * @implSpec + * The default implementation is equivalent to performing the following + * steps for this {@code map}, then returning the current value or + * {@code null} if absent: + * + *

+     * <pre> {@code
+     * V oldValue = map.get(key);
+     * V newValue = (oldValue == null) ? value :
+     *              remappingFunction.apply(oldValue, value);
+     * if (newValue == null)
+     *     map.remove(key);
+     * else
+     *     map.put(key, newValue);
+     * }</pre>
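+     * <p>A usage sketch (illustrative, assuming a
+     * {@code ConcurrentHashMap} instance):
+     * <pre> {@code
+     * ConcurrentMap<String, Integer> counts = new ConcurrentHashMap<>();
+     * counts.merge("hits", 1, Integer::sum);       // 1 if absent, else old+1
+     * counts.merge("hits", 1, (old, inc) -> null); // null result removes key
+     * }</pre>
+     *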
+ * + *

+     * <p>The default implementation may retry these steps when multiple
+     * threads attempt updates including potentially calling the remapping
+     * function multiple times.
+     *

This implementation assumes that the ConcurrentMap cannot contain null + * values and {@code get()} returning null unambiguously means the key is + * absent. Implementations which support null values must + * override this default implementation. + * + * @throws UnsupportedOperationException {@inheritDoc} + * @throws ClassCastException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + * @since 1.8 + */ + @Override + default V merge(K key, V value, + BiFunction remappingFunction) { + Objects.requireNonNull(remappingFunction); + Objects.requireNonNull(value); + V oldValue = get(key); + for (;;) { + if (oldValue != null) { + V newValue = remappingFunction.apply(oldValue, value); + if (newValue != null) { + if (replace(key, oldValue, newValue)) + return newValue; + } else if (remove(key, oldValue)) { + return null; + } + oldValue = get(key); + } else { + if ((oldValue = putIfAbsent(key, value)) == null) { + return value; + } + } + } + } +} diff --git a/src/HashMap.java b/src/HashMap.java index 5afaa09..84b2f95 100644 --- a/src/HashMap.java +++ b/src/HashMap.java @@ -1,15 +1,218 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + */ + package java.util; -import java.io.*; -public class HashMap - extends AbstractMap - implements Map, Cloneable, Serializable -{ +import java.io.IOException; +import java.io.InvalidObjectException; +import java.io.Serializable; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Consumer; +import java.util.function.Function; + +/** + * Hash table based implementation of the Map interface. This + * implementation provides all of the optional map operations, and permits + * null values and the null key. (The HashMap + * class is roughly equivalent to Hashtable, except that it is + * unsynchronized and permits nulls.) This class makes no guarantees as to + * the order of the map; in particular, it does not guarantee that the order + * will remain constant over time. + * + *

This implementation provides constant-time performance for the basic + * operations (get and put), assuming the hash function + * disperses the elements properly among the buckets. Iteration over + * collection views requires time proportional to the "capacity" of the + * HashMap instance (the number of buckets) plus its size (the number + * of key-value mappings). Thus, it's very important not to set the initial + * capacity too high (or the load factor too low) if iteration performance is + * important. + * + *

An instance of HashMap has two parameters that affect its + * performance: initial capacity and load factor. The + * capacity is the number of buckets in the hash table, and the initial + * capacity is simply the capacity at the time the hash table is created. The + * load factor is a measure of how full the hash table is allowed to + * get before its capacity is automatically increased. When the number of + * entries in the hash table exceeds the product of the load factor and the + * current capacity, the hash table is rehashed (that is, internal data + * structures are rebuilt) so that the hash table has approximately twice the + * number of buckets. + * + *

As a general rule, the default load factor (.75) offers a good + * tradeoff between time and space costs. Higher values decrease the + * space overhead but increase the lookup cost (reflected in most of + * the operations of the HashMap class, including + * get and put). The expected number of entries in + * the map and its load factor should be taken into account when + * setting its initial capacity, so as to minimize the number of + * rehash operations. If the initial capacity is greater than the + * maximum number of entries divided by the load factor, no rehash + * operations will ever occur. + * + *

If many mappings are to be stored in a HashMap + * instance, creating it with a sufficiently large capacity will allow + * the mappings to be stored more efficiently than letting it perform + * automatic rehashing as needed to grow the table. Note that using + * many keys with the same {@code hashCode()} is a sure way to slow + * down performance of any hash table. To ameliorate impact, when keys + * are {@link Comparable}, this class may use comparison order among + * keys to help break ties. + * + *
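+ * <p>A sizing sketch (illustrative numbers): to hold about 1000 mappings
+ * without any rehashing at the default load factor, pre-size past
+ * 1000 / 0.75:
+ * <pre> {@code
+ * Map<String, String> m = new HashMap<>((int) (1000 / 0.75f) + 1);
+ * }</pre>
+ *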

Note that this implementation is not synchronized. + * If multiple threads access a hash map concurrently, and at least one of + * the threads modifies the map structurally, it must be + * synchronized externally. (A structural modification is any operation + * that adds or deletes one or more mappings; merely changing the value + * associated with a key that an instance already contains is not a + * structural modification.) This is typically accomplished by + * synchronizing on some object that naturally encapsulates the map. + * + * If no such object exists, the map should be "wrapped" using the + * {@link Collections#synchronizedMap Collections.synchronizedMap} + * method. This is best done at creation time, to prevent accidental + * unsynchronized access to the map:

+ *   Map m = Collections.synchronizedMap(new HashMap(...));
+ * + *

The iterators returned by all of this class's "collection view methods" + * are fail-fast: if the map is structurally modified at any time after + * the iterator is created, in any way except through the iterator's own + * remove method, the iterator will throw a + * {@link ConcurrentModificationException}. Thus, in the face of concurrent + * modification, the iterator fails quickly and cleanly, rather than risking + * arbitrary, non-deterministic behavior at an undetermined time in the + * future. + * + *
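+ * <p>A sketch of removal during iteration via the iterator itself
+ * (illustrative):
+ * <pre> {@code
+ * Map<String, Integer> m = new HashMap<>();
+ * for (Iterator<Map.Entry<String, Integer>> it = m.entrySet().iterator();
+ *      it.hasNext(); ) {
+ *     if (it.next().getValue() == 0)
+ *         it.remove(); // removing via the map here would risk a
+ *                      // ConcurrentModificationException
+ * }
+ * }</pre>
+ *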

Note that the fail-fast behavior of an iterator cannot be guaranteed + * as it is, generally speaking, impossible to make any hard guarantees in the + * presence of unsynchronized concurrent modification. Fail-fast iterators + * throw ConcurrentModificationException on a best-effort basis. + * Therefore, it would be wrong to write a program that depended on this + * exception for its correctness: the fail-fast behavior of iterators + * should be used only to detect bugs. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @param the type of keys maintained by this map + * @param the type of mapped values + * + * @author Doug Lea + * @author Josh Bloch + * @author Arthur van Hoff + * @author Neal Gafter + * @see Object#hashCode() + * @see Collection + * @see Map + * @see TreeMap + * @see Hashtable + * @since 1.2 + */ +public class HashMap extends AbstractMap + implements Map, Cloneable, Serializable { + + private static final long serialVersionUID = 362498820763181265L; + + /* + * Implementation notes. + * + * This map usually acts as a binned (bucketed) hash table, but + * when bins get too large, they are transformed into bins of + * TreeNodes, each structured similarly to those in + * java.util.TreeMap. Most methods try to use normal bins, but + * relay to TreeNode methods when applicable (simply by checking + * instanceof a node). Bins of TreeNodes may be traversed and + * used like any others, but additionally support faster lookup + * when overpopulated. However, since the vast majority of bins in + * normal use are not overpopulated, checking for existence of + * tree bins may be delayed in the course of table methods. + * + * Tree bins (i.e., bins whose elements are all TreeNodes) are + * ordered primarily by hashCode, but in the case of ties, if two + * elements are of the same "class C implements Comparable", + * type then their compareTo method is used for ordering. (We + * conservatively check generic types via reflection to validate + * this -- see method comparableClassFor). The added complexity + * of tree bins is worthwhile in providing worst-case O(log n) + * operations when keys either have distinct hashes or are + * orderable, Thus, performance degrades gracefully under + * accidental or malicious usages in which hashCode() methods + * return values that are poorly distributed, as well as those in + * which many keys share a hashCode, so long as they are also + * Comparable. (If neither of these apply, we may waste about a + * factor of two in time and space compared to taking no + * precautions. But the only known cases stem from poor user + * programming practices that are already so slow that this makes + * little difference.) + * + * Because TreeNodes are about twice the size of regular nodes, we + * use them only when bins contain enough nodes to warrant use + * (see TREEIFY_THRESHOLD). And when they become too small (due to + * removal or resizing) they are converted back to plain bins. In + * usages with well-distributed user hashCodes, tree bins are + * rarely used. Ideally, under random hashCodes, the frequency of + * nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average for the default resizing + * threshold of 0.75, although with a large variance because of + * resizing granularity. Ignoring variance, the expected + * occurrences of list size k are (exp(-0.5) * pow(0.5, k) / + * factorial(k)). The first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * The root of a tree bin is normally its first node. However, + * sometimes (currently only upon Iterator.remove), the root might + * be elsewhere, but can be recovered following parent links + * (method TreeNode.root()). 
+ * + * All applicable internal methods accept a hash code as an + * argument (as normally supplied from a public method), allowing + * them to call each other without recomputing user hashCodes. + * Most internal methods also accept a "tab" argument, that is + * normally the current table, but may be a new or old one when + * resizing or converting. + * + * When bin lists are treeified, split, or untreeified, we keep + * them in the same relative access/traversal order (i.e., field + * Node.next) to better preserve locality, and to slightly + * simplify handling of splits and traversals that invoke + * iterator.remove. When using comparators on insertion, to keep a + * total ordering (or as close as is required here) across + * rebalancings, we compare classes and identityHashCodes as + * tie-breakers. + * + * The use and transitions among plain vs tree modes is + * complicated by the existence of subclass LinkedHashMap. See + * below for hook methods defined to be invoked upon insertion, + * removal and access that allow LinkedHashMap internals to + * otherwise remain independent of these mechanics. (This also + * requires that a map instance be passed to some utility methods + * that may create new nodes.) + * + * The concurrent-programming-like SSA-based coding style helps + * avoid aliasing errors amid all of the twisty pointer operations. + */ /** * The default initial capacity - MUST be a power of two. */ - static final int DEFAULT_INITIAL_CAPACITY = 16; + static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16 /** * The maximum capacity, used if a higher value is implicitly specified @@ -24,19 +227,182 @@ public class HashMap static final float DEFAULT_LOAD_FACTOR = 0.75f; /** - * The table, resized as necessary. Length MUST Always be a power of two. + * The bin count threshold for using a tree rather than list for a + * bin. Bins are converted to trees when adding an element to a + * bin with at least this many nodes. The value must be greater + * than 2 and should be at least 8 to mesh with assumptions in + * tree removal about conversion back to plain bins upon + * shrinkage. + */ + static final int TREEIFY_THRESHOLD = 8; + + /** + * The bin count threshold for untreeifying a (split) bin during a + * resize operation. Should be less than TREEIFY_THRESHOLD, and at + * most 6 to mesh with shrinkage detection under removal. + */ + static final int UNTREEIFY_THRESHOLD = 6; + + /** + * The smallest table capacity for which bins may be treeified. + * (Otherwise the table is resized if too many nodes in a bin.) + * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts + * between resizing and treeification thresholds. + */ + static final int MIN_TREEIFY_CAPACITY = 64; + + /** + * Basic hash bin node, used for most entries. (See below for + * TreeNode subclass, and in LinkedHashMap for its Entry subclass.) 
+ */ + static class Node implements Map.Entry { + final int hash; + final K key; + V value; + Node next; + + Node(int hash, K key, V value, Node next) { + this.hash = hash; + this.key = key; + this.value = value; + this.next = next; + } + + public final K getKey() { return key; } + public final V getValue() { return value; } + public final String toString() { return key + "=" + value; } + + public final int hashCode() { + return Objects.hashCode(key) ^ Objects.hashCode(value); + } + + public final V setValue(V newValue) { + V oldValue = value; + value = newValue; + return oldValue; + } + + public final boolean equals(Object o) { + if (o == this) + return true; + if (o instanceof Map.Entry) { + Map.Entry e = (Map.Entry)o; + if (Objects.equals(key, e.getKey()) && + Objects.equals(value, e.getValue())) + return true; + } + return false; + } + } + + /* ---------------- Static utilities -------------- */ + + /** + * Computes key.hashCode() and spreads (XORs) higher bits of hash + * to lower. Because the table uses power-of-two masking, sets of + * hashes that vary only in bits above the current mask will + * always collide. (Among known examples are sets of Float keys + * holding consecutive whole numbers in small tables.) So we + * apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed (so don't benefit from + * spreading), and because we use trees to handle large sets of + * collisions in bins, we just XOR some shifted bits in the + * cheapest possible way to reduce systematic lossage, as well as + * to incorporate impact of the highest bits that would otherwise + * never be used in index calculations because of table bounds. + */ + static final int hash(Object key) { + int h; + return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16); + } + + /** + * Returns x's Class if it is of the form "class C implements + * Comparable", else null. + */ + static Class comparableClassFor(Object x) { + if (x instanceof Comparable) { + Class c; Type[] ts, as; Type t; ParameterizedType p; + if ((c = x.getClass()) == String.class) // bypass checks + return c; + if ((ts = c.getGenericInterfaces()) != null) { + for (int i = 0; i < ts.length; ++i) { + if (((t = ts[i]) instanceof ParameterizedType) && + ((p = (ParameterizedType)t).getRawType() == + Comparable.class) && + (as = p.getActualTypeArguments()) != null && + as.length == 1 && as[0] == c) // type arg is c + return c; + } + } + } + return null; + } + + /** + * Returns k.compareTo(x) if x matches kc (k's screened comparable + * class), else 0. + */ + @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable + static int compareComparables(Class kc, Object k, Object x) { + return (x == null || x.getClass() != kc ? 0 : + ((Comparable)k).compareTo(x)); + } + + /** + * Returns a power of two size for the given target capacity. */ - transient Entry[] table; + static final int tableSizeFor(int cap) { + int n = cap - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /* ---------------- Fields -------------- */ + + /** + * The table, initialized on first use, and resized as + * necessary. When allocated, length is always a power of two. + * (We also tolerate length zero in some operations to allow + * bootstrapping mechanics that are currently not needed.) 
+ */ + transient Node[] table; + + /** + * Holds cached entrySet(). Note that AbstractMap fields are used + * for keySet() and values(). + */ + transient Set> entrySet; /** * The number of key-value mappings contained in this map. */ transient int size; + /** + * The number of times this HashMap has been structurally modified + * Structural modifications are those that change the number of mappings in + * the HashMap or otherwise modify its internal structure (e.g., + * rehash). This field is used to make iterators on Collection-views of + * the HashMap fail-fast. (See ConcurrentModificationException). + */ + transient int modCount; + /** * The next size value at which to resize (capacity * load factor). + * * @serial */ + // (The javadoc description is true upon serialization. + // Additionally, if the table array has not been allocated, this + // field holds the initial array capacity, or zero signifying + // DEFAULT_INITIAL_CAPACITY.) int threshold; /** @@ -46,14 +412,7 @@ public class HashMap */ final float loadFactor; - /** - * The number of times this HashMap has been structurally modified - * Structural modifications are those that change the number of mappings in - * the HashMap or otherwise modify its internal structure (e.g., - * rehash). This field is used to make iterators on Collection-views of - * the HashMap fail-fast. (See ConcurrentModificationException). - */ - transient int modCount; + /* ---------------- Public operations -------------- */ /** * Constructs an empty HashMap with the specified initial @@ -73,16 +432,8 @@ public HashMap(int initialCapacity, float loadFactor) { if (loadFactor <= 0 || Float.isNaN(loadFactor)) throw new IllegalArgumentException("Illegal load factor: " + loadFactor); - - // Find a power of 2 >= initialCapacity - int capacity = 1; - while (capacity < initialCapacity) - capacity <<= 1; - this.loadFactor = loadFactor; - threshold = (int)(capacity * loadFactor); - table = new Entry[capacity]; - init(); + this.threshold = tableSizeFor(initialCapacity); } /** @@ -101,10 +452,7 @@ public HashMap(int initialCapacity) { * (16) and the default load factor (0.75). */ public HashMap() { - this.loadFactor = DEFAULT_LOAD_FACTOR; - threshold = (int)(DEFAULT_INITIAL_CAPACITY * DEFAULT_LOAD_FACTOR); - table = new Entry[DEFAULT_INITIAL_CAPACITY]; - init(); + this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted } /** @@ -117,43 +465,35 @@ public HashMap() { * @throws NullPointerException if the specified map is null */ public HashMap(Map m) { - this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, - DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR); - putAllForCreate(m); - } - - // internal utilities - - /** - * Initialization hook for subclasses. This method is called - * in all constructors and pseudo-constructors (clone, readObject) - * after HashMap has been initialized but before any entries have - * been inserted. (In the absence of this method, readObject would - * require explicit knowledge of subclasses.) - */ - void init() { - } - - /** - * Applies a supplemental hash function to a given hashCode, which - * defends against poor quality hash functions. This is critical - * because HashMap uses power-of-two length hash tables, that - * otherwise encounter collisions for hashCodes that do not differ - * in lower bits. Note: Null keys always map to hash 0, thus index 0. 
- */ - static int hash(int h) { - // This function ensures that hashCodes that differ only by - // constant multiples at each bit position have a bounded - // number of collisions (approximately 8 at default load factor). - h ^= (h >>> 20) ^ (h >>> 12); - return h ^ (h >>> 7) ^ (h >>> 4); + this.loadFactor = DEFAULT_LOAD_FACTOR; + putMapEntries(m, false); } /** - * Returns index for hash code h. + * Implements Map.putAll and Map constructor + * + * @param m the map + * @param evict false when initially constructing this map, else + * true (relayed to method afterNodeInsertion). */ - static int indexFor(int h, int length) { - return h & (length-1); + final void putMapEntries(Map m, boolean evict) { + int s = m.size(); + if (s > 0) { + if (table == null) { // pre-size + float ft = ((float)s / loadFactor) + 1.0F; + int t = ((ft < (float)MAXIMUM_CAPACITY) ? + (int)ft : MAXIMUM_CAPACITY); + if (t > threshold) + threshold = tableSizeFor(t); + } + else if (s > threshold) + resize(); + for (Map.Entry e : m.entrySet()) { + K key = e.getKey(); + V value = e.getValue(); + putVal(hash(key), key, value, false, evict); + } + } } /** @@ -192,30 +532,33 @@ public boolean isEmpty() { * @see #put(Object, Object) */ public V get(Object key) { - if (key == null) - return getForNullKey(); - int hash = hash(key.hashCode()); - for (Entry e = table[indexFor(hash, table.length)]; - e != null; - e = e.next) { - Object k; - if (e.hash == hash && ((k = e.key) == key || key.equals(k))) - return e.value; - } - return null; + Node e; + return (e = getNode(hash(key), key)) == null ? null : e.value; } /** - * Offloaded version of get() to look up null keys. Null keys map - * to index 0. This null case is split out into separate methods - * for the sake of performance in the two most commonly used - * operations (get and put), but incorporated with conditionals in - * others. + * Implements Map.get and related methods + * + * @param hash hash for key + * @param key the key + * @return the node, or null if none */ - private V getForNullKey() { - for (Entry e = table[0]; e != null; e = e.next) { - if (e.key == null) - return e.value; + final Node getNode(int hash, Object key) { + Node[] tab; Node first, e; int n; K k; + if ((tab = table) != null && (n = tab.length) > 0 && + (first = tab[(n - 1) & hash]) != null) { + if (first.hash == hash && // always check first node + ((k = first.key) == key || (key != null && key.equals(k)))) + return first; + if ((e = first.next) != null) { + if (first instanceof TreeNode) + return ((TreeNode)first).getTreeNode(hash, key); + do { + if (e.hash == hash && + ((k = e.key) == key || (key != null && key.equals(k)))) + return e; + } while ((e = e.next) != null); + } } return null; } @@ -229,28 +572,9 @@ private V getForNullKey() { * key. */ public boolean containsKey(Object key) { - return getEntry(key) != null; + return getNode(hash(key), key) != null; } - /** - * Returns the entry associated with the specified key in the - * HashMap. Returns null if the HashMap contains no mapping - * for the key. - */ - final Entry getEntry(Object key) { - int hash = (key == null) ? 0 : hash(key.hashCode()); - for (Entry e = table[indexFor(hash, table.length)]; - e != null; - e = e.next) { - Object k; - if (e.hash == hash && - ((k = e.key) == key || (key != null && key.equals(k)))) - return e; - } - return null; - } - - /** * Associates the specified value with the specified key in this map. 
* If the map previously contained a mapping for the key, the old @@ -264,120 +588,167 @@ final Entry getEntry(Object key) { * previously associated null with key.) */ public V put(K key, V value) { - if (key == null) - return putForNullKey(value); - int hash = hash(key.hashCode()); - int i = indexFor(hash, table.length); - for (Entry e = table[i]; e != null; e = e.next) { - Object k; - if (e.hash == hash && ((k = e.key) == key || key.equals(k))) { - V oldValue = e.value; - e.value = value; - e.recordAccess(this); - return oldValue; - } - } - - modCount++; - addEntry(hash, key, value, i); - return null; + return putVal(hash(key), key, value, false, true); } /** - * Offloaded version of put for null keys + * Implements Map.put and related methods + * + * @param hash hash for key + * @param key the key + * @param value the value to put + * @param onlyIfAbsent if true, don't change existing value + * @param evict if false, the table is in creation mode. + * @return previous value, or null if none */ - private V putForNullKey(V value) { - for (Entry e = table[0]; e != null; e = e.next) { - if (e.key == null) { + final V putVal(int hash, K key, V value, boolean onlyIfAbsent, + boolean evict) { + Node[] tab; Node p; int n, i; + if ((tab = table) == null || (n = tab.length) == 0) + n = (tab = resize()).length; + if ((p = tab[i = (n - 1) & hash]) == null) + tab[i] = newNode(hash, key, value, null); + else { + Node e; K k; + if (p.hash == hash && + ((k = p.key) == key || (key != null && key.equals(k)))) + e = p; + else if (p instanceof TreeNode) + e = ((TreeNode)p).putTreeVal(this, tab, hash, key, value); + else { + for (int binCount = 0; ; ++binCount) { + if ((e = p.next) == null) { + p.next = newNode(hash, key, value, null); + if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st + treeifyBin(tab, hash); + break; + } + if (e.hash == hash && + ((k = e.key) == key || (key != null && key.equals(k)))) + break; + p = e; + } + } + if (e != null) { // existing mapping for key V oldValue = e.value; - e.value = value; - e.recordAccess(this); + if (!onlyIfAbsent || oldValue == null) + e.value = value; + afterNodeAccess(e); return oldValue; } } - modCount++; - addEntry(0, null, value, 0); + ++modCount; + if (++size > threshold) + resize(); + afterNodeInsertion(evict); return null; } /** - * This method is used instead of put by constructors and - * pseudoconstructors (clone, readObject). It does not resize the table, - * check for comodification, etc. It calls createEntry rather than - * addEntry. + * Initializes or doubles table size. If null, allocates in + * accord with initial capacity target held in field threshold. + * Otherwise, because we are using power-of-two expansion, the + * elements from each bin must either stay at same index, or move + * with a power of two offset in the new table. + * + * @return the table */ - private void putForCreate(K key, V value) { - int hash = (key == null) ? 0 : hash(key.hashCode()); - int i = indexFor(hash, table.length); - - /** - * Look for preexisting entry for key. This will never happen for - * clone or deserialize. It will only happen for construction if the - * input Map is a sorted map whose ordering is inconsistent w/ equals. - */ - for (Entry e = table[i]; e != null; e = e.next) { - Object k; - if (e.hash == hash && - ((k = e.key) == key || (key != null && key.equals(k)))) { - e.value = value; - return; + final Node[] resize() { + Node[] oldTab = table; + int oldCap = (oldTab == null) ? 
0 : oldTab.length; + int oldThr = threshold; + int newCap, newThr = 0; + if (oldCap > 0) { + if (oldCap >= MAXIMUM_CAPACITY) { + threshold = Integer.MAX_VALUE; + return oldTab; } + else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY && + oldCap >= DEFAULT_INITIAL_CAPACITY) + newThr = oldThr << 1; // double threshold } - - createEntry(hash, key, value, i); - } - - private void putAllForCreate(Map m) { - for (Map.Entry e : m.entrySet()) - putForCreate(e.getKey(), e.getValue()); - } - - /** - * Rehashes the contents of this map into a new array with a - * larger capacity. This method is called automatically when the - * number of keys in this map reaches its threshold. - * - * If current capacity is MAXIMUM_CAPACITY, this method does not - * resize the map, but sets threshold to Integer.MAX_VALUE. - * This has the effect of preventing future calls. - * - * @param newCapacity the new capacity, MUST be a power of two; - * must be greater than current capacity unless current - * capacity is MAXIMUM_CAPACITY (in which case value - * is irrelevant). - */ - void resize(int newCapacity) { - Entry[] oldTable = table; - int oldCapacity = oldTable.length; - if (oldCapacity == MAXIMUM_CAPACITY) { - threshold = Integer.MAX_VALUE; - return; + else if (oldThr > 0) // initial capacity was placed in threshold + newCap = oldThr; + else { // zero initial threshold signifies using defaults + newCap = DEFAULT_INITIAL_CAPACITY; + newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY); } - - Entry[] newTable = new Entry[newCapacity]; - transfer(newTable); - table = newTable; - threshold = (int)(newCapacity * loadFactor); + if (newThr == 0) { + float ft = (float)newCap * loadFactor; + newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ? + (int)ft : Integer.MAX_VALUE); + } + threshold = newThr; + @SuppressWarnings({"rawtypes","unchecked"}) + Node[] newTab = (Node[])new Node[newCap]; + table = newTab; + if (oldTab != null) { + for (int j = 0; j < oldCap; ++j) { + Node e; + if ((e = oldTab[j]) != null) { + oldTab[j] = null; + if (e.next == null) + newTab[e.hash & (newCap - 1)] = e; + else if (e instanceof TreeNode) + ((TreeNode)e).split(this, newTab, j, oldCap); + else { // preserve order + Node loHead = null, loTail = null; + Node hiHead = null, hiTail = null; + Node next; + do { + next = e.next; + if ((e.hash & oldCap) == 0) { + if (loTail == null) + loHead = e; + else + loTail.next = e; + loTail = e; + } + else { + if (hiTail == null) + hiHead = e; + else + hiTail.next = e; + hiTail = e; + } + } while ((e = next) != null); + if (loTail != null) { + loTail.next = null; + newTab[j] = loHead; + } + if (hiTail != null) { + hiTail.next = null; + newTab[j + oldCap] = hiHead; + } + } + } + } + } + return newTab; } /** - * Transfers all entries from current table to newTable. + * Replaces all linked nodes in bin at index for given hash unless + * table is too small, in which case resizes instead. 
*/ - void transfer(Entry[] newTable) { - Entry[] src = table; - int newCapacity = newTable.length; - for (int j = 0; j < src.length; j++) { - Entry e = src[j]; - if (e != null) { - src[j] = null; - do { - Entry next = e.next; - int i = indexFor(e.hash, newCapacity); - e.next = newTable[i]; - newTable[i] = e; - e = next; - } while (e != null); - } + final void treeifyBin(Node[] tab, int hash) { + int n, index; Node e; + if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY) + resize(); + else if ((e = tab[index = (n - 1) & hash]) != null) { + TreeNode hd = null, tl = null; + do { + TreeNode p = replacementTreeNode(e, null); + if (tl == null) + hd = p; + else { + p.prev = tl; + tl.next = p; + } + tl = p; + } while ((e = e.next) != null); + if ((tab[index] = hd) != null) + hd.treeify(tab); } } @@ -390,32 +761,7 @@ void transfer(Entry[] newTable) { * @throws NullPointerException if the specified map is null */ public void putAll(Map m) { - int numKeysToBeAdded = m.size(); - if (numKeysToBeAdded == 0) - return; - - /* - * Expand the map if the map if the number of mappings to be added - * is greater than or equal to threshold. This is conservative; the - * obvious condition is (m.size() + size) >= threshold, but this - * condition could result in a map with twice the appropriate capacity, - * if the keys to be added overlap with the keys already in this map. - * By using the conservative calculation, we subject ourself - * to at most one extra resize. - */ - if (numKeysToBeAdded > threshold) { - int targetCapacity = (int)(numKeysToBeAdded / loadFactor + 1); - if (targetCapacity > MAXIMUM_CAPACITY) - targetCapacity = MAXIMUM_CAPACITY; - int newCapacity = table.length; - while (newCapacity < targetCapacity) - newCapacity <<= 1; - if (newCapacity > table.length) - resize(newCapacity); - } - - for (Map.Entry e : m.entrySet()) - put(e.getKey(), e.getValue()); + putMapEntries(m, true); } /** @@ -428,73 +774,60 @@ public void putAll(Map m) { * previously associated null with key.) */ public V remove(Object key) { - Entry e = removeEntryForKey(key); - return (e == null ? null : e.value); - } - - /** - * Removes and returns the entry associated with the specified key - * in the HashMap. Returns null if the HashMap contains no mapping - * for this key. - */ - final Entry removeEntryForKey(Object key) { - int hash = (key == null) ? 0 : hash(key.hashCode()); - int i = indexFor(hash, table.length); - Entry prev = table[i]; - Entry e = prev; - - while (e != null) { - Entry next = e.next; - Object k; - if (e.hash == hash && - ((k = e.key) == key || (key != null && key.equals(k)))) { - modCount++; - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - return e; - } - prev = e; - e = next; - } - - return e; + Node e; + return (e = removeNode(hash(key), key, null, false, true)) == null ? + null : e.value; } /** - * Special version of remove for EntrySet. + * Implements Map.remove and related methods + * + * @param hash hash for key + * @param key the key + * @param value the value to match if matchValue, else ignored + * @param matchValue if true only remove if value is equal + * @param movable if false do not move other nodes while removing + * @return the node, or null if none */ - final Entry removeMapping(Object o) { - if (!(o instanceof Map.Entry)) - return null; - - Map.Entry entry = (Map.Entry) o; - Object key = entry.getKey(); - int hash = (key == null) ? 
0 : hash(key.hashCode()); - int i = indexFor(hash, table.length); - Entry prev = table[i]; - Entry e = prev; - - while (e != null) { - Entry next = e.next; - if (e.hash == hash && e.equals(entry)) { - modCount++; - size--; - if (prev == e) - table[i] = next; + final Node removeNode(int hash, Object key, Object value, + boolean matchValue, boolean movable) { + Node[] tab; Node p; int n, index; + if ((tab = table) != null && (n = tab.length) > 0 && + (p = tab[index = (n - 1) & hash]) != null) { + Node node = null, e; K k; V v; + if (p.hash == hash && + ((k = p.key) == key || (key != null && key.equals(k)))) + node = p; + else if ((e = p.next) != null) { + if (p instanceof TreeNode) + node = ((TreeNode)p).getTreeNode(hash, key); + else { + do { + if (e.hash == hash && + ((k = e.key) == key || + (key != null && key.equals(k)))) { + node = e; + break; + } + p = e; + } while ((e = e.next) != null); + } + } + if (node != null && (!matchValue || (v = node.value) == value || + (value != null && value.equals(v)))) { + if (node instanceof TreeNode) + ((TreeNode)node).removeTreeNode(this, tab, movable); + else if (node == p) + tab[index] = node.next; else - prev.next = next; - e.recordRemoval(this); - return e; + p.next = node.next; + ++modCount; + --size; + afterNodeRemoval(node); + return node; } - prev = e; - e = next; } - - return e; + return null; } /** @@ -502,11 +835,13 @@ final Entry removeMapping(Object o) { * The map will be empty after this call returns. */ public void clear() { + Node[] tab; modCount++; - Entry[] tab = table; - for (int i = 0; i < tab.length; i++) - tab[i] = null; - size = 0; + if ((tab = table) != null && size > 0) { + size = 0; + for (int i = 0; i < tab.length; ++i) + tab[i] = null; + } } /** @@ -518,162 +853,548 @@ public void clear() { * specified value */ public boolean containsValue(Object value) { - if (value == null) - return containsNullValue(); - - Entry[] tab = table; - for (int i = 0; i < tab.length ; i++) - for (Entry e = tab[i] ; e != null ; e = e.next) - if (value.equals(e.value)) - return true; + Node[] tab; V v; + if ((tab = table) != null && size > 0) { + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) { + if ((v = e.value) == value || + (value != null && value.equals(v))) + return true; + } + } + } return false; } /** - * Special-case code for containsValue with null argument + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation), the results of + * the iteration are undefined. The set supports element removal, + * which removes the corresponding mapping from the map, via the + * Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. It does not support the add or addAll + * operations. + * + * @return a set view of the keys contained in this map */ - private boolean containsNullValue() { - Entry[] tab = table; - for (int i = 0; i < tab.length ; i++) - for (Entry e = tab[i] ; e != null ; e = e.next) - if (e.value == null) - return true; - return false; + public Set keySet() { + Set ks; + return (ks = keySet) == null ? 
(keySet = new KeySet()) : ks; + } + + final class KeySet extends AbstractSet { + public final int size() { return size; } + public final void clear() { HashMap.this.clear(); } + public final Iterator iterator() { return new KeyIterator(); } + public final boolean contains(Object o) { return containsKey(o); } + public final boolean remove(Object key) { + return removeNode(hash(key), key, null, false, true) != null; + } + public final Spliterator spliterator() { + return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0); + } + public final void forEach(Consumer action) { + Node[] tab; + if (action == null) + throw new NullPointerException(); + if (size > 0 && (tab = table) != null) { + int mc = modCount; + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) + action.accept(e.key); + } + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } } /** - * Returns a shallow copy of this HashMap instance: the keys and - * values themselves are not cloned. + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. If the map is + * modified while an iteration over the collection is in progress + * (except through the iterator's own remove operation), + * the results of the iteration are undefined. The collection + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Collection.remove, removeAll, + * retainAll and clear operations. It does not + * support the add or addAll operations. * - * @return a shallow copy of this map + * @return a view of the values contained in this map */ - public Object clone() { - HashMap result = null; - try { - result = (HashMap)super.clone(); - } catch (CloneNotSupportedException e) { - // assert false; - } - result.table = new Entry[table.length]; - result.entrySet = null; - result.modCount = 0; - result.size = 0; - result.init(); - result.putAllForCreate(this); - - return result; + public Collection values() { + Collection vs; + return (vs = values) == null ? (values = new Values()) : vs; } - static class Entry implements Map.Entry { - final K key; - V value; - Entry next; - final int hash; - - /** - * Creates new entry. - */ - Entry(int h, K k, V v, Entry n) { - value = v; - next = n; - key = k; - hash = h; + final class Values extends AbstractCollection { + public final int size() { return size; } + public final void clear() { HashMap.this.clear(); } + public final Iterator iterator() { return new ValueIterator(); } + public final boolean contains(Object o) { return containsValue(o); } + public final Spliterator spliterator() { + return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0); } - - public final K getKey() { - return key; + public final void forEach(Consumer action) { + Node[] tab; + if (action == null) + throw new NullPointerException(); + if (size > 0 && (tab = table) != null) { + int mc = modCount; + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) + action.accept(e.value); + } + if (modCount != mc) + throw new ConcurrentModificationException(); + } } + } - public final V getValue() { - return value; - } + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. 
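Because the keySet() and values() collections above are live views backed by the map, mutations flow in both directions; a short usage sketch (the class name ViewDemo is invented for illustration):

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;

    class ViewDemo {
        public static void main(String[] args) {
            Map<String, Integer> m = new HashMap<>();
            m.put("a", 1);
            m.put("b", 2);
            m.keySet().remove("a");          // removes the mapping a=1 from m itself
            Iterator<Integer> it = m.values().iterator();
            while (it.hasNext()) {
                it.next();
                it.remove();                 // the iterator's own remove is the safe way
            }
            System.out.println(m.isEmpty()); // true
        }
    }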
If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation, or through the + * setValue operation on a map entry returned by the + * iterator) the results of the iteration are undefined. The set + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Set.remove, removeAll, retainAll and + * clear operations. It does not support the + * add or addAll operations. + * + * @return a set view of the mappings contained in this map + */ + public Set> entrySet() { + Set> es; + return (es = entrySet) == null ? (entrySet = new EntrySet()) : es; + } - public final V setValue(V newValue) { - V oldValue = value; - value = newValue; - return oldValue; + final class EntrySet extends AbstractSet> { + public final int size() { return size; } + public final void clear() { HashMap.this.clear(); } + public final Iterator> iterator() { + return new EntryIterator(); } - - public final boolean equals(Object o) { + public final boolean contains(Object o) { if (!(o instanceof Map.Entry)) return false; - Map.Entry e = (Map.Entry)o; - Object k1 = getKey(); - Object k2 = e.getKey(); - if (k1 == k2 || (k1 != null && k1.equals(k2))) { - Object v1 = getValue(); - Object v2 = e.getValue(); - if (v1 == v2 || (v1 != null && v1.equals(v2))) - return true; + Map.Entry e = (Map.Entry) o; + Object key = e.getKey(); + Node candidate = getNode(hash(key), key); + return candidate != null && candidate.equals(e); + } + public final boolean remove(Object o) { + if (o instanceof Map.Entry) { + Map.Entry e = (Map.Entry) o; + Object key = e.getKey(); + Object value = e.getValue(); + return removeNode(hash(key), key, value, true, true) != null; } return false; } - - public final int hashCode() { - return (key==null ? 0 : key.hashCode()) ^ - (value==null ? 0 : value.hashCode()); + public final Spliterator> spliterator() { + return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0); } - - public final String toString() { - return getKey() + "=" + getValue(); + public final void forEach(Consumer> action) { + Node[] tab; + if (action == null) + throw new NullPointerException(); + if (size > 0 && (tab = table) != null) { + int mc = modCount; + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) + action.accept(e); + } + if (modCount != mc) + throw new ConcurrentModificationException(); + } } + } - /** - * This method is invoked whenever the value in an entry is - * overwritten by an invocation of put(k,v) for a key k that's already - * in the HashMap. - */ - void recordAccess(HashMap m) { + // Overrides of JDK8 Map extension methods + + @Override + public V getOrDefault(Object key, V defaultValue) { + Node e; + return (e = getNode(hash(key), key)) == null ? defaultValue : e.value; + } + + @Override + public V putIfAbsent(K key, V value) { + return putVal(hash(key), key, value, true, true); + } + + @Override + public boolean remove(Object key, Object value) { + return removeNode(hash(key), key, value, true, true) != null; + } + + @Override + public boolean replace(K key, V oldValue, V newValue) { + Node e; V v; + if ((e = getNode(hash(key), key)) != null && + ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) { + e.value = newValue; + afterNodeAccess(e); + return true; } + return false; + } - /** - * This method is invoked whenever the entry is - * removed from the table. 
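The JDK 8 overrides above (getOrDefault, putIfAbsent, the two-argument remove, and replace) each resolve in a single hash probe; a brief sketch of their semantics (DefaultsDemo is an invented name):

    import java.util.HashMap;
    import java.util.Map;

    class DefaultsDemo {
        public static void main(String[] args) {
            Map<String, Integer> m = new HashMap<>();
            m.putIfAbsent("hits", 0);               // inserts: key was absent
            m.putIfAbsent("hits", 99);              // no-op: mapping already present
            System.out.println(m.getOrDefault("misses", 0)); // 0, inserts nothing
            System.out.println(m.replace("hits", 0, 1));     // true: old value matched
            System.out.println(m.remove("hits", 42));        // false: value mismatch
        }
    }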
- */ - void recordRemoval(HashMap m) { + @Override + public V replace(K key, V value) { + Node e; + if ((e = getNode(hash(key), key)) != null) { + V oldValue = e.value; + e.value = value; + afterNodeAccess(e); + return oldValue; + } + return null; + } + + @Override + public V computeIfAbsent(K key, + Function mappingFunction) { + if (mappingFunction == null) + throw new NullPointerException(); + int hash = hash(key); + Node[] tab; Node first; int n, i; + int binCount = 0; + TreeNode t = null; + Node old = null; + if (size > threshold || (tab = table) == null || + (n = tab.length) == 0) + n = (tab = resize()).length; + if ((first = tab[i = (n - 1) & hash]) != null) { + if (first instanceof TreeNode) + old = (t = (TreeNode)first).getTreeNode(hash, key); + else { + Node e = first; K k; + do { + if (e.hash == hash && + ((k = e.key) == key || (key != null && key.equals(k)))) { + old = e; + break; + } + ++binCount; + } while ((e = e.next) != null); + } + V oldValue; + if (old != null && (oldValue = old.value) != null) { + afterNodeAccess(old); + return oldValue; + } + } + V v = mappingFunction.apply(key); + if (v == null) { + return null; + } else if (old != null) { + old.value = v; + afterNodeAccess(old); + return v; + } + else if (t != null) + t.putTreeVal(this, tab, hash, key, v); + else { + tab[i] = newNode(hash, key, v, first); + if (binCount >= TREEIFY_THRESHOLD - 1) + treeifyBin(tab, hash); + } + ++modCount; + ++size; + afterNodeInsertion(true); + return v; + } + + public V computeIfPresent(K key, + BiFunction remappingFunction) { + if (remappingFunction == null) + throw new NullPointerException(); + Node e; V oldValue; + int hash = hash(key); + if ((e = getNode(hash, key)) != null && + (oldValue = e.value) != null) { + V v = remappingFunction.apply(key, oldValue); + if (v != null) { + e.value = v; + afterNodeAccess(e); + return v; + } + else + removeNode(hash, key, null, false, true); + } + return null; + } + + @Override + public V compute(K key, + BiFunction remappingFunction) { + if (remappingFunction == null) + throw new NullPointerException(); + int hash = hash(key); + Node[] tab; Node first; int n, i; + int binCount = 0; + TreeNode t = null; + Node old = null; + if (size > threshold || (tab = table) == null || + (n = tab.length) == 0) + n = (tab = resize()).length; + if ((first = tab[i = (n - 1) & hash]) != null) { + if (first instanceof TreeNode) + old = (t = (TreeNode)first).getTreeNode(hash, key); + else { + Node e = first; K k; + do { + if (e.hash == hash && + ((k = e.key) == key || (key != null && key.equals(k)))) { + old = e; + break; + } + ++binCount; + } while ((e = e.next) != null); + } + } + V oldValue = (old == null) ? 
null : old.value; + V v = remappingFunction.apply(key, oldValue); + if (old != null) { + if (v != null) { + old.value = v; + afterNodeAccess(old); + } + else + removeNode(hash, key, null, false, true); + } + else if (v != null) { + if (t != null) + t.putTreeVal(this, tab, hash, key, v); + else { + tab[i] = newNode(hash, key, v, first); + if (binCount >= TREEIFY_THRESHOLD - 1) + treeifyBin(tab, hash); + } + ++modCount; + ++size; + afterNodeInsertion(true); } + return v; } + @Override + public V merge(K key, V value, + BiFunction remappingFunction) { + if (value == null) + throw new NullPointerException(); + if (remappingFunction == null) + throw new NullPointerException(); + int hash = hash(key); + Node[] tab; Node first; int n, i; + int binCount = 0; + TreeNode t = null; + Node old = null; + if (size > threshold || (tab = table) == null || + (n = tab.length) == 0) + n = (tab = resize()).length; + if ((first = tab[i = (n - 1) & hash]) != null) { + if (first instanceof TreeNode) + old = (t = (TreeNode)first).getTreeNode(hash, key); + else { + Node e = first; K k; + do { + if (e.hash == hash && + ((k = e.key) == key || (key != null && key.equals(k)))) { + old = e; + break; + } + ++binCount; + } while ((e = e.next) != null); + } + } + if (old != null) { + V v; + if (old.value != null) + v = remappingFunction.apply(old.value, value); + else + v = value; + if (v != null) { + old.value = v; + afterNodeAccess(old); + } + else + removeNode(hash, key, null, false, true); + return v; + } + if (value != null) { + if (t != null) + t.putTreeVal(this, tab, hash, key, value); + else { + tab[i] = newNode(hash, key, value, first); + if (binCount >= TREEIFY_THRESHOLD - 1) + treeifyBin(tab, hash); + } + ++modCount; + ++size; + afterNodeInsertion(true); + } + return value; + } + + @Override + public void forEach(BiConsumer action) { + Node[] tab; + if (action == null) + throw new NullPointerException(); + if (size > 0 && (tab = table) != null) { + int mc = modCount; + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) + action.accept(e.key, e.value); + } + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } + + @Override + public void replaceAll(BiFunction function) { + Node[] tab; + if (function == null) + throw new NullPointerException(); + if (size > 0 && (tab = table) != null) { + int mc = modCount; + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) { + e.value = function.apply(e.key, e.value); + } + } + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } + + /* ------------------------------------------------------------ */ + // Cloning and serialization + /** - * Adds a new entry with the specified key, value and hash code to - * the specified bucket. It is the responsibility of this - * method to resize the table if appropriate. + * Returns a shallow copy of this HashMap instance: the keys and + * values themselves are not cloned. * - * Subclass overrides this to alter the behavior of put method. 
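The compute/merge family above turns common read-modify-write patterns into a single probe; two typical idioms follow (ComputeDemo is an invented name; the printed iteration order of a HashMap is not guaranteed):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class ComputeDemo {
        public static void main(String[] args) {
            Map<String, Integer> counts = new HashMap<>();
            for (String w : "to be or not to be".split(" "))
                counts.merge(w, 1, Integer::sum);  // absent: store 1; present: old + 1
            System.out.println(counts);            // e.g. {not=1, be=2, or=1, to=2}

            Map<String, List<String>> index = new HashMap<>();
            index.computeIfAbsent("vowels", k -> new ArrayList<>()).add("a");
            index.computeIfAbsent("vowels", k -> new ArrayList<>()).add("e");
            System.out.println(index);             // {vowels=[a, e]}
        }
    }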
+ * @return a shallow copy of this map */ - void addEntry(int hash, K key, V value, int bucketIndex) { - Entry e = table[bucketIndex]; - table[bucketIndex] = new Entry<>(hash, key, value, e); - if (size++ >= threshold) - resize(2 * table.length); + @SuppressWarnings("unchecked") + @Override + public Object clone() { + HashMap result; + try { + result = (HashMap)super.clone(); + } catch (CloneNotSupportedException e) { + // this shouldn't happen, since we are Cloneable + throw new InternalError(e); + } + result.reinitialize(); + result.putMapEntries(this, false); + return result; + } + + // These methods are also used when serializing HashSets + final float loadFactor() { return loadFactor; } + final int capacity() { + return (table != null) ? table.length : + (threshold > 0) ? threshold : + DEFAULT_INITIAL_CAPACITY; } /** - * Like addEntry except that this version is used when creating entries - * as part of Map construction or "pseudo-construction" (cloning, - * deserialization). This version needn't worry about resizing the table. + * Save the state of the HashMap instance to a stream (i.e., + * serialize it). * - * Subclass overrides this to alter the behavior of HashMap(Map), - * clone, and readObject. + * @serialData The capacity of the HashMap (the length of the + * bucket array) is emitted (int), followed by the + * size (an int, the number of key-value + * mappings), followed by the key (Object) and value (Object) + * for each key-value mapping. The key-value mappings are + * emitted in no particular order. */ - void createEntry(int hash, K key, V value, int bucketIndex) { - Entry e = table[bucketIndex]; - table[bucketIndex] = new Entry<>(hash, key, value, e); - size++; + private void writeObject(java.io.ObjectOutputStream s) + throws IOException { + int buckets = capacity(); + // Write out the threshold, loadfactor, and any hidden stuff + s.defaultWriteObject(); + s.writeInt(buckets); + s.writeInt(size); + internalWriteEntries(s); } - private abstract class HashIterator implements Iterator { - Entry next; // next entry to return - int expectedModCount; // For fast-fail - int index; // current slot - Entry current; // current entry + /** + * Reconstitute the {@code HashMap} instance from a stream (i.e., + * deserialize it). + */ + private void readObject(java.io.ObjectInputStream s) + throws IOException, ClassNotFoundException { + // Read in the threshold (ignored), loadfactor, and any hidden stuff + s.defaultReadObject(); + reinitialize(); + if (loadFactor <= 0 || Float.isNaN(loadFactor)) + throw new InvalidObjectException("Illegal load factor: " + + loadFactor); + s.readInt(); // Read and ignore number of buckets + int mappings = s.readInt(); // Read number of mappings (size) + if (mappings < 0) + throw new InvalidObjectException("Illegal mappings count: " + + mappings); + else if (mappings > 0) { // (if zero, use defaults) + // Size the table using given load factor only if within + // range of 0.25...4.0 + float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f); + float fc = (float)mappings / lf + 1.0f; + int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ? + DEFAULT_INITIAL_CAPACITY : + (fc >= MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : + tableSizeFor((int)fc)); + float ft = (float)cap * lf; + threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ? 
+ (int)ft : Integer.MAX_VALUE); + @SuppressWarnings({"rawtypes","unchecked"}) + Node[] tab = (Node[])new Node[cap]; + table = tab; + + // Read the keys and values, and put the mappings in the HashMap + for (int i = 0; i < mappings; i++) { + @SuppressWarnings("unchecked") + K key = (K) s.readObject(); + @SuppressWarnings("unchecked") + V value = (V) s.readObject(); + putVal(hash(key), key, value, false, false); + } + } + } + + /* ------------------------------------------------------------ */ + // iterators + + abstract class HashIterator { + Node next; // next entry to return + Node current; // current entry + int expectedModCount; // for fast-fail + int index; // current slot HashIterator() { expectedModCount = modCount; - if (size > 0) { // advance to first entry - Entry[] t = table; - while (index < t.length && (next = t[index++]) == null) - ; + Node[] t = table; + current = next = null; + index = 0; + if (t != null && size > 0) { // advance to first entry + do {} while (index < t.length && (next = t[index++]) == null); } } @@ -681,251 +1402,958 @@ public final boolean hasNext() { return next != null; } - final Entry nextEntry() { + final Node nextNode() { + Node[] t; + Node e = next; if (modCount != expectedModCount) throw new ConcurrentModificationException(); - Entry e = next; if (e == null) throw new NoSuchElementException(); - - if ((next = e.next) == null) { - Entry[] t = table; - while (index < t.length && (next = t[index++]) == null) - ; + if ((next = (current = e).next) == null && (t = table) != null) { + do {} while (index < t.length && (next = t[index++]) == null); } - current = e; return e; } - public void remove() { - if (current == null) + public final void remove() { + Node p = current; + if (p == null) throw new IllegalStateException(); if (modCount != expectedModCount) throw new ConcurrentModificationException(); - Object k = current.key; current = null; - HashMap.this.removeEntryForKey(k); + K key = p.key; + removeNode(hash(key), key, null, false, false); expectedModCount = modCount; } + } + final class KeyIterator extends HashIterator + implements Iterator { + public final K next() { return nextNode().key; } } - private final class ValueIterator extends HashIterator { - public V next() { - return nextEntry().value; - } + final class ValueIterator extends HashIterator + implements Iterator { + public final V next() { return nextNode().value; } } - private final class KeyIterator extends HashIterator { - public K next() { - return nextEntry().getKey(); - } + final class EntryIterator extends HashIterator + implements Iterator> { + public final Map.Entry next() { return nextNode(); } } - private final class EntryIterator extends HashIterator> { - public Map.Entry next() { - return nextEntry(); + /* ------------------------------------------------------------ */ + // spliterators + + static class HashMapSpliterator { + final HashMap map; + Node current; // current node + int index; // current index, modified on advance/split + int fence; // one past last index + int est; // size estimate + int expectedModCount; // for comodification checks + + HashMapSpliterator(HashMap m, int origin, + int fence, int est, + int expectedModCount) { + this.map = m; + this.index = origin; + this.fence = fence; + this.est = est; + this.expectedModCount = expectedModCount; } - } - // Subclass overrides these to alter behavior of views' iterator() method - Iterator newKeyIterator() { - return new KeyIterator(); - } - Iterator newValueIterator() { - return new ValueIterator(); - } - Iterator> 
newEntryIterator() { - return new EntryIterator(); + final int getFence() { // initialize fence and size on first use + int hi; + if ((hi = fence) < 0) { + HashMap m = map; + est = m.size; + expectedModCount = m.modCount; + Node[] tab = m.table; + hi = fence = (tab == null) ? 0 : tab.length; + } + return hi; + } + + public final long estimateSize() { + getFence(); // force init + return (long) est; + } } + static final class KeySpliterator + extends HashMapSpliterator + implements Spliterator { + KeySpliterator(HashMap m, int origin, int fence, int est, + int expectedModCount) { + super(m, origin, fence, est, expectedModCount); + } - // Views + public KeySpliterator trySplit() { + int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; + return (lo >= mid || current != null) ? null : + new KeySpliterator<>(map, lo, index = mid, est >>>= 1, + expectedModCount); + } - private transient Set> entrySet = null; + public void forEachRemaining(Consumer action) { + int i, hi, mc; + if (action == null) + throw new NullPointerException(); + HashMap m = map; + Node[] tab = m.table; + if ((hi = fence) < 0) { + mc = expectedModCount = m.modCount; + hi = fence = (tab == null) ? 0 : tab.length; + } + else + mc = expectedModCount; + if (tab != null && tab.length >= hi && + (i = index) >= 0 && (i < (index = hi) || current != null)) { + Node p = current; + current = null; + do { + if (p == null) + p = tab[i++]; + else { + action.accept(p.key); + p = p.next; + } + } while (p != null || i < hi); + if (m.modCount != mc) + throw new ConcurrentModificationException(); + } + } - /** - * Returns a {@link Set} view of the keys contained in this map. - * The set is backed by the map, so changes to the map are - * reflected in the set, and vice-versa. If the map is modified - * while an iteration over the set is in progress (except through - * the iterator's own remove operation), the results of - * the iteration are undefined. The set supports element removal, - * which removes the corresponding mapping from the map, via the - * Iterator.remove, Set.remove, - * removeAll, retainAll, and clear - * operations. It does not support the add or addAll - * operations. - */ - public Set keySet() { - Set ks = keySet; - return (ks != null ? ks : (keySet = new KeySet())); + public boolean tryAdvance(Consumer action) { + int hi; + if (action == null) + throw new NullPointerException(); + Node[] tab = map.table; + if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { + while (current != null || index < hi) { + if (current == null) + current = tab[index++]; + else { + K k = current.key; + current = current.next; + action.accept(k); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + } + return false; + } + + public int characteristics() { + return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) | + Spliterator.DISTINCT; + } } - private final class KeySet extends AbstractSet { - public Iterator iterator() { - return newKeyIterator(); + static final class ValueSpliterator + extends HashMapSpliterator + implements Spliterator { + ValueSpliterator(HashMap m, int origin, int fence, int est, + int expectedModCount) { + super(m, origin, fence, est, expectedModCount); } - public int size() { - return size; + + public ValueSpliterator trySplit() { + int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; + return (lo >= mid || current != null) ? 
null : + new ValueSpliterator<>(map, lo, index = mid, est >>>= 1, + expectedModCount); } - public boolean contains(Object o) { - return containsKey(o); + + public void forEachRemaining(Consumer action) { + int i, hi, mc; + if (action == null) + throw new NullPointerException(); + HashMap m = map; + Node[] tab = m.table; + if ((hi = fence) < 0) { + mc = expectedModCount = m.modCount; + hi = fence = (tab == null) ? 0 : tab.length; + } + else + mc = expectedModCount; + if (tab != null && tab.length >= hi && + (i = index) >= 0 && (i < (index = hi) || current != null)) { + Node p = current; + current = null; + do { + if (p == null) + p = tab[i++]; + else { + action.accept(p.value); + p = p.next; + } + } while (p != null || i < hi); + if (m.modCount != mc) + throw new ConcurrentModificationException(); + } } - public boolean remove(Object o) { - return HashMap.this.removeEntryForKey(o) != null; + + public boolean tryAdvance(Consumer action) { + int hi; + if (action == null) + throw new NullPointerException(); + Node[] tab = map.table; + if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { + while (current != null || index < hi) { + if (current == null) + current = tab[index++]; + else { + V v = current.value; + current = current.next; + action.accept(v); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + } + return false; } - public void clear() { - HashMap.this.clear(); + + public int characteristics() { + return (fence < 0 || est == map.size ? Spliterator.SIZED : 0); } } - /** - * Returns a {@link Collection} view of the values contained in this map. - * The collection is backed by the map, so changes to the map are - * reflected in the collection, and vice-versa. If the map is - * modified while an iteration over the collection is in progress - * (except through the iterator's own remove operation), - * the results of the iteration are undefined. The collection - * supports element removal, which removes the corresponding - * mapping from the map, via the Iterator.remove, - * Collection.remove, removeAll, - * retainAll and clear operations. It does not - * support the add or addAll operations. - */ - public Collection values() { - Collection vs = values; - return (vs != null ? vs : (values = new Values())); - } + static final class EntrySpliterator + extends HashMapSpliterator + implements Spliterator> { + EntrySpliterator(HashMap m, int origin, int fence, int est, + int expectedModCount) { + super(m, origin, fence, est, expectedModCount); + } - private final class Values extends AbstractCollection { - public Iterator iterator() { - return newValueIterator(); + public EntrySpliterator trySplit() { + int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; + return (lo >= mid || current != null) ? null : + new EntrySpliterator<>(map, lo, index = mid, est >>>= 1, + expectedModCount); } - public int size() { - return size; + + public void forEachRemaining(Consumer> action) { + int i, hi, mc; + if (action == null) + throw new NullPointerException(); + HashMap m = map; + Node[] tab = m.table; + if ((hi = fence) < 0) { + mc = expectedModCount = m.modCount; + hi = fence = (tab == null) ? 
0 : tab.length; + } + else + mc = expectedModCount; + if (tab != null && tab.length >= hi && + (i = index) >= 0 && (i < (index = hi) || current != null)) { + Node p = current; + current = null; + do { + if (p == null) + p = tab[i++]; + else { + action.accept(p); + p = p.next; + } + } while (p != null || i < hi); + if (m.modCount != mc) + throw new ConcurrentModificationException(); + } } - public boolean contains(Object o) { - return containsValue(o); + + public boolean tryAdvance(Consumer> action) { + int hi; + if (action == null) + throw new NullPointerException(); + Node[] tab = map.table; + if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { + while (current != null || index < hi) { + if (current == null) + current = tab[index++]; + else { + Node e = current; + current = current.next; + action.accept(e); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + } + return false; } - public void clear() { - HashMap.this.clear(); + + public int characteristics() { + return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) | + Spliterator.DISTINCT; } } + /* ------------------------------------------------------------ */ + // LinkedHashMap support + + + /* + * The following package-protected methods are designed to be + * overridden by LinkedHashMap, but not by any other subclass. + * Nearly all other internal methods are also package-protected + * but are declared final, so can be used by LinkedHashMap, view + * classes, and HashSet. + */ + + // Create a regular (non-tree) node + Node newNode(int hash, K key, V value, Node next) { + return new Node<>(hash, key, value, next); + } + + // For conversion from TreeNodes to plain nodes + Node replacementNode(Node p, Node next) { + return new Node<>(p.hash, p.key, p.value, next); + } + + // Create a tree bin node + TreeNode newTreeNode(int hash, K key, V value, Node next) { + return new TreeNode<>(hash, key, value, next); + } + + // For treeifyBin + TreeNode replacementTreeNode(Node p, Node next) { + return new TreeNode<>(p.hash, p.key, p.value, next); + } + /** - * Returns a {@link Set} view of the mappings contained in this map. - * The set is backed by the map, so changes to the map are - * reflected in the set, and vice-versa. If the map is modified - * while an iteration over the set is in progress (except through - * the iterator's own remove operation, or through the - * setValue operation on a map entry returned by the - * iterator) the results of the iteration are undefined. The set - * supports element removal, which removes the corresponding - * mapping from the map, via the Iterator.remove, - * Set.remove, removeAll, retainAll and - * clear operations. It does not support the - * add or addAll operations. - * - * @return a set view of the mappings contained in this map + * Reset to initial default state. Called by clone and readObject. */ - public Set> entrySet() { - return entrySet0(); + void reinitialize() { + table = null; + entrySet = null; + keySet = null; + values = null; + modCount = 0; + threshold = 0; + size = 0; } - private Set> entrySet0() { - Set> es = entrySet; - return es != null ? es : (entrySet = new EntrySet()); + // Callbacks to allow LinkedHashMap post-actions + void afterNodeAccess(Node p) { } + void afterNodeInsertion(boolean evict) { } + void afterNodeRemoval(Node p) { } + + // Called only from writeObject, to ensure compatible ordering. 
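The afterNodeAccess/afterNodeInsertion callbacks above are the hooks LinkedHashMap overrides to maintain its linked list and to honor removeEldestEntry. A minimal LRU cache built on exactly that mechanism (the class name LruCache is invented):

    import java.util.LinkedHashMap;
    import java.util.Map;

    class LruCache<K, V> extends LinkedHashMap<K, V> {
        private final int maxEntries;

        LruCache(int maxEntries) {
            super(16, 0.75f, true);         // access order: get() refreshes recency
            this.maxEntries = maxEntries;
        }

        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            return size() > maxEntries;     // consulted via afterNodeInsertion(evict)
        }
    }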
+ void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException { + Node[] tab; + if (size > 0 && (tab = table) != null) { + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) { + s.writeObject(e.key); + s.writeObject(e.value); + } + } + } } - private final class EntrySet extends AbstractSet> { - public Iterator> iterator() { - return newEntryIterator(); + /* ------------------------------------------------------------ */ + // Tree bins + + /** + * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn + * extends Node) so can be used as extension of either regular or + * linked node. + */ + static final class TreeNode extends LinkedHashMap.Entry { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + TreeNode(int hash, K key, V val, Node next) { + super(hash, key, val, next); } - public boolean contains(Object o) { - if (!(o instanceof Map.Entry)) - return false; - Map.Entry e = (Map.Entry) o; - Entry candidate = getEntry(e.getKey()); - return candidate != null && candidate.equals(e); + + /** + * Returns root of tree containing this node. + */ + final TreeNode root() { + for (TreeNode r = this, p;;) { + if ((p = r.parent) == null) + return r; + r = p; + } } - public boolean remove(Object o) { - return removeMapping(o) != null; + + /** + * Ensures that the given root is the first node of its bin. + */ + static void moveRootToFront(Node[] tab, TreeNode root) { + int n; + if (root != null && tab != null && (n = tab.length) > 0) { + int index = (n - 1) & root.hash; + TreeNode first = (TreeNode)tab[index]; + if (root != first) { + Node rn; + tab[index] = root; + TreeNode rp = root.prev; + if ((rn = root.next) != null) + ((TreeNode)rn).prev = rp; + if (rp != null) + rp.next = rn; + if (first != null) + first.prev = root; + root.next = first; + root.prev = null; + } + assert checkInvariants(root); + } } - public int size() { - return size; + + /** + * Finds the node starting at root p with the given hash and key. + * The kc argument caches comparableClassFor(key) upon first use + * comparing keys. + */ + final TreeNode find(int h, Object k, Class kc) { + TreeNode p = this; + do { + int ph, dir; K pk; + TreeNode pl = p.left, pr = p.right, q; + if ((ph = p.hash) > h) + p = pl; + else if (ph < h) + p = pr; + else if ((pk = p.key) == k || (k != null && k.equals(pk))) + return p; + else if (pl == null) + p = pr; + else if (pr == null) + p = pl; + else if ((kc != null || + (kc = comparableClassFor(k)) != null) && + (dir = compareComparables(kc, k, pk)) != 0) + p = (dir < 0) ? pl : pr; + else if ((q = pr.find(h, k, kc)) != null) + return q; + else + p = pl; + } while (p != null); + return null; } - public void clear() { - HashMap.this.clear(); + + /** + * Calls find for root node. + */ + final TreeNode getTreeNode(int h, Object k) { + return ((parent != null) ? root() : this).find(h, k, null); } - } - /** - * Save the state of the HashMap instance to a stream (i.e., - * serialize it). - * - * @serialData The capacity of the HashMap (the length of the - * bucket array) is emitted (int), followed by the - * size (an int, the number of key-value - * mappings), followed by the key (Object) and value (Object) - * for each key-value mapping. The key-value mappings are - * emitted in no particular order. - */ - private void writeObject(java.io.ObjectOutputStream s) - throws IOException - { - Iterator> i = - (size > 0) ? 
entrySet0().iterator() : null; + /** + * Tie-breaking utility for ordering insertions when equal + * hashCodes and non-comparable. We don't require a total + * order, just a consistent insertion rule to maintain + * equivalence across rebalancings. Tie-breaking further than + * necessary simplifies testing a bit. + */ + static int tieBreakOrder(Object a, Object b) { + int d; + if (a == null || b == null || + (d = a.getClass().getName(). + compareTo(b.getClass().getName())) == 0) + d = (System.identityHashCode(a) <= System.identityHashCode(b) ? + -1 : 1); + return d; + } - // Write out the threshold, loadfactor, and any hidden stuff - s.defaultWriteObject(); + /** + * Forms tree of the nodes linked from this node. + * @return root of tree + */ + final void treeify(Node[] tab) { + TreeNode root = null; + for (TreeNode x = this, next; x != null; x = next) { + next = (TreeNode)x.next; + x.left = x.right = null; + if (root == null) { + x.parent = null; + x.red = false; + root = x; + } + else { + K k = x.key; + int h = x.hash; + Class kc = null; + for (TreeNode p = root;;) { + int dir, ph; + K pk = p.key; + if ((ph = p.hash) > h) + dir = -1; + else if (ph < h) + dir = 1; + else if ((kc == null && + (kc = comparableClassFor(k)) == null) || + (dir = compareComparables(kc, k, pk)) == 0) + dir = tieBreakOrder(k, pk); + + TreeNode xp = p; + if ((p = (dir <= 0) ? p.left : p.right) == null) { + x.parent = xp; + if (dir <= 0) + xp.left = x; + else + xp.right = x; + root = balanceInsertion(root, x); + break; + } + } + } + } + moveRootToFront(tab, root); + } + + /** + * Returns a list of non-TreeNodes replacing those linked from + * this node. + */ + final Node untreeify(HashMap map) { + Node hd = null, tl = null; + for (Node q = this; q != null; q = q.next) { + Node p = map.replacementNode(q, null); + if (tl == null) + hd = p; + else + tl.next = p; + tl = p; + } + return hd; + } - // Write out number of buckets - s.writeInt(table.length); + /** + * Tree version of putVal. + */ + final TreeNode putTreeVal(HashMap map, Node[] tab, + int h, K k, V v) { + Class kc = null; + boolean searched = false; + TreeNode root = (parent != null) ? root() : this; + for (TreeNode p = root;;) { + int dir, ph; K pk; + if ((ph = p.hash) > h) + dir = -1; + else if (ph < h) + dir = 1; + else if ((pk = p.key) == k || (k != null && k.equals(pk))) + return p; + else if ((kc == null && + (kc = comparableClassFor(k)) == null) || + (dir = compareComparables(kc, k, pk)) == 0) { + if (!searched) { + TreeNode q, ch; + searched = true; + if (((ch = p.left) != null && + (q = ch.find(h, k, kc)) != null) || + ((ch = p.right) != null && + (q = ch.find(h, k, kc)) != null)) + return q; + } + dir = tieBreakOrder(k, pk); + } + + TreeNode xp = p; + if ((p = (dir <= 0) ? p.left : p.right) == null) { + Node xpn = xp.next; + TreeNode x = map.newTreeNode(h, k, v, xpn); + if (dir <= 0) + xp.left = x; + else + xp.right = x; + xp.next = x; + x.parent = x.prev = xp; + if (xpn != null) + ((TreeNode)xpn).prev = x; + moveRootToFront(tab, balanceInsertion(root, x)); + return null; + } + } + } - // Write out size (number of Mappings) - s.writeInt(size); + /** + * Removes the given node, that must be present before this call. + * This is messier than typical red-black deletion code because we + * cannot swap the contents of an interior node with a leaf + * successor that is pinned by "next" pointers that are accessible + * independently during traversal. So instead we swap the tree + * linkages. 
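treeify and putTreeVal above fall back to tieBreakOrder only when colliding keys are not mutually Comparable, so a key type like the following sketch keeps even a fully colliding bin ordered, and lookups logarithmic (CollidingKey is an invented example class):

    // Every instance hashes to the same bucket; implementing Comparable lets
    // the tree bin order entries by id instead of by the identity-based tie-break.
    final class CollidingKey implements Comparable<CollidingKey> {
        final int id;
        CollidingKey(int id) { this.id = id; }
        @Override public int hashCode() { return 42; }   // deliberate collision
        @Override public boolean equals(Object o) {
            return o instanceof CollidingKey && ((CollidingKey) o).id == id;
        }
        @Override public int compareTo(CollidingKey other) {
            return Integer.compare(id, other.id);
        }
    }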
If the current tree appears to have too few nodes, + * the bin is converted back to a plain bin. (The test triggers + * somewhere between 2 and 6 nodes, depending on tree structure). + */ + final void removeTreeNode(HashMap map, Node[] tab, + boolean movable) { + int n; + if (tab == null || (n = tab.length) == 0) + return; + int index = (n - 1) & hash; + TreeNode first = (TreeNode)tab[index], root = first, rl; + TreeNode succ = (TreeNode)next, pred = prev; + if (pred == null) + tab[index] = first = succ; + else + pred.next = succ; + if (succ != null) + succ.prev = pred; + if (first == null) + return; + if (root.parent != null) + root = root.root(); + if (root == null || root.right == null || + (rl = root.left) == null || rl.left == null) { + tab[index] = first.untreeify(map); // too small + return; + } + TreeNode p = this, pl = left, pr = right, replacement; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + if (sr != null) + replacement = sr; + else + replacement = p; + } + else if (pl != null) + replacement = pl; + else if (pr != null) + replacement = pr; + else + replacement = p; + if (replacement != p) { + TreeNode pp = replacement.parent = p.parent; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } - // Write out keys and values (alternating) - if (i != null) { - while (i.hasNext()) { - Map.Entry e = i.next(); - s.writeObject(e.getKey()); - s.writeObject(e.getValue()); + TreeNode r = p.red ? root : balanceDeletion(root, replacement); + + if (replacement == p) { // detach + TreeNode pp = p.parent; + p.parent = null; + if (pp != null) { + if (p == pp.left) + pp.left = null; + else if (p == pp.right) + pp.right = null; + } } + if (movable) + moveRootToFront(tab, r); } - } - private static final long serialVersionUID = 362498820763181265L; + /** + * Splits nodes in a tree bin into lower and upper tree bins, + * or untreeifies if now too small. Called only from resize; + * see above discussion about split bits and indices. 
+ * + * @param map the map + * @param tab the table for recording bin heads + * @param index the index of the table being split + * @param bit the bit of hash to split on + */ + final void split(HashMap map, Node[] tab, int index, int bit) { + TreeNode b = this; + // Relink into lo and hi lists, preserving order + TreeNode loHead = null, loTail = null; + TreeNode hiHead = null, hiTail = null; + int lc = 0, hc = 0; + for (TreeNode e = b, next; e != null; e = next) { + next = (TreeNode)e.next; + e.next = null; + if ((e.hash & bit) == 0) { + if ((e.prev = loTail) == null) + loHead = e; + else + loTail.next = e; + loTail = e; + ++lc; + } + else { + if ((e.prev = hiTail) == null) + hiHead = e; + else + hiTail.next = e; + hiTail = e; + ++hc; + } + } - /** - * Reconstitute the HashMap instance from a stream (i.e., - * deserialize it). - */ - private void readObject(java.io.ObjectInputStream s) - throws IOException, ClassNotFoundException - { - // Read in the threshold, loadfactor, and any hidden stuff - s.defaultReadObject(); + if (loHead != null) { + if (lc <= UNTREEIFY_THRESHOLD) + tab[index] = loHead.untreeify(map); + else { + tab[index] = loHead; + if (hiHead != null) // (else is already treeified) + loHead.treeify(tab); + } + } + if (hiHead != null) { + if (hc <= UNTREEIFY_THRESHOLD) + tab[index + bit] = hiHead.untreeify(map); + else { + tab[index + bit] = hiHead; + if (loHead != null) + hiHead.treeify(tab); + } + } + } + + /* ------------------------------------------------------------ */ + // Red-black tree methods, all adapted from CLR + + static TreeNode rotateLeft(TreeNode root, + TreeNode p) { + TreeNode r, pp, rl; + if (p != null && (r = p.right) != null) { + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + (root = r).red = false; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + return root; + } - // Read in number of buckets and allocate the bucket array; - int numBuckets = s.readInt(); - table = new Entry[numBuckets]; + static TreeNode rotateRight(TreeNode root, + TreeNode p) { + TreeNode l, pp, lr; + if (p != null && (l = p.left) != null) { + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + (root = l).red = false; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + return root; + } - init(); // Give subclass a chance to do its thing. + static TreeNode balanceInsertion(TreeNode root, + TreeNode x) { + x.red = true; + for (TreeNode xp, xpp, xppl, xppr;;) { + if ((xp = x.parent) == null) { + x.red = false; + return x; + } + else if (!xp.red || (xpp = xp.parent) == null) + return root; + if (xp == (xppl = xpp.left)) { + if ((xppr = xpp.right) != null && xppr.red) { + xppr.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + root = rotateLeft(root, x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + root = rotateRight(root, xpp); + } + } + } + } + else { + if (xppl != null && xppl.red) { + xppl.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + root = rotateRight(root, x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + root = rotateLeft(root, xpp); + } + } + } + } + } + } - // Read in size (number of Mappings) - int size = s.readInt(); + static TreeNode balanceDeletion(TreeNode root, + TreeNode x) { + for (TreeNode xp, xpl, xpr;;) { + if (x == null || x == root) + return root; + else if ((xp = x.parent) == null) { + x.red = false; + return x; + } + else if (x.red) { + x.red = false; + return root; + } + else if ((xpl = xp.left) == x) { + if ((xpr = xp.right) != null && xpr.red) { + xpr.red = false; + xp.red = true; + root = rotateLeft(root, xp); + xpr = (xp = x.parent) == null ? null : xp.right; + } + if (xpr == null) + x = xp; + else { + TreeNode sl = xpr.left, sr = xpr.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + xpr.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + xpr.red = true; + root = rotateRight(root, xpr); + xpr = (xp = x.parent) == null ? + null : xp.right; + } + if (xpr != null) { + xpr.red = (xp == null) ? false : xp.red; + if ((sr = xpr.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + root = rotateLeft(root, xp); + } + x = root; + } + } + } + else { // symmetric + if (xpl != null && xpl.red) { + xpl.red = false; + xp.red = true; + root = rotateRight(root, xp); + xpl = (xp = x.parent) == null ? null : xp.left; + } + if (xpl == null) + x = xp; + else { + TreeNode sl = xpl.left, sr = xpl.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + xpl.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + xpl.red = true; + root = rotateLeft(root, xpl); + xpl = (xp = x.parent) == null ? + null : xp.left; + } + if (xpl != null) { + xpl.red = (xp == null) ? false : xp.red; + if ((sl = xpl.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + root = rotateRight(root, xp); + } + x = root; + } + } + } + } + } - // Read the keys and values, and put the mappings in the HashMap - for (int i=0; i boolean checkInvariants(TreeNode t) { + TreeNode tp = t.parent, tl = t.left, tr = t.right, + tb = t.prev, tn = (TreeNode)t.next; + if (tb != null && tb.next != t) + return false; + if (tn != null && tn.prev != t) + return false; + if (tp != null && t != tp.left && t != tp.right) + return false; + if (tl != null && (tl.parent != t || tl.hash > t.hash)) + return false; + if (tr != null && (tr.parent != t || tr.hash < t.hash)) + return false; + if (t.red && tl != null && tl.red && tr != null && tr.red) + return false; + if (tl != null && !checkInvariants(tl)) + return false; + if (tr != null && !checkInvariants(tr)) + return false; + return true; } } - // These methods are used when serializing HashSets - int capacity() { return table.length; } - float loadFactor() { return loadFactor; } } diff --git a/src/HashSet.java b/src/HashSet.java index b2fca16..ad56f4e 100644 --- a/src/HashSet.java +++ b/src/HashSet.java @@ -1,5 +1,91 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + package java.util; +import java.io.InvalidObjectException; + +/** + * This class implements the Set interface, backed by a hash table + * (actually a HashMap instance). 
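As that javadoc says, the set is little more than a HashMap whose values are one shared dummy object; a stripped-down sketch of the delegation (SimpleHashSet is an invented name, though the real class uses the same PRESENT/map idea):

    import java.util.HashMap;

    class SimpleHashSet<E> {
        private static final Object PRESENT = new Object(); // shared dummy value
        private final HashMap<E, Object> map = new HashMap<>();

        public boolean add(E e)      { return map.put(e, PRESENT) == null; }
        public boolean remove(E e)   { return map.remove(e) == PRESENT; }
        public boolean contains(E e) { return map.containsKey(e); }
        public int size()            { return map.size(); }
    }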
It makes no guarantees as to the + * iteration order of the set; in particular, it does not guarantee that the + * order will remain constant over time. This class permits the null + * element. + * + *
This class offers constant time performance for the basic operations + * (add, remove, contains and size), + * assuming the hash function disperses the elements properly among the + * buckets. Iterating over this set requires time proportional to the sum of + * the HashSet instance's size (the number of elements) plus the + * "capacity" of the backing HashMap instance (the number of + * buckets). Thus, it's very important not to set the initial capacity too + * high (or the load factor too low) if iteration performance is important. + * + *
Note that this implementation is not synchronized. + * If multiple threads access a hash set concurrently, and at least one of + * the threads modifies the set, it must be synchronized externally. + * This is typically accomplished by synchronizing on some object that + * naturally encapsulates the set. + * + * If no such object exists, the set should be "wrapped" using the + * {@link Collections#synchronizedSet Collections.synchronizedSet} + * method. This is best done at creation time, to prevent accidental + * unsynchronized access to the set:
+ *   Set s = Collections.synchronizedSet(new HashSet(...));</pre>
+ * + *
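Note that the wrapper synchronizes only individual operations; traversal still requires holding its lock, for example (SyncSetDemo is an invented name):

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    class SyncSetDemo {
        public static void main(String[] args) {
            Set<String> s = Collections.synchronizedSet(new HashSet<>());
            s.add("a");
            s.add("b");
            synchronized (s) {      // hold the wrapper's lock for the whole traversal
                for (String e : s)
                    System.out.println(e);
            }
        }
    }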
The iterators returned by this class's iterator method are + * fail-fast: if the set is modified at any time after the iterator is + * created, in any way except through the iterator's own remove + * method, the Iterator throws a {@link ConcurrentModificationException}. + * Thus, in the face of concurrent modification, the iterator fails quickly + * and cleanly, rather than risking arbitrary, non-deterministic behavior at + * an undetermined time in the future. + * + *
Note that the fail-fast behavior of an iterator cannot be guaranteed + * as it is, generally speaking, impossible to make any hard guarantees in the + * presence of unsynchronized concurrent modification. Fail-fast iterators + * throw ConcurrentModificationException on a best-effort basis. + * Therefore, it would be wrong to write a program that depended on this + * exception for its correctness: the fail-fast behavior of iterators + * should be used only to detect bugs. + * + *
This class is a member of the + * + * Java Collections Framework. + * + * @param the type of elements maintained by this set + * + * @author Josh Bloch + * @author Neal Gafter + * @see Collection + * @see Set + * @see TreeSet + * @see HashMap + * @since 1.2 + */ + public class HashSet extends AbstractSet implements Set, Cloneable, java.io.Serializable @@ -163,13 +249,14 @@ public void clear() { * * @return a shallow copy of this set */ + @SuppressWarnings("unchecked") public Object clone() { try { HashSet newSet = (HashSet) super.clone(); newSet.map = (HashMap) map.clone(); return newSet; } catch (CloneNotSupportedException e) { - throw new InternalError(); + throw new InternalError(e); } } @@ -209,20 +296,58 @@ private void readObject(java.io.ObjectInputStream s) // Read in any hidden serialization magic s.defaultReadObject(); - // Read in HashMap capacity and load factor and create backing HashMap + // Read capacity and verify non-negative. int capacity = s.readInt(); + if (capacity < 0) { + throw new InvalidObjectException("Illegal capacity: " + + capacity); + } + + // Read load factor and verify positive and non NaN. float loadFactor = s.readFloat(); - map = (((HashSet)this) instanceof LinkedHashSet ? - new LinkedHashMap(capacity, loadFactor) : - new HashMap(capacity, loadFactor)); + if (loadFactor <= 0 || Float.isNaN(loadFactor)) { + throw new InvalidObjectException("Illegal load factor: " + + loadFactor); + } - // Read in size + // Read size and verify non-negative. int size = s.readInt(); + if (size < 0) { + throw new InvalidObjectException("Illegal size: " + + size); + } + + // Set the capacity according to the size and load factor ensuring that + // the HashMap is at least 25% full but clamping to maximum capacity. + capacity = (int) Math.min(size * Math.min(1 / loadFactor, 4.0f), + HashMap.MAXIMUM_CAPACITY); + + // Create backing HashMap + map = (((HashSet)this) instanceof LinkedHashSet ? + new LinkedHashMap(capacity, loadFactor) : + new HashMap(capacity, loadFactor)); // Read in all elements in the proper order. for (int i=0; ilate-binding + * and fail-fast {@link Spliterator} over the elements in this + * set. + * + *

The {@code Spliterator} reports {@link Spliterator#SIZED} and + * {@link Spliterator#DISTINCT}. Overriding implementations should document + * the reporting of additional characteristic values. + * + * @return a {@code Spliterator} over the elements in this set + * @since 1.8 + */ + public Spliterator spliterator() { + return new HashMap.KeySpliterator(map, 0, -1, 0, 0); + } } diff --git a/src/Hashtable.java b/src/Hashtable.java new file mode 100644 index 0000000..b184061 --- /dev/null +++ b/src/Hashtable.java @@ -0,0 +1,1402 @@ +/* + * Copyright (c) 1994, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +package java.util; + +import java.io.*; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.function.BiFunction; + +/** + * This class implements a hash table, which maps keys to values. Any + * non-null object can be used as a key or as a value.
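+ *
+ * For example (an illustrative sketch), both of the following fail with
+ * a {@code NullPointerException}:
+ *  {@code
+ *   Hashtable<String, String> t = new Hashtable<>();
+ *   t.put(null, "x");   // null keys are rejected
+ *   t.put("x", null);   // null values are rejected}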

+ * + * To successfully store and retrieve objects from a hashtable, the + * objects used as keys must implement the hashCode + * method and the equals method.
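+ *
+ * An illustrative sketch of a key type that honors this contract (a
+ * hypothetical class, not part of the platform):
+ *  {@code
+ *   final class Point {
+ *       final int x, y;
+ *       Point(int x, int y) { this.x = x; this.y = y; }
+ *       @Override public boolean equals(Object o) {
+ *           return o instanceof Point
+ *                  && ((Point) o).x == x && ((Point) o).y == y;
+ *       }
+ *       @Override public int hashCode() { return 31 * x + y; }
+ *   }}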

+ * + * An instance of Hashtable has two parameters that affect its + * performance: initial capacity and load factor. The + * capacity is the number of buckets in the hash table, and the + * initial capacity is simply the capacity at the time the hash table + * is created. Note that the hash table is open: in the case of a "hash + * collision", a single bucket stores multiple entries, which must be searched + * sequentially. The load factor is a measure of how full the hash + * table is allowed to get before its capacity is automatically increased. + * The initial capacity and load factor parameters are merely hints to + * the implementation. The exact details as to when and whether the rehash + * method is invoked are implementation-dependent.

+ * + * Generally, the default load factor (.75) offers a good tradeoff between + * time and space costs. Higher values decrease the space overhead but + * increase the time cost to look up an entry (which is reflected in most + * Hashtable operations, including get and put).

+ * + * The initial capacity controls a tradeoff between wasted space and the + * need for rehash operations, which are time-consuming. + * No rehash operations will ever occur if the initial + * capacity is greater than the maximum number of entries the + * Hashtable will contain divided by its load factor. However, + * setting the initial capacity too high can waste space.

+ * + * If many entries are to be made into a Hashtable, + * creating it with a sufficiently large capacity may allow the + * entries to be inserted more efficiently than letting it perform + * automatic rehashing as needed to grow the table.
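+ *
+ * For example (an illustrative sketch), to insert roughly 1000 entries
+ * without any intermediate rehashing under the default load factor:
+ *  {@code
+ *   int expected = 1000;   // assumed number of entries
+ *   Hashtable<String, Integer> t =
+ *       new Hashtable<>((int) (expected / 0.75f) + 1);}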

+ * + * This example creates a hashtable of numbers. It uses the names of + * the numbers as keys: + *

   {@code
+ *   Hashtable<String, Integer> numbers
+ *     = new Hashtable<String, Integer>();
+ *   numbers.put("one", 1);
+ *   numbers.put("two", 2);
+ *   numbers.put("three", 3);}
+ * + *

To retrieve a number, use the following code: + *

   {@code
+ *   Integer n = numbers.get("two");
+ *   if (n != null) {
+ *     System.out.println("two = " + n);
+ *   }}
+ * + *

The iterators returned by the iterator method of the collections + * returned by all of this class's "collection view methods" are + * fail-fast: if the Hashtable is structurally modified at any time + * after the iterator is created, in any way except through the iterator's own + * remove method, the iterator will throw a {@link + * ConcurrentModificationException}. Thus, in the face of concurrent + * modification, the iterator fails quickly and cleanly, rather than risking + * arbitrary, non-deterministic behavior at an undetermined time in the future. + * The Enumerations returned by Hashtable's keys and elements methods are + * not fail-fast. + * + *
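+ * For example (an illustrative sketch; {@code t} is an assumed
+ * {@code Hashtable<String, Integer>}):
+ *  {@code
+ *   Enumeration<String> keys = t.keys();   // legacy view, not fail-fast
+ *   while (keys.hasMoreElements())
+ *       System.out.println(keys.nextElement());
+ *
+ *   for (String k : t.keySet())            // collection view, fail-fast
+ *       System.out.println(k);}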

Note that the fail-fast behavior of an iterator cannot be guaranteed + * as it is, generally speaking, impossible to make any hard guarantees in the + * presence of unsynchronized concurrent modification. Fail-fast iterators + * throw ConcurrentModificationException on a best-effort basis. + * Therefore, it would be wrong to write a program that depended on this + * exception for its correctness: the fail-fast behavior of iterators + * should be used only to detect bugs. + * + *
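+ * In practice, even though the individual methods of this class are
+ * synchronized, a traversal is a sequence of calls; when other threads
+ * may modify the table concurrently, synchronize the whole traversal on
+ * the table itself (an illustrative sketch, {@code t} as above):
+ *  {@code
+ *   synchronized (t) {
+ *       Iterator<String> it = t.keySet().iterator();
+ *       while (it.hasNext())
+ *           if (it.next().startsWith("tmp"))
+ *               it.remove();   // removal through the iterator is safe
+ *   }}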

As of the Java 2 platform v1.2, this class was retrofitted to + * implement the {@link Map} interface, making it a member of the + * + * + * Java Collections Framework. Unlike the new collection + * implementations, {@code Hashtable} is synchronized. If a + * thread-safe implementation is not needed, it is recommended to use + * {@link HashMap} in place of {@code Hashtable}. If a thread-safe + * highly-concurrent implementation is desired, then it is recommended + * to use {@link java.util.concurrent.ConcurrentHashMap} in place of + * {@code Hashtable}. + * + * @author Arthur van Hoff + * @author Josh Bloch + * @author Neal Gafter + * @see Object#equals(java.lang.Object) + * @see Object#hashCode() + * @see Hashtable#rehash() + * @see Collection + * @see Map + * @see HashMap + * @see TreeMap + * @since JDK1.0 + */ +public class Hashtable + extends Dictionary + implements Map, Cloneable, java.io.Serializable { + + /** + * The hash table data. + */ + private transient Entry[] table; + + /** + * The total number of entries in the hash table. + */ + private transient int count; + + /** + * The table is rehashed when its size exceeds this threshold. (The + * value of this field is (int)(capacity * loadFactor).) + * + * @serial + */ + private int threshold; + + /** + * The load factor for the hashtable. + * + * @serial + */ + private float loadFactor; + + /** + * The number of times this Hashtable has been structurally modified + * Structural modifications are those that change the number of entries in + * the Hashtable or otherwise modify its internal structure (e.g., + * rehash). This field is used to make iterators on Collection-views of + * the Hashtable fail-fast. (See ConcurrentModificationException). + */ + private transient int modCount = 0; + + /** use serialVersionUID from JDK 1.0.2 for interoperability */ + private static final long serialVersionUID = 1421746759512286392L; + + /** + * Constructs a new, empty hashtable with the specified initial + * capacity and the specified load factor. + * + * @param initialCapacity the initial capacity of the hashtable. + * @param loadFactor the load factor of the hashtable. + * @exception IllegalArgumentException if the initial capacity is less + * than zero, or if the load factor is nonpositive. + */ + public Hashtable(int initialCapacity, float loadFactor) { + if (initialCapacity < 0) + throw new IllegalArgumentException("Illegal Capacity: "+ + initialCapacity); + if (loadFactor <= 0 || Float.isNaN(loadFactor)) + throw new IllegalArgumentException("Illegal Load: "+loadFactor); + + if (initialCapacity==0) + initialCapacity = 1; + this.loadFactor = loadFactor; + table = new Entry[initialCapacity]; + threshold = (int)Math.min(initialCapacity * loadFactor, MAX_ARRAY_SIZE + 1); + } + + /** + * Constructs a new, empty hashtable with the specified initial capacity + * and default load factor (0.75). + * + * @param initialCapacity the initial capacity of the hashtable. + * @exception IllegalArgumentException if the initial capacity is less + * than zero. + */ + public Hashtable(int initialCapacity) { + this(initialCapacity, 0.75f); + } + + /** + * Constructs a new, empty hashtable with a default initial capacity (11) + * and load factor (0.75). + */ + public Hashtable() { + this(11, 0.75f); + } + + /** + * Constructs a new hashtable with the same mappings as the given + * Map. The hashtable is created with an initial capacity sufficient to + * hold the mappings in the given Map and a default load factor (0.75). 
+ * + * @param t the map whose mappings are to be placed in this map. + * @throws NullPointerException if the specified map is null. + * @since 1.2 + */ + public Hashtable(Map t) { + this(Math.max(2*t.size(), 11), 0.75f); + putAll(t); + } + + /** + * Returns the number of keys in this hashtable. + * + * @return the number of keys in this hashtable. + */ + public synchronized int size() { + return count; + } + + /** + * Tests if this hashtable maps no keys to values. + * + * @return true if this hashtable maps no keys to values; + * false otherwise. + */ + public synchronized boolean isEmpty() { + return count == 0; + } + + /** + * Returns an enumeration of the keys in this hashtable. + * + * @return an enumeration of the keys in this hashtable. + * @see Enumeration + * @see #elements() + * @see #keySet() + * @see Map + */ + public synchronized Enumeration keys() { + return this.getEnumeration(KEYS); + } + + /** + * Returns an enumeration of the values in this hashtable. + * Use the Enumeration methods on the returned object to fetch the elements + * sequentially. + * + * @return an enumeration of the values in this hashtable. + * @see java.util.Enumeration + * @see #keys() + * @see #values() + * @see Map + */ + public synchronized Enumeration elements() { + return this.getEnumeration(VALUES); + } + + /** + * Tests if some key maps into the specified value in this hashtable. + * This operation is more expensive than the {@link #containsKey + * containsKey} method. + * + *

Note that this method is identical in functionality to + * {@link #containsValue containsValue}, (which is part of the + * {@link Map} interface in the collections framework). + * + * @param value a value to search for + * @return true if and only if some key maps to the + * value argument in this hashtable as + * determined by the equals method; + * false otherwise. + * @exception NullPointerException if the value is null + */ + public synchronized boolean contains(Object value) { + if (value == null) { + throw new NullPointerException(); + } + + Entry tab[] = table; + for (int i = tab.length ; i-- > 0 ;) { + for (Entry e = tab[i] ; e != null ; e = e.next) { + if (e.value.equals(value)) { + return true; + } + } + } + return false; + } + + /** + * Returns true if this hashtable maps one or more keys to this value. + * + *

Note that this method is identical in functionality to {@link + * #contains contains} (which predates the {@link Map} interface). + * + * @param value value whose presence in this hashtable is to be tested + * @return true if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the value is null + * @since 1.2 + */ + public boolean containsValue(Object value) { + return contains(value); + } + + /** + * Tests if the specified object is a key in this hashtable. + * + * @param key possible key + * @return true if and only if the specified object + * is a key in this hashtable, as determined by the + * equals method; false otherwise. + * @throws NullPointerException if the key is null + * @see #contains(Object) + */ + public synchronized boolean containsKey(Object key) { + Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + for (Entry e = tab[index] ; e != null ; e = e.next) { + if ((e.hash == hash) && e.key.equals(key)) { + return true; + } + } + return false; + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *

More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code (key.equals(k))}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @param key the key whose associated value is to be returned + * @return the value to which the specified key is mapped, or + * {@code null} if this map contains no mapping for the key + * @throws NullPointerException if the specified key is null + * @see #put(Object, Object) + */ + @SuppressWarnings("unchecked") + public synchronized V get(Object key) { + Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + for (Entry e = tab[index] ; e != null ; e = e.next) { + if ((e.hash == hash) && e.key.equals(key)) { + return (V)e.value; + } + } + return null; + } + + /** + * The maximum size of array to allocate. + * Some VMs reserve some header words in an array. + * Attempts to allocate larger arrays may result in + * OutOfMemoryError: Requested array size exceeds VM limit + */ + private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * Increases the capacity of and internally reorganizes this + * hashtable, in order to accommodate and access its entries more + * efficiently. This method is called automatically when the + * number of keys in the hashtable exceeds this hashtable's capacity + * and load factor. + */ + @SuppressWarnings("unchecked") + protected void rehash() { + int oldCapacity = table.length; + Entry[] oldMap = table; + + // overflow-conscious code + int newCapacity = (oldCapacity << 1) + 1; + if (newCapacity - MAX_ARRAY_SIZE > 0) { + if (oldCapacity == MAX_ARRAY_SIZE) + // Keep running with MAX_ARRAY_SIZE buckets + return; + newCapacity = MAX_ARRAY_SIZE; + } + Entry[] newMap = new Entry[newCapacity]; + + modCount++; + threshold = (int)Math.min(newCapacity * loadFactor, MAX_ARRAY_SIZE + 1); + table = newMap; + + for (int i = oldCapacity ; i-- > 0 ;) { + for (Entry old = (Entry)oldMap[i] ; old != null ; ) { + Entry e = old; + old = old.next; + + int index = (e.hash & 0x7FFFFFFF) % newCapacity; + e.next = (Entry)newMap[index]; + newMap[index] = e; + } + } + } + + private void addEntry(int hash, K key, V value, int index) { + modCount++; + + Entry tab[] = table; + if (count >= threshold) { + // Rehash the table if the threshold is exceeded + rehash(); + + tab = table; + hash = key.hashCode(); + index = (hash & 0x7FFFFFFF) % tab.length; + } + + // Creates the new entry. + @SuppressWarnings("unchecked") + Entry e = (Entry) tab[index]; + tab[index] = new Entry<>(hash, key, value, e); + count++; + } + + /** + * Maps the specified key to the specified + * value in this hashtable. Neither the key nor the + * value can be null.

+ * + * The value can be retrieved by calling the get method + * with a key that is equal to the original key. + * + * @param key the hashtable key + * @param value the value + * @return the previous value of the specified key in this hashtable, + * or null if it did not have one + * @exception NullPointerException if the key or value is + * null + * @see Object#equals(Object) + * @see #get(Object) + */ + public synchronized V put(K key, V value) { + // Make sure the value is not null + if (value == null) { + throw new NullPointerException(); + } + + // Makes sure the key is not already in the hashtable. + Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + @SuppressWarnings("unchecked") + Entry entry = (Entry)tab[index]; + for(; entry != null ; entry = entry.next) { + if ((entry.hash == hash) && entry.key.equals(key)) { + V old = entry.value; + entry.value = value; + return old; + } + } + + addEntry(hash, key, value, index); + return null; + } + + /** + * Removes the key (and its corresponding value) from this + * hashtable. This method does nothing if the key is not in the hashtable. + * + * @param key the key that needs to be removed + * @return the value to which the key had been mapped in this hashtable, + * or null if the key did not have a mapping + * @throws NullPointerException if the key is null + */ + public synchronized V remove(Object key) { + Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + @SuppressWarnings("unchecked") + Entry e = (Entry)tab[index]; + for(Entry prev = null ; e != null ; prev = e, e = e.next) { + if ((e.hash == hash) && e.key.equals(key)) { + modCount++; + if (prev != null) { + prev.next = e.next; + } else { + tab[index] = e.next; + } + count--; + V oldValue = e.value; + e.value = null; + return oldValue; + } + } + return null; + } + + /** + * Copies all of the mappings from the specified map to this hashtable. + * These mappings will replace any mappings that this hashtable had for any + * of the keys currently in the specified map. + * + * @param t mappings to be stored in this map + * @throws NullPointerException if the specified map is null + * @since 1.2 + */ + public synchronized void putAll(Map t) { + for (Map.Entry e : t.entrySet()) + put(e.getKey(), e.getValue()); + } + + /** + * Clears this hashtable so that it contains no keys. + */ + public synchronized void clear() { + Entry tab[] = table; + modCount++; + for (int index = tab.length; --index >= 0; ) + tab[index] = null; + count = 0; + } + + /** + * Creates a shallow copy of this hashtable. All the structure of the + * hashtable itself is copied, but the keys and values are not cloned. + * This is a relatively expensive operation. + * + * @return a clone of the hashtable + */ + public synchronized Object clone() { + try { + Hashtable t = (Hashtable)super.clone(); + t.table = new Entry[table.length]; + for (int i = table.length ; i-- > 0 ; ) { + t.table[i] = (table[i] != null) + ? (Entry) table[i].clone() : null; + } + t.keySet = null; + t.entrySet = null; + t.values = null; + t.modCount = 0; + return t; + } catch (CloneNotSupportedException e) { + // this shouldn't happen, since we are Cloneable + throw new InternalError(e); + } + } + + /** + * Returns a string representation of this Hashtable object + * in the form of a set of entries, enclosed in braces and separated + * by the ASCII characters "" (comma and space). 
Each + * entry is rendered as the key, an equals sign =, and the + * associated element, where the toString method is used to + * convert the key and element to strings. + * + * @return a string representation of this hashtable + */ + public synchronized String toString() { + int max = size() - 1; + if (max == -1) + return "{}"; + + StringBuilder sb = new StringBuilder(); + Iterator> it = entrySet().iterator(); + + sb.append('{'); + for (int i = 0; ; i++) { + Map.Entry e = it.next(); + K key = e.getKey(); + V value = e.getValue(); + sb.append(key == this ? "(this Map)" : key.toString()); + sb.append('='); + sb.append(value == this ? "(this Map)" : value.toString()); + + if (i == max) + return sb.append('}').toString(); + sb.append(", "); + } + } + + + private Enumeration getEnumeration(int type) { + if (count == 0) { + return Collections.emptyEnumeration(); + } else { + return new Enumerator<>(type, false); + } + } + + private Iterator getIterator(int type) { + if (count == 0) { + return Collections.emptyIterator(); + } else { + return new Enumerator<>(type, true); + } + } + + // Views + + /** + * Each of these fields are initialized to contain an instance of the + * appropriate view the first time this view is requested. The views are + * stateless, so there's no reason to create more than one of each. + */ + private transient volatile Set keySet; + private transient volatile Set> entrySet; + private transient volatile Collection values; + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation), the results of + * the iteration are undefined. The set supports element removal, + * which removes the corresponding mapping from the map, via the + * Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. It does not support the add or addAll + * operations. + * + * @since 1.2 + */ + public Set keySet() { + if (keySet == null) + keySet = Collections.synchronizedSet(new KeySet(), this); + return keySet; + } + + private class KeySet extends AbstractSet { + public Iterator iterator() { + return getIterator(KEYS); + } + public int size() { + return count; + } + public boolean contains(Object o) { + return containsKey(o); + } + public boolean remove(Object o) { + return Hashtable.this.remove(o) != null; + } + public void clear() { + Hashtable.this.clear(); + } + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation, or through the + * setValue operation on a map entry returned by the + * iterator) the results of the iteration are undefined. The set + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Set.remove, removeAll, retainAll and + * clear operations. It does not support the + * add or addAll operations. 
+ * + * @since 1.2 + */ + public Set> entrySet() { + if (entrySet==null) + entrySet = Collections.synchronizedSet(new EntrySet(), this); + return entrySet; + } + + private class EntrySet extends AbstractSet> { + public Iterator> iterator() { + return getIterator(ENTRIES); + } + + public boolean add(Map.Entry o) { + return super.add(o); + } + + public boolean contains(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Map.Entry entry = (Map.Entry)o; + Object key = entry.getKey(); + Entry[] tab = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + + for (Entry e = tab[index]; e != null; e = e.next) + if (e.hash==hash && e.equals(entry)) + return true; + return false; + } + + public boolean remove(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Map.Entry entry = (Map.Entry) o; + Object key = entry.getKey(); + Entry[] tab = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + + @SuppressWarnings("unchecked") + Entry e = (Entry)tab[index]; + for(Entry prev = null; e != null; prev = e, e = e.next) { + if (e.hash==hash && e.equals(entry)) { + modCount++; + if (prev != null) + prev.next = e.next; + else + tab[index] = e.next; + + count--; + e.value = null; + return true; + } + } + return false; + } + + public int size() { + return count; + } + + public void clear() { + Hashtable.this.clear(); + } + } + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. If the map is + * modified while an iteration over the collection is in progress + * (except through the iterator's own remove operation), + * the results of the iteration are undefined. The collection + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Collection.remove, removeAll, + * retainAll and clear operations. It does not + * support the add or addAll operations. + * + * @since 1.2 + */ + public Collection values() { + if (values==null) + values = Collections.synchronizedCollection(new ValueCollection(), + this); + return values; + } + + private class ValueCollection extends AbstractCollection { + public Iterator iterator() { + return getIterator(VALUES); + } + public int size() { + return count; + } + public boolean contains(Object o) { + return containsValue(o); + } + public void clear() { + Hashtable.this.clear(); + } + } + + // Comparison and hashing + + /** + * Compares the specified Object with this Map for equality, + * as per the definition in the Map interface. 
+ * + * @param o object to be compared for equality with this hashtable + * @return true if the specified Object is equal to this Map + * @see Map#equals(Object) + * @since 1.2 + */ + public synchronized boolean equals(Object o) { + if (o == this) + return true; + + if (!(o instanceof Map)) + return false; + Map t = (Map) o; + if (t.size() != size()) + return false; + + try { + Iterator> i = entrySet().iterator(); + while (i.hasNext()) { + Map.Entry e = i.next(); + K key = e.getKey(); + V value = e.getValue(); + if (value == null) { + if (!(t.get(key)==null && t.containsKey(key))) + return false; + } else { + if (!value.equals(t.get(key))) + return false; + } + } + } catch (ClassCastException unused) { + return false; + } catch (NullPointerException unused) { + return false; + } + + return true; + } + + /** + * Returns the hash code value for this Map as per the definition in the + * Map interface. + * + * @see Map#hashCode() + * @since 1.2 + */ + public synchronized int hashCode() { + /* + * This code detects the recursion caused by computing the hash code + * of a self-referential hash table and prevents the stack overflow + * that would otherwise result. This allows certain 1.1-era + * applets with self-referential hash tables to work. This code + * abuses the loadFactor field to do double-duty as a hashCode + * in progress flag, so as not to worsen the space performance. + * A negative load factor indicates that hash code computation is + * in progress. + */ + int h = 0; + if (count == 0 || loadFactor < 0) + return h; // Returns zero + + loadFactor = -loadFactor; // Mark hashCode computation in progress + Entry[] tab = table; + for (Entry entry : tab) { + while (entry != null) { + h += entry.hashCode(); + entry = entry.next; + } + } + + loadFactor = -loadFactor; // Mark hashCode computation complete + + return h; + } + + @Override + public synchronized V getOrDefault(Object key, V defaultValue) { + V result = get(key); + return (null == result) ? defaultValue : result; + } + + @SuppressWarnings("unchecked") + @Override + public synchronized void forEach(BiConsumer action) { + Objects.requireNonNull(action); // explicit check required in case + // table is empty. + final int expectedModCount = modCount; + + Entry[] tab = table; + for (Entry entry : tab) { + while (entry != null) { + action.accept((K)entry.key, (V)entry.value); + entry = entry.next; + + if (expectedModCount != modCount) { + throw new ConcurrentModificationException(); + } + } + } + } + + @SuppressWarnings("unchecked") + @Override + public synchronized void replaceAll(BiFunction function) { + Objects.requireNonNull(function); // explicit check required in case + // table is empty. + final int expectedModCount = modCount; + + Entry[] tab = (Entry[])table; + for (Entry entry : tab) { + while (entry != null) { + entry.value = Objects.requireNonNull( + function.apply(entry.key, entry.value)); + entry = entry.next; + + if (expectedModCount != modCount) { + throw new ConcurrentModificationException(); + } + } + } + } + + @Override + public synchronized V putIfAbsent(K key, V value) { + Objects.requireNonNull(value); + + // Makes sure the key is not already in the hashtable. 
+ Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + @SuppressWarnings("unchecked") + Entry entry = (Entry)tab[index]; + for (; entry != null; entry = entry.next) { + if ((entry.hash == hash) && entry.key.equals(key)) { + V old = entry.value; + if (old == null) { + entry.value = value; + } + return old; + } + } + + addEntry(hash, key, value, index); + return null; + } + + @Override + public synchronized boolean remove(Object key, Object value) { + Objects.requireNonNull(value); + + Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + @SuppressWarnings("unchecked") + Entry e = (Entry)tab[index]; + for (Entry prev = null; e != null; prev = e, e = e.next) { + if ((e.hash == hash) && e.key.equals(key) && e.value.equals(value)) { + modCount++; + if (prev != null) { + prev.next = e.next; + } else { + tab[index] = e.next; + } + count--; + e.value = null; + return true; + } + } + return false; + } + + @Override + public synchronized boolean replace(K key, V oldValue, V newValue) { + Objects.requireNonNull(oldValue); + Objects.requireNonNull(newValue); + Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + @SuppressWarnings("unchecked") + Entry e = (Entry)tab[index]; + for (; e != null; e = e.next) { + if ((e.hash == hash) && e.key.equals(key)) { + if (e.value.equals(oldValue)) { + e.value = newValue; + return true; + } else { + return false; + } + } + } + return false; + } + + @Override + public synchronized V replace(K key, V value) { + Objects.requireNonNull(value); + Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + @SuppressWarnings("unchecked") + Entry e = (Entry)tab[index]; + for (; e != null; e = e.next) { + if ((e.hash == hash) && e.key.equals(key)) { + V oldValue = e.value; + e.value = value; + return oldValue; + } + } + return null; + } + + @Override + public synchronized V computeIfAbsent(K key, Function mappingFunction) { + Objects.requireNonNull(mappingFunction); + + Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + @SuppressWarnings("unchecked") + Entry e = (Entry)tab[index]; + for (; e != null; e = e.next) { + if (e.hash == hash && e.key.equals(key)) { + // Hashtable not accept null value + return e.value; + } + } + + V newValue = mappingFunction.apply(key); + if (newValue != null) { + addEntry(hash, key, newValue, index); + } + + return newValue; + } + + @Override + public synchronized V computeIfPresent(K key, BiFunction remappingFunction) { + Objects.requireNonNull(remappingFunction); + + Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + @SuppressWarnings("unchecked") + Entry e = (Entry)tab[index]; + for (Entry prev = null; e != null; prev = e, e = e.next) { + if (e.hash == hash && e.key.equals(key)) { + V newValue = remappingFunction.apply(key, e.value); + if (newValue == null) { + modCount++; + if (prev != null) { + prev.next = e.next; + } else { + tab[index] = e.next; + } + count--; + } else { + e.value = newValue; + } + return newValue; + } + } + return null; + } + + @Override + public synchronized V compute(K key, BiFunction remappingFunction) { + Objects.requireNonNull(remappingFunction); + + Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + @SuppressWarnings("unchecked") + Entry e = (Entry)tab[index]; + for (Entry prev = null; e != 
null; prev = e, e = e.next) { + if (e.hash == hash && Objects.equals(e.key, key)) { + V newValue = remappingFunction.apply(key, e.value); + if (newValue == null) { + modCount++; + if (prev != null) { + prev.next = e.next; + } else { + tab[index] = e.next; + } + count--; + } else { + e.value = newValue; + } + return newValue; + } + } + + V newValue = remappingFunction.apply(key, null); + if (newValue != null) { + addEntry(hash, key, newValue, index); + } + + return newValue; + } + + @Override + public synchronized V merge(K key, V value, BiFunction remappingFunction) { + Objects.requireNonNull(remappingFunction); + + Entry tab[] = table; + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + @SuppressWarnings("unchecked") + Entry e = (Entry)tab[index]; + for (Entry prev = null; e != null; prev = e, e = e.next) { + if (e.hash == hash && e.key.equals(key)) { + V newValue = remappingFunction.apply(e.value, value); + if (newValue == null) { + modCount++; + if (prev != null) { + prev.next = e.next; + } else { + tab[index] = e.next; + } + count--; + } else { + e.value = newValue; + } + return newValue; + } + } + + if (value != null) { + addEntry(hash, key, value, index); + } + + return value; + } + + /** + * Save the state of the Hashtable to a stream (i.e., serialize it). + * + * @serialData The capacity of the Hashtable (the length of the + * bucket array) is emitted (int), followed by the + * size of the Hashtable (the number of key-value + * mappings), followed by the key (Object) and value (Object) + * for each key-value mapping represented by the Hashtable + * The key-value mappings are emitted in no particular order. + */ + private void writeObject(java.io.ObjectOutputStream s) + throws IOException { + Entry entryStack = null; + + synchronized (this) { + // Write out the length, threshold, loadfactor + s.defaultWriteObject(); + + // Write out length, count of elements + s.writeInt(table.length); + s.writeInt(count); + + // Stack copies of the entries in the table + for (int index = 0; index < table.length; index++) { + Entry entry = table[index]; + + while (entry != null) { + entryStack = + new Entry<>(0, entry.key, entry.value, entryStack); + entry = entry.next; + } + } + } + + // Write out the key/value objects from the stacked entries + while (entryStack != null) { + s.writeObject(entryStack.key); + s.writeObject(entryStack.value); + entryStack = entryStack.next; + } + } + + /** + * Reconstitute the Hashtable from a stream (i.e., deserialize it). + */ + private void readObject(java.io.ObjectInputStream s) + throws IOException, ClassNotFoundException + { + // Read in the length, threshold, and loadfactor + s.defaultReadObject(); + + // Read the original length of the array and number of elements + int origlength = s.readInt(); + int elements = s.readInt(); + + // Compute new size with a bit of room 5% to grow but + // no larger than the original size. Make the length + // odd if it's large enough, this helps distribute the entries. + // Guard against the length ending up zero, that's not valid. 
+ int length = (int)(elements * loadFactor) + (elements / 20) + 3; + if (length > elements && (length & 1) == 0) + length--; + if (origlength > 0 && length > origlength) + length = origlength; + table = new Entry[length]; + threshold = (int)Math.min(length * loadFactor, MAX_ARRAY_SIZE + 1); + count = 0; + + // Read the number of elements and then all the key/value objects + for (; elements > 0; elements--) { + @SuppressWarnings("unchecked") + K key = (K)s.readObject(); + @SuppressWarnings("unchecked") + V value = (V)s.readObject(); + // synch could be eliminated for performance + reconstitutionPut(table, key, value); + } + } + + /** + * The put method used by readObject. This is provided because put + * is overridable and should not be called in readObject since the + * subclass will not yet be initialized. + * + *

This differs from the regular put method in several ways. No + * checking for rehashing is necessary since the number of elements + * initially in the table is known. The modCount is not incremented + * because we are creating a new instance. Also, no return value + * is needed. + */ + private void reconstitutionPut(Entry[] tab, K key, V value) + throws StreamCorruptedException + { + if (value == null) { + throw new java.io.StreamCorruptedException(); + } + // Makes sure the key is not already in the hashtable. + // This should not happen in deserialized version. + int hash = key.hashCode(); + int index = (hash & 0x7FFFFFFF) % tab.length; + for (Entry e = tab[index] ; e != null ; e = e.next) { + if ((e.hash == hash) && e.key.equals(key)) { + throw new java.io.StreamCorruptedException(); + } + } + // Creates the new entry. + @SuppressWarnings("unchecked") + Entry e = (Entry)tab[index]; + tab[index] = new Entry<>(hash, key, value, e); + count++; + } + + /** + * Hashtable bucket collision list entry + */ + private static class Entry implements Map.Entry { + final int hash; + final K key; + V value; + Entry next; + + protected Entry(int hash, K key, V value, Entry next) { + this.hash = hash; + this.key = key; + this.value = value; + this.next = next; + } + + @SuppressWarnings("unchecked") + protected Object clone() { + return new Entry<>(hash, key, value, + (next==null ? null : (Entry) next.clone())); + } + + // Map.Entry Ops + + public K getKey() { + return key; + } + + public V getValue() { + return value; + } + + public V setValue(V value) { + if (value == null) + throw new NullPointerException(); + + V oldValue = this.value; + this.value = value; + return oldValue; + } + + public boolean equals(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Map.Entry e = (Map.Entry)o; + + return (key==null ? e.getKey()==null : key.equals(e.getKey())) && + (value==null ? e.getValue()==null : value.equals(e.getValue())); + } + + public int hashCode() { + return hash ^ Objects.hashCode(value); + } + + public String toString() { + return key.toString()+"="+value.toString(); + } + } + + // Types of Enumerations/Iterations + private static final int KEYS = 0; + private static final int VALUES = 1; + private static final int ENTRIES = 2; + + /** + * A hashtable enumerator class. This class implements both the + * Enumeration and Iterator interfaces, but individual instances + * can be created with the Iterator methods disabled. This is necessary + * to avoid unintentionally increasing the capabilities granted a user + * by passing an Enumeration. + */ + private class Enumerator implements Enumeration, Iterator { + Entry[] table = Hashtable.this.table; + int index = table.length; + Entry entry; + Entry lastReturned; + int type; + + /** + * Indicates whether this Enumerator is serving as an Iterator + * or an Enumeration. (true -> Iterator). + */ + boolean iterator; + + /** + * The modCount value that the iterator believes that the backing + * Hashtable should have. If this expectation is violated, the iterator + * has detected concurrent modification. 
+ */ + protected int expectedModCount = modCount; + + Enumerator(int type, boolean iterator) { + this.type = type; + this.iterator = iterator; + } + + public boolean hasMoreElements() { + Entry e = entry; + int i = index; + Entry[] t = table; + /* Use locals for faster loop iteration */ + while (e == null && i > 0) { + e = t[--i]; + } + entry = e; + index = i; + return e != null; + } + + @SuppressWarnings("unchecked") + public T nextElement() { + Entry et = entry; + int i = index; + Entry[] t = table; + /* Use locals for faster loop iteration */ + while (et == null && i > 0) { + et = t[--i]; + } + entry = et; + index = i; + if (et != null) { + Entry e = lastReturned = entry; + entry = e.next; + return type == KEYS ? (T)e.key : (type == VALUES ? (T)e.value : (T)e); + } + throw new NoSuchElementException("Hashtable Enumerator"); + } + + // Iterator methods + public boolean hasNext() { + return hasMoreElements(); + } + + public T next() { + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + return nextElement(); + } + + public void remove() { + if (!iterator) + throw new UnsupportedOperationException(); + if (lastReturned == null) + throw new IllegalStateException("Hashtable Enumerator"); + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + + synchronized(Hashtable.this) { + Entry[] tab = Hashtable.this.table; + int index = (lastReturned.hash & 0x7FFFFFFF) % tab.length; + + @SuppressWarnings("unchecked") + Entry e = (Entry)tab[index]; + for(Entry prev = null; e != null; prev = e, e = e.next) { + if (e == lastReturned) { + modCount++; + expectedModCount++; + if (prev == null) + tab[index] = e.next; + else + prev.next = e.next; + count--; + lastReturned = null; + return; + } + } + throw new ConcurrentModificationException(); + } + } + } +} diff --git a/src/Iterator.java b/src/Iterator.java new file mode 100644 index 0000000..862b582 --- /dev/null +++ b/src/Iterator.java @@ -0,0 +1,118 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +package java.util; + +import java.util.function.Consumer; + +/** + * An iterator over a collection. {@code Iterator} takes the place of + * {@link Enumeration} in the Java Collections Framework. Iterators + * differ from enumerations in two ways: + * + *

    + *
+ *   • Iterators allow the caller to remove elements from the
+ *     underlying collection during the iteration with well-defined
+ *     semantics.
+ *   • Method names have been improved.
+ *
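+ * For example (an illustrative sketch; {@code c} is an assumed
+ * {@code Collection<String>}):
+ *  {@code
+ *   Iterator<String> it = c.iterator();
+ *   while (it.hasNext())
+ *       if (it.next().isEmpty())
+ *           it.remove();   // well-defined removal mid-traversal}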

This interface is a member of the + * + * Java Collections Framework. + * + * @param the type of elements returned by this iterator + * + * @author Josh Bloch + * @see Collection + * @see ListIterator + * @see Iterable + * @since 1.2 + */ +public interface Iterator { + /** + * Returns {@code true} if the iteration has more elements. + * (In other words, returns {@code true} if {@link #next} would + * return an element rather than throwing an exception.) + * + * @return {@code true} if the iteration has more elements + */ + boolean hasNext(); + + /** + * Returns the next element in the iteration. + * + * @return the next element in the iteration + * @throws NoSuchElementException if the iteration has no more elements + */ + E next(); + + /** + * Removes from the underlying collection the last element returned + * by this iterator (optional operation). This method can be called + * only once per call to {@link #next}. The behavior of an iterator + * is unspecified if the underlying collection is modified while the + * iteration is in progress in any way other than by calling this + * method. + * + * @implSpec + * The default implementation throws an instance of + * {@link UnsupportedOperationException} and performs no other action. + * + * @throws UnsupportedOperationException if the {@code remove} + * operation is not supported by this iterator + * + * @throws IllegalStateException if the {@code next} method has not + * yet been called, or the {@code remove} method has already + * been called after the last call to the {@code next} + * method + */ + default void remove() { + throw new UnsupportedOperationException("remove"); + } + + /** + * Performs the given action for each remaining element until all elements + * have been processed or the action throws an exception. Actions are + * performed in the order of iteration, if that order is specified. + * Exceptions thrown by the action are relayed to the caller. + * + * @implSpec + *

The default implementation behaves as if: + *

{@code
+     *     while (hasNext())
+     *         action.accept(next());
+     * }
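+     *
+     * For example (illustrative), draining the rest of an iteration:
+     *  {@code
+     *     it.forEachRemaining(System.out::println);}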
+ * + * @param action The action to be performed for each element + * @throws NullPointerException if the specified action is null + * @since 1.8 + */ + default void forEachRemaining(Consumer action) { + Objects.requireNonNull(action); + while (hasNext()) + action.accept(next()); + } +} diff --git a/src/LinkedBlockingQueue.java b/src/LinkedBlockingQueue.java new file mode 100644 index 0000000..338d10d --- /dev/null +++ b/src/LinkedBlockingQueue.java @@ -0,0 +1,1044 @@ +/* + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +/* + * + * + * + * + * + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package java.util.concurrent; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import java.util.AbstractQueue; +import java.util.Collection; +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.Spliterator; +import java.util.Spliterators; +import java.util.function.Consumer; + +/** + * An optionally-bounded {@linkplain BlockingQueue blocking queue} based on + * linked nodes. + * This queue orders elements FIFO (first-in-first-out). + * The head of the queue is that element that has been on the + * queue the longest time. + * The tail of the queue is that element that has been on the + * queue the shortest time. New elements + * are inserted at the tail of the queue, and the queue retrieval + * operations obtain elements at the head of the queue. + * Linked queues typically have higher throughput than array-based queues but + * less predictable performance in most concurrent applications. + * + *

The optional capacity bound constructor argument serves as a + * way to prevent excessive queue expansion. The capacity, if unspecified, + * is equal to {@link Integer#MAX_VALUE}. Linked nodes are + * dynamically created upon each insertion unless this would bring the + * queue above capacity. + * + *
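+ * For example (an illustrative producer/consumer sketch; {@code task}
+ * is an assumed {@code Runnable}):
+ *  {@code
+ *   BlockingQueue<Runnable> q = new LinkedBlockingQueue<>(1024);
+ *   q.put(task);                // blocks while the queue is full
+ *   Runnable next = q.take();   // blocks while the queue is empty;
+ *                               // both may throw InterruptedException}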

This class and its iterator implement all of the + * optional methods of the {@link Collection} and {@link + * Iterator} interfaces. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of elements held in this collection + */ +public class LinkedBlockingQueue extends AbstractQueue + implements BlockingQueue, java.io.Serializable { + private static final long serialVersionUID = -6903933977591709194L; + + /* + * A variant of the "two lock queue" algorithm. The putLock gates + * entry to put (and offer), and has an associated condition for + * waiting puts. Similarly for the takeLock. The "count" field + * that they both rely on is maintained as an atomic to avoid + * needing to get both locks in most cases. Also, to minimize need + * for puts to get takeLock and vice-versa, cascading notifies are + * used. When a put notices that it has enabled at least one take, + * it signals taker. That taker in turn signals others if more + * items have been entered since the signal. And symmetrically for + * takes signalling puts. Operations such as remove(Object) and + * iterators acquire both locks. + * + * Visibility between writers and readers is provided as follows: + * + * Whenever an element is enqueued, the putLock is acquired and + * count updated. A subsequent reader guarantees visibility to the + * enqueued Node by either acquiring the putLock (via fullyLock) + * or by acquiring the takeLock, and then reading n = count.get(); + * this gives visibility to the first n items. + * + * To implement weakly consistent iterators, it appears we need to + * keep all Nodes GC-reachable from a predecessor dequeued Node. + * That would cause two problems: + * - allow a rogue Iterator to cause unbounded memory retention + * - cause cross-generational linking of old Nodes to new Nodes if + * a Node was tenured while live, which generational GCs have a + * hard time dealing with, causing repeated major collections. + * However, only non-deleted Nodes need to be reachable from + * dequeued Nodes, and reachability does not necessarily have to + * be of the kind understood by the GC. We use the trick of + * linking a Node that has just been dequeued to itself. Such a + * self-link implicitly means to advance to head.next. + */ + + /** + * Linked list node class + */ + static class Node { + E item; + + /** + * One of: + * - the real successor Node + * - this Node, meaning the successor is head.next + * - null, meaning there is no successor (this is the last node) + */ + Node next; + + Node(E x) { item = x; } + } + + /** The capacity bound, or Integer.MAX_VALUE if none */ + private final int capacity; + + /** Current number of elements */ + private final AtomicInteger count = new AtomicInteger(); + + /** + * Head of linked list. + * Invariant: head.item == null + */ + transient Node head; + + /** + * Tail of linked list. + * Invariant: last.next == null + */ + private transient Node last; + + /** Lock held by take, poll, etc */ + private final ReentrantLock takeLock = new ReentrantLock(); + + /** Wait queue for waiting takes */ + private final Condition notEmpty = takeLock.newCondition(); + + /** Lock held by put, offer, etc */ + private final ReentrantLock putLock = new ReentrantLock(); + + /** Wait queue for waiting puts */ + private final Condition notFull = putLock.newCondition(); + + /** + * Signals a waiting take. Called only from put/offer (which do not + * otherwise ordinarily lock takeLock.) 
+ */ + private void signalNotEmpty() { + final ReentrantLock takeLock = this.takeLock; + takeLock.lock(); + try { + notEmpty.signal(); + } finally { + takeLock.unlock(); + } + } + + /** + * Signals a waiting put. Called only from take/poll. + */ + private void signalNotFull() { + final ReentrantLock putLock = this.putLock; + putLock.lock(); + try { + notFull.signal(); + } finally { + putLock.unlock(); + } + } + + /** + * Links node at end of queue. + * + * @param node the node + */ + private void enqueue(Node node) { + // assert putLock.isHeldByCurrentThread(); + // assert last.next == null; + last = last.next = node; + } + + /** + * Removes a node from head of queue. + * + * @return the node + */ + private E dequeue() { + // assert takeLock.isHeldByCurrentThread(); + // assert head.item == null; + Node h = head; + Node first = h.next; + h.next = h; // help GC + head = first; + E x = first.item; + first.item = null; + return x; + } + + /** + * Locks to prevent both puts and takes. + */ + void fullyLock() { + putLock.lock(); + takeLock.lock(); + } + + /** + * Unlocks to allow both puts and takes. + */ + void fullyUnlock() { + takeLock.unlock(); + putLock.unlock(); + } + +// /** +// * Tells whether both locks are held by current thread. +// */ +// boolean isFullyLocked() { +// return (putLock.isHeldByCurrentThread() && +// takeLock.isHeldByCurrentThread()); +// } + + /** + * Creates a {@code LinkedBlockingQueue} with a capacity of + * {@link Integer#MAX_VALUE}. + */ + public LinkedBlockingQueue() { + this(Integer.MAX_VALUE); + } + + /** + * Creates a {@code LinkedBlockingQueue} with the given (fixed) capacity. + * + * @param capacity the capacity of this queue + * @throws IllegalArgumentException if {@code capacity} is not greater + * than zero + */ + public LinkedBlockingQueue(int capacity) { + if (capacity <= 0) throw new IllegalArgumentException(); + this.capacity = capacity; + last = head = new Node(null); + } + + /** + * Creates a {@code LinkedBlockingQueue} with a capacity of + * {@link Integer#MAX_VALUE}, initially containing the elements of the + * given collection, + * added in traversal order of the collection's iterator. + * + * @param c the collection of elements to initially contain + * @throws NullPointerException if the specified collection or any + * of its elements are null + */ + public LinkedBlockingQueue(Collection c) { + this(Integer.MAX_VALUE); + final ReentrantLock putLock = this.putLock; + putLock.lock(); // Never contended, but necessary for visibility + try { + int n = 0; + for (E e : c) { + if (e == null) + throw new NullPointerException(); + if (n == capacity) + throw new IllegalStateException("Queue full"); + enqueue(new Node(e)); + ++n; + } + count.set(n); + } finally { + putLock.unlock(); + } + } + + // this doc comment is overridden to remove the reference to collections + // greater in size than Integer.MAX_VALUE + /** + * Returns the number of elements in this queue. + * + * @return the number of elements in this queue + */ + public int size() { + return count.get(); + } + + // this doc comment is a modified copy of the inherited doc comment, + // without the reference to unlimited queues. + /** + * Returns the number of additional elements that this queue can ideally + * (in the absence of memory or resource constraints) accept without + * blocking. This is always equal to the initial capacity of this queue + * less the current {@code size} of this queue. + * + *

Note that you cannot always tell if an attempt to insert + * an element will succeed by inspecting {@code remainingCapacity} + * because it may be the case that another thread is about to + * insert or remove an element. + */ + public int remainingCapacity() { + return capacity - count.get(); + } + + /** + * Inserts the specified element at the tail of this queue, waiting if + * necessary for space to become available. + * + * @throws InterruptedException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + */ + public void put(E e) throws InterruptedException { + if (e == null) throw new NullPointerException(); + // Note: convention in all put/take/etc is to preset local var + // holding count negative to indicate failure unless set. + int c = -1; + Node node = new Node(e); + final ReentrantLock putLock = this.putLock; + final AtomicInteger count = this.count; + putLock.lockInterruptibly(); + try { + /* + * Note that count is used in wait guard even though it is + * not protected by lock. This works because count can + * only decrease at this point (all other puts are shut + * out by lock), and we (or some other waiting put) are + * signalled if it ever changes from capacity. Similarly + * for all other uses of count in other wait guards. + */ + while (count.get() == capacity) { + notFull.await(); + } + enqueue(node); + c = count.getAndIncrement(); + if (c + 1 < capacity) + notFull.signal(); + } finally { + putLock.unlock(); + } + if (c == 0) + signalNotEmpty(); + } + + /** + * Inserts the specified element at the tail of this queue, waiting if + * necessary up to the specified wait time for space to become available. + * + * @return {@code true} if successful, or {@code false} if + * the specified waiting time elapses before space is available + * @throws InterruptedException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + */ + public boolean offer(E e, long timeout, TimeUnit unit) + throws InterruptedException { + + if (e == null) throw new NullPointerException(); + long nanos = unit.toNanos(timeout); + int c = -1; + final ReentrantLock putLock = this.putLock; + final AtomicInteger count = this.count; + putLock.lockInterruptibly(); + try { + while (count.get() == capacity) { + if (nanos <= 0) + return false; + nanos = notFull.awaitNanos(nanos); + } + enqueue(new Node(e)); + c = count.getAndIncrement(); + if (c + 1 < capacity) + notFull.signal(); + } finally { + putLock.unlock(); + } + if (c == 0) + signalNotEmpty(); + return true; + } + + /** + * Inserts the specified element at the tail of this queue if it is + * possible to do so immediately without exceeding the queue's capacity, + * returning {@code true} upon success and {@code false} if this queue + * is full. + * When using a capacity-restricted queue, this method is generally + * preferable to method {@link BlockingQueue#add add}, which can fail to + * insert an element only by throwing an exception. 
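+ *
+ * For example (an illustrative sketch; {@code handleFull} is an assumed
+ * overflow policy):
+ *  {@code
+ *   if (!q.offer(e))
+ *       handleFull(e);   // instead of catching the IllegalStateException
+ *                        // that add(e) would throw}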
+ * + * @throws NullPointerException if the specified element is null + */ + public boolean offer(E e) { + if (e == null) throw new NullPointerException(); + final AtomicInteger count = this.count; + if (count.get() == capacity) + return false; + int c = -1; + Node node = new Node(e); + final ReentrantLock putLock = this.putLock; + putLock.lock(); + try { + if (count.get() < capacity) { + enqueue(node); + c = count.getAndIncrement(); + if (c + 1 < capacity) + notFull.signal(); + } + } finally { + putLock.unlock(); + } + if (c == 0) + signalNotEmpty(); + return c >= 0; + } + + public E take() throws InterruptedException { + E x; + int c = -1; + final AtomicInteger count = this.count; + final ReentrantLock takeLock = this.takeLock; + takeLock.lockInterruptibly(); + try { + while (count.get() == 0) { + notEmpty.await(); + } + x = dequeue(); + c = count.getAndDecrement(); + if (c > 1) + notEmpty.signal(); + } finally { + takeLock.unlock(); + } + if (c == capacity) + signalNotFull(); + return x; + } + + public E poll(long timeout, TimeUnit unit) throws InterruptedException { + E x = null; + int c = -1; + long nanos = unit.toNanos(timeout); + final AtomicInteger count = this.count; + final ReentrantLock takeLock = this.takeLock; + takeLock.lockInterruptibly(); + try { + while (count.get() == 0) { + if (nanos <= 0) + return null; + nanos = notEmpty.awaitNanos(nanos); + } + x = dequeue(); + c = count.getAndDecrement(); + if (c > 1) + notEmpty.signal(); + } finally { + takeLock.unlock(); + } + if (c == capacity) + signalNotFull(); + return x; + } + + public E poll() { + final AtomicInteger count = this.count; + if (count.get() == 0) + return null; + E x = null; + int c = -1; + final ReentrantLock takeLock = this.takeLock; + takeLock.lock(); + try { + if (count.get() > 0) { + x = dequeue(); + c = count.getAndDecrement(); + if (c > 1) + notEmpty.signal(); + } + } finally { + takeLock.unlock(); + } + if (c == capacity) + signalNotFull(); + return x; + } + + public E peek() { + if (count.get() == 0) + return null; + final ReentrantLock takeLock = this.takeLock; + takeLock.lock(); + try { + Node first = head.next; + if (first == null) + return null; + else + return first.item; + } finally { + takeLock.unlock(); + } + } + + /** + * Unlinks interior Node p with predecessor trail. + */ + void unlink(Node p, Node trail) { + // assert isFullyLocked(); + // p.next is not changed, to allow iterators that are + // traversing p to maintain their weak-consistency guarantee. + p.item = null; + trail.next = p.next; + if (last == p) + last = trail; + if (count.getAndDecrement() == capacity) + notFull.signal(); + } + + /** + * Removes a single instance of the specified element from this queue, + * if it is present. More formally, removes an element {@code e} such + * that {@code o.equals(e)}, if this queue contains one or more such + * elements. + * Returns {@code true} if this queue contained the specified element + * (or equivalently, if this queue changed as a result of the call). + * + * @param o element to be removed from this queue, if present + * @return {@code true} if this queue changed as a result of the call + */ + public boolean remove(Object o) { + if (o == null) return false; + fullyLock(); + try { + for (Node trail = head, p = trail.next; + p != null; + trail = p, p = p.next) { + if (o.equals(p.item)) { + unlink(p, trail); + return true; + } + } + return false; + } finally { + fullyUnlock(); + } + } + + /** + * Returns {@code true} if this queue contains the specified element. 
+ * More formally, returns {@code true} if and only if this queue contains + * at least one element {@code e} such that {@code o.equals(e)}. + * + * @param o object to be checked for containment in this queue + * @return {@code true} if this queue contains the specified element + */ + public boolean contains(Object o) { + if (o == null) return false; + fullyLock(); + try { + for (Node p = head.next; p != null; p = p.next) + if (o.equals(p.item)) + return true; + return false; + } finally { + fullyUnlock(); + } + } + + /** + * Returns an array containing all of the elements in this queue, in + * proper sequence. + * + *

The returned array will be "safe" in that no references to it are + * maintained by this queue. (In other words, this method must allocate + * a new array). The caller is thus free to modify the returned array. + * + *

+     * <p>This method acts as a bridge between array-based and collection-based
+     * APIs.
+     *
+     * @return an array containing all of the elements in this queue
+     */
+    public Object[] toArray() {
+        fullyLock();
+        try {
+            int size = count.get();
+            Object[] a = new Object[size];
+            int k = 0;
+            for (Node p = head.next; p != null; p = p.next)
+                a[k++] = p.item;
+            return a;
+        } finally {
+            fullyUnlock();
+        }
+    }
+
+    /**
+     * Returns an array containing all of the elements in this queue, in
+     * proper sequence; the runtime type of the returned array is that of
+     * the specified array. If the queue fits in the specified array, it
+     * is returned therein. Otherwise, a new array is allocated with the
+     * runtime type of the specified array and the size of this queue.
+     *
+     *

If this queue fits in the specified array with room to spare + * (i.e., the array has more elements than this queue), the element in + * the array immediately following the end of the queue is set to + * {@code null}. + * + *

+     * <p>Like the {@link #toArray()} method, this method acts as a bridge between
+     * array-based and collection-based APIs. Further, this method allows
+     * precise control over the runtime type of the output array, and may,
+     * under certain circumstances, be used to save allocation costs.
+     *
+     *

Suppose {@code x} is a queue known to contain only strings. + * The following code can be used to dump the queue into a newly + * allocated array of {@code String}: + * + *

+     * <pre> {@code String[] y = x.toArray(new String[0]);}</pre>
+ * + * Note that {@code toArray(new Object[0])} is identical in function to + * {@code toArray()}. + * + * @param a the array into which the elements of the queue are to + * be stored, if it is big enough; otherwise, a new array of the + * same runtime type is allocated for this purpose + * @return an array containing all of the elements in this queue + * @throws ArrayStoreException if the runtime type of the specified array + * is not a supertype of the runtime type of every element in + * this queue + * @throws NullPointerException if the specified array is null + */ + @SuppressWarnings("unchecked") + public T[] toArray(T[] a) { + fullyLock(); + try { + int size = count.get(); + if (a.length < size) + a = (T[])java.lang.reflect.Array.newInstance + (a.getClass().getComponentType(), size); + + int k = 0; + for (Node p = head.next; p != null; p = p.next) + a[k++] = (T)p.item; + if (a.length > k) + a[k] = null; + return a; + } finally { + fullyUnlock(); + } + } + + public String toString() { + fullyLock(); + try { + Node p = head.next; + if (p == null) + return "[]"; + + StringBuilder sb = new StringBuilder(); + sb.append('['); + for (;;) { + E e = p.item; + sb.append(e == this ? "(this Collection)" : e); + p = p.next; + if (p == null) + return sb.append(']').toString(); + sb.append(',').append(' '); + } + } finally { + fullyUnlock(); + } + } + + /** + * Atomically removes all of the elements from this queue. + * The queue will be empty after this call returns. + */ + public void clear() { + fullyLock(); + try { + for (Node p, h = head; (p = h.next) != null; h = p) { + h.next = h; + p.item = null; + } + head = last; + // assert head.item == null && head.next == null; + if (count.getAndSet(0) == capacity) + notFull.signal(); + } finally { + fullyUnlock(); + } + } + + /** + * @throws UnsupportedOperationException {@inheritDoc} + * @throws ClassCastException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + * @throws IllegalArgumentException {@inheritDoc} + */ + public int drainTo(Collection c) { + return drainTo(c, Integer.MAX_VALUE); + } + + /** + * @throws UnsupportedOperationException {@inheritDoc} + * @throws ClassCastException {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + * @throws IllegalArgumentException {@inheritDoc} + */ + public int drainTo(Collection c, int maxElements) { + if (c == null) + throw new NullPointerException(); + if (c == this) + throw new IllegalArgumentException(); + if (maxElements <= 0) + return 0; + boolean signalNotFull = false; + final ReentrantLock takeLock = this.takeLock; + takeLock.lock(); + try { + int n = Math.min(maxElements, count.get()); + // count.get provides visibility to first n Nodes + Node h = head; + int i = 0; + try { + while (i < n) { + Node p = h.next; + c.add(p.item); + p.item = null; + h.next = h; + h = p; + ++i; + } + return n; + } finally { + // Restore invariants even if c.add() threw + if (i > 0) { + // assert h.item == null; + head = h; + signalNotFull = (count.getAndAdd(-i) == capacity); + } + } + } finally { + takeLock.unlock(); + if (signalNotFull) + signalNotFull(); + } + } + + /** + * Returns an iterator over the elements in this queue in proper sequence. + * The elements will be returned in order from first (head) to last (tail). + * + *
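+     * An illustrative traversal; {@code q} names a hypothetical
+     * {@code LinkedBlockingQueue<String>}, and the loop may run while other
+     * threads insert or remove elements:
+     * <pre> {@code
+     * for (String s : q)          // the for-each loop uses iterator()
+     *     System.out.println(s);
+     * }</pre>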

The returned iterator is + * weakly consistent. + * + * @return an iterator over the elements in this queue in proper sequence + */ + public Iterator iterator() { + return new Itr(); + } + + private class Itr implements Iterator { + /* + * Basic weakly-consistent iterator. At all times hold the next + * item to hand out so that if hasNext() reports true, we will + * still have it to return even if lost race with a take etc. + */ + + private Node current; + private Node lastRet; + private E currentElement; + + Itr() { + fullyLock(); + try { + current = head.next; + if (current != null) + currentElement = current.item; + } finally { + fullyUnlock(); + } + } + + public boolean hasNext() { + return current != null; + } + + /** + * Returns the next live successor of p, or null if no such. + * + * Unlike other traversal methods, iterators need to handle both: + * - dequeued nodes (p.next == p) + * - (possibly multiple) interior removed nodes (p.item == null) + */ + private Node nextNode(Node p) { + for (;;) { + Node s = p.next; + if (s == p) + return head.next; + if (s == null || s.item != null) + return s; + p = s; + } + } + + public E next() { + fullyLock(); + try { + if (current == null) + throw new NoSuchElementException(); + E x = currentElement; + lastRet = current; + current = nextNode(current); + currentElement = (current == null) ? null : current.item; + return x; + } finally { + fullyUnlock(); + } + } + + public void remove() { + if (lastRet == null) + throw new IllegalStateException(); + fullyLock(); + try { + Node node = lastRet; + lastRet = null; + for (Node trail = head, p = trail.next; + p != null; + trail = p, p = p.next) { + if (p == node) { + unlink(p, trail); + break; + } + } + } finally { + fullyUnlock(); + } + } + } + + /** A customized variant of Spliterators.IteratorSpliterator */ + static final class LBQSpliterator implements Spliterator { + static final int MAX_BATCH = 1 << 25; // max batch array size; + final LinkedBlockingQueue queue; + Node current; // current node; null until initialized + int batch; // batch size for splits + boolean exhausted; // true when no more nodes + long est; // size estimate + LBQSpliterator(LinkedBlockingQueue queue) { + this.queue = queue; + this.est = queue.size(); + } + + public long estimateSize() { return est; } + + public Spliterator trySplit() { + Node h; + final LinkedBlockingQueue q = this.queue; + int b = batch; + int n = (b <= 0) ? 1 : (b >= MAX_BATCH) ? 
MAX_BATCH : b + 1; + if (!exhausted && + ((h = current) != null || (h = q.head.next) != null) && + h.next != null) { + Object[] a = new Object[n]; + int i = 0; + Node p = current; + q.fullyLock(); + try { + if (p != null || (p = q.head.next) != null) { + do { + if ((a[i] = p.item) != null) + ++i; + } while ((p = p.next) != null && i < n); + } + } finally { + q.fullyUnlock(); + } + if ((current = p) == null) { + est = 0L; + exhausted = true; + } + else if ((est -= i) < 0L) + est = 0L; + if (i > 0) { + batch = i; + return Spliterators.spliterator + (a, 0, i, Spliterator.ORDERED | Spliterator.NONNULL | + Spliterator.CONCURRENT); + } + } + return null; + } + + public void forEachRemaining(Consumer action) { + if (action == null) throw new NullPointerException(); + final LinkedBlockingQueue q = this.queue; + if (!exhausted) { + exhausted = true; + Node p = current; + do { + E e = null; + q.fullyLock(); + try { + if (p == null) + p = q.head.next; + while (p != null) { + e = p.item; + p = p.next; + if (e != null) + break; + } + } finally { + q.fullyUnlock(); + } + if (e != null) + action.accept(e); + } while (p != null); + } + } + + public boolean tryAdvance(Consumer action) { + if (action == null) throw new NullPointerException(); + final LinkedBlockingQueue q = this.queue; + if (!exhausted) { + E e = null; + q.fullyLock(); + try { + if (current == null) + current = q.head.next; + while (current != null) { + e = current.item; + current = current.next; + if (e != null) + break; + } + } finally { + q.fullyUnlock(); + } + if (current == null) + exhausted = true; + if (e != null) { + action.accept(e); + return true; + } + } + return false; + } + + public int characteristics() { + return Spliterator.ORDERED | Spliterator.NONNULL | + Spliterator.CONCURRENT; + } + } + + /** + * Returns a {@link Spliterator} over the elements in this queue. + * + *

The returned spliterator is + * weakly consistent. + * + *
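+     * For example, a stream view built on this spliterator (a sketch for
+     * illustration; {@code q} is a hypothetical
+     * {@code LinkedBlockingQueue<String>}):
+     * <pre> {@code
+     * long n = q.stream()                  // Collection.stream() uses spliterator()
+     *           .filter(s -> !s.isEmpty())
+     *           .count();
+     * }</pre>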

The {@code Spliterator} reports {@link Spliterator#CONCURRENT}, + * {@link Spliterator#ORDERED}, and {@link Spliterator#NONNULL}. + * + * @implNote + * The {@code Spliterator} implements {@code trySplit} to permit limited + * parallelism. + * + * @return a {@code Spliterator} over the elements in this queue + * @since 1.8 + */ + public Spliterator spliterator() { + return new LBQSpliterator(this); + } + + /** + * Saves this queue to a stream (that is, serializes it). + * + * @param s the stream + * @throws java.io.IOException if an I/O error occurs + * @serialData The capacity is emitted (int), followed by all of + * its elements (each an {@code Object}) in the proper order, + * followed by a null + */ + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + + fullyLock(); + try { + // Write out any hidden stuff, plus capacity + s.defaultWriteObject(); + + // Write out all elements in the proper order. + for (Node p = head.next; p != null; p = p.next) + s.writeObject(p.item); + + // Use trailing null as sentinel + s.writeObject(null); + } finally { + fullyUnlock(); + } + } + + /** + * Reconstitutes this queue from a stream (that is, deserializes it). + * @param s the stream + * @throws ClassNotFoundException if the class of a serialized object + * could not be found + * @throws java.io.IOException if an I/O error occurs + */ + private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + // Read in capacity, and any hidden stuff + s.defaultReadObject(); + + count.set(0); + last = head = new Node(null); + + // Read in all elements and place in queue + for (;;) { + @SuppressWarnings("unchecked") + E item = (E)s.readObject(); + if (item == null) + break; + add(item); + } + } +} diff --git a/src/LinkedHashMap.java b/src/LinkedHashMap.java index 946ac9a..641df45 100644 --- a/src/LinkedHashMap.java +++ b/src/LinkedHashMap.java @@ -1,18 +1,212 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ package java.util; -import java.io.*; +import java.util.function.Consumer; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.io.IOException; + +/** + *

Hash table and linked list implementation of the Map interface, + * with predictable iteration order. This implementation differs from + * HashMap in that it maintains a doubly-linked list running through + * all of its entries. This linked list defines the iteration ordering, + * which is normally the order in which keys were inserted into the map + * (insertion-order). Note that insertion order is not affected + * if a key is re-inserted into the map. (A key k is + * reinserted into a map m if m.put(k, v) is invoked when + * m.containsKey(k) would return true immediately prior to + * the invocation.) + * + *
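+ * A minimal sketch of insertion order (the names below are hypothetical,
+ * for illustration only):
+ * <pre> {@code
+ * Map<String,Integer> m = new LinkedHashMap<>();
+ * m.put("one", 1);
+ * m.put("two", 2);
+ * m.put("one", 100);           // re-insertion: iteration order is unchanged
+ * System.out.println(m);       // prints {one=100, two=2}
+ * }</pre>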

This implementation spares its clients from the unspecified, generally + * chaotic ordering provided by {@link HashMap} (and {@link Hashtable}), + * without incurring the increased cost associated with {@link TreeMap}. It + * can be used to produce a copy of a map that has the same order as the + * original, regardless of the original map's implementation: + *

+ *     void foo(Map m) {
+ *         Map copy = new LinkedHashMap(m);
+ *         ...
+ *     }
+ * 
+ * This technique is particularly useful if a module takes a map on input, + * copies it, and later returns results whose order is determined by that of + * the copy. (Clients generally appreciate having things returned in the same + * order they were presented.) + * + *

A special {@link #LinkedHashMap(int,float,boolean) constructor} is + * provided to create a linked hash map whose order of iteration is the order + * in which its entries were last accessed, from least-recently accessed to + * most-recently (access-order). This kind of map is well-suited to + * building LRU caches. Invoking the {@code put}, {@code putIfAbsent}, + * {@code get}, {@code getOrDefault}, {@code compute}, {@code computeIfAbsent}, + * {@code computeIfPresent}, or {@code merge} methods results + * in an access to the corresponding entry (assuming it exists after the + * invocation completes). The {@code replace} methods only result in an access + * of the entry if the value is replaced. The {@code putAll} method generates one + * entry access for each mapping in the specified map, in the order that + * key-value mappings are provided by the specified map's entry set iterator. + * No other methods generate entry accesses. In particular, operations + * on collection-views do not affect the order of iteration of the + * backing map. + * + *
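+ * For instance, a minimal access-ordered sketch of an LRU-style cache
+ * (the bound of 100 entries is illustrative only):
+ * <pre> {@code
+ * Map<String,String> cache =
+ *     new LinkedHashMap<String,String>(16, 0.75f, true) {  // true = access-order
+ *         protected boolean removeEldestEntry(Map.Entry<String,String> eldest) {
+ *             return size() > 100;     // evict the least-recently accessed entry
+ *         }
+ *     };
+ * }</pre>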

The {@link #removeEldestEntry(Map.Entry)} method may be overridden to + * impose a policy for removing stale mappings automatically when new mappings + * are added to the map. + * + *

This class provides all of the optional Map operations, and + * permits null elements. Like HashMap, it provides constant-time + * performance for the basic operations (add, contains and + * remove), assuming the hash function disperses elements + * properly among the buckets. Performance is likely to be just slightly + * below that of HashMap, due to the added expense of maintaining the + * linked list, with one exception: Iteration over the collection-views + * of a LinkedHashMap requires time proportional to the size + * of the map, regardless of its capacity. Iteration over a HashMap + * is likely to be more expensive, requiring time proportional to its + * capacity. + * + *

A linked hash map has two parameters that affect its performance: + * initial capacity and load factor. They are defined precisely + * as for HashMap. Note, however, that the penalty for choosing an + * excessively high value for initial capacity is less severe for this class + * than for HashMap, as iteration times for this class are unaffected + * by capacity. + * + *

Note that this implementation is not synchronized. + * If multiple threads access a linked hash map concurrently, and at least + * one of the threads modifies the map structurally, it must be + * synchronized externally. This is typically accomplished by + * synchronizing on some object that naturally encapsulates the map. + * + * If no such object exists, the map should be "wrapped" using the + * {@link Collections#synchronizedMap Collections.synchronizedMap} + * method. This is best done at creation time, to prevent accidental + * unsynchronized access to the map:

+ *   Map m = Collections.synchronizedMap(new LinkedHashMap(...));
+ *
+ * A structural modification is any operation that adds or deletes one or more
+ * mappings or, in the case of access-ordered linked hash maps, affects
+ * iteration order. In insertion-ordered linked hash maps, merely changing
+ * the value associated with a key that is already contained in the map is not
+ * a structural modification. In access-ordered linked hash maps,
+ * merely querying the map with get is a structural modification.
+ *
+ *

The iterators returned by the iterator method of the collections + * returned by all of this class's collection view methods are + * fail-fast: if the map is structurally modified at any time after + * the iterator is created, in any way except through the iterator's own + * remove method, the iterator will throw a {@link + * ConcurrentModificationException}. Thus, in the face of concurrent + * modification, the iterator fails quickly and cleanly, rather than risking + * arbitrary, non-deterministic behavior at an undetermined time in the future. + * + *
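+ * For instance, the following sketch (names are illustrative only) would
+ * typically fail with {@code ConcurrentModificationException}:
+ * <pre> {@code
+ * for (String k : m.keySet())
+ *     if (k.isEmpty())
+ *         m.remove(k);         // structural modification during iteration
+ * }</pre>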

Note that the fail-fast behavior of an iterator cannot be guaranteed + * as it is, generally speaking, impossible to make any hard guarantees in the + * presence of unsynchronized concurrent modification. Fail-fast iterators + * throw ConcurrentModificationException on a best-effort basis. + * Therefore, it would be wrong to write a program that depended on this + * exception for its correctness: the fail-fast behavior of iterators + * should be used only to detect bugs. + * + *

The spliterators returned by the spliterator method of the collections + * returned by all of this class's collection view methods are + * late-binding, + * fail-fast, and additionally report {@link Spliterator#ORDERED}. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @implNote + * The spliterators returned by the spliterator method of the collections + * returned by all of this class's collection view methods are created from + * the iterators of the corresponding collections. + * + * @param the type of keys maintained by this map + * @param the type of mapped values + * + * @author Josh Bloch + * @see Object#hashCode() + * @see Collection + * @see Map + * @see HashMap + * @see TreeMap + * @see Hashtable + * @since 1.4 + */ public class LinkedHashMap extends HashMap implements Map { + /* + * Implementation note. A previous version of this class was + * internally structured a little differently. Because superclass + * HashMap now uses trees for some of its nodes, class + * LinkedHashMap.Entry is now treated as intermediary node class + * that can also be converted to tree form. The name of this + * class, LinkedHashMap.Entry, is confusing in several ways in its + * current context, but cannot be changed. Otherwise, even though + * it is not exported outside this package, some existing source + * code is known to have relied on a symbol resolution corner case + * rule in calls to removeEldestEntry that suppressed compilation + * errors due to ambiguous usages. So, we keep the name to + * preserve unmodified compilability. + * + * The changes in node classes also require using two fields + * (head, tail) rather than a pointer to a header node to maintain + * the doubly-linked before/after list. This class also + * previously used a different style of callback methods upon + * access, insertion, and removal. + */ + + /** + * HashMap.Node subclass for normal LinkedHashMap entries. + */ + static class Entry extends HashMap.Node { + Entry before, after; + Entry(int hash, K key, V value, Node next) { + super(hash, key, value, next); + } + } + private static final long serialVersionUID = 3801124242820219131L; /** - * The head of the doubly linked list. + * The head (eldest) of the doubly linked list. + */ + transient LinkedHashMap.Entry head; + + /** + * The tail (youngest) of the doubly linked list. 
*/ - private transient Entry header; + transient LinkedHashMap.Entry tail; /** * The iteration ordering method for this linked hash map: true @@ -20,7 +214,125 @@ public class LinkedHashMap * * @serial */ - private final boolean accessOrder; + final boolean accessOrder; + + // internal utilities + + // link at the end of list + private void linkNodeLast(LinkedHashMap.Entry p) { + LinkedHashMap.Entry last = tail; + tail = p; + if (last == null) + head = p; + else { + p.before = last; + last.after = p; + } + } + + // apply src's links to dst + private void transferLinks(LinkedHashMap.Entry src, + LinkedHashMap.Entry dst) { + LinkedHashMap.Entry b = dst.before = src.before; + LinkedHashMap.Entry a = dst.after = src.after; + if (b == null) + head = dst; + else + b.after = dst; + if (a == null) + tail = dst; + else + a.before = dst; + } + + // overrides of HashMap hook methods + + void reinitialize() { + super.reinitialize(); + head = tail = null; + } + + Node newNode(int hash, K key, V value, Node e) { + LinkedHashMap.Entry p = + new LinkedHashMap.Entry(hash, key, value, e); + linkNodeLast(p); + return p; + } + + Node replacementNode(Node p, Node next) { + LinkedHashMap.Entry q = (LinkedHashMap.Entry)p; + LinkedHashMap.Entry t = + new LinkedHashMap.Entry(q.hash, q.key, q.value, next); + transferLinks(q, t); + return t; + } + + TreeNode newTreeNode(int hash, K key, V value, Node next) { + TreeNode p = new TreeNode(hash, key, value, next); + linkNodeLast(p); + return p; + } + + TreeNode replacementTreeNode(Node p, Node next) { + LinkedHashMap.Entry q = (LinkedHashMap.Entry)p; + TreeNode t = new TreeNode(q.hash, q.key, q.value, next); + transferLinks(q, t); + return t; + } + + void afterNodeRemoval(Node e) { // unlink + LinkedHashMap.Entry p = + (LinkedHashMap.Entry)e, b = p.before, a = p.after; + p.before = p.after = null; + if (b == null) + head = a; + else + b.after = a; + if (a == null) + tail = b; + else + a.before = b; + } + + void afterNodeInsertion(boolean evict) { // possibly remove eldest + LinkedHashMap.Entry first; + if (evict && (first = head) != null && removeEldestEntry(first)) { + K key = first.key; + removeNode(hash(key), key, null, false, true); + } + } + + void afterNodeAccess(Node e) { // move node to last + LinkedHashMap.Entry last; + if (accessOrder && (last = tail) != e) { + LinkedHashMap.Entry p = + (LinkedHashMap.Entry)e, b = p.before, a = p.after; + p.after = null; + if (b == null) + head = a; + else + b.after = a; + if (a != null) + a.before = b; + else + last = b; + if (last == null) + head = p; + else { + p.before = last; + last.after = p; + } + tail = p; + ++modCount; + } + } + + void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException { + for (LinkedHashMap.Entry e = head; e != null; e = e.after) { + s.writeObject(e.key); + s.writeObject(e.value); + } + } /** * Constructs an empty insertion-ordered LinkedHashMap instance @@ -67,8 +379,9 @@ public LinkedHashMap() { * @throws NullPointerException if the specified map is null */ public LinkedHashMap(Map m) { - super(m); + super(); accessOrder = false; + putMapEntries(m, false); } /** @@ -89,30 +402,6 @@ public LinkedHashMap(int initialCapacity, this.accessOrder = accessOrder; } - /** - * Called by superclass constructors and pseudoconstructors (clone, - * readObject) before any entries are inserted into the map. Initializes - * the chain. - */ - void init() { - header = new Entry<>(-1, null, null, null); - header.before = header.after = header; - } - - /** - * Transfers all entries to new table array. 
This method is called - * by superclass resize. It is overridden for performance, as it is - * faster to iterate using our linked list. - */ - void transfer(HashMap.Entry[] newTable) { - int newCapacity = newTable.length; - for (Entry e = header.after; e != header; e = e.after) { - int index = indexFor(e.hash, newCapacity); - e.next = newTable[index]; - newTable[index] = e; - } - } - /** * Returns true if this map maps one or more keys to the @@ -123,15 +412,10 @@ void transfer(HashMap.Entry[] newTable) { * specified value */ public boolean containsValue(Object value) { - // Overridden to take advantage of faster iterator - if (value==null) { - for (Entry e = header.after; e != header; e = e.after) - if (e.value==null) - return true; - } else { - for (Entry e = header.after; e != header; e = e.after) - if (value.equals(e.value)) - return true; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) { + V v = e.value; + if (v == value || (value != null && value.equals(v))) + return true; } return false; } @@ -152,154 +436,32 @@ public boolean containsValue(Object value) { * distinguish these two cases. */ public V get(Object key) { - Entry e = (Entry)getEntry(key); - if (e == null) + Node e; + if ((e = getNode(hash(key), key)) == null) return null; - e.recordAccess(this); + if (accessOrder) + afterNodeAccess(e); return e.value; } /** - * Removes all of the mappings from this map. - * The map will be empty after this call returns. + * {@inheritDoc} */ - public void clear() { - super.clear(); - header.before = header.after = header; - } + public V getOrDefault(Object key, V defaultValue) { + Node e; + if ((e = getNode(hash(key), key)) == null) + return defaultValue; + if (accessOrder) + afterNodeAccess(e); + return e.value; + } /** - * LinkedHashMap entry. + * {@inheritDoc} */ - private static class Entry extends HashMap.Entry { - // These fields comprise the doubly linked list used for iteration. - Entry before, after; - - Entry(int hash, K key, V value, HashMap.Entry next) { - super(hash, key, value, next); - } - - /** - * Removes this entry from the linked list. - */ - private void remove() { - before.after = after; - after.before = before; - } - - /** - * Inserts this entry before the specified existing entry in the list. - */ - private void addBefore(Entry existingEntry) { - after = existingEntry; - before = existingEntry.before; - before.after = this; - after.before = this; - } - - /** - * This method is invoked by the superclass whenever the value - * of a pre-existing entry is read by Map.get or modified by Map.set. - * If the enclosing Map is access-ordered, it moves the entry - * to the end of the list; otherwise, it does nothing. - */ - void recordAccess(HashMap m) { - LinkedHashMap lm = (LinkedHashMap)m; - if (lm.accessOrder) { - lm.modCount++; - remove(); - addBefore(lm.header); - } - } - - void recordRemoval(HashMap m) { - remove(); - } - } - - private abstract class LinkedHashIterator implements Iterator { - Entry nextEntry = header.after; - Entry lastReturned = null; - - /** - * The modCount value that the iterator believes that the backing - * List should have. If this expectation is violated, the iterator - * has detected concurrent modification. 
- */ - int expectedModCount = modCount; - - public boolean hasNext() { - return nextEntry != header; - } - - public void remove() { - if (lastReturned == null) - throw new IllegalStateException(); - if (modCount != expectedModCount) - throw new ConcurrentModificationException(); - - LinkedHashMap.this.remove(lastReturned.key); - lastReturned = null; - expectedModCount = modCount; - } - - Entry nextEntry() { - if (modCount != expectedModCount) - throw new ConcurrentModificationException(); - if (nextEntry == header) - throw new NoSuchElementException(); - - Entry e = lastReturned = nextEntry; - nextEntry = e.after; - return e; - } - } - - private class KeyIterator extends LinkedHashIterator { - public K next() { return nextEntry().getKey(); } - } - - private class ValueIterator extends LinkedHashIterator { - public V next() { return nextEntry().value; } - } - - private class EntryIterator extends LinkedHashIterator> { - public Map.Entry next() { return nextEntry(); } - } - - // These Overrides alter the behavior of superclass view iterator() methods - Iterator newKeyIterator() { return new KeyIterator(); } - Iterator newValueIterator() { return new ValueIterator(); } - Iterator> newEntryIterator() { return new EntryIterator(); } - - /** - * This override alters behavior of superclass put method. It causes newly - * allocated entry to get inserted at the end of the linked list and - * removes the eldest entry if appropriate. - */ - void addEntry(int hash, K key, V value, int bucketIndex) { - createEntry(hash, key, value, bucketIndex); - - // Remove eldest entry if instructed, else grow capacity if appropriate - Entry eldest = header.after; - if (removeEldestEntry(eldest)) { - removeEntryForKey(eldest.key); - } else { - if (size >= threshold) - resize(2 * table.length); - } - } - - /** - * This override differs from addEntry in that it doesn't resize the - * table or remove the eldest entry. - */ - void createEntry(int hash, K key, V value, int bucketIndex) { - HashMap.Entry old = table[bucketIndex]; - Entry e = new Entry<>(hash, key, value, old); - table[bucketIndex] = e; - e.addBefore(header); - size++; + public void clear() { + super.clear(); + head = tail = null; } /** @@ -317,7 +479,7 @@ void createEntry(int hash, K key, V value, int bucketIndex) { * private static final int MAX_ENTRIES = 100; * * protected boolean removeEldestEntry(Map.Entry eldest) { - * return size() > MAX_ENTRIES; + * return size() > MAX_ENTRIES; * } * * @@ -346,4 +508,241 @@ void createEntry(int hash, K key, V value, int bucketIndex) { protected boolean removeEldestEntry(Map.Entry eldest) { return false; } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation), the results of + * the iteration are undefined. The set supports element removal, + * which removes the corresponding mapping from the map, via the + * Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. It does not support the add or addAll + * operations. + * Its {@link Spliterator} typically provides faster sequential + * performance but much poorer parallel performance than that of + * {@code HashMap}. + * + * @return a set view of the keys contained in this map + */ + public Set keySet() { + Set ks; + return (ks = keySet) == null ? 
(keySet = new LinkedKeySet()) : ks; + } + + final class LinkedKeySet extends AbstractSet { + public final int size() { return size; } + public final void clear() { LinkedHashMap.this.clear(); } + public final Iterator iterator() { + return new LinkedKeyIterator(); + } + public final boolean contains(Object o) { return containsKey(o); } + public final boolean remove(Object key) { + return removeNode(hash(key), key, null, false, true) != null; + } + public final Spliterator spliterator() { + return Spliterators.spliterator(this, Spliterator.SIZED | + Spliterator.ORDERED | + Spliterator.DISTINCT); + } + public final void forEach(Consumer action) { + if (action == null) + throw new NullPointerException(); + int mc = modCount; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) + action.accept(e.key); + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. If the map is + * modified while an iteration over the collection is in progress + * (except through the iterator's own remove operation), + * the results of the iteration are undefined. The collection + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Collection.remove, removeAll, + * retainAll and clear operations. It does not + * support the add or addAll operations. + * Its {@link Spliterator} typically provides faster sequential + * performance but much poorer parallel performance than that of + * {@code HashMap}. + * + * @return a view of the values contained in this map + */ + public Collection values() { + Collection vs; + return (vs = values) == null ? (values = new LinkedValues()) : vs; + } + + final class LinkedValues extends AbstractCollection { + public final int size() { return size; } + public final void clear() { LinkedHashMap.this.clear(); } + public final Iterator iterator() { + return new LinkedValueIterator(); + } + public final boolean contains(Object o) { return containsValue(o); } + public final Spliterator spliterator() { + return Spliterators.spliterator(this, Spliterator.SIZED | + Spliterator.ORDERED); + } + public final void forEach(Consumer action) { + if (action == null) + throw new NullPointerException(); + int mc = modCount; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) + action.accept(e.value); + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation, or through the + * setValue operation on a map entry returned by the + * iterator) the results of the iteration are undefined. The set + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Set.remove, removeAll, retainAll and + * clear operations. It does not support the + * add or addAll operations. + * Its {@link Spliterator} typically provides faster sequential + * performance but much poorer parallel performance than that of + * {@code HashMap}. 
+ * + * @return a set view of the mappings contained in this map + */ + public Set> entrySet() { + Set> es; + return (es = entrySet) == null ? (entrySet = new LinkedEntrySet()) : es; + } + + final class LinkedEntrySet extends AbstractSet> { + public final int size() { return size; } + public final void clear() { LinkedHashMap.this.clear(); } + public final Iterator> iterator() { + return new LinkedEntryIterator(); + } + public final boolean contains(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Map.Entry e = (Map.Entry) o; + Object key = e.getKey(); + Node candidate = getNode(hash(key), key); + return candidate != null && candidate.equals(e); + } + public final boolean remove(Object o) { + if (o instanceof Map.Entry) { + Map.Entry e = (Map.Entry) o; + Object key = e.getKey(); + Object value = e.getValue(); + return removeNode(hash(key), key, value, true, true) != null; + } + return false; + } + public final Spliterator> spliterator() { + return Spliterators.spliterator(this, Spliterator.SIZED | + Spliterator.ORDERED | + Spliterator.DISTINCT); + } + public final void forEach(Consumer> action) { + if (action == null) + throw new NullPointerException(); + int mc = modCount; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) + action.accept(e); + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } + + // Map overrides + + public void forEach(BiConsumer action) { + if (action == null) + throw new NullPointerException(); + int mc = modCount; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) + action.accept(e.key, e.value); + if (modCount != mc) + throw new ConcurrentModificationException(); + } + + public void replaceAll(BiFunction function) { + if (function == null) + throw new NullPointerException(); + int mc = modCount; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) + e.value = function.apply(e.key, e.value); + if (modCount != mc) + throw new ConcurrentModificationException(); + } + + // Iterators + + abstract class LinkedHashIterator { + LinkedHashMap.Entry next; + LinkedHashMap.Entry current; + int expectedModCount; + + LinkedHashIterator() { + next = head; + expectedModCount = modCount; + current = null; + } + + public final boolean hasNext() { + return next != null; + } + + final LinkedHashMap.Entry nextNode() { + LinkedHashMap.Entry e = next; + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + if (e == null) + throw new NoSuchElementException(); + current = e; + next = e.after; + return e; + } + + public final void remove() { + Node p = current; + if (p == null) + throw new IllegalStateException(); + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + current = null; + K key = p.key; + removeNode(hash(key), key, null, false, false); + expectedModCount = modCount; + } + } + + final class LinkedKeyIterator extends LinkedHashIterator + implements Iterator { + public final K next() { return nextNode().getKey(); } + } + + final class LinkedValueIterator extends LinkedHashIterator + implements Iterator { + public final V next() { return nextNode().value; } + } + + final class LinkedEntryIterator extends LinkedHashIterator + implements Iterator> { + public final Map.Entry next() { return nextNode(); } + } + + } diff --git a/src/LinkedHashSet.java b/src/LinkedHashSet.java index 9859c3c..f31ff46 100644 --- a/src/LinkedHashSet.java +++ b/src/LinkedHashSet.java @@ -1,6 +1,120 @@ +/* + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 
+ * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ package java.util; +/** + *

Hash table and linked list implementation of the Set interface, + * with predictable iteration order. This implementation differs from + * HashSet in that it maintains a doubly-linked list running through + * all of its entries. This linked list defines the iteration ordering, + * which is the order in which elements were inserted into the set + * (insertion-order). Note that insertion order is not affected + * if an element is re-inserted into the set. (An element e + * is reinserted into a set s if s.add(e) is invoked when + * s.contains(e) would return true immediately prior to + * the invocation.) + * + *
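+ * A brief sketch of the resulting iteration order (hypothetical names,
+ * for illustration only):
+ * <pre> {@code
+ * Set<String> s = new LinkedHashSet<>();
+ * s.add("b"); s.add("a"); s.add("b");   // re-adding "b" does not reorder it
+ * System.out.println(s);                // prints [b, a]
+ * }</pre>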

This implementation spares its clients from the unspecified, generally + * chaotic ordering provided by {@link HashSet}, without incurring the + * increased cost associated with {@link TreeSet}. It can be used to + * produce a copy of a set that has the same order as the original, regardless + * of the original set's implementation: + *

+ *     void foo(Set s) {
+ *         Set copy = new LinkedHashSet(s);
+ *         ...
+ *     }
+ * 
+ * This technique is particularly useful if a module takes a set on input, + * copies it, and later returns results whose order is determined by that of + * the copy. (Clients generally appreciate having things returned in the same + * order they were presented.) + * + *

This class provides all of the optional Set operations, and + * permits null elements. Like HashSet, it provides constant-time + * performance for the basic operations (add, contains and + * remove), assuming the hash function disperses elements + * properly among the buckets. Performance is likely to be just slightly + * below that of HashSet, due to the added expense of maintaining the + * linked list, with one exception: Iteration over a LinkedHashSet + * requires time proportional to the size of the set, regardless of + * its capacity. Iteration over a HashSet is likely to be more + * expensive, requiring time proportional to its capacity. + * + *

A linked hash set has two parameters that affect its performance: + * initial capacity and load factor. They are defined precisely + * as for HashSet. Note, however, that the penalty for choosing an + * excessively high value for initial capacity is less severe for this class + * than for HashSet, as iteration times for this class are unaffected + * by capacity. + * + *

Note that this implementation is not synchronized. + * If multiple threads access a linked hash set concurrently, and at least + * one of the threads modifies the set, it must be synchronized + * externally. This is typically accomplished by synchronizing on some + * object that naturally encapsulates the set. + * + * If no such object exists, the set should be "wrapped" using the + * {@link Collections#synchronizedSet Collections.synchronizedSet} + * method. This is best done at creation time, to prevent accidental + * unsynchronized access to the set:

+ *   Set s = Collections.synchronizedSet(new LinkedHashSet(...));
+ * + *

The iterators returned by this class's iterator method are + * fail-fast: if the set is modified at any time after the iterator + * is created, in any way except through the iterator's own remove + * method, the iterator will throw a {@link ConcurrentModificationException}. + * Thus, in the face of concurrent modification, the iterator fails quickly + * and cleanly, rather than risking arbitrary, non-deterministic behavior at + * an undetermined time in the future. + * + *

Note that the fail-fast behavior of an iterator cannot be guaranteed + * as it is, generally speaking, impossible to make any hard guarantees in the + * presence of unsynchronized concurrent modification. Fail-fast iterators + * throw ConcurrentModificationException on a best-effort basis. + * Therefore, it would be wrong to write a program that depended on this + * exception for its correctness: the fail-fast behavior of iterators + * should be used only to detect bugs. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @param the type of elements maintained by this set + * + * @author Josh Bloch + * @see Object#hashCode() + * @see Collection + * @see Set + * @see HashSet + * @see TreeSet + * @see Hashtable + * @since 1.4 + */ + public class LinkedHashSet extends HashSet implements Set, Cloneable, java.io.Serializable { @@ -54,4 +168,28 @@ public LinkedHashSet(Collection c) { super(Math.max(2*c.size(), 11), .75f, true); addAll(c); } + + /** + * Creates a late-binding + * and fail-fast {@code Spliterator} over the elements in this set. + * + *

The {@code Spliterator} reports {@link Spliterator#SIZED}, + * {@link Spliterator#DISTINCT}, and {@code ORDERED}. Implementations + * should document the reporting of additional characteristic values. + * + * @implNote + * The implementation creates a + * late-binding spliterator + * from the set's {@code Iterator}. The spliterator inherits the + * fail-fast properties of the set's iterator. + * The created {@code Spliterator} additionally reports + * {@link Spliterator#SUBSIZED}. + * + * @return a {@code Spliterator} over the elements in this set + * @since 1.8 + */ + @Override + public Spliterator spliterator() { + return Spliterators.spliterator(this, Spliterator.DISTINCT | Spliterator.ORDERED); + } } diff --git a/src/LinkedList.java b/src/LinkedList.java index 10f800d..303d141 100644 --- a/src/LinkedList.java +++ b/src/LinkedList.java @@ -1,6 +1,85 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ package java.util; +import java.util.function.Consumer; + +/** + * Doubly-linked list implementation of the {@code List} and {@code Deque} + * interfaces. Implements all optional list operations, and permits all + * elements (including {@code null}). + * + *

All of the operations perform as could be expected for a doubly-linked + * list. Operations that index into the list will traverse the list from + * the beginning or the end, whichever is closer to the specified index. + * + *
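+ * An illustrative consequence ({@code list} is a hypothetical
+ * {@code LinkedList<String>}):
+ * <pre> {@code
+ * String last = list.get(list.size() - 1); // one step from the tail
+ * String mid  = list.get(list.size() / 2); // walks roughly half the list
+ * }</pre>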

Note that this implementation is not synchronized. + * If multiple threads access a linked list concurrently, and at least + * one of the threads modifies the list structurally, it must be + * synchronized externally. (A structural modification is any operation + * that adds or deletes one or more elements; merely setting the value of + * an element is not a structural modification.) This is typically + * accomplished by synchronizing on some object that naturally + * encapsulates the list. + * + * If no such object exists, the list should be "wrapped" using the + * {@link Collections#synchronizedList Collections.synchronizedList} + * method. This is best done at creation time, to prevent accidental + * unsynchronized access to the list:

+ *   List list = Collections.synchronizedList(new LinkedList(...));
+ * + *

The iterators returned by this class's {@code iterator} and + * {@code listIterator} methods are fail-fast: if the list is + * structurally modified at any time after the iterator is created, in + * any way except through the Iterator's own {@code remove} or + * {@code add} methods, the iterator will throw a {@link + * ConcurrentModificationException}. Thus, in the face of concurrent + * modification, the iterator fails quickly and cleanly, rather than + * risking arbitrary, non-deterministic behavior at an undetermined + * time in the future. + * + *

Note that the fail-fast behavior of an iterator cannot be guaranteed + * as it is, generally speaking, impossible to make any hard guarantees in the + * presence of unsynchronized concurrent modification. Fail-fast iterators + * throw {@code ConcurrentModificationException} on a best-effort basis. + * Therefore, it would be wrong to write a program that depended on this + * exception for its correctness: the fail-fast behavior of iterators + * should be used only to detect bugs. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @author Josh Bloch + * @see List + * @see ArrayList + * @since 1.2 + * @param the type of elements held in this collection + */ + public class LinkedList extends AbstractSequentialList implements List, Deque, Cloneable, java.io.Serializable @@ -790,7 +869,7 @@ public ListIterator listIterator(int index) { } private class ListItr implements ListIterator { - private Node lastReturned = null; + private Node lastReturned; private Node next; private int nextIndex; private int expectedModCount = modCount; @@ -871,6 +950,17 @@ public void add(E e) { expectedModCount++; } + public void forEachRemaining(Consumer action) { + Objects.requireNonNull(action); + while (modCount == expectedModCount && nextIndex < size) { + action.accept(next.item); + lastReturned = next; + next = next.next; + nextIndex++; + } + checkForComodification(); + } + final void checkForComodification() { if (modCount != expectedModCount) throw new ConcurrentModificationException(); @@ -917,7 +1007,7 @@ private LinkedList superClone() { try { return (LinkedList) super.clone(); } catch (CloneNotSupportedException e) { - throw new InternalError(); + throw new InternalError(e); } } @@ -1058,4 +1148,115 @@ private void readObject(java.io.ObjectInputStream s) for (int i = 0; i < size; i++) linkLast((E)s.readObject()); } + + /** + * Creates a late-binding + * and fail-fast {@link Spliterator} over the elements in this + * list. + * + *

The {@code Spliterator} reports {@link Spliterator#SIZED} and + * {@link Spliterator#ORDERED}. Overriding implementations should document + * the reporting of additional characteristic values. + * + * @implNote + * The {@code Spliterator} additionally reports {@link Spliterator#SUBSIZED} + * and implements {@code trySplit} to permit limited parallelism.. + * + * @return a {@code Spliterator} over the elements in this list + * @since 1.8 + */ + @Override + public Spliterator spliterator() { + return new LLSpliterator(this, -1, 0); + } + + /** A customized variant of Spliterators.IteratorSpliterator */ + static final class LLSpliterator implements Spliterator { + static final int BATCH_UNIT = 1 << 10; // batch array size increment + static final int MAX_BATCH = 1 << 25; // max batch array size; + final LinkedList list; // null OK unless traversed + Node current; // current node; null until initialized + int est; // size estimate; -1 until first needed + int expectedModCount; // initialized when est set + int batch; // batch size for splits + + LLSpliterator(LinkedList list, int est, int expectedModCount) { + this.list = list; + this.est = est; + this.expectedModCount = expectedModCount; + } + + final int getEst() { + int s; // force initialization + final LinkedList lst; + if ((s = est) < 0) { + if ((lst = list) == null) + s = est = 0; + else { + expectedModCount = lst.modCount; + current = lst.first; + s = est = lst.size; + } + } + return s; + } + + public long estimateSize() { return (long) getEst(); } + + public Spliterator trySplit() { + Node p; + int s = getEst(); + if (s > 1 && (p = current) != null) { + int n = batch + BATCH_UNIT; + if (n > s) + n = s; + if (n > MAX_BATCH) + n = MAX_BATCH; + Object[] a = new Object[n]; + int j = 0; + do { a[j++] = p.item; } while ((p = p.next) != null && j < n); + current = p; + batch = j; + est = s - j; + return Spliterators.spliterator(a, 0, j, Spliterator.ORDERED); + } + return null; + } + + public void forEachRemaining(Consumer action) { + Node p; int n; + if (action == null) throw new NullPointerException(); + if ((n = getEst()) > 0 && (p = current) != null) { + current = null; + est = 0; + do { + E e = p.item; + p = p.next; + action.accept(e); + } while (p != null && --n > 0); + } + if (list.modCount != expectedModCount) + throw new ConcurrentModificationException(); + } + + public boolean tryAdvance(Consumer action) { + Node p; + if (action == null) throw new NullPointerException(); + if (getEst() > 0 && (p = current) != null) { + --est; + E e = p.item; + current = p.next; + action.accept(e); + if (list.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + return false; + } + + public int characteristics() { + return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED; + } + } + } diff --git a/src/List.java b/src/List.java new file mode 100644 index 0000000..051d5b5 --- /dev/null +++ b/src/List.java @@ -0,0 +1,734 @@ +/* + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +package java.util; + +import java.util.function.UnaryOperator; + +/** + * An ordered collection (also known as a sequence). The user of this + * interface has precise control over where in the list each element is + * inserted. 
The user can access elements by their integer index (position in + * the list), and search for elements in the list.

+ * + * Unlike sets, lists typically allow duplicate elements. More formally, + * lists typically allow pairs of elements e1 and e2 + * such that e1.equals(e2), and they typically allow multiple + * null elements if they allow null elements at all. It is not inconceivable + * that someone might wish to implement a list that prohibits duplicates, by + * throwing runtime exceptions when the user attempts to insert them, but we + * expect this usage to be rare.

+ * + * The List interface places additional stipulations, beyond those + * specified in the Collection interface, on the contracts of the + * iterator, add, remove, equals, and + * hashCode methods. Declarations for other inherited methods are + * also included here for convenience.

+ * + * The List interface provides four methods for positional (indexed) + * access to list elements. Lists (like Java arrays) are zero based. Note + * that these operations may execute in time proportional to the index value + * for some implementations (the LinkedList class, for + * example). Thus, iterating over the elements in a list is typically + * preferable to indexing through it if the caller does not know the + * implementation.
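+ *
+ * For example (a sketch for illustration; {@code list} and the
+ * {@code process} method are hypothetical), the first loop below is safe
+ * for any implementation, while the second degrades to quadratic time on a
+ * LinkedList:
+ * <pre> {@code
+ * for (String s : list)                     // O(n) for any implementation
+ *     process(s);
+ * for (int i = 0; i < list.size(); i++)     // O(n^2) on a LinkedList
+ *     process(list.get(i));
+ * }</pre>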

+ * + * The List interface provides a special iterator, called a + * ListIterator, that allows element insertion and replacement, and + * bidirectional access in addition to the normal operations that the + * Iterator interface provides. A method is provided to obtain a + * list iterator that starts at a specified position in the list.
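+ *
+ * A short illustration ({@code list} is a hypothetical
+ * {@code List<String>}):
+ * <pre> {@code
+ * ListIterator<String> it = list.listIterator(list.size());
+ * while (it.hasPrevious()) {               // walk backwards from the end
+ *     String s = it.previous();
+ *     if (s.isEmpty())
+ *         it.set("(blank)");               // replace the element in place
+ * }
+ * }</pre>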

+ * + * The List interface provides two methods to search for a specified + * object. From a performance standpoint, these methods should be used with + * caution. In many implementations they will perform costly linear + * searches.
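+ *
+ * For example ({@code list} is hypothetical), both calls below may scan
+ * the entire list:
+ * <pre> {@code
+ * int first = list.indexOf("x");           // linear search from the front
+ * int last  = list.lastIndexOf("x");       // linear search from the back
+ * }</pre>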

+ * + * The List interface provides two methods to efficiently insert and + * remove multiple elements at an arbitrary point in the list.

+ * + * Note: While it is permissible for lists to contain themselves as elements, + * extreme caution is advised: the equals and hashCode + * methods are no longer well defined on such a list. + * + *

Some list implementations have restrictions on the elements that + * they may contain. For example, some implementations prohibit null elements, + * and some have restrictions on the types of their elements. Attempting to + * add an ineligible element throws an unchecked exception, typically + * NullPointerException or ClassCastException. Attempting + * to query the presence of an ineligible element may throw an exception, + * or it may simply return false; some implementations will exhibit the former + * behavior and some will exhibit the latter. More generally, attempting an + * operation on an ineligible element whose completion would not result in + * the insertion of an ineligible element into the list may throw an + * exception or it may succeed, at the option of the implementation. + * Such exceptions are marked as "optional" in the specification for this + * interface. + * + *

This interface is a member of the + * + * Java Collections Framework. + * + * @param the type of elements in this list + * + * @author Josh Bloch + * @author Neal Gafter + * @see Collection + * @see Set + * @see ArrayList + * @see LinkedList + * @see Vector + * @see Arrays#asList(Object[]) + * @see Collections#nCopies(int, Object) + * @see Collections#EMPTY_LIST + * @see AbstractList + * @see AbstractSequentialList + * @since 1.2 + */ + +public interface List extends Collection { + // Query Operations + + /** + * Returns the number of elements in this list. If this list contains + * more than Integer.MAX_VALUE elements, returns + * Integer.MAX_VALUE. + * + * @return the number of elements in this list + */ + int size(); + + /** + * Returns true if this list contains no elements. + * + * @return true if this list contains no elements + */ + boolean isEmpty(); + + /** + * Returns true if this list contains the specified element. + * More formally, returns true if and only if this list contains + * at least one element e such that + * (o==null ? e==null : o.equals(e)). + * + * @param o element whose presence in this list is to be tested + * @return true if this list contains the specified element + * @throws ClassCastException if the type of the specified element + * is incompatible with this list + * (optional) + * @throws NullPointerException if the specified element is null and this + * list does not permit null elements + * (optional) + */ + boolean contains(Object o); + + /** + * Returns an iterator over the elements in this list in proper sequence. + * + * @return an iterator over the elements in this list in proper sequence + */ + Iterator iterator(); + + /** + * Returns an array containing all of the elements in this list in proper + * sequence (from first to last element). + * + *

The returned array will be "safe" in that no references to it are + * maintained by this list. (In other words, this method must + * allocate a new array even if this list is backed by an array). + * The caller is thus free to modify the returned array. + * + *

+     * <p>This method acts as a bridge between array-based and collection-based
+     * APIs.
+     *
+     * @return an array containing all of the elements in this list in proper
+     *         sequence
+     * @see Arrays#asList(Object[])
+     */
+    Object[] toArray();
+
+    /**
+     * Returns an array containing all of the elements in this list in
+     * proper sequence (from first to last element); the runtime type of
+     * the returned array is that of the specified array. If the list fits
+     * in the specified array, it is returned therein. Otherwise, a new
+     * array is allocated with the runtime type of the specified array and
+     * the size of this list.
+     *
+     *

If the list fits in the specified array with room to spare (i.e., + * the array has more elements than the list), the element in the array + * immediately following the end of the list is set to null. + * (This is useful in determining the length of the list only if + * the caller knows that the list does not contain any null elements.) + * + *

Like the {@link #toArray()} method, this method acts as bridge between + * array-based and collection-based APIs. Further, this method allows + * precise control over the runtime type of the output array, and may, + * under certain circumstances, be used to save allocation costs. + * + *

Suppose x is a list known to contain only strings. + * The following code can be used to dump the list into a newly + * allocated array of String: + * + *

{@code
+     *     String[] y = x.toArray(new String[0]);
+     * }
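+     *
+     * A further sketch, reusing the hypothetical list {@code x}: the returned
+     * array is freshly allocated, so writes to it never affect the list:
+     * {@code
+     *     String[] y = x.toArray(new String[0]);
+     *     y[0] = "changed";      // x is untouched
+     * }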
+ *
+     * Note that toArray(new Object[0]) is identical in function to
+     * toArray().
+     *
+     * @param a the array into which the elements of this list are to
+     *          be stored, if it is big enough; otherwise, a new array of the
+     *          same runtime type is allocated for this purpose.
+     * @return an array containing the elements of this list
+     * @throws ArrayStoreException if the runtime type of the specified array
+     *         is not a supertype of the runtime type of every element in
+     *         this list
+     * @throws NullPointerException if the specified array is null
+     */
+    <T> T[] toArray(T[] a);
+
+
+    // Modification Operations
+
+    /**
+     * Appends the specified element to the end of this list (optional
+     * operation).
+     *

Lists that support this operation may place limitations on what + * elements may be added to this list. In particular, some + * lists will refuse to add null elements, and others will impose + * restrictions on the type of elements that may be added. List + * classes should clearly specify in their documentation any restrictions + * on what elements may be added. + * + * @param e element to be appended to this list + * @return true (as specified by {@link Collection#add}) + * @throws UnsupportedOperationException if the add operation + * is not supported by this list + * @throws ClassCastException if the class of the specified element + * prevents it from being added to this list + * @throws NullPointerException if the specified element is null and this + * list does not permit null elements + * @throws IllegalArgumentException if some property of this element + * prevents it from being added to this list + */ + boolean add(E e); + + /** + * Removes the first occurrence of the specified element from this list, + * if it is present (optional operation). If this list does not contain + * the element, it is unchanged. More formally, removes the element with + * the lowest index i such that + * (o==null ? get(i)==null : o.equals(get(i))) + * (if such an element exists). Returns true if this list + * contained the specified element (or equivalently, if this list changed + * as a result of the call). + * + * @param o element to be removed from this list, if present + * @return true if this list contained the specified element + * @throws ClassCastException if the type of the specified element + * is incompatible with this list + * (optional) + * @throws NullPointerException if the specified element is null and this + * list does not permit null elements + * (optional) + * @throws UnsupportedOperationException if the remove operation + * is not supported by this list + */ + boolean remove(Object o); + + + // Bulk Modification Operations + + /** + * Returns true if this list contains all of the elements of the + * specified collection. + * + * @param c collection to be checked for containment in this list + * @return true if this list contains all of the elements of the + * specified collection + * @throws ClassCastException if the types of one or more elements + * in the specified collection are incompatible with this + * list + * (optional) + * @throws NullPointerException if the specified collection contains one + * or more null elements and this list does not permit null + * elements + * (optional), + * or if the specified collection is null + * @see #contains(Object) + */ + boolean containsAll(Collection c); + + /** + * Appends all of the elements in the specified collection to the end of + * this list, in the order that they are returned by the specified + * collection's iterator (optional operation). The behavior of this + * operation is undefined if the specified collection is modified while + * the operation is in progress. (Note that this will occur if the + * specified collection is this list, and it's nonempty.) 
+ * + * @param c collection containing elements to be added to this list + * @return true if this list changed as a result of the call + * @throws UnsupportedOperationException if the addAll operation + * is not supported by this list + * @throws ClassCastException if the class of an element of the specified + * collection prevents it from being added to this list + * @throws NullPointerException if the specified collection contains one + * or more null elements and this list does not permit null + * elements, or if the specified collection is null + * @throws IllegalArgumentException if some property of an element of the + * specified collection prevents it from being added to this list + * @see #add(Object) + */ + boolean addAll(Collection c); + + /** + * Inserts all of the elements in the specified collection into this + * list at the specified position (optional operation). Shifts the + * element currently at that position (if any) and any subsequent + * elements to the right (increases their indices). The new elements + * will appear in this list in the order that they are returned by the + * specified collection's iterator. The behavior of this operation is + * undefined if the specified collection is modified while the + * operation is in progress. (Note that this will occur if the specified + * collection is this list, and it's nonempty.) + * + * @param index index at which to insert the first element from the + * specified collection + * @param c collection containing elements to be added to this list + * @return true if this list changed as a result of the call + * @throws UnsupportedOperationException if the addAll operation + * is not supported by this list + * @throws ClassCastException if the class of an element of the specified + * collection prevents it from being added to this list + * @throws NullPointerException if the specified collection contains one + * or more null elements and this list does not permit null + * elements, or if the specified collection is null + * @throws IllegalArgumentException if some property of an element of the + * specified collection prevents it from being added to this list + * @throws IndexOutOfBoundsException if the index is out of range + * (index < 0 || index > size()) + */ + boolean addAll(int index, Collection c); + + /** + * Removes from this list all of its elements that are contained in the + * specified collection (optional operation). + * + * @param c collection containing elements to be removed from this list + * @return true if this list changed as a result of the call + * @throws UnsupportedOperationException if the removeAll operation + * is not supported by this list + * @throws ClassCastException if the class of an element of this list + * is incompatible with the specified collection + * (optional) + * @throws NullPointerException if this list contains a null element and the + * specified collection does not permit null elements + * (optional), + * or if the specified collection is null + * @see #remove(Object) + * @see #contains(Object) + */ + boolean removeAll(Collection c); + + /** + * Retains only the elements in this list that are contained in the + * specified collection (optional operation). In other words, removes + * from this list all of its elements that are not contained in the + * specified collection. 
+ * + * @param c collection containing elements to be retained in this list + * @return true if this list changed as a result of the call + * @throws UnsupportedOperationException if the retainAll operation + * is not supported by this list + * @throws ClassCastException if the class of an element of this list + * is incompatible with the specified collection + * (optional) + * @throws NullPointerException if this list contains a null element and the + * specified collection does not permit null elements + * (optional), + * or if the specified collection is null + * @see #remove(Object) + * @see #contains(Object) + */ + boolean retainAll(Collection c); + + /** + * Replaces each element of this list with the result of applying the + * operator to that element. Errors or runtime exceptions thrown by + * the operator are relayed to the caller. + * + * @implSpec + * The default implementation is equivalent to, for this {@code list}: + *

{@code
+     *     final ListIterator<E> li = list.listIterator();
+     *     while (li.hasNext()) {
+     *         li.set(operator.apply(li.next()));
+     *     }
+     * }
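+     *
+     * For example, a minimal usage sketch on a modifiable list of strings:
+     * {@code
+     *     List<String> words = new ArrayList<>(Arrays.asList("a", "b"));
+     *     words.replaceAll(String::toUpperCase);   // ["A", "B"]
+     * }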
+ *
+     * If the list's list-iterator does not support the {@code set} operation
+     * then an {@code UnsupportedOperationException} will be thrown when
+     * replacing the first element.
+     *
+     * @param operator the operator to apply to each element
+     * @throws UnsupportedOperationException if this list is unmodifiable.
+     *         Implementations may throw this exception if an element
+     *         cannot be replaced or if, in general, modification is not
+     *         supported
+     * @throws NullPointerException if the specified operator is null or
+     *         if the operator result is a null value and this list does
+     *         not permit null elements
+     *         (optional)
+     * @since 1.8
+     */
+    default void replaceAll(UnaryOperator<E> operator) {
+        Objects.requireNonNull(operator);
+        final ListIterator<E> li = this.listIterator();
+        while (li.hasNext()) {
+            li.set(operator.apply(li.next()));
+        }
+    }
+
+    /**
+     * Sorts this list according to the order induced by the specified
+     * {@link Comparator}.
+     *

All elements in this list must be mutually comparable using the + * specified comparator (that is, {@code c.compare(e1, e2)} must not throw + * a {@code ClassCastException} for any elements {@code e1} and {@code e2} + * in the list). + * + *

If the specified comparator is {@code null} then all elements in this + * list must implement the {@link Comparable} interface and the elements' + * {@linkplain Comparable natural ordering} should be used. + * + *
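+     *
+     * For example, a usage sketch over a hypothetical list of strings:
+     * {@code
+     *     list.sort(null);                                   // natural order
+     *     list.sort(Comparator.comparing(String::length));   // by length
+     * }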

This list must be modifiable, but need not be resizable.
+     *
+     * @implSpec
+     * The default implementation obtains an array containing all elements in
+     * this list, sorts the array, and iterates over this list resetting each
+     * element from the corresponding position in the array.  (This avoids the
+     * n^2 log(n) performance that would result from attempting
+     * to sort a linked list in place.)
+     *
+     * @implNote
+     * This implementation is a stable, adaptive, iterative mergesort that
+     * requires far fewer than n lg(n) comparisons when the input array is
+     * partially sorted, while offering the performance of a traditional
+     * mergesort when the input array is randomly ordered.  If the input array
+     * is nearly sorted, the implementation requires approximately n
+     * comparisons.  Temporary storage requirements vary from a small constant
+     * for nearly sorted input arrays to n/2 object references for randomly
+     * ordered input arrays.
+     *

The implementation takes equal advantage of ascending and + * descending order in its input array, and can take advantage of + * ascending and descending order in different parts of the same + * input array. It is well-suited to merging two or more sorted arrays: + * simply concatenate the arrays and sort the resulting array. + * + *

The implementation was adapted from Tim Peters's list sort for Python
+     * (TimSort).  It uses techniques from Peter McIlroy's "Optimistic
+     * Sorting and Information Theoretic Complexity", in Proceedings of the
+     * Fourth Annual ACM-SIAM Symposium on Discrete Algorithms, pp 467-474,
+     * January 1993.
+     *
+     * @param c the {@code Comparator} used to compare list elements.
+     *          A {@code null} value indicates that the elements'
+     *          {@linkplain Comparable natural ordering} should be used
+     * @throws ClassCastException if the list contains elements that are not
+     *         mutually comparable using the specified comparator
+     * @throws UnsupportedOperationException if the list's list-iterator does
+     *         not support the {@code set} operation
+     * @throws IllegalArgumentException
+     *         (optional)
+     *         if the comparator is found to violate the {@link Comparator}
+     *         contract
+     * @since 1.8
+     */
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    default void sort(Comparator<? super E> c) {
+        Object[] a = this.toArray();
+        Arrays.sort(a, (Comparator) c);
+        ListIterator<E> i = this.listIterator();
+        for (Object e : a) {
+            i.next();
+            i.set((E) e);
+        }
+    }
+
+    /**
+     * Removes all of the elements from this list (optional operation).
+     * The list will be empty after this call returns.
+     *
+     * @throws UnsupportedOperationException if the clear operation
+     *         is not supported by this list
+     */
+    void clear();
+
+
+    // Comparison and hashing
+
+    /**
+     * Compares the specified object with this list for equality.  Returns
+     * true if and only if the specified object is also a list, both
+     * lists have the same size, and all corresponding pairs of elements in
+     * the two lists are equal.  (Two elements e1 and
+     * e2 are equal if (e1==null ? e2==null :
+     * e1.equals(e2)).)  In other words, two lists are defined to be
+     * equal if they contain the same elements in the same order.  This
+     * definition ensures that the equals method works properly across
+     * different implementations of the List interface.
+     *
+     * @param o the object to be compared for equality with this list
+     * @return true if the specified object is equal to this list
+     */
+    boolean equals(Object o);
+
+    /**
+     * Returns the hash code value for this list.  The hash code of a list
+     * is defined to be the result of the following calculation:

{@code
+     *     int hashCode = 1;
+     *     for (E e : list)
+     *         hashCode = 31*hashCode + (e==null ? 0 : e.hashCode());
+     * }
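+     *
+     * A worked sketch: the two-element list {@code ["a", "b"]} hashes to
+     * {@code 31*(31*1 + "a".hashCode()) + "b".hashCode()}, so the result
+     * depends on element order as well as on the elements themselves.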
+ * This ensures that list1.equals(list2) implies that + * list1.hashCode()==list2.hashCode() for any two lists, + * list1 and list2, as required by the general + * contract of {@link Object#hashCode}. + * + * @return the hash code value for this list + * @see Object#equals(Object) + * @see #equals(Object) + */ + int hashCode(); + + + // Positional Access Operations + + /** + * Returns the element at the specified position in this list. + * + * @param index index of the element to return + * @return the element at the specified position in this list + * @throws IndexOutOfBoundsException if the index is out of range + * (index < 0 || index >= size()) + */ + E get(int index); + + /** + * Replaces the element at the specified position in this list with the + * specified element (optional operation). + * + * @param index index of the element to replace + * @param element element to be stored at the specified position + * @return the element previously at the specified position + * @throws UnsupportedOperationException if the set operation + * is not supported by this list + * @throws ClassCastException if the class of the specified element + * prevents it from being added to this list + * @throws NullPointerException if the specified element is null and + * this list does not permit null elements + * @throws IllegalArgumentException if some property of the specified + * element prevents it from being added to this list + * @throws IndexOutOfBoundsException if the index is out of range + * (index < 0 || index >= size()) + */ + E set(int index, E element); + + /** + * Inserts the specified element at the specified position in this list + * (optional operation). Shifts the element currently at that position + * (if any) and any subsequent elements to the right (adds one to their + * indices). + * + * @param index index at which the specified element is to be inserted + * @param element element to be inserted + * @throws UnsupportedOperationException if the add operation + * is not supported by this list + * @throws ClassCastException if the class of the specified element + * prevents it from being added to this list + * @throws NullPointerException if the specified element is null and + * this list does not permit null elements + * @throws IllegalArgumentException if some property of the specified + * element prevents it from being added to this list + * @throws IndexOutOfBoundsException if the index is out of range + * (index < 0 || index > size()) + */ + void add(int index, E element); + + /** + * Removes the element at the specified position in this list (optional + * operation). Shifts any subsequent elements to the left (subtracts one + * from their indices). Returns the element that was removed from the + * list. + * + * @param index the index of the element to be removed + * @return the element previously at the specified position + * @throws UnsupportedOperationException if the remove operation + * is not supported by this list + * @throws IndexOutOfBoundsException if the index is out of range + * (index < 0 || index >= size()) + */ + E remove(int index); + + + // Search Operations + + /** + * Returns the index of the first occurrence of the specified element + * in this list, or -1 if this list does not contain the element. + * More formally, returns the lowest index i such that + * (o==null ? get(i)==null : o.equals(get(i))), + * or -1 if there is no such index. 
+ * + * @param o element to search for + * @return the index of the first occurrence of the specified element in + * this list, or -1 if this list does not contain the element + * @throws ClassCastException if the type of the specified element + * is incompatible with this list + * (optional) + * @throws NullPointerException if the specified element is null and this + * list does not permit null elements + * (optional) + */ + int indexOf(Object o); + + /** + * Returns the index of the last occurrence of the specified element + * in this list, or -1 if this list does not contain the element. + * More formally, returns the highest index i such that + * (o==null ? get(i)==null : o.equals(get(i))), + * or -1 if there is no such index. + * + * @param o element to search for + * @return the index of the last occurrence of the specified element in + * this list, or -1 if this list does not contain the element + * @throws ClassCastException if the type of the specified element + * is incompatible with this list + * (optional) + * @throws NullPointerException if the specified element is null and this + * list does not permit null elements + * (optional) + */ + int lastIndexOf(Object o); + + + // List Iterators + + /** + * Returns a list iterator over the elements in this list (in proper + * sequence). + * + * @return a list iterator over the elements in this list (in proper + * sequence) + */ + ListIterator listIterator(); + + /** + * Returns a list iterator over the elements in this list (in proper + * sequence), starting at the specified position in the list. + * The specified index indicates the first element that would be + * returned by an initial call to {@link ListIterator#next next}. + * An initial call to {@link ListIterator#previous previous} would + * return the element with the specified index minus one. + * + * @param index index of the first element to be returned from the + * list iterator (by a call to {@link ListIterator#next next}) + * @return a list iterator over the elements in this list (in proper + * sequence), starting at the specified position in the list + * @throws IndexOutOfBoundsException if the index is out of range + * ({@code index < 0 || index > size()}) + */ + ListIterator listIterator(int index); + + // View + + /** + * Returns a view of the portion of this list between the specified + * fromIndex, inclusive, and toIndex, exclusive. (If + * fromIndex and toIndex are equal, the returned list is + * empty.) The returned list is backed by this list, so non-structural + * changes in the returned list are reflected in this list, and vice-versa. + * The returned list supports all of the optional list operations supported + * by this list.

+ * + * This method eliminates the need for explicit range operations (of + * the sort that commonly exist for arrays). Any operation that expects + * a list can be used as a range operation by passing a subList view + * instead of a whole list. For example, the following idiom + * removes a range of elements from a list: + *

{@code
+     *      list.subList(from, to).clear();
+     * }
+ * Similar idioms may be constructed for indexOf and + * lastIndexOf, and all of the algorithms in the + * Collections class can be applied to a subList.
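+     *
+     * For instance, a sketch of a ranged search that maps its result back to
+     * whole-list coordinates:
+     * {@code
+     *     int i = list.subList(from, to).indexOf(o);
+     *     int index = (i < 0) ? -1 : from + i;
+     * }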

+ *
+     * The semantics of the list returned by this method become undefined if
+     * the backing list (i.e., this list) is structurally modified in
+     * any way other than via the returned list.  (Structural modifications are
+     * those that change the size of this list, or otherwise perturb it in such
+     * a fashion that iterations in progress may yield incorrect results.)
+     *
+     * @param fromIndex low endpoint (inclusive) of the subList
+     * @param toIndex high endpoint (exclusive) of the subList
+     * @return a view of the specified range within this list
+     * @throws IndexOutOfBoundsException for an illegal endpoint index value
+     *         (fromIndex < 0 || toIndex > size ||
+     *         fromIndex > toIndex)
+     */
+    List<E> subList(int fromIndex, int toIndex);
+
+    /**
+     * Creates a {@link Spliterator} over the elements in this list.
+     *

The {@code Spliterator} reports {@link Spliterator#SIZED} and + * {@link Spliterator#ORDERED}. Implementations should document the + * reporting of additional characteristic values. + * + * @implSpec + * The default implementation creates a + * late-binding spliterator + * from the list's {@code Iterator}. The spliterator inherits the + * fail-fast properties of the list's iterator. + * + * @implNote + * The created {@code Spliterator} additionally reports + * {@link Spliterator#SUBSIZED}. + * + * @return a {@code Spliterator} over the elements in this list + * @since 1.8 + */ + @Override + default Spliterator spliterator() { + return Spliterators.spliterator(this, Spliterator.ORDERED); + } +} diff --git a/src/Map.java b/src/Map.java new file mode 100644 index 0000000..2a35f6a --- /dev/null +++ b/src/Map.java @@ -0,0 +1,1183 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +package java.util; + +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.io.Serializable; + +/** + * An object that maps keys to values. A map cannot contain duplicate keys; + * each key can map to at most one value. + * + *

This interface takes the place of the Dictionary class, which + * was a totally abstract class rather than an interface. + * + *

The Map interface provides three collection views, which + * allow a map's contents to be viewed as a set of keys, collection of values, + * or set of key-value mappings. The order of a map is defined as + * the order in which the iterators on the map's collection views return their + * elements. Some map implementations, like the TreeMap class, make + * specific guarantees as to their order; others, like the HashMap + * class, do not. + * + *

Note: great care must be exercised if mutable objects are used as map + * keys. The behavior of a map is not specified if the value of an object is + * changed in a manner that affects equals comparisons while the + * object is a key in the map. A special case of this prohibition is that it + * is not permissible for a map to contain itself as a key. While it is + * permissible for a map to contain itself as a value, extreme caution is + * advised: the equals and hashCode methods are no longer + * well defined on such a map. + * + *

All general-purpose map implementation classes should provide two + * "standard" constructors: a void (no arguments) constructor which creates an + * empty map, and a constructor with a single argument of type Map, + * which creates a new map with the same key-value mappings as its argument. + * In effect, the latter constructor allows the user to copy any map, + * producing an equivalent map of the desired class. There is no way to + * enforce this recommendation (as interfaces cannot contain constructors) but + * all of the general-purpose map implementations in the JDK comply. + * + *

The "destructive" methods contained in this interface, that is, the + * methods that modify the map on which they operate, are specified to throw + * UnsupportedOperationException if this map does not support the + * operation. If this is the case, these methods may, but are not required + * to, throw an UnsupportedOperationException if the invocation would + * have no effect on the map. For example, invoking the {@link #putAll(Map)} + * method on an unmodifiable map may, but is not required to, throw the + * exception if the map whose mappings are to be "superimposed" is empty. + * + *

Some map implementations have restrictions on the keys and values they + * may contain. For example, some implementations prohibit null keys and + * values, and some have restrictions on the types of their keys. Attempting + * to insert an ineligible key or value throws an unchecked exception, + * typically NullPointerException or ClassCastException. + * Attempting to query the presence of an ineligible key or value may throw an + * exception, or it may simply return false; some implementations will exhibit + * the former behavior and some will exhibit the latter. More generally, + * attempting an operation on an ineligible key or value whose completion + * would not result in the insertion of an ineligible element into the map may + * throw an exception or it may succeed, at the option of the implementation. + * Such exceptions are marked as "optional" in the specification for this + * interface. + * + *

Many methods in Collections Framework interfaces are defined + * in terms of the {@link Object#equals(Object) equals} method. For + * example, the specification for the {@link #containsKey(Object) + * containsKey(Object key)} method says: "returns true if and + * only if this map contains a mapping for a key k such that + * (key==null ? k==null : key.equals(k))." This specification should + * not be construed to imply that invoking Map.containsKey + * with a non-null argument key will cause key.equals(k) to + * be invoked for any key k. Implementations are free to + * implement optimizations whereby the equals invocation is avoided, + * for example, by first comparing the hash codes of the two keys. (The + * {@link Object#hashCode()} specification guarantees that two objects with + * unequal hash codes cannot be equal.) More generally, implementations of + * the various Collections Framework interfaces are free to take advantage of + * the specified behavior of underlying {@link Object} methods wherever the + * implementor deems it appropriate. + * + *

Some map operations which perform recursive traversal of the map may fail + * with an exception for self-referential instances where the map directly or + * indirectly contains itself. This includes the {@code clone()}, + * {@code equals()}, {@code hashCode()} and {@code toString()} methods. + * Implementations may optionally handle the self-referential scenario, however + * most current implementations do not do so. + * + *

This interface is a member of the
+ * Java Collections Framework.
+ *
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ *
+ * @author  Josh Bloch
+ * @see HashMap
+ * @see TreeMap
+ * @see Hashtable
+ * @see SortedMap
+ * @see Collection
+ * @see Set
+ * @since 1.2
+ */
+public interface Map<K, V> {
+    // Query Operations
+
+    /**
+     * Returns the number of key-value mappings in this map.  If the
+     * map contains more than Integer.MAX_VALUE elements, returns
+     * Integer.MAX_VALUE.
+     *
+     * @return the number of key-value mappings in this map
+     */
+    int size();
+
+    /**
+     * Returns true if this map contains no key-value mappings.
+     *
+     * @return true if this map contains no key-value mappings
+     */
+    boolean isEmpty();
+
+    /**
+     * Returns true if this map contains a mapping for the specified
+     * key.  More formally, returns true if and only if
+     * this map contains a mapping for a key k such that
+     * (key==null ? k==null : key.equals(k)).  (There can be
+     * at most one such mapping.)
+     *
+     * @param key key whose presence in this map is to be tested
+     * @return true if this map contains a mapping for the specified
+     *         key
+     * @throws ClassCastException if the key is of an inappropriate type for
+     *         this map
+     *         (optional)
+     * @throws NullPointerException if the specified key is null and this map
+     *         does not permit null keys
+     *         (optional)
+     */
+    boolean containsKey(Object key);
+
+    /**
+     * Returns true if this map maps one or more keys to the
+     * specified value.  More formally, returns true if and only if
+     * this map contains at least one mapping to a value v such that
+     * (value==null ? v==null : value.equals(v)).  This operation
+     * will probably require time linear in the map size for most
+     * implementations of the Map interface.
+     *
+     * @param value value whose presence in this map is to be tested
+     * @return true if this map maps one or more keys to the
+     *         specified value
+     * @throws ClassCastException if the value is of an inappropriate type for
+     *         this map
+     *         (optional)
+     * @throws NullPointerException if the specified value is null and this
+     *         map does not permit null values
+     *         (optional)
+     */
+    boolean containsValue(Object value);
+
+    /**
+     * Returns the value to which the specified key is mapped,
+     * or {@code null} if this map contains no mapping for the key.
+     *

More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code (key==null ? k==null : + * key.equals(k))}, then this method returns {@code v}; otherwise + * it returns {@code null}. (There can be at most one such mapping.) + * + *

If this map permits null values, then a return value of + * {@code null} does not necessarily indicate that the map + * contains no mapping for the key; it's also possible that the map + * explicitly maps the key to {@code null}. The {@link #containsKey + * containsKey} operation may be used to distinguish these two cases. + * + * @param key the key whose associated value is to be returned + * @return the value to which the specified key is mapped, or + * {@code null} if this map contains no mapping for the key + * @throws ClassCastException if the key is of an inappropriate type for + * this map + * (optional) + * @throws NullPointerException if the specified key is null and this map + * does not permit null keys + * (optional) + */ + V get(Object key); + + // Modification Operations + + /** + * Associates the specified value with the specified key in this map + * (optional operation). If the map previously contained a mapping for + * the key, the old value is replaced by the specified value. (A map + * m is said to contain a mapping for a key k if and only + * if {@link #containsKey(Object) m.containsKey(k)} would return + * true.) + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with key, or + * null if there was no mapping for key. + * (A null return can also indicate that the map + * previously associated null with key, + * if the implementation supports null values.) + * @throws UnsupportedOperationException if the put operation + * is not supported by this map + * @throws ClassCastException if the class of the specified key or value + * prevents it from being stored in this map + * @throws NullPointerException if the specified key or value is null + * and this map does not permit null keys or values + * @throws IllegalArgumentException if some property of the specified key + * or value prevents it from being stored in this map + */ + V put(K key, V value); + + /** + * Removes the mapping for a key from this map if it is present + * (optional operation). More formally, if this map contains a mapping + * from key k to value v such that + * (key==null ? k==null : key.equals(k)), that mapping + * is removed. (The map can contain at most one such mapping.) + * + *

Returns the value to which this map previously associated the key, + * or null if the map contained no mapping for the key. + * + *

If this map permits null values, then a return value of + * null does not necessarily indicate that the map + * contained no mapping for the key; it's also possible that the map + * explicitly mapped the key to null. + * + *

The map will not contain a mapping for the specified key once the + * call returns. + * + * @param key key whose mapping is to be removed from the map + * @return the previous value associated with key, or + * null if there was no mapping for key. + * @throws UnsupportedOperationException if the remove operation + * is not supported by this map + * @throws ClassCastException if the key is of an inappropriate type for + * this map + * (optional) + * @throws NullPointerException if the specified key is null and this + * map does not permit null keys + * (optional) + */ + V remove(Object key); + + + // Bulk Operations + + /** + * Copies all of the mappings from the specified map to this map + * (optional operation). The effect of this call is equivalent to that + * of calling {@link #put(Object,Object) put(k, v)} on this map once + * for each mapping from key k to value v in the + * specified map. The behavior of this operation is undefined if the + * specified map is modified while the operation is in progress. + * + * @param m mappings to be stored in this map + * @throws UnsupportedOperationException if the putAll operation + * is not supported by this map + * @throws ClassCastException if the class of a key or value in the + * specified map prevents it from being stored in this map + * @throws NullPointerException if the specified map is null, or if + * this map does not permit null keys or values, and the + * specified map contains null keys or values + * @throws IllegalArgumentException if some property of a key or value in + * the specified map prevents it from being stored in this map + */ + void putAll(Map m); + + /** + * Removes all of the mappings from this map (optional operation). + * The map will be empty after this call returns. + * + * @throws UnsupportedOperationException if the clear operation + * is not supported by this map + */ + void clear(); + + + // Views + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation), the results of + * the iteration are undefined. The set supports element removal, + * which removes the corresponding mapping from the map, via the + * Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. It does not support the add or addAll + * operations. + * + * @return a set view of the keys contained in this map + */ + Set keySet(); + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. If the map is + * modified while an iteration over the collection is in progress + * (except through the iterator's own remove operation), + * the results of the iteration are undefined. The collection + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Collection.remove, removeAll, + * retainAll and clear operations. It does not + * support the add or addAll operations. + * + * @return a collection view of the values contained in this map + */ + Collection values(); + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. 
If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation, or through the + * setValue operation on a map entry returned by the + * iterator) the results of the iteration are undefined. The set + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Set.remove, removeAll, retainAll and + * clear operations. It does not support the + * add or addAll operations. + * + * @return a set view of the mappings contained in this map + */ + Set> entrySet(); + + /** + * A map entry (key-value pair). The Map.entrySet method returns + * a collection-view of the map, whose elements are of this class. The + * only way to obtain a reference to a map entry is from the + * iterator of this collection-view. These Map.Entry objects are + * valid only for the duration of the iteration; more formally, + * the behavior of a map entry is undefined if the backing map has been + * modified after the entry was returned by the iterator, except through + * the setValue operation on the map entry. + * + * @see Map#entrySet() + * @since 1.2 + */ + interface Entry { + /** + * Returns the key corresponding to this entry. + * + * @return the key corresponding to this entry + * @throws IllegalStateException implementations may, but are not + * required to, throw this exception if the entry has been + * removed from the backing map. + */ + K getKey(); + + /** + * Returns the value corresponding to this entry. If the mapping + * has been removed from the backing map (by the iterator's + * remove operation), the results of this call are undefined. + * + * @return the value corresponding to this entry + * @throws IllegalStateException implementations may, but are not + * required to, throw this exception if the entry has been + * removed from the backing map. + */ + V getValue(); + + /** + * Replaces the value corresponding to this entry with the specified + * value (optional operation). (Writes through to the map.) The + * behavior of this call is undefined if the mapping has already been + * removed from the map (by the iterator's remove operation). + * + * @param value new value to be stored in this entry + * @return old value corresponding to the entry + * @throws UnsupportedOperationException if the put operation + * is not supported by the backing map + * @throws ClassCastException if the class of the specified value + * prevents it from being stored in the backing map + * @throws NullPointerException if the backing map does not permit + * null values, and the specified value is null + * @throws IllegalArgumentException if some property of this value + * prevents it from being stored in the backing map + * @throws IllegalStateException implementations may, but are not + * required to, throw this exception if the entry has been + * removed from the backing map. + */ + V setValue(V value); + + /** + * Compares the specified object with this entry for equality. + * Returns true if the given object is also a map entry and + * the two entries represent the same mapping. More formally, two + * entries e1 and e2 represent the same mapping + * if

+         *     (e1.getKey()==null ?
+         *      e2.getKey()==null : e1.getKey().equals(e2.getKey()))  &&
+         *     (e1.getValue()==null ?
+         *      e2.getValue()==null : e1.getValue().equals(e2.getValue()))
+         * 
+ * This ensures that the equals method works properly across + * different implementations of the Map.Entry interface. + * + * @param o object to be compared for equality with this map entry + * @return true if the specified object is equal to this map + * entry + */ + boolean equals(Object o); + + /** + * Returns the hash code value for this map entry. The hash code + * of a map entry e is defined to be:
+         *     (e.getKey()==null   ? 0 : e.getKey().hashCode()) ^
+         *     (e.getValue()==null ? 0 : e.getValue().hashCode())
+         * 
+ * This ensures that e1.equals(e2) implies that + * e1.hashCode()==e2.hashCode() for any two Entries + * e1 and e2, as required by the general + * contract of Object.hashCode. + * + * @return the hash code value for this map entry + * @see Object#hashCode() + * @see Object#equals(Object) + * @see #equals(Object) + */ + int hashCode(); + + /** + * Returns a comparator that compares {@link Map.Entry} in natural order on key. + * + *

The returned comparator is serializable and throws {@link
+         * NullPointerException} when comparing an entry with a null key.
+         *
+         * @param  <K> the {@link Comparable} type of the map keys
+         * @param  <V> the type of the map values
+         * @return a comparator that compares {@link Map.Entry} in natural order on key.
+         * @see Comparable
+         * @since 1.8
+         */
+        public static <K extends Comparable<? super K>, V> Comparator<Map.Entry<K, V>> comparingByKey() {
+            return (Comparator<Map.Entry<K, V>> & Serializable)
+                (c1, c2) -> c1.getKey().compareTo(c2.getKey());
+        }
+
+        /**
+         * Returns a comparator that compares {@link Map.Entry} in natural order on value.
+         *

The returned comparator is serializable and throws {@link
+         * NullPointerException} when comparing an entry with null values.
+         *
+         * @param  <K> the type of the map keys
+         * @param  <V> the {@link Comparable} type of the map values
+         * @return a comparator that compares {@link Map.Entry} in natural order on value.
+         * @see Comparable
+         * @since 1.8
+         */
+        public static <K, V extends Comparable<? super V>> Comparator<Map.Entry<K, V>> comparingByValue() {
+            return (Comparator<Map.Entry<K, V>> & Serializable)
+                (c1, c2) -> c1.getValue().compareTo(c2.getValue());
+        }
+
+        /**
+         * Returns a comparator that compares {@link Map.Entry} by key using the given
+         * {@link Comparator}.
+         *

The returned comparator is serializable if the specified comparator
+         * is also serializable.
+         *
+         * @param  <K> the type of the map keys
+         * @param  <V> the type of the map values
+         * @param  cmp the key {@link Comparator}
+         * @return a comparator that compares {@link Map.Entry} by the key.
+         * @since 1.8
+         */
+        public static <K, V> Comparator<Map.Entry<K, V>> comparingByKey(Comparator<? super K> cmp) {
+            Objects.requireNonNull(cmp);
+            return (Comparator<Map.Entry<K, V>> & Serializable)
+                (c1, c2) -> cmp.compare(c1.getKey(), c2.getKey());
+        }
+
+        /**
+         * Returns a comparator that compares {@link Map.Entry} by value using the given
+         * {@link Comparator}.
+         *

The returned comparator is serializable if the specified comparator + * is also serializable. + * + * @param the type of the map keys + * @param the type of the map values + * @param cmp the value {@link Comparator} + * @return a comparator that compares {@link Map.Entry} by the value. + * @since 1.8 + */ + public static Comparator> comparingByValue(Comparator cmp) { + Objects.requireNonNull(cmp); + return (Comparator> & Serializable) + (c1, c2) -> cmp.compare(c1.getValue(), c2.getValue()); + } + } + + // Comparison and hashing + + /** + * Compares the specified object with this map for equality. Returns + * true if the given object is also a map and the two maps + * represent the same mappings. More formally, two maps m1 and + * m2 represent the same mappings if + * m1.entrySet().equals(m2.entrySet()). This ensures that the + * equals method works properly across different implementations + * of the Map interface. + * + * @param o object to be compared for equality with this map + * @return true if the specified object is equal to this map + */ + boolean equals(Object o); + + /** + * Returns the hash code value for this map. The hash code of a map is + * defined to be the sum of the hash codes of each entry in the map's + * entrySet() view. This ensures that m1.equals(m2) + * implies that m1.hashCode()==m2.hashCode() for any two maps + * m1 and m2, as required by the general contract of + * {@link Object#hashCode}. + * + * @return the hash code value for this map + * @see Map.Entry#hashCode() + * @see Object#equals(Object) + * @see #equals(Object) + */ + int hashCode(); + + // Defaultable methods + + /** + * Returns the value to which the specified key is mapped, or + * {@code defaultValue} if this map contains no mapping for the key. + * + * @implSpec + * The default implementation makes no guarantees about synchronization + * or atomicity properties of this method. Any implementation providing + * atomicity guarantees must override this method and document its + * concurrency properties. + * + * @param key the key whose associated value is to be returned + * @param defaultValue the default mapping of the key + * @return the value to which the specified key is mapped, or + * {@code defaultValue} if this map contains no mapping for the key + * @throws ClassCastException if the key is of an inappropriate type for + * this map + * (optional) + * @throws NullPointerException if the specified key is null and this map + * does not permit null keys + * (optional) + * @since 1.8 + */ + default V getOrDefault(Object key, V defaultValue) { + V v; + return (((v = get(key)) != null) || containsKey(key)) + ? v + : defaultValue; + } + + /** + * Performs the given action for each entry in this map until all entries + * have been processed or the action throws an exception. Unless + * otherwise specified by the implementing class, actions are performed in + * the order of entry set iteration (if an iteration order is specified.) + * Exceptions thrown by the action are relayed to the caller. + * + * @implSpec + * The default implementation is equivalent to, for this {@code map}: + *

 {@code
+     * for (Map.Entry<K, V> entry : map.entrySet())
+     *     action.accept(entry.getKey(), entry.getValue());
+     * }
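+     *
+     * For example, a minimal usage sketch:
+     * {@code
+     *     map.forEach((k, v) -> System.out.println(k + " -> " + v));
+     * }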
+ * + * The default implementation makes no guarantees about synchronization + * or atomicity properties of this method. Any implementation providing + * atomicity guarantees must override this method and document its + * concurrency properties. + * + * @param action The action to be performed for each entry + * @throws NullPointerException if the specified action is null + * @throws ConcurrentModificationException if an entry is found to be + * removed during iteration + * @since 1.8 + */ + default void forEach(BiConsumer action) { + Objects.requireNonNull(action); + for (Map.Entry entry : entrySet()) { + K k; + V v; + try { + k = entry.getKey(); + v = entry.getValue(); + } catch(IllegalStateException ise) { + // this usually means the entry is no longer in the map. + throw new ConcurrentModificationException(ise); + } + action.accept(k, v); + } + } + + /** + * Replaces each entry's value with the result of invoking the given + * function on that entry until all entries have been processed or the + * function throws an exception. Exceptions thrown by the function are + * relayed to the caller. + * + * @implSpec + *

The default implementation is equivalent to, for this {@code map}: + *

 {@code
+     * for (Map.Entry<K, V> entry : map.entrySet())
+     *     entry.setValue(function.apply(entry.getKey(), entry.getValue()));
+     * }
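+     *
+     * For example, a sketch assuming a hypothetical {@code Map<String, String>}:
+     * {@code
+     *     map.replaceAll((k, v) -> v.trim());
+     * }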
+ * + *

The default implementation makes no guarantees about synchronization + * or atomicity properties of this method. Any implementation providing + * atomicity guarantees must override this method and document its + * concurrency properties. + * + * @param function the function to apply to each entry + * @throws UnsupportedOperationException if the {@code set} operation + * is not supported by this map's entry set iterator. + * @throws ClassCastException if the class of a replacement value + * prevents it from being stored in this map + * @throws NullPointerException if the specified function is null, or the + * specified replacement value is null, and this map does not permit null + * values + * @throws ClassCastException if a replacement value is of an inappropriate + * type for this map + * (optional) + * @throws NullPointerException if function or a replacement value is null, + * and this map does not permit null keys or values + * (optional) + * @throws IllegalArgumentException if some property of a replacement value + * prevents it from being stored in this map + * (optional) + * @throws ConcurrentModificationException if an entry is found to be + * removed during iteration + * @since 1.8 + */ + default void replaceAll(BiFunction function) { + Objects.requireNonNull(function); + for (Map.Entry entry : entrySet()) { + K k; + V v; + try { + k = entry.getKey(); + v = entry.getValue(); + } catch(IllegalStateException ise) { + // this usually means the entry is no longer in the map. + throw new ConcurrentModificationException(ise); + } + + // ise thrown from function is not a cme. + v = function.apply(k, v); + + try { + entry.setValue(v); + } catch(IllegalStateException ise) { + // this usually means the entry is no longer in the map. + throw new ConcurrentModificationException(ise); + } + } + } + + /** + * If the specified key is not already associated with a value (or is mapped + * to {@code null}) associates it with the given value and returns + * {@code null}, else returns the current value. + * + * @implSpec + * The default implementation is equivalent to, for this {@code + * map}: + * + *

 {@code
+     * V v = map.get(key);
+     * if (v == null)
+     *     v = map.put(key, value);
+     *
+     * return v;
+     * }
+ * + *
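+     * For example (a sketch): unlike {@link #computeIfAbsent computeIfAbsent},
+     * the {@code value} argument is evaluated even when the key is already
+     * present, so a hypothetical {@code makeDefault()} always runs here:
+     * {@code
+     *     cache.putIfAbsent(key, makeDefault());            // always evaluated
+     *     cache.computeIfAbsent(key, k -> makeDefault());   // only if absent
+     * }
+     *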

The default implementation makes no guarantees about synchronization + * or atomicity properties of this method. Any implementation providing + * atomicity guarantees must override this method and document its + * concurrency properties. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with the specified key, or + * {@code null} if there was no mapping for the key. + * (A {@code null} return can also indicate that the map + * previously associated {@code null} with the key, + * if the implementation supports null values.) + * @throws UnsupportedOperationException if the {@code put} operation + * is not supported by this map + * (optional) + * @throws ClassCastException if the key or value is of an inappropriate + * type for this map + * (optional) + * @throws NullPointerException if the specified key or value is null, + * and this map does not permit null keys or values + * (optional) + * @throws IllegalArgumentException if some property of the specified key + * or value prevents it from being stored in this map + * (optional) + * @since 1.8 + */ + default V putIfAbsent(K key, V value) { + V v = get(key); + if (v == null) { + v = put(key, value); + } + + return v; + } + + /** + * Removes the entry for the specified key only if it is currently + * mapped to the specified value. + * + * @implSpec + * The default implementation is equivalent to, for this {@code map}: + * + *

 {@code
+     * if (map.containsKey(key) && Objects.equals(map.get(key), value)) {
+     *     map.remove(key);
+     *     return true;
+     * } else
+     *     return false;
+     * }
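+     *
+     * For example, a sketch that removes a hypothetical session token only if
+     * it has not been reissued in the meantime:
+     * {@code
+     *     sessions.remove(userId, staleToken);   // no-op if the token changed
+     * }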
+ * + *

The default implementation makes no guarantees about synchronization + * or atomicity properties of this method. Any implementation providing + * atomicity guarantees must override this method and document its + * concurrency properties. + * + * @param key key with which the specified value is associated + * @param value value expected to be associated with the specified key + * @return {@code true} if the value was removed + * @throws UnsupportedOperationException if the {@code remove} operation + * is not supported by this map + * (optional) + * @throws ClassCastException if the key or value is of an inappropriate + * type for this map + * (optional) + * @throws NullPointerException if the specified key or value is null, + * and this map does not permit null keys or values + * (optional) + * @since 1.8 + */ + default boolean remove(Object key, Object value) { + Object curValue = get(key); + if (!Objects.equals(curValue, value) || + (curValue == null && !containsKey(key))) { + return false; + } + remove(key); + return true; + } + + /** + * Replaces the entry for the specified key only if currently + * mapped to the specified value. + * + * @implSpec + * The default implementation is equivalent to, for this {@code map}: + * + *

 {@code
+     * if (map.containsKey(key) && Objects.equals(map.get(key), value)) {
+     *     map.put(key, newValue);
+     *     return true;
+     * } else
+     *     return false;
+     * }
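+     *
+     * For example (a sketch): with an implementation that provides atomicity,
+     * such as a {@code ConcurrentMap}, this supports a compare-and-set style
+     * update loop over a hypothetical {@code transform} function, assuming
+     * {@code key} is already mapped:
+     * {@code
+     *     V old;
+     *     do {
+     *         old = map.get(key);
+     *     } while (!map.replace(key, old, transform(old)));
+     * }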
+ * + * The default implementation does not throw NullPointerException + * for maps that do not support null values if oldValue is null unless + * newValue is also null. + * + *

The default implementation makes no guarantees about synchronization + * or atomicity properties of this method. Any implementation providing + * atomicity guarantees must override this method and document its + * concurrency properties. + * + * @param key key with which the specified value is associated + * @param oldValue value expected to be associated with the specified key + * @param newValue value to be associated with the specified key + * @return {@code true} if the value was replaced + * @throws UnsupportedOperationException if the {@code put} operation + * is not supported by this map + * (optional) + * @throws ClassCastException if the class of a specified key or value + * prevents it from being stored in this map + * @throws NullPointerException if a specified key or newValue is null, + * and this map does not permit null keys or values + * @throws NullPointerException if oldValue is null and this map does not + * permit null values + * (optional) + * @throws IllegalArgumentException if some property of a specified key + * or value prevents it from being stored in this map + * @since 1.8 + */ + default boolean replace(K key, V oldValue, V newValue) { + Object curValue = get(key); + if (!Objects.equals(curValue, oldValue) || + (curValue == null && !containsKey(key))) { + return false; + } + put(key, newValue); + return true; + } + + /** + * Replaces the entry for the specified key only if it is + * currently mapped to some value. + * + * @implSpec + * The default implementation is equivalent to, for this {@code map}: + * + *

 {@code
+     * if (map.containsKey(key)) {
+     *     return map.put(key, value);
+     * } else
+     *     return null;
+     * }
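+     *
+     * For example (a sketch): {@code settings.replace(key, newValue)} updates
+     * only keys that are already present, so it can never grow a hypothetical
+     * {@code settings} map.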
+ * + *

The default implementation makes no guarantees about synchronization + * or atomicity properties of this method. Any implementation providing + * atomicity guarantees must override this method and document its + * concurrency properties. + * + * @param key key with which the specified value is associated + * @param value value to be associated with the specified key + * @return the previous value associated with the specified key, or + * {@code null} if there was no mapping for the key. + * (A {@code null} return can also indicate that the map + * previously associated {@code null} with the key, + * if the implementation supports null values.) + * @throws UnsupportedOperationException if the {@code put} operation + * is not supported by this map + * (optional) + * @throws ClassCastException if the class of the specified key or value + * prevents it from being stored in this map + * (optional) + * @throws NullPointerException if the specified key or value is null, + * and this map does not permit null keys or values + * @throws IllegalArgumentException if some property of the specified key + * or value prevents it from being stored in this map + * @since 1.8 + */ + default V replace(K key, V value) { + V curValue; + if (((curValue = get(key)) != null) || containsKey(key)) { + curValue = put(key, value); + } + return curValue; + } + + /** + * If the specified key is not already associated with a value (or is mapped + * to {@code null}), attempts to compute its value using the given mapping + * function and enters it into this map unless {@code null}. + * + *

If the function returns {@code null} no mapping is recorded. If + * the function itself throws an (unchecked) exception, the + * exception is rethrown, and no mapping is recorded. The most + * common usage is to construct a new object serving as an initial + * mapped value or memoized result, as in: + * + *

 {@code
+     * map.computeIfAbsent(key, k -> new Value(f(k)));
+     * }
+ * + *

Or to implement a multi-value map, {@code Map<K, Collection<V>>},
+     * supporting multiple values per key:

 {@code
+     * map.computeIfAbsent(key, k -> new HashSet<V>()).add(v);
+     * }
+ * + * + * @implSpec + * The default implementation is equivalent to the following steps for this + * {@code map}, then returning the current value or {@code null} if now + * absent: + * + *
 {@code
+     * if (map.get(key) == null) {
+     *     V newValue = mappingFunction.apply(key);
+     *     if (newValue != null)
+     *         map.put(key, newValue);
+     * }
+     * }
+ * + *
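 + * 
For illustration only (an editor's example; {@code fib} is a + * hypothetical expensive helper), the memoization idiom described above: + *
 {@code
+     * Map<Integer, BigInteger> cache = new HashMap<>();
+     * BigInteger a = cache.computeIfAbsent(50, k -> fib(k)); // computed and stored
+     * BigInteger b = cache.computeIfAbsent(50, k -> fib(k)); // cached; function not applied
+     * }
+ * 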

 The default implementation makes no guarantees about synchronization + * or atomicity properties of this method. Any implementation providing + * atomicity guarantees must override this method and document its + * concurrency properties. In particular, all implementations of + * subinterface {@link java.util.concurrent.ConcurrentMap} must document + * whether the function is applied once atomically only if the value is not + * present. + * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key is null and + * this map does not support null keys, or the mappingFunction + * is null + * @throws UnsupportedOperationException if the {@code put} operation + * is not supported by this map + * (optional) + * @throws ClassCastException if the class of the specified key or value + * prevents it from being stored in this map + * (optional) + * @since 1.8 + */ + default V computeIfAbsent(K key, + Function<? super K, ? extends V> mappingFunction) { + Objects.requireNonNull(mappingFunction); + V v; + if ((v = get(key)) == null) { + V newValue; + if ((newValue = mappingFunction.apply(key)) != null) { + put(key, newValue); + return newValue; + } + } + + return v; + } + + /** + * If the value for the specified key is present and non-null, attempts to + * compute a new mapping given the key and its current mapped value. + * + * 

If the function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception is + * rethrown, and the current mapping is left unchanged. + * + * @implSpec + * The default implementation is equivalent to performing the following + * steps for this {@code map}, then returning the current value or + * {@code null} if now absent: + * + *

 {@code
+     * if (map.get(key) != null) {
+     *     V oldValue = map.get(key);
+     *     V newValue = remappingFunction.apply(key, oldValue);
+     *     if (newValue != null)
+     *         map.put(key, newValue);
+     *     else
+     *         map.remove(key);
+     * }
+     * }
+ * + *
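 + * 
For illustration only (an editor's example), decrementing a counter + * and dropping the entry once it would reach zero: + *
 {@code
+     * Map<String, Integer> stock = new HashMap<>();
+     * stock.put("apples", 2);
+     * stock.computeIfPresent("apples", (k, v) -> v > 1 ? v - 1 : null); // 2 -> 1
+     * stock.computeIfPresent("apples", (k, v) -> v > 1 ? v - 1 : null); // entry removed
+     * stock.computeIfPresent("pears",  (k, v) -> v - 1);  // absent: no-op, returns null
+     * }
+ * 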

 The default implementation makes no guarantees about synchronization + * or atomicity properties of this method. Any implementation providing + * atomicity guarantees must override this method and document its + * concurrency properties. In particular, all implementations of + * subinterface {@link java.util.concurrent.ConcurrentMap} must document + * whether the function is applied once atomically only if the value is not + * present. + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key is null and + * this map does not support null keys, or the + * remappingFunction is null + * @throws UnsupportedOperationException if the {@code put} operation + * is not supported by this map + * (optional) + * @throws ClassCastException if the class of the specified key or value + * prevents it from being stored in this map + * (optional) + * @since 1.8 + */ + default V computeIfPresent(K key, + BiFunction<? super K, ? super V, ? extends V> remappingFunction) { + Objects.requireNonNull(remappingFunction); + V oldValue; + if ((oldValue = get(key)) != null) { + V newValue = remappingFunction.apply(key, oldValue); + if (newValue != null) { + put(key, newValue); + return newValue; + } else { + remove(key); + return null; + } + } else { + return null; + } + } + + /** + * Attempts to compute a mapping for the specified key and its current + * mapped value (or {@code null} if there is no current mapping). For + * example, to either create or append a {@code String} msg to a value + * mapping: + * + * 

 {@code
+     * map.compute(key, (k, v) -> (v == null) ? msg : v.concat(msg))}
+ * (Method {@link #merge merge()} is often simpler to use for such purposes.) + * + *

If the function returns {@code null}, the mapping is removed (or + * remains absent if initially absent). If the function itself throws an + * (unchecked) exception, the exception is rethrown, and the current mapping + * is left unchanged. + * + * @implSpec + * The default implementation is equivalent to performing the following + * steps for this {@code map}, then returning the current value or + * {@code null} if absent: + * + *

 {@code
+     * V oldValue = map.get(key);
+     * V newValue = remappingFunction.apply(key, oldValue);
 + * if (oldValue != null) { 
+     *    if (newValue != null)
+     *       map.put(key, newValue);
+     *    else
+     *       map.remove(key);
+     * } else {
+     *    if (newValue != null)
+     *       map.put(key, newValue);
+     *    else
+     *       return null;
+     * }
+     * }
+ * + *
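 + * 
For illustration only (an editor's example; {@code words} is a + * hypothetical collection of strings), counting occurrences: + *
 {@code
+     * Map<String, Integer> freq = new HashMap<>();
+     * for (String w : words)
+     *     freq.compute(w, (k, v) -> (v == null) ? 1 : v + 1);
+     * }
+ * 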

 The default implementation makes no guarantees about synchronization + * or atomicity properties of this method. Any implementation providing + * atomicity guarantees must override this method and document its + * concurrency properties. In particular, all implementations of + * subinterface {@link java.util.concurrent.ConcurrentMap} must document + * whether the function is applied once atomically only if the value is not + * present. + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key is null and + * this map does not support null keys, or the + * remappingFunction is null + * @throws UnsupportedOperationException if the {@code put} operation + * is not supported by this map + * (optional) + * @throws ClassCastException if the class of the specified key or value + * prevents it from being stored in this map + * (optional) + * @since 1.8 + */ + default V compute(K key, + BiFunction<? super K, ? super V, ? extends V> remappingFunction) { + Objects.requireNonNull(remappingFunction); + V oldValue = get(key); + + V newValue = remappingFunction.apply(key, oldValue); + if (newValue == null) { + // delete mapping + if (oldValue != null || containsKey(key)) { + // something to remove + remove(key); + return null; + } else { + // nothing to do. Leave things as they were. + return null; + } + } else { + // add or replace old mapping + put(key, newValue); + return newValue; + } + } + + /** + * If the specified key is not already associated with a value or is + * associated with null, associates it with the given non-null value. + * Otherwise, replaces the associated value with the results of the given + * remapping function, or removes if the result is {@code null}. This + * method may be of use when combining multiple mapped values for a key. + * For example, to either create or append a {@code String msg} to a + * value mapping: + * + * 

 {@code
+     * map.merge(key, msg, String::concat)
+     * }
+ * + *

If the function returns {@code null} the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception is + * rethrown, and the current mapping is left unchanged. + * + * @implSpec + * The default implementation is equivalent to performing the following + * steps for this {@code map}, then returning the current value or + * {@code null} if absent: + * + *

 {@code
+     * V oldValue = map.get(key);
+     * V newValue = (oldValue == null) ? value :
+     *              remappingFunction.apply(oldValue, value);
+     * if (newValue == null)
+     *     map.remove(key);
+     * else
+     *     map.put(key, newValue);
+     * }
+ * + *
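 + * 
For illustration only (an editor's example), the common + * occurrence-counting idiom built on {@code merge}: + *
 {@code
+     * Map<String, Integer> counts = new HashMap<>();
+     * counts.merge("hits", 1, Integer::sum);  // absent: maps "hits" to 1
+     * counts.merge("hits", 1, Integer::sum);  // present: remapped to 1 + 1 = 2
+     * }
+ * 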

The default implementation makes no guarantees about synchronization + * or atomicity properties of this method. Any implementation providing + * atomicity guarantees must override this method and document its + * concurrency properties. In particular, all implementations of + * subinterface {@link java.util.concurrent.ConcurrentMap} must document + * whether the function is applied once atomically only if the value is not + * present. + * + * @param key key with which the resulting value is to be associated + * @param value the non-null value to be merged with the existing value + * associated with the key or, if no existing value or a null value + * is associated with the key, to be associated with the key + * @param remappingFunction the function to recompute a value if present + * @return the new value associated with the specified key, or null if no + * value is associated with the key + * @throws UnsupportedOperationException if the {@code put} operation + * is not supported by this map + * (optional) + * @throws ClassCastException if the class of the specified key or value + * prevents it from being stored in this map + * (optional) + * @throws NullPointerException if the specified key is null and this map + * does not support null keys or the value or remappingFunction is + * null + * @since 1.8 + */ + default V merge(K key, V value, + BiFunction remappingFunction) { + Objects.requireNonNull(remappingFunction); + Objects.requireNonNull(value); + V oldValue = get(key); + V newValue = (oldValue == null) ? value : + remappingFunction.apply(oldValue, value); + if(newValue == null) { + remove(key); + } else { + put(key, newValue); + } + return newValue; + } +} diff --git a/src/PriorityQueue.java b/src/PriorityQueue.java index b76c1d9..68470cc 100644 --- a/src/PriorityQueue.java +++ b/src/PriorityQueue.java @@ -1,5 +1,84 @@ +/* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + package java.util; +import java.util.function.Consumer; + +/** + * An unbounded priority {@linkplain Queue queue} based on a priority heap. + * The elements of the priority queue are ordered according to their + * {@linkplain Comparable natural ordering}, or by a {@link Comparator} + * provided at queue construction time, depending on which constructor is + * used. A priority queue does not permit {@code null} elements. + * A priority queue relying on natural ordering also does not permit + * insertion of non-comparable objects (doing so may result in + * {@code ClassCastException}). + * + *

The head of this queue is the least element + * with respect to the specified ordering. If multiple elements are + * tied for least value, the head is one of those elements -- ties are + * broken arbitrarily. The queue retrieval operations {@code poll}, + * {@code remove}, {@code peek}, and {@code element} access the + * element at the head of the queue. + * + *

A priority queue is unbounded, but has an internal + * capacity governing the size of an array used to store the + * elements on the queue. It is always at least as large as the queue + * size. As elements are added to a priority queue, its capacity + * grows automatically. The details of the growth policy are not + * specified. + * + *

This class and its iterator implement all of the + * optional methods of the {@link Collection} and {@link + * Iterator} interfaces. The Iterator provided in method {@link + * #iterator()} is not guaranteed to traverse the elements of + * the priority queue in any particular order. If you need ordered + * traversal, consider using {@code Arrays.sort(pq.toArray())}. + * + *

Note that this implementation is not synchronized. + * Multiple threads should not access a {@code PriorityQueue} + * instance concurrently if any of the threads modifies the queue. + * Instead, use the thread-safe {@link + * java.util.concurrent.PriorityBlockingQueue} class. + * + *

Implementation note: this implementation provides + * O(log(n)) time for the enqueuing and dequeuing methods + * ({@code offer}, {@code poll}, {@code remove()} and {@code add}); + * linear time for the {@code remove(Object)} and {@code contains(Object)} + * methods; and constant time for the retrieval methods + * ({@code peek}, {@code element}, and {@code size}). + * + *
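 + * 
For illustration only (an editor's example), the least element is + * removed first regardless of insertion order: + *
 {@code
+ * PriorityQueue<Integer> pq = new PriorityQueue<>();
+ * pq.offer(5);
+ * pq.offer(1);
+ * pq.offer(3);
+ * pq.poll();   // 1 -- the least element
+ * pq.poll();   // 3
+ * pq.poll();   // 5
+ * }
+ * 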

This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Josh Bloch, Doug Lea + * @param the type of elements held in this collection + */ public class PriorityQueue extends AbstractQueue implements java.io.Serializable { @@ -15,7 +94,7 @@ public class PriorityQueue extends AbstractQueue * heap and each descendant d of n, n <= d. The element with the * lowest value is in queue[0], assuming the queue is nonempty. */ - private transient Object[] queue; + transient Object[] queue; // non-private to simplify nested class access /** * The number of elements in the priority queue. @@ -32,7 +111,7 @@ public class PriorityQueue extends AbstractQueue * The number of times this priority queue has been * structurally modified. See AbstractList for gory details. */ - private transient int modCount = 0; + transient int modCount = 0; // non-private to simplify nested class access /** * Creates a {@code PriorityQueue} with the default initial @@ -56,6 +135,19 @@ public PriorityQueue(int initialCapacity) { this(initialCapacity, null); } + /** + * Creates a {@code PriorityQueue} with the default initial capacity and + * whose elements are ordered according to the specified comparator. + * + * @param comparator the comparator that will be used to order this + * priority queue. If {@code null}, the {@linkplain Comparable + * natural ordering} of the elements will be used. + * @since 1.8 + */ + public PriorityQueue(Comparator comparator) { + this(DEFAULT_INITIAL_CAPACITY, comparator); + } + /** * Creates a {@code PriorityQueue} with the specified initial capacity * that orders its elements according to the specified comparator. @@ -253,10 +345,9 @@ public boolean offer(E e) { return true; } + @SuppressWarnings("unchecked") public E peek() { - if (size == 0) - return null; - return (E) queue[0]; + return (size == 0) ? null : (E) queue[0]; } private int indexOf(Object o) { @@ -353,15 +444,14 @@ public Object[] toArray() { * precise control over the runtime type of the output array, and may, * under certain circumstances, be used to save allocation costs. * - *

Suppose x is a queue known to contain only strings. + *

Suppose {@code x} is a queue known to contain only strings. * The following code can be used to dump the queue into a newly - * allocated array of String: + * allocated array of {@code String}: * - *

-     *     String[] y = x.toArray(new String[0]);
+ *
 {@code String[] y = x.toArray(new String[0]);}
* - * Note that toArray(new Object[0]) is identical in function to - * toArray(). + * Note that {@code toArray(new Object[0])} is identical in function to + * {@code toArray()}. * * @param a the array into which the elements of the queue are to * be stored, if it is big enough; otherwise, a new array of the @@ -372,7 +462,9 @@ public Object[] toArray() { * this queue * @throws NullPointerException if the specified array is null */ + @SuppressWarnings("unchecked") public T[] toArray(T[] a) { + final int size = this.size; if (a.length < size) // Make a new array of a's runtime type, but my contents: return (T[]) Arrays.copyOf(queue, size, a.getClass()); @@ -437,6 +529,7 @@ public boolean hasNext() { (forgetMeNot != null && !forgetMeNot.isEmpty()); } + @SuppressWarnings("unchecked") public E next() { if (expectedModCount != modCount) throw new ConcurrentModificationException(); @@ -489,6 +582,7 @@ public void clear() { size = 0; } + @SuppressWarnings("unchecked") public E poll() { if (size == 0) return null; @@ -514,8 +608,9 @@ public E poll() { * position before i. This fact is used by iterator.remove so as to * avoid missing traversing elements. */ + @SuppressWarnings("unchecked") private E removeAt(int i) { - assert i >= 0 && i < size; + // assert i >= 0 && i < size; modCount++; int s = --size; if (s == i) // removed last element @@ -552,6 +647,7 @@ private void siftUp(int k, E x) { siftUpComparable(k, x); } + @SuppressWarnings("unchecked") private void siftUpComparable(int k, E x) { Comparable key = (Comparable) x; while (k > 0) { @@ -565,6 +661,7 @@ private void siftUpComparable(int k, E x) { queue[k] = key; } + @SuppressWarnings("unchecked") private void siftUpUsingComparator(int k, E x) { while (k > 0) { int parent = (k - 1) >>> 1; @@ -592,6 +689,7 @@ private void siftDown(int k, E x) { siftDownComparable(k, x); } + @SuppressWarnings("unchecked") private void siftDownComparable(int k, E x) { Comparable key = (Comparable)x; int half = size >>> 1; // loop while a non-leaf @@ -610,6 +708,7 @@ private void siftDownComparable(int k, E x) { queue[k] = key; } + @SuppressWarnings("unchecked") private void siftDownUsingComparator(int k, E x) { int half = size >>> 1; while (k < half) { @@ -631,6 +730,7 @@ private void siftDownUsingComparator(int k, E x) { * Establishes the heap invariant (described above) in the entire tree, * assuming nothing about the order of the elements prior to the call. */ + @SuppressWarnings("unchecked") private void heapify() { for (int i = (size >>> 1) - 1; i >= 0; i--) siftDown(i, (E) queue[i]); @@ -650,8 +750,7 @@ public Comparator comparator() { } /** - * Saves the state of the instance to a stream (that - * is, serializes it). + * Saves this queue to a stream (that is, serializes it). * * @serialData The length of the array backing the instance is * emitted (int), followed by all of its elements @@ -659,7 +758,7 @@ public Comparator comparator() { * @param s the stream */ private void writeObject(java.io.ObjectOutputStream s) - throws java.io.IOException{ + throws java.io.IOException { // Write out element count, and any hidden stuff s.defaultWriteObject(); @@ -695,4 +794,112 @@ private void readObject(java.io.ObjectInputStream s) // spec has never explained what that might be. heapify(); } + + /** + * Creates a late-binding + * and fail-fast {@link Spliterator} over the elements in this + * queue. + * + *

The {@code Spliterator} reports {@link Spliterator#SIZED}, + * {@link Spliterator#SUBSIZED}, and {@link Spliterator#NONNULL}. + * Overriding implementations should document the reporting of additional + * characteristic values. + * + * @return a {@code Spliterator} over the elements in this queue + * @since 1.8 + */ + public final Spliterator spliterator() { + return new PriorityQueueSpliterator(this, 0, -1, 0); + } + + static final class PriorityQueueSpliterator implements Spliterator { + /* + * This is very similar to ArrayList Spliterator, except for + * extra null checks. + */ + private final PriorityQueue pq; + private int index; // current index, modified on advance/split + private int fence; // -1 until first use + private int expectedModCount; // initialized when fence set + + /** Creates new spliterator covering the given range */ + PriorityQueueSpliterator(PriorityQueue pq, int origin, int fence, + int expectedModCount) { + this.pq = pq; + this.index = origin; + this.fence = fence; + this.expectedModCount = expectedModCount; + } + + private int getFence() { // initialize fence to size on first use + int hi; + if ((hi = fence) < 0) { + expectedModCount = pq.modCount; + hi = fence = pq.size; + } + return hi; + } + + public PriorityQueueSpliterator trySplit() { + int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; + return (lo >= mid) ? null : + new PriorityQueueSpliterator(pq, lo, index = mid, + expectedModCount); + } + + @SuppressWarnings("unchecked") + public void forEachRemaining(Consumer action) { + int i, hi, mc; // hoist accesses and checks from loop + PriorityQueue q; Object[] a; + if (action == null) + throw new NullPointerException(); + if ((q = pq) != null && (a = q.queue) != null) { + if ((hi = fence) < 0) { + mc = q.modCount; + hi = q.size; + } + else + mc = expectedModCount; + if ((i = index) >= 0 && (index = hi) <= a.length) { + for (E e;; ++i) { + if (i < hi) { + if ((e = (E) a[i]) == null) // must be CME + break; + action.accept(e); + } + else if (q.modCount != mc) + break; + else + return; + } + } + } + throw new ConcurrentModificationException(); + } + + public boolean tryAdvance(Consumer action) { + if (action == null) + throw new NullPointerException(); + int hi = getFence(), lo = index; + if (lo >= 0 && lo < hi) { + index = lo + 1; + @SuppressWarnings("unchecked") E e = (E)pq.queue[lo]; + if (e == null) + throw new ConcurrentModificationException(); + action.accept(e); + if (pq.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + return false; + } + + public long estimateSize() { + return (long) (getFence() - index); + } + + public int characteristics() { + return Spliterator.SIZED | Spliterator.SUBSIZED | Spliterator.NONNULL; + } + } } diff --git a/src/Queue.java b/src/Queue.java index 05088a3..f156bbc 100644 --- a/src/Queue.java +++ b/src/Queue.java @@ -1,15 +1,155 @@ +/* + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +/* + * + * + * + * + * + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ package java.util; +/** + * A collection designed for holding elements prior to processing. + * Besides basic {@link java.util.Collection Collection} operations, + * queues provide additional insertion, extraction, and inspection + * operations. 
 Each of these methods exists in two forms: one throws + * an exception if the operation fails, the other returns a special + * value (either {@code null} or {@code false}, depending on the + * operation). The latter form of the insert operation is designed + * specifically for use with capacity-restricted {@code Queue} + * implementations; in most implementations, insert operations cannot + * fail. + * + * <table BORDER CELLPADDING=3 CELLSPACING=1> + * <caption>Summary of Queue methods</caption> + * <tr> + * <td></td> + * <td ALIGN=CENTER><em>Throws exception</em></td> + * <td ALIGN=CENTER><em>Returns special value</em></td> + * </tr> + * <tr> + * <td><b>Insert</b></td> + * <td>{@link Queue#add add(e)}</td> + * <td>{@link Queue#offer offer(e)}</td> + * </tr> + * <tr> + * <td><b>Remove</b></td> + * <td>{@link Queue#remove remove()}</td> + * <td>{@link Queue#poll poll()}</td> + * </tr> + * <tr> + * <td><b>Examine</b></td> + * <td>{@link Queue#element element()}</td> + * <td>{@link Queue#peek peek()}</td> + * </tr> + * </table> + * 

Queues typically, but do not necessarily, order elements in a + * FIFO (first-in-first-out) manner. Among the exceptions are + * priority queues, which order elements according to a supplied + * comparator, or the elements' natural ordering, and LIFO queues (or + * stacks) which order the elements LIFO (last-in-first-out). + * Whatever the ordering used, the head of the queue is that + * element which would be removed by a call to {@link #remove() } or + * {@link #poll()}. In a FIFO queue, all new elements are inserted at + * the tail of the queue. Other kinds of queues may use + * different placement rules. Every {@code Queue} implementation + * must specify its ordering properties. + * + *

The {@link #offer offer} method inserts an element if possible, + * otherwise returning {@code false}. This differs from the {@link + * java.util.Collection#add Collection.add} method, which can fail to + * add an element only by throwing an unchecked exception. The + * {@code offer} method is designed for use when failure is a normal, + * rather than exceptional occurrence, for example, in fixed-capacity + * (or "bounded") queues. + * + *

The {@link #remove()} and {@link #poll()} methods remove and + * return the head of the queue. + * Exactly which element is removed from the queue is a + * function of the queue's ordering policy, which differs from + * implementation to implementation. The {@code remove()} and + * {@code poll()} methods differ only in their behavior when the + * queue is empty: the {@code remove()} method throws an exception, + * while the {@code poll()} method returns {@code null}. + * + *

The {@link #element()} and {@link #peek()} methods return, but do + * not remove, the head of the queue. + * + *
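 + * 
For illustration only (an editor's example), the two method forms on + * a capacity-one {@link java.util.concurrent.ArrayBlockingQueue}: + *
 {@code
+ * Queue<String> q = new ArrayBlockingQueue<>(1);
+ * q.offer("a");   // true
+ * q.offer("b");   // false: queue full, special value returned
+ * q.poll();       // "a"
+ * q.poll();       // null: queue empty, special value returned
+ * // q.add(e) on a full queue would instead throw IllegalStateException
+ * }
+ * 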

The {@code Queue} interface does not define the blocking queue + * methods, which are common in concurrent programming. These methods, + * which wait for elements to appear or for space to become available, are + * defined in the {@link java.util.concurrent.BlockingQueue} interface, which + * extends this interface. + * + *

{@code Queue} implementations generally do not allow insertion + * of {@code null} elements, although some implementations, such as + * {@link LinkedList}, do not prohibit insertion of {@code null}. + * Even in the implementations that permit it, {@code null} should + * not be inserted into a {@code Queue}, as {@code null} is also + * used as a special return value by the {@code poll} method to + * indicate that the queue contains no elements. + * + *

{@code Queue} implementations generally do not define + * element-based versions of methods {@code equals} and + * {@code hashCode} but instead inherit the identity based versions + * from class {@code Object}, because element-based equality is not + * always well-defined for queues with the same elements but different + * ordering properties. + * + * + *

This interface is a member of the + * + * Java Collections Framework. + * + * @see java.util.Collection + * @see LinkedList + * @see PriorityQueue + * @see java.util.concurrent.LinkedBlockingQueue + * @see java.util.concurrent.BlockingQueue + * @see java.util.concurrent.ArrayBlockingQueue + * @see java.util.concurrent.LinkedBlockingQueue + * @see java.util.concurrent.PriorityBlockingQueue + * @since 1.5 + * @author Doug Lea + * @param the type of elements held in this collection + */ public interface Queue extends Collection { /** * Inserts the specified element into this queue if it is possible to do so * immediately without violating capacity restrictions, returning - * true upon success and throwing an IllegalStateException + * {@code true} upon success and throwing an {@code IllegalStateException} * if no space is currently available. * * @param e the element to add - * @return true (as specified by {@link Collection#add}) + * @return {@code true} (as specified by {@link Collection#add}) * @throws IllegalStateException if the element cannot be added at this * time due to capacity restrictions * @throws ClassCastException if the class of the specified element @@ -29,8 +169,8 @@ public interface Queue extends Collection { * by throwing an exception. * * @param e the element to add - * @return true if the element was added to this queue, else - * false + * @return {@code true} if the element was added to this queue, else + * {@code false} * @throws ClassCastException if the class of the specified element * prevents it from being added to this queue * @throws NullPointerException if the specified element is null and @@ -52,9 +192,9 @@ public interface Queue extends Collection { /** * Retrieves and removes the head of this queue, - * or returns null if this queue is empty. + * or returns {@code null} if this queue is empty. * - * @return the head of this queue, or null if this queue is empty + * @return the head of this queue, or {@code null} if this queue is empty */ E poll(); @@ -70,9 +210,9 @@ public interface Queue extends Collection { /** * Retrieves, but does not remove, the head of this queue, - * or returns null if this queue is empty. + * or returns {@code null} if this queue is empty. * - * @return the head of this queue, or null if this queue is empty + * @return the head of this queue, or {@code null} if this queue is empty */ E peek(); } diff --git a/src/Set.java b/src/Set.java new file mode 100644 index 0000000..bbba2b1 --- /dev/null +++ b/src/Set.java @@ -0,0 +1,413 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +package java.util; + +/** + * A collection that contains no duplicate elements. More formally, sets + * contain no pair of elements e1 and e2 such that + * e1.equals(e2), and at most one null element. As implied by + * its name, this interface models the mathematical set abstraction. + * + *

The Set interface places additional stipulations, beyond those + * inherited from the Collection interface, on the contracts of all + * constructors and on the contracts of the add, equals and + * hashCode methods. Declarations for other inherited methods are + * also included here for convenience. (The specifications accompanying these + * declarations have been tailored to the Set interface, but they do + * not contain any additional stipulations.) + * + *

The additional stipulation on constructors is, not surprisingly, + * that all constructors must create a set that contains no duplicate elements + * (as defined above). + * + *
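 + * 
For illustration only (an editor's example), a duplicate add + * leaves the set unchanged: + *
 {@code
+ * Set<String> s = new HashSet<>();
+ * s.add("a");   // true: element added
+ * s.add("a");   // false: already present, set unchanged
+ * s.size();     // 1
+ * }
+ * 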

Note: Great care must be exercised if mutable objects are used as set + * elements. The behavior of a set is not specified if the value of an object + * is changed in a manner that affects equals comparisons while the + * object is an element in the set. A special case of this prohibition is + * that it is not permissible for a set to contain itself as an element. + * + *

Some set implementations have restrictions on the elements that + * they may contain. For example, some implementations prohibit null elements, + * and some have restrictions on the types of their elements. Attempting to + * add an ineligible element throws an unchecked exception, typically + * NullPointerException or ClassCastException. Attempting + * to query the presence of an ineligible element may throw an exception, + * or it may simply return false; some implementations will exhibit the former + * behavior and some will exhibit the latter. More generally, attempting an + * operation on an ineligible element whose completion would not result in + * the insertion of an ineligible element into the set may throw an + * exception or it may succeed, at the option of the implementation. + * Such exceptions are marked as "optional" in the specification for this + * interface. + * + *

This interface is a member of the + * + * Java Collections Framework. + * + * @param the type of elements maintained by this set + * + * @author Josh Bloch + * @author Neal Gafter + * @see Collection + * @see List + * @see SortedSet + * @see HashSet + * @see TreeSet + * @see AbstractSet + * @see Collections#singleton(java.lang.Object) + * @see Collections#EMPTY_SET + * @since 1.2 + */ + +public interface Set extends Collection { + // Query Operations + + /** + * Returns the number of elements in this set (its cardinality). If this + * set contains more than Integer.MAX_VALUE elements, returns + * Integer.MAX_VALUE. + * + * @return the number of elements in this set (its cardinality) + */ + int size(); + + /** + * Returns true if this set contains no elements. + * + * @return true if this set contains no elements + */ + boolean isEmpty(); + + /** + * Returns true if this set contains the specified element. + * More formally, returns true if and only if this set + * contains an element e such that + * (o==null ? e==null : o.equals(e)). + * + * @param o element whose presence in this set is to be tested + * @return true if this set contains the specified element + * @throws ClassCastException if the type of the specified element + * is incompatible with this set + * (optional) + * @throws NullPointerException if the specified element is null and this + * set does not permit null elements + * (optional) + */ + boolean contains(Object o); + + /** + * Returns an iterator over the elements in this set. The elements are + * returned in no particular order (unless this set is an instance of some + * class that provides a guarantee). + * + * @return an iterator over the elements in this set + */ + Iterator iterator(); + + /** + * Returns an array containing all of the elements in this set. + * If this set makes any guarantees as to what order its elements + * are returned by its iterator, this method must return the + * elements in the same order. + * + *

The returned array will be "safe" in that no references to it + * are maintained by this set. (In other words, this method must + * allocate a new array even if this set is backed by an array). + * The caller is thus free to modify the returned array. + * + *

This method acts as bridge between array-based and collection-based + * APIs. + * + * @return an array containing all the elements in this set + */ + Object[] toArray(); + + /** + * Returns an array containing all of the elements in this set; the + * runtime type of the returned array is that of the specified array. + * If the set fits in the specified array, it is returned therein. + * Otherwise, a new array is allocated with the runtime type of the + * specified array and the size of this set. + * + *

If this set fits in the specified array with room to spare + * (i.e., the array has more elements than this set), the element in + * the array immediately following the end of the set is set to + * null. (This is useful in determining the length of this + * set only if the caller knows that this set does not contain + * any null elements.) + * + *

If this set makes any guarantees as to what order its elements + * are returned by its iterator, this method must return the elements + * in the same order. + * + *

Like the {@link #toArray()} method, this method acts as bridge between + * array-based and collection-based APIs. Further, this method allows + * precise control over the runtime type of the output array, and may, + * under certain circumstances, be used to save allocation costs. + * + *

Suppose x is a set known to contain only strings. + * The following code can be used to dump the set into a newly allocated + * array of String: + * + *

+     *     String[] y = x.toArray(new String[0]);
+ * + * Note that toArray(new Object[0]) is identical in function to + * toArray(). + * + * @param a the array into which the elements of this set are to be + * stored, if it is big enough; otherwise, a new array of the same + * runtime type is allocated for this purpose. + * @return an array containing all the elements in this set + * @throws ArrayStoreException if the runtime type of the specified array + * is not a supertype of the runtime type of every element in this + * set + * @throws NullPointerException if the specified array is null + */ + T[] toArray(T[] a); + + + // Modification Operations + + /** + * Adds the specified element to this set if it is not already present + * (optional operation). More formally, adds the specified element + * e to this set if the set contains no element e2 + * such that + * (e==null ? e2==null : e.equals(e2)). + * If this set already contains the element, the call leaves the set + * unchanged and returns false. In combination with the + * restriction on constructors, this ensures that sets never contain + * duplicate elements. + * + *

The stipulation above does not imply that sets must accept all + * elements; sets may refuse to add any particular element, including + * null, and throw an exception, as described in the + * specification for {@link Collection#add Collection.add}. + * Individual set implementations should clearly document any + * restrictions on the elements that they may contain. + * + * @param e element to be added to this set + * @return true if this set did not already contain the specified + * element + * @throws UnsupportedOperationException if the add operation + * is not supported by this set + * @throws ClassCastException if the class of the specified element + * prevents it from being added to this set + * @throws NullPointerException if the specified element is null and this + * set does not permit null elements + * @throws IllegalArgumentException if some property of the specified element + * prevents it from being added to this set + */ + boolean add(E e); + + + /** + * Removes the specified element from this set if it is present + * (optional operation). More formally, removes an element e + * such that + * (o==null ? e==null : o.equals(e)), if + * this set contains such an element. Returns true if this set + * contained the element (or equivalently, if this set changed as a + * result of the call). (This set will not contain the element once the + * call returns.) + * + * @param o object to be removed from this set, if present + * @return true if this set contained the specified element + * @throws ClassCastException if the type of the specified element + * is incompatible with this set + * (optional) + * @throws NullPointerException if the specified element is null and this + * set does not permit null elements + * (optional) + * @throws UnsupportedOperationException if the remove operation + * is not supported by this set + */ + boolean remove(Object o); + + + // Bulk Operations + + /** + * Returns true if this set contains all of the elements of the + * specified collection. If the specified collection is also a set, this + * method returns true if it is a subset of this set. + * + * @param c collection to be checked for containment in this set + * @return true if this set contains all of the elements of the + * specified collection + * @throws ClassCastException if the types of one or more elements + * in the specified collection are incompatible with this + * set + * (optional) + * @throws NullPointerException if the specified collection contains one + * or more null elements and this set does not permit null + * elements + * (optional), + * or if the specified collection is null + * @see #contains(Object) + */ + boolean containsAll(Collection c); + + /** + * Adds all of the elements in the specified collection to this set if + * they're not already present (optional operation). If the specified + * collection is also a set, the addAll operation effectively + * modifies this set so that its value is the union of the two + * sets. The behavior of this operation is undefined if the specified + * collection is modified while the operation is in progress. 
+ * + * @param c collection containing elements to be added to this set + * @return true if this set changed as a result of the call + * + * @throws UnsupportedOperationException if the addAll operation + * is not supported by this set + * @throws ClassCastException if the class of an element of the + * specified collection prevents it from being added to this set + * @throws NullPointerException if the specified collection contains one + * or more null elements and this set does not permit null + * elements, or if the specified collection is null + * @throws IllegalArgumentException if some property of an element of the + * specified collection prevents it from being added to this set + * @see #add(Object) + */ + boolean addAll(Collection c); + + /** + * Retains only the elements in this set that are contained in the + * specified collection (optional operation). In other words, removes + * from this set all of its elements that are not contained in the + * specified collection. If the specified collection is also a set, this + * operation effectively modifies this set so that its value is the + * intersection of the two sets. + * + * @param c collection containing elements to be retained in this set + * @return true if this set changed as a result of the call + * @throws UnsupportedOperationException if the retainAll operation + * is not supported by this set + * @throws ClassCastException if the class of an element of this set + * is incompatible with the specified collection + * (optional) + * @throws NullPointerException if this set contains a null element and the + * specified collection does not permit null elements + * (optional), + * or if the specified collection is null + * @see #remove(Object) + */ + boolean retainAll(Collection c); + + /** + * Removes from this set all of its elements that are contained in the + * specified collection (optional operation). If the specified + * collection is also a set, this operation effectively modifies this + * set so that its value is the asymmetric set difference of + * the two sets. + * + * @param c collection containing elements to be removed from this set + * @return true if this set changed as a result of the call + * @throws UnsupportedOperationException if the removeAll operation + * is not supported by this set + * @throws ClassCastException if the class of an element of this set + * is incompatible with the specified collection + * (optional) + * @throws NullPointerException if this set contains a null element and the + * specified collection does not permit null elements + * (optional), + * or if the specified collection is null + * @see #remove(Object) + * @see #contains(Object) + */ + boolean removeAll(Collection c); + + /** + * Removes all of the elements from this set (optional operation). + * The set will be empty after this call returns. + * + * @throws UnsupportedOperationException if the clear method + * is not supported by this set + */ + void clear(); + + + // Comparison and hashing + + /** + * Compares the specified object with this set for equality. Returns + * true if the specified object is also a set, the two sets + * have the same size, and every member of the specified set is + * contained in this set (or equivalently, every member of this set is + * contained in the specified set). This definition ensures that the + * equals method works properly across different implementations of the + * set interface. 
+ * + * @param o object to be compared for equality with this set + * @return true if the specified object is equal to this set + */ + boolean equals(Object o); + + /** + * Returns the hash code value for this set. The hash code of a set is + * defined to be the sum of the hash codes of the elements in the set, + * where the hash code of a null element is defined to be zero. + * This ensures that s1.equals(s2) implies that + * s1.hashCode()==s2.hashCode() for any two sets s1 + * and s2, as required by the general contract of + * {@link Object#hashCode}. + * + * @return the hash code value for this set + * @see Object#equals(Object) + * @see Set#equals(Object) + */ + int hashCode(); + + /** + * Creates a {@code Spliterator} over the elements in this set. + * + *

The {@code Spliterator} reports {@link Spliterator#DISTINCT}. + * Implementations should document the reporting of additional + * characteristic values. + * + * @implSpec + * The default implementation creates a + * late-binding spliterator + * from the set's {@code Iterator}. The spliterator inherits the + * fail-fast properties of the set's iterator. + *

+ * The created {@code Spliterator} additionally reports + * {@link Spliterator#SIZED}. + * + * @implNote + * The created {@code Spliterator} additionally reports + * {@link Spliterator#SUBSIZED}. + * + * @return a {@code Spliterator} over the elements in this set + * @since 1.8 + */ + @Override + default Spliterator spliterator() { + return Spliterators.spliterator(this, Spliterator.DISTINCT); + } +} diff --git a/src/Stack.java b/src/Stack.java index 96acf05..4610e65 100644 --- a/src/Stack.java +++ b/src/Stack.java @@ -1,6 +1,50 @@ +/* + * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ package java.util; +/** + * The Stack class represents a last-in-first-out + * (LIFO) stack of objects. It extends class Vector with five + * operations that allow a vector to be treated as a stack. The usual + * push and pop operations are provided, as well as a + * method to peek at the top item on the stack, a method to test + * for whether the stack is empty, and a method to search + * the stack for an item and discover how far it is from the top. + *

+ * When a stack is first created, it contains no items. + * + *
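 + * 
For illustration only (an editor's example): + *
   {@code
+ *   Stack<Integer> stack = new Stack<>();
+ *   stack.push(1);
+ *   stack.push(2);
+ *   stack.peek();    // 2
+ *   stack.pop();     // 2
+ *   stack.search(1); // 1 -- 1-based distance from the top
+ *   }
+ * 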

A more complete and consistent set of LIFO stack operations is + * provided by the {@link Deque} interface and its implementations, which + * should be used in preference to this class. For example: + *

   {@code
 + * Deque<Integer> stack = new ArrayDeque<Integer>();} 
+ * + * @author Jonathan Payne + * @since JDK1.0 + */ public class Stack extends Vector { /** diff --git a/src/String.java b/src/String.java index b68f7ff..08eb5db 100644 --- a/src/String.java +++ b/src/String.java @@ -1,7 +1,30 @@ +/* + * Copyright (c) 1994, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ package java.lang; -import java.io.ObjectStreamClass; import java.io.ObjectStreamField; import java.io.UnsupportedEncodingException; import java.nio.charset.Charset; @@ -10,22 +33,86 @@ import java.util.Comparator; import java.util.Formatter; import java.util.Locale; +import java.util.Objects; +import java.util.StringJoiner; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; +/** + * The {@code String} class represents character strings. All + * string literals in Java programs, such as {@code "abc"}, are + * implemented as instances of this class. + *

+ * Strings are constant; their values cannot be changed after they + * are created. String buffers support mutable strings. + * Because String objects are immutable they can be shared. For example: + *

+ *     String str = "abc";
+ * 

+ * is equivalent to: + *

+ *     char data[] = {'a', 'b', 'c'};
+ *     String str = new String(data);
+ * 

+ * Here are some more examples of how strings can be used: + *

+ *     System.out.println("abc");
+ *     String cde = "cde";
+ *     System.out.println("abc" + cde);
+ *     String c = "abc".substring(2,3);
+ *     String d = cde.substring(1, 2);
+ * 
+ *

+ * The class {@code String} includes methods for examining + * individual characters of the sequence, for comparing strings, for + * searching strings, for extracting substrings, and for creating a + * copy of a string with all characters translated to uppercase or to + * lowercase. Case mapping is based on the Unicode Standard version + * specified by the {@link java.lang.Character Character} class. + *

+ * The Java language provides special support for the string + * concatenation operator ( + ), and for conversion of + * other objects to strings. String concatenation is implemented + * through the {@code StringBuilder}(or {@code StringBuffer}) + * class and its {@code append} method. + * String conversions are implemented through the method + * {@code toString}, defined by {@code Object} and + * inherited by all classes in Java. For additional information on + * string concatenation and conversion, see Gosling, Joy, and Steele, + * The Java Language Specification. + * + *

Unless otherwise noted, passing a null argument to a constructor + * or method in this class will cause a {@link NullPointerException} to be + * thrown. + * + *

A {@code String} represents a string in the UTF-16 format + * in which supplementary characters are represented by surrogate + * pairs (see the section Unicode + * Character Representations in the {@code Character} class for + * more information). + * Index values refer to {@code char} code units, so a supplementary + * character uses two positions in a {@code String}. + *
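 + * + * 
For illustration only (an editor's example), a supplementary + * character such as U+1D50A occupies two {@code char} positions: + *
 {@code
+ * String s = "a\uD835\uDD0A";       // "a" followed by U+1D50A as a surrogate pair
+ * s.length();                       // 3 char code units
+ * s.codePointCount(0, s.length());  // 2 code points
+ * s.codePointAt(1);                 // 0x1D50A
+ * }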

The {@code String} class provides methods for dealing with + * Unicode code points (i.e., characters), in addition to those for + * dealing with Unicode code units (i.e., {@code char} values). + * + * @author Lee Boynton + * @author Arthur van Hoff + * @author Martin Buchholz + * @author Ulf Zibis + * @see java.lang.Object#toString() + * @see java.lang.StringBuffer + * @see java.lang.StringBuilder + * @see java.nio.charset.Charset + * @since JDK1.0 + */ + public final class String - implements java.io.Serializable, Comparable, CharSequence -{ + implements java.io.Serializable, Comparable, CharSequence { /** The value is used for character storage. */ private final char value[]; - /** The offset is the first index of the storage that is used. */ - private final int offset; - - /** The count is the number of characters in the String. */ - private final int count; - /** Cache the hash code for the string */ private int hash; // Default to 0 @@ -35,14 +122,9 @@ public final class String /** * Class String is special cased within the Serialization Stream Protocol. * - * A String instance is written initially into an ObjectOutputStream in the - * following format: - *

-     *      TC_STRING (utf String)
-     * 
- * The String is written by method DataOutput.writeUTF. - * A new handle is generated to refer to all future references to the - * string instance within the stream. + * A String instance is written into an ObjectOutputStream according to + * + * Object Serialization Specification, Section 6.2, "Stream Elements" */ private static final ObjectStreamField[] serialPersistentFields = new ObjectStreamField[0]; @@ -53,8 +135,6 @@ public final class String * unnecessary since Strings are immutable. */ public String() { - this.offset = 0; - this.count = 0; this.value = new char[0]; } @@ -69,23 +149,8 @@ public String() { * A {@code String} */ public String(String original) { - int size = original.count; - char[] originalValue = original.value; - char[] v; - if (originalValue.length > size) { - // The array representing the String is bigger than the new - // String itself. Perhaps this constructor is being called - // in order to trim the baggage, so make a copy of the array. - int off = original.offset; - v = Arrays.copyOfRange(originalValue, off, off+size); - } else { - // The array representing the String is the same - // size as the String, so no point in making a copy. - v = originalValue; - } - this.offset = 0; - this.count = size; - this.value = v; + this.value = original.value; + this.hash = original.hash; } /** @@ -98,10 +163,7 @@ public String(String original) { * The initial value of the string */ public String(char value[]) { - int size = value.length; - this.offset = 0; - this.count = size; - this.value = Arrays.copyOf(value, size); + this.value = Arrays.copyOf(value, value.length); } /** @@ -136,8 +198,6 @@ public String(char value[], int offset, int count) { if (offset > value.length - count) { throw new StringIndexOutOfBoundsException(offset + count); } - this.offset = 0; - this.count = count; this.value = Arrays.copyOfRange(value, offset, offset+count); } @@ -200,14 +260,12 @@ else if (Character.isValidCodePoint(c)) for (int i = offset, j = 0; i < end; i++, j++) { int c = codePoints[i]; if (Character.isBmpCodePoint(c)) - v[j] = (char) c; + v[j] = (char)c; else Character.toSurrogates(c, v, j++); } - this.value = v; - this.count = n; - this.offset = 0; + this.value = v; } /** @@ -255,17 +313,15 @@ public String(byte ascii[], int hibyte, int offset, int count) { char value[] = new char[count]; if (hibyte == 0) { - for (int i = count ; i-- > 0 ;) { - value[i] = (char) (ascii[i + offset] & 0xff); + for (int i = count; i-- > 0;) { + value[i] = (char)(ascii[i + offset] & 0xff); } } else { hibyte <<= 8; - for (int i = count ; i-- > 0 ;) { - value[i] = (char) (hibyte | (ascii[i + offset] & 0xff)); + for (int i = count; i-- > 0;) { + value[i] = (char)(hibyte | (ascii[i + offset] & 0xff)); } } - this.offset = 0; - this.count = count; this.value = value; } @@ -351,15 +407,11 @@ private static void checkBounds(byte[] bytes, int offset, int length) { * @since JDK1.1 */ public String(byte bytes[], int offset, int length, String charsetName) - throws UnsupportedEncodingException - { + throws UnsupportedEncodingException { if (charsetName == null) throw new NullPointerException("charsetName"); checkBounds(bytes, offset, length); - char[] v = StringCoding.decode(charsetName, bytes, offset, length); - this.offset = 0; - this.count = v.length; - this.value = v; + this.value = StringCoding.decode(charsetName, bytes, offset, length); } /** @@ -396,10 +448,7 @@ public String(byte bytes[], int offset, int length, Charset charset) { if (charset == null) throw new NullPointerException("charset"); 
checkBounds(bytes, offset, length); - char[] v = StringCoding.decode(charset, bytes, offset, length); - this.offset = 0; - this.count = v.length; - this.value = v; + this.value = StringCoding.decode(charset, bytes, offset, length); } /** @@ -426,8 +475,7 @@ public String(byte bytes[], int offset, int length, Charset charset) { * @since JDK1.1 */ public String(byte bytes[], String charsetName) - throws UnsupportedEncodingException - { + throws UnsupportedEncodingException { this(bytes, 0, bytes.length, charsetName); } @@ -483,10 +531,7 @@ public String(byte bytes[], Charset charset) { */ public String(byte bytes[], int offset, int length) { checkBounds(bytes, offset, length); - char[] v = StringCoding.decode(bytes, offset, length); - this.offset = 0; - this.count = v.length; - this.value = v; + this.value = StringCoding.decode(bytes, offset, length); } /** @@ -519,10 +564,9 @@ public String(byte bytes[]) { * A {@code StringBuffer} */ public String(StringBuffer buffer) { - String result = buffer.toString(); - this.value = result.value; - this.count = result.count; - this.offset = result.offset; + synchronized(buffer) { + this.value = Arrays.copyOf(buffer.getValue(), buffer.length()); + } } /** @@ -541,18 +585,18 @@ public String(StringBuffer buffer) { * @since 1.5 */ public String(StringBuilder builder) { - String result = builder.toString(); - this.value = result.value; - this.count = result.count; - this.offset = result.offset; + this.value = Arrays.copyOf(builder.getValue(), builder.length()); } - - // Package private constructor which shares value array for speed. - String(int offset, int count, char value[]) { + /* + * Package private constructor which shares value array for speed. + * this constructor is always expected to be called with share==true. + * a separate constructor is needed because we already have a public + * String(char[]) constructor that makes a copy of the given char[]. + */ + String(char[] value, boolean share) { + // assert share : "unshared not supported"; this.value = value; - this.offset = offset; - this.count = count; } /** @@ -564,159 +608,159 @@ public String(StringBuilder builder) { * object. */ public int length() { - return count; + return value.length; } /** - * Returns true if, and only if, {@link #length()} is 0. + * Returns {@code true} if, and only if, {@link #length()} is {@code 0}. * - * @return true if {@link #length()} is 0, otherwise - * false + * @return {@code true} if {@link #length()} is {@code 0}, otherwise + * {@code false} * * @since 1.6 */ public boolean isEmpty() { - return count == 0; + return value.length == 0; } /** - * Returns the char value at the - * specified index. An index ranges from 0 to - * length() - 1. The first char value of the sequence - * is at index 0, the next at index 1, + * Returns the {@code char} value at the + * specified index. An index ranges from {@code 0} to + * {@code length() - 1}. The first {@code char} value of the sequence + * is at index {@code 0}, the next at index {@code 1}, * and so on, as for array indexing. * - *

If the char value specified by the index is a + *

If the {@code char} value specified by the index is a * surrogate, the surrogate * value is returned. * - * @param index the index of the char value. - * @return the char value at the specified index of this string. - * The first char value is at index 0. - * @exception IndexOutOfBoundsException if the index + * @param index the index of the {@code char} value. + * @return the {@code char} value at the specified index of this string. + * The first {@code char} value is at index {@code 0}. + * @exception IndexOutOfBoundsException if the {@code index} * argument is negative or not less than the length of this * string. */ public char charAt(int index) { - if ((index < 0) || (index >= count)) { + if ((index < 0) || (index >= value.length)) { throw new StringIndexOutOfBoundsException(index); } - return value[index + offset]; + return value[index]; } /** * Returns the character (Unicode code point) at the specified - * index. The index refers to char values - * (Unicode code units) and ranges from 0 to - * {@link #length()} - 1. + * index. The index refers to {@code char} values + * (Unicode code units) and ranges from {@code 0} to + * {@link #length()}{@code - 1}. * - *

If the char value specified at the given index + *

If the {@code char} value specified at the given index * is in the high-surrogate range, the following index is less - * than the length of this String, and the - * char value at the following index is in the + * than the length of this {@code String}, and the + * {@code char} value at the following index is in the * low-surrogate range, then the supplementary code point * corresponding to this surrogate pair is returned. Otherwise, - * the char value at the given index is returned. + * the {@code char} value at the given index is returned. * - * @param index the index to the char values + * @param index the index to the {@code char} values * @return the code point value of the character at the - * index - * @exception IndexOutOfBoundsException if the index + * {@code index} + * @exception IndexOutOfBoundsException if the {@code index} * argument is negative or not less than the length of this * string. * @since 1.5 */ public int codePointAt(int index) { - if ((index < 0) || (index >= count)) { + if ((index < 0) || (index >= value.length)) { throw new StringIndexOutOfBoundsException(index); } - return Character.codePointAtImpl(value, offset + index, offset + count); + return Character.codePointAtImpl(value, index, value.length); } /** * Returns the character (Unicode code point) before the specified - * index. The index refers to char values - * (Unicode code units) and ranges from 1 to {@link + * index. The index refers to {@code char} values + * (Unicode code units) and ranges from {@code 1} to {@link * CharSequence#length() length}. * - *
If the char value at (index - 1) - * is in the low-surrogate range, (index - 2) is not - * negative, and the char value at (index - - * 2) is in the high-surrogate range, then the + *
If the {@code char} value at {@code (index - 1)} + * is in the low-surrogate range, {@code (index - 2)} is not + * negative, and the {@code char} value at {@code (index - + * 2)} is in the high-surrogate range, then the * supplementary code point value of the surrogate pair is - * returned. If the char value at index - - * 1 is an unpaired low-surrogate or a high-surrogate, the + * returned. If the {@code char} value at {@code index - + * 1} is an unpaired low-surrogate or a high-surrogate, the * surrogate value is returned. * * @param index the index following the code point that should be returned * @return the Unicode code point value before the given index. - * @exception IndexOutOfBoundsException if the index + * @exception IndexOutOfBoundsException if the {@code index} * argument is less than 1 or greater than the length * of this string. * @since 1.5 */ public int codePointBefore(int index) { int i = index - 1; - if ((i < 0) || (i >= count)) { + if ((i < 0) || (i >= value.length)) { throw new StringIndexOutOfBoundsException(index); } - return Character.codePointBeforeImpl(value, offset + index, offset); + return Character.codePointBeforeImpl(value, index, 0); } /** * Returns the number of Unicode code points in the specified text - * range of this String. The text range begins at the - * specified beginIndex and extends to the - * char at index endIndex - 1. Thus the - * length (in chars) of the text range is - * endIndex-beginIndex. Unpaired surrogates within + * range of this {@code String}. The text range begins at the + * specified {@code beginIndex} and extends to the + * {@code char} at index {@code endIndex - 1}. Thus the + * length (in {@code char}s) of the text range is + * {@code endIndex-beginIndex}. Unpaired surrogates within * the text range count as one code point each. * - * @param beginIndex the index to the first char of + * @param beginIndex the index to the first {@code char} of * the text range. - * @param endIndex the index after the last char of + * @param endIndex the index after the last {@code char} of * the text range. * @return the number of Unicode code points in the specified text * range * @exception IndexOutOfBoundsException if the - * beginIndex is negative, or endIndex - * is larger than the length of this String, or - * beginIndex is larger than endIndex. + * {@code beginIndex} is negative, or {@code endIndex} + * is larger than the length of this {@code String}, or + * {@code beginIndex} is larger than {@code endIndex}. * @since 1.5 */ public int codePointCount(int beginIndex, int endIndex) { - if (beginIndex < 0 || endIndex > count || beginIndex > endIndex) { + if (beginIndex < 0 || endIndex > value.length || beginIndex > endIndex) { throw new IndexOutOfBoundsException(); } - return Character.codePointCountImpl(value, offset+beginIndex, endIndex-beginIndex); + return Character.codePointCountImpl(value, beginIndex, endIndex - beginIndex); } /** - * Returns the index within this String that is - * offset from the given index by - * codePointOffset code points. Unpaired surrogates - * within the text range given by index and - * codePointOffset count as one code point each. + * Returns the index within this {@code String} that is + * offset from the given {@code index} by + * {@code codePointOffset} code points. Unpaired surrogates + * within the text range given by {@code index} and + * {@code codePointOffset} count as one code point each. 
* * @param index the index to be offset * @param codePointOffset the offset in code points - * @return the index within this String - * @exception IndexOutOfBoundsException if index + * @return the index within this {@code String} + * @exception IndexOutOfBoundsException if {@code index} * is negative or larger then the length of this - * String, or if codePointOffset is positive - * and the substring starting with index has fewer - * than codePointOffset code points, - * or if codePointOffset is negative and the substring - * before index has fewer than the absolute value - * of codePointOffset code points. + * {@code String}, or if {@code codePointOffset} is positive + * and the substring starting with {@code index} has fewer + * than {@code codePointOffset} code points, + * or if {@code codePointOffset} is negative and the substring + * before {@code index} has fewer than the absolute value + * of {@code codePointOffset} code points. * @since 1.5 */ public int offsetByCodePoints(int index, int codePointOffset) { - if (index < 0 || index > count) { + if (index < 0 || index > value.length) { throw new IndexOutOfBoundsException(); } - return Character.offsetByCodePointsImpl(value, offset, count, - offset+index, codePointOffset) - offset; + return Character.offsetByCodePointsImpl(value, 0, value.length, + index, codePointOffset); } /** @@ -724,20 +768,20 @@ public int offsetByCodePoints(int index, int codePointOffset) { * This method doesn't perform any range checking. */ void getChars(char dst[], int dstBegin) { - System.arraycopy(value, offset, dst, dstBegin, count); + System.arraycopy(value, 0, dst, dstBegin, value.length); } /** * Copies characters from this string into the destination character * array. *
- * The first character to be copied is at index srcBegin; - * the last character to be copied is at index srcEnd-1 + * The first character to be copied is at index {@code srcBegin}; + * the last character to be copied is at index {@code srcEnd-1} * (thus the total number of characters to be copied is - * srcEnd-srcBegin). The characters are copied into the - * subarray of dst starting at index dstBegin + * {@code srcEnd-srcBegin}). The characters are copied into the + * subarray of {@code dst} starting at index {@code dstBegin} * and ending at index: - *
+     *
      *     dstbegin + (srcEnd-srcBegin) - 1
      *
* @@ -749,26 +793,25 @@ void getChars(char dst[], int dstBegin) { * @param dstBegin the start offset in the destination array. * @exception IndexOutOfBoundsException If any of the following * is true: - *
  • srcBegin is negative. - *
  • srcBegin is greater than srcEnd - *
  • srcEnd is greater than the length of this + *
    • {@code srcBegin} is negative. + *
    • {@code srcBegin} is greater than {@code srcEnd} + *
    • {@code srcEnd} is greater than the length of this * string - *
    • dstBegin is negative - *
    • dstBegin+(srcEnd-srcBegin) is larger than - * dst.length
    + *
  • {@code dstBegin} is negative + *
  • {@code dstBegin+(srcEnd-srcBegin)} is larger than + * {@code dst.length}
*/ public void getChars(int srcBegin, int srcEnd, char dst[], int dstBegin) { if (srcBegin < 0) { throw new StringIndexOutOfBoundsException(srcBegin); } - if (srcEnd > count) { + if (srcEnd > value.length) { throw new StringIndexOutOfBoundsException(srcEnd); } if (srcBegin > srcEnd) { throw new StringIndexOutOfBoundsException(srcEnd - srcBegin); } - System.arraycopy(value, offset + srcBegin, dst, dstBegin, - srcEnd - srcBegin); + System.arraycopy(value, srcBegin, dst, dstBegin, srcEnd - srcBegin); } /** @@ -819,15 +862,17 @@ public void getBytes(int srcBegin, int srcEnd, byte dst[], int dstBegin) { if (srcBegin < 0) { throw new StringIndexOutOfBoundsException(srcBegin); } - if (srcEnd > count) { + if (srcEnd > value.length) { throw new StringIndexOutOfBoundsException(srcEnd); } if (srcBegin > srcEnd) { throw new StringIndexOutOfBoundsException(srcEnd - srcBegin); } + Objects.requireNonNull(dst); + int j = dstBegin; - int n = offset + srcEnd; - int i = offset + srcBegin; + int n = srcEnd; + int i = srcBegin; char[] val = value; /* avoid getfield opcode */ while (i < n) { @@ -856,10 +901,9 @@ public void getBytes(int srcBegin, int srcEnd, byte dst[], int dstBegin) { * @since JDK1.1 */ public byte[] getBytes(String charsetName) - throws UnsupportedEncodingException - { + throws UnsupportedEncodingException { if (charsetName == null) throw new NullPointerException(); - return StringCoding.encode(charsetName, value, offset, count); + return StringCoding.encode(charsetName, value, 0, value.length); } /** @@ -882,7 +926,7 @@ public byte[] getBytes(String charsetName) */ public byte[] getBytes(Charset charset) { if (charset == null) throw new NullPointerException(); - return StringCoding.encode(charset, value, offset, count); + return StringCoding.encode(charset, value, 0, value.length); } /** @@ -899,7 +943,7 @@ public byte[] getBytes(Charset charset) { * @since JDK1.1 */ public byte[] getBytes() { - return StringCoding.encode(value, offset, count); + return StringCoding.encode(value, 0, value.length); } /** @@ -923,15 +967,15 @@ public boolean equals(Object anObject) { } if (anObject instanceof String) { String anotherString = (String)anObject; - int n = count; - if (n == anotherString.count) { + int n = value.length; + if (n == anotherString.value.length) { char v1[] = value; char v2[] = anotherString.value; - int i = offset; - int j = anotherString.offset; + int i = 0; while (n-- != 0) { - if (v1[i++] != v2[j++]) + if (v1[i] != v2[i]) return false; + i++; } return true; } @@ -942,7 +986,8 @@ public boolean equals(Object anObject) { /** * Compares this string to the specified {@code StringBuffer}. The result * is {@code true} if and only if this {@code String} represents the same - * sequence of characters as the specified {@code StringBuffer}. + * sequence of characters as the specified {@code StringBuffer}. This method + * synchronizes on the {@code StringBuffer}. 
* * @param sb * The {@code StringBuffer} to compare this {@code String} against @@ -954,15 +999,30 @@ public boolean equals(Object anObject) { * @since 1.4 */ public boolean contentEquals(StringBuffer sb) { - synchronized(sb) { - return contentEquals((CharSequence)sb); + return contentEquals((CharSequence)sb); + } + + private boolean nonSyncContentEquals(AbstractStringBuilder sb) { + char v1[] = value; + char v2[] = sb.getValue(); + int n = v1.length; + if (n != sb.length()) { + return false; } + for (int i = 0; i < n; i++) { + if (v1[i] != v2[i]) { + return false; + } + } + return true; } /** - * Compares this string to the specified {@code CharSequence}. The result - * is {@code true} if and only if this {@code String} represents the same - * sequence of char values as the specified sequence. + * Compares this string to the specified {@code CharSequence}. The + * result is {@code true} if and only if this {@code String} represents the + * same sequence of char values as the specified sequence. Note that if the + * {@code CharSequence} is a {@code StringBuffer} then the method + * synchronizes on it. * * @param cs * The sequence to compare this {@code String} against @@ -974,32 +1034,29 @@ public boolean contentEquals(StringBuffer sb) { * @since 1.5 */ public boolean contentEquals(CharSequence cs) { - if (count != cs.length()) - return false; // Argument is a StringBuffer, StringBuilder if (cs instanceof AbstractStringBuilder) { - char v1[] = value; - char v2[] = ((AbstractStringBuilder)cs).getValue(); - int i = offset; - int j = 0; - int n = count; - while (n-- != 0) { - if (v1[i++] != v2[j++]) - return false; + if (cs instanceof StringBuffer) { + synchronized(cs) { + return nonSyncContentEquals((AbstractStringBuilder)cs); + } + } else { + return nonSyncContentEquals((AbstractStringBuilder)cs); } - return true; } // Argument is a String if (cs.equals(this)) return true; // Argument is a generic CharSequence char v1[] = value; - int i = offset; - int j = 0; - int n = count; - while (n-- != 0) { - if (v1[i++] != cs.charAt(j++)) + int n = v1.length; + if (n != cs.length()) { + return false; + } + for (int i = 0; i < n; i++) { + if (v1[i] != cs.charAt(i)) { return false; + } } return true; } @@ -1033,23 +1090,24 @@ public boolean contentEquals(CharSequence cs) { * @see #equals(Object) */ public boolean equalsIgnoreCase(String anotherString) { - return (this == anotherString) ? true : - (anotherString != null) && (anotherString.count == count) && - regionMatches(true, 0, anotherString, 0, count); + return (this == anotherString) ? true + : (anotherString != null) + && (anotherString.value.length == value.length) + && regionMatches(true, 0, anotherString, 0, value.length); } /** * Compares two strings lexicographically. * The comparison is based on the Unicode value of each character in * the strings. The character sequence represented by this - * String object is compared lexicographically to the + * {@code String} object is compared lexicographically to the * character sequence represented by the argument string. The result is - * a negative integer if this String object + * a negative integer if this {@code String} object * lexicographically precedes the argument string. The result is a - * positive integer if this String object lexicographically + * positive integer if this {@code String} object lexicographically * follows the argument string. The result is zero if the strings - * are equal; compareTo returns 0 exactly when - * the {@link #equals(Object)} method would return true. 
+ * are equal; {@code compareTo} returns {@code 0} exactly when + * the {@link #equals(Object)} method would return {@code true}. *
* This is the definition of lexicographic ordering. If two strings are * different, then either they have different characters at some index @@ -1058,62 +1116,49 @@ public boolean equalsIgnoreCase(String anotherString) { * positions, let k be the smallest such index; then the string * whose character at position k has the smaller value, as * determined by using the < operator, lexicographically precedes the - * other string. In this case, compareTo returns the - * difference of the two character values at position k in + * other string. In this case, {@code compareTo} returns the + * difference of the two character values at position {@code k} in * the two string -- that is, the value: *
     * this.charAt(k)-anotherString.charAt(k)
     *
* If there is no index position at which they differ, then the shorter * string lexicographically precedes the longer string. In this case, - * compareTo returns the difference of the lengths of the + * {@code compareTo} returns the difference of the lengths of the * strings -- that is, the value: *
     * this.length()-anotherString.length()
     *
* - * @param anotherString the String to be compared. - * @return the value 0 if the argument string is equal to - * this string; a value less than 0 if this string + * @param anotherString the {@code String} to be compared. + * @return the value {@code 0} if the argument string is equal to + * this string; a value less than {@code 0} if this string * is lexicographically less than the string argument; and a - * value greater than 0 if this string is + * value greater than {@code 0} if this string is * lexicographically greater than the string argument. */ public int compareTo(String anotherString) { - int len1 = count; - int len2 = anotherString.count; - int n = Math.min(len1, len2); + int len1 = value.length; + int len2 = anotherString.value.length; + int lim = Math.min(len1, len2); char v1[] = value; char v2[] = anotherString.value; - int i = offset; - int j = anotherString.offset; - - if (i == j) { - int k = i; - int lim = n + i; - while (k < lim) { - char c1 = v1[k]; - char c2 = v2[k]; - if (c1 != c2) { - return c1 - c2; - } - k++; - } - } else { - while (n-- != 0) { - char c1 = v1[i++]; - char c2 = v2[j++]; - if (c1 != c2) { - return c1 - c2; - } + + int k = 0; + while (k < lim) { + char c1 = v1[k]; + char c2 = v2[k]; + if (c1 != c2) { + return c1 - c2; } + k++; } return len1 - len2; } /** - * A Comparator that orders String objects as by - * compareToIgnoreCase. This comparator is serializable. + * A Comparator that orders {@code String} objects as by + * {@code compareToIgnoreCase}. This comparator is serializable. *
* Note that this Comparator does not take locale into account, * and will result in an unsatisfactory ordering for certain locales. @@ -1126,7 +1171,7 @@ public int compareTo(String anotherString) { public static final Comparator CASE_INSENSITIVE_ORDER = new CaseInsensitiveComparator(); private static class CaseInsensitiveComparator - implements Comparator, java.io.Serializable { + implements Comparator, java.io.Serializable { // use serialVersionUID from JDK 1.2.2 for interoperability private static final long serialVersionUID = 8575799808933029326L; @@ -1152,14 +1197,17 @@ public int compare(String s1, String s2) { } return n1 - n2; } + + /** Replaces the de-serialized object. */ + private Object readResolve() { return CASE_INSENSITIVE_ORDER; } } /** * Compares two strings lexicographically, ignoring case * differences. This method returns an integer whose sign is that of - * calling compareTo with normalized versions of the strings + * calling {@code compareTo} with normalized versions of the strings * where case differences have been eliminated by calling - * Character.toLowerCase(Character.toUpperCase(character)) on + * {@code Character.toLowerCase(Character.toUpperCase(character))} on * each character. *
* Note that this method does not take locale into account, @@ -1167,7 +1215,7 @@ public int compare(String s1, String s2) { * The java.text package provides collators to allow * locale-sensitive ordering. * - * @param str the String to be compared. + * @param str the {@code String} to be compared. * @return a negative integer, zero, or a positive integer as the * specified String is greater than, equal to, or less * than this String, ignoring case considerations. @@ -1181,23 +1229,24 @@ public int compareToIgnoreCase(String str) { /** * Tests if two string regions are equal. *
- * A substring of this String object is compared to a substring + * A substring of this {@code String} object is compared to a substring * of the argument other. The result is true if these substrings * represent identical character sequences. The substring of this - * String object to be compared begins at index toffset - * and has length len. The substring of other to be compared - * begins at index ooffset and has length len. The - * result is false if and only if at least one of the following + * {@code String} object to be compared begins at index {@code toffset} + * and has length {@code len}. The substring of other to be compared + * begins at index {@code ooffset} and has length {@code len}. The + * result is {@code false} if and only if at least one of the following * is true: - *
  • toffset is negative. - *
  • ooffset is negative. - *
  • toffset+len is greater than the length of this - * String object. - *
  • ooffset+len is greater than the length of the other + *
    • {@code toffset} is negative. + *
    • {@code ooffset} is negative. + *
    • {@code toffset+len} is greater than the length of this + * {@code String} object. + *
    • {@code ooffset+len} is greater than the length of the other * argument. - *
    • There is some nonnegative integer k less than len + *
    • There is some nonnegative integer k less than {@code len} * such that: - * this.charAt(toffset+k) != other.charAt(ooffset+k) + * {@code this.charAt(toffset + }k{@code ) != other.charAt(ooffset + } + * k{@code )} *
    * * @param toffset the starting offset of the subregion in this string. @@ -1205,19 +1254,20 @@ public int compareToIgnoreCase(String str) { * @param ooffset the starting offset of the subregion in the string * argument. * @param len the number of characters to compare. - * @return true if the specified subregion of this string + * @return {@code true} if the specified subregion of this string * exactly matches the specified subregion of the string argument; - * false otherwise. + * {@code false} otherwise. */ public boolean regionMatches(int toffset, String other, int ooffset, - int len) { + int len) { char ta[] = value; - int to = offset + toffset; + int to = toffset; char pa[] = other.value; - int po = other.offset + ooffset; + int po = ooffset; // Note: toffset, ooffset, or len might be near -1>>>1. - if ((ooffset < 0) || (toffset < 0) || (toffset > (long)count - len) - || (ooffset > (long)other.count - len)) { + if ((ooffset < 0) || (toffset < 0) + || (toffset > (long)value.length - len) + || (ooffset > (long)other.value.length - len)) { return false; } while (len-- > 0) { @@ -1231,31 +1281,31 @@ public boolean regionMatches(int toffset, String other, int ooffset, /** * Tests if two string regions are equal. *
    - * A substring of this String object is compared to a substring - * of the argument other. The result is true if these + * A substring of this {@code String} object is compared to a substring + * of the argument {@code other}. The result is {@code true} if these * substrings represent character sequences that are the same, ignoring - * case if and only if ignoreCase is true. The substring of - * this String object to be compared begins at index - * toffset and has length len. The substring of - * other to be compared begins at index ooffset and - * has length len. The result is false if and only if + * case if and only if {@code ignoreCase} is true. The substring of + * this {@code String} object to be compared begins at index + * {@code toffset} and has length {@code len}. The substring of + * {@code other} to be compared begins at index {@code ooffset} and + * has length {@code len}. The result is {@code false} if and only if * at least one of the following is true: - *
    • toffset is negative. - *
    • ooffset is negative. - *
    • toffset+len is greater than the length of this - * String object. - *
    • ooffset+len is greater than the length of the other + *
      • {@code toffset} is negative. + *
      • {@code ooffset} is negative. + *
      • {@code toffset+len} is greater than the length of this + * {@code String} object. + *
      • {@code ooffset+len} is greater than the length of the other * argument. - *
      • ignoreCase is false and there is some nonnegative - * integer k less than len such that: + *
      • {@code ignoreCase} is {@code false} and there is some nonnegative + * integer k less than {@code len} such that: *
     * this.charAt(toffset+k) != other.charAt(ooffset+k)
     *
        - *
      • ignoreCase is true and there is some nonnegative - * integer k less than len such that: + *
      • {@code ignoreCase} is {@code true} and there is some nonnegative + * integer k less than {@code len} such that: *
     * Character.toLowerCase(this.charAt(toffset+k)) !=
-             Character.toLowerCase(other.charAt(ooffset+k))
+     Character.toLowerCase(other.charAt(ooffset+k))
     *
        * and: *
        @@ -1264,7 +1314,7 @@ public boolean regionMatches(int toffset, String other, int ooffset,
              * 
        *
      * - * @param ignoreCase if true, ignore case when comparing + * @param ignoreCase if {@code true}, ignore case when comparing * characters. * @param toffset the starting offset of the subregion in this * string. @@ -1272,21 +1322,22 @@ public boolean regionMatches(int toffset, String other, int ooffset, * @param ooffset the starting offset of the subregion in the string * argument. * @param len the number of characters to compare. - * @return true if the specified subregion of this string + * @return {@code true} if the specified subregion of this string * matches the specified subregion of the string argument; - * false otherwise. Whether the matching is exact - * or case insensitive depends on the ignoreCase + * {@code false} otherwise. Whether the matching is exact + * or case insensitive depends on the {@code ignoreCase} * argument. */ public boolean regionMatches(boolean ignoreCase, int toffset, - String other, int ooffset, int len) { + String other, int ooffset, int len) { char ta[] = value; - int to = offset + toffset; + int to = toffset; char pa[] = other.value; - int po = other.offset + ooffset; + int po = ooffset; // Note: toffset, ooffset, or len might be near -1>>>1. - if ((ooffset < 0) || (toffset < 0) || (toffset > (long)count - len) || - (ooffset > (long)other.count - len)) { + if ((ooffset < 0) || (toffset < 0) + || (toffset > (long)value.length - len) + || (ooffset > (long)other.value.length - len)) { return false; } while (len-- > 0) { @@ -1324,12 +1375,12 @@ public boolean regionMatches(boolean ignoreCase, int toffset, * * @param prefix the prefix. * @param toffset where to begin looking in this string. - * @return true if the character sequence represented by the + * @return {@code true} if the character sequence represented by the * argument is a prefix of the substring of this object starting - * at index toffset; false otherwise. - * The result is false if toffset is + * at index {@code toffset}; {@code false} otherwise. + * The result is {@code false} if {@code toffset} is * negative or greater than the length of this - * String object; otherwise the result is the same + * {@code String} object; otherwise the result is the same * as the result of the expression *
     *          this.substring(toffset).startsWith(prefix)
      @@ -1337,12 +1388,12 @@ public boolean regionMatches(boolean ignoreCase, int toffset,
            */
           public boolean startsWith(String prefix, int toffset) {
               char ta[] = value;
      -        int to = offset + toffset;
      +        int to = toffset;
               char pa[] = prefix.value;
      -        int po = prefix.offset;
      -        int pc = prefix.count;
      +        int po = 0;
      +        int pc = prefix.value.length;
               // Note: toffset might be near -1>>>1.
      -        if ((toffset < 0) || (toffset > count - pc)) {
      +        if ((toffset < 0) || (toffset > value.length - pc)) {
                   return false;
               }
               while (--pc >= 0) {
      @@ -1357,12 +1408,12 @@ public boolean startsWith(String prefix, int toffset) {
            * Tests if this string starts with the specified prefix.
            *
            * @param   prefix   the prefix.
      -     * @return  true if the character sequence represented by the
      +     * @return  {@code true} if the character sequence represented by the
            *          argument is a prefix of the character sequence represented by
      -     *          this string; false otherwise.
      -     *          Note also that true will be returned if the
      +     *          this string; {@code false} otherwise.
      +     *          Note also that {@code true} will be returned if the
            *          argument is an empty string or is equal to this
      -     *          String object as determined by the
      +     *          {@code String} object as determined by the
            *          {@link #equals(Object)} method.
     * @since   1.0
            */
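The two-argument startsWith above is the workhorse for both the one-argument form and endsWith. Worth noting: an out-of-range toffset (negative, or too large for the prefix to fit) simply yields false rather than throwing. A minimal usage sketch of that behavior; the literals are illustrative only:

    public class StartsWithDemo {
        public static void main(String[] args) {
            String s = "unhappy";
            System.out.println(s.startsWith("un"));        // true: prefix at index 0
            System.out.println(s.startsWith("happy", 2));  // true: match begins at toffset 2
            System.out.println(s.startsWith("happy", -1)); // false: negative toffset, no exception
            System.out.println(s.startsWith("happy", 99)); // false: toffset past the end, no exception
        }
    }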
      @@ -1374,39 +1425,37 @@ public boolean startsWith(String prefix) {
            * Tests if this string ends with the specified suffix.
            *
            * @param   suffix   the suffix.
      -     * @return  true if the character sequence represented by the
      +     * @return  {@code true} if the character sequence represented by the
            *          argument is a suffix of the character sequence represented by
      -     *          this object; false otherwise. Note that the
      -     *          result will be true if the argument is the
      -     *          empty string or is equal to this String object
      +     *          this object; {@code false} otherwise. Note that the
      +     *          result will be {@code true} if the argument is the
      +     *          empty string or is equal to this {@code String} object
            *          as determined by the {@link #equals(Object)} method.
            */
           public boolean endsWith(String suffix) {
      -        return startsWith(suffix, count - suffix.count);
      +        return startsWith(suffix, value.length - suffix.value.length);
           }
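endsWith is a one-liner over startsWith: the suffix must match at toffset = this.length() - suffix.length(). When the suffix is longer than the string, that offset goes negative and the range check in startsWith rejects it, so no separate length test is needed here. A small sketch of the same delegation trick; endsWithLike is a hypothetical name, not part of this patch:

    public class EndsWithDemo {
        // Hypothetical restatement of the delegation endsWith uses.
        static boolean endsWithLike(String s, String suffix) {
            // A suffix longer than s produces a negative toffset,
            // which startsWith turns into false rather than an exception.
            return s.startsWith(suffix, s.length() - suffix.length());
        }

        public static void main(String[] args) {
            System.out.println(endsWithLike("together", "her")); // true
            System.out.println(endsWithLike("her", "together")); // false: negative offset
        }
    }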
       
           /**
            * Returns a hash code for this string. The hash code for a
      -     * String object is computed as
      +     * {@code String} object is computed as
     *
     * s[0]*31^(n-1) + s[1]*31^(n-2) + ... + s[n-1]
     *
      - * using int arithmetic, where s[i] is the - * ith character of the string, n is the length of - * the string, and ^ indicates exponentiation. + * using {@code int} arithmetic, where {@code s[i]} is the + * ith character of the string, {@code n} is the length of + * the string, and {@code ^} indicates exponentiation. * (The hash value of the empty string is zero.) * * @return a hash code value for this object. */ public int hashCode() { int h = hash; - if (h == 0 && count > 0) { - int off = offset; + if (h == 0 && value.length > 0) { char val[] = value; - int len = count; - for (int i = 0; i < len; i++) { - h = 31*h + val[off++]; + for (int i = 0; i < value.length; i++) { + h = 31 * h + val[i]; } hash = h; } @@ -1416,26 +1465,26 @@ public int hashCode() { /** * Returns the index within this string of the first occurrence of * the specified character. If a character with value - * ch occurs in the character sequence represented by - * this String object, then the index (in Unicode + * {@code ch} occurs in the character sequence represented by + * this {@code String} object, then the index (in Unicode * code units) of the first such occurrence is returned. For - * values of ch in the range from 0 to 0xFFFF + * values of {@code ch} in the range from 0 to 0xFFFF * (inclusive), this is the smallest value k such that: *
     * this.charAt(k) == ch
     *
      - * is true. For other values of ch, it is the + * is true. For other values of {@code ch}, it is the * smallest value k such that: *
     * this.codePointAt(k) == ch
     *
      * is true. In either case, if no such character occurs in this - * string, then -1 is returned. + * string, then {@code -1} is returned. * * @param ch a character (Unicode code point). * @return the index of the first occurrence of the character in the * character sequence represented by this object, or - * -1 if the character does not occur. + * {@code -1} if the character does not occur. */ public int indexOf(int ch) { return indexOf(ch, 0); @@ -1445,45 +1494,46 @@ public int indexOf(int ch) { * Returns the index within this string of the first occurrence of the * specified character, starting the search at the specified index. *
      - * If a character with value ch occurs in the - * character sequence represented by this String - * object at an index no smaller than fromIndex, then + * If a character with value {@code ch} occurs in the + * character sequence represented by this {@code String} + * object at an index no smaller than {@code fromIndex}, then * the index of the first such occurrence is returned. For values - * of ch in the range from 0 to 0xFFFF (inclusive), + * of {@code ch} in the range from 0 to 0xFFFF (inclusive), * this is the smallest value k such that: *

-     * (this.charAt(k) == ch) && (k >= fromIndex)
+     * (this.charAt(k) == ch) {@code &&} (k >= fromIndex)
     *
      - * is true. For other values of ch, it is the + * is true. For other values of {@code ch}, it is the * smallest value k such that: *
-     * (this.codePointAt(k) == ch) && (k >= fromIndex)
+     * (this.codePointAt(k) == ch) {@code &&} (k >= fromIndex)
     *
      * is true. In either case, if no such character occurs in this - * string at or after position fromIndex, then - * -1 is returned. + * string at or after position {@code fromIndex}, then + * {@code -1} is returned. * *
      - * There is no restriction on the value of fromIndex. If it + * There is no restriction on the value of {@code fromIndex}. If it * is negative, it has the same effect as if it were zero: this entire * string may be searched. If it is greater than the length of this * string, it has the same effect as if it were equal to the length of - * this string: -1 is returned. + * this string: {@code -1} is returned. * - *
      All indices are specified in char values + *
      All indices are specified in {@code char} values * (Unicode code units). * * @param ch a character (Unicode code point). * @param fromIndex the index to start the search from. * @return the index of the first occurrence of the character in the * character sequence represented by this object that is greater - * than or equal to fromIndex, or -1 + * than or equal to {@code fromIndex}, or {@code -1} * if the character does not occur. */ public int indexOf(int ch, int fromIndex) { + final int max = value.length; if (fromIndex < 0) { fromIndex = 0; - } else if (fromIndex >= count) { + } else if (fromIndex >= max) { // Note: fromIndex might be near -1>>>1. return -1; } @@ -1492,11 +1542,9 @@ public int indexOf(int ch, int fromIndex) { // handle most cases here (ch is a BMP code point or a // negative value (invalid code point)) final char[] value = this.value; - final int offset = this.offset; - final int max = offset + count; - for (int i = offset + fromIndex; i < max ; i++) { + for (int i = fromIndex; i < max; i++) { if (value[i] == ch) { - return i - offset; + return i; } } return -1; @@ -1511,13 +1559,12 @@ public int indexOf(int ch, int fromIndex) { private int indexOfSupplementary(int ch, int fromIndex) { if (Character.isValidCodePoint(ch)) { final char[] value = this.value; - final int offset = this.offset; final char hi = Character.highSurrogate(ch); final char lo = Character.lowSurrogate(ch); - final int max = offset + count - 1; - for (int i = offset + fromIndex; i < max; i++) { - if (value[i] == hi && value[i+1] == lo) { - return i - offset; + final int max = value.length - 1; + for (int i = fromIndex; i < max; i++) { + if (value[i] == hi && value[i + 1] == lo) { + return i; } } } @@ -1526,55 +1573,55 @@ private int indexOfSupplementary(int ch, int fromIndex) { /** * Returns the index within this string of the last occurrence of - * the specified character. For values of ch in the + * the specified character. For values of {@code ch} in the * range from 0 to 0xFFFF (inclusive), the index (in Unicode code * units) returned is the largest value k such that: *
     * this.charAt(k) == ch
     *
      - * is true. For other values of ch, it is the + * is true. For other values of {@code ch}, it is the * largest value k such that: *
     * this.codePointAt(k) == ch
     *
      * is true. In either case, if no such character occurs in this - * string, then -1 is returned. The - * String is searched backwards starting at the last + * string, then {@code -1} is returned. The + * {@code String} is searched backwards starting at the last * character. * * @param ch a character (Unicode code point). * @return the index of the last occurrence of the character in the * character sequence represented by this object, or - * -1 if the character does not occur. + * {@code -1} if the character does not occur. */ public int lastIndexOf(int ch) { - return lastIndexOf(ch, count - 1); + return lastIndexOf(ch, value.length - 1); } /** * Returns the index within this string of the last occurrence of * the specified character, searching backward starting at the - * specified index. For values of ch in the range + * specified index. For values of {@code ch} in the range * from 0 to 0xFFFF (inclusive), the index returned is the largest * value k such that: *
-     * (this.charAt(k) == ch) && (k <= fromIndex)
+     * (this.charAt(k) == ch) {@code &&} (k <= fromIndex)
     *
      - * is true. For other values of ch, it is the + * is true. For other values of {@code ch}, it is the * largest value k such that: *
-     * (this.codePointAt(k) == ch) && (k <= fromIndex)
+     * (this.codePointAt(k) == ch) {@code &&} (k <= fromIndex)
     *
      * is true. In either case, if no such character occurs in this - * string at or before position fromIndex, then - * -1 is returned. + * string at or before position {@code fromIndex}, then + * {@code -1} is returned. * - *
      All indices are specified in char values + *
      All indices are specified in {@code char} values * (Unicode code units). * * @param ch a character (Unicode code point). * @param fromIndex the index to start the search from. There is no - * restriction on the value of fromIndex. If it is + * restriction on the value of {@code fromIndex}. If it is * greater than or equal to the length of this string, it has * the same effect as if it were equal to one less than the * length of this string: this entire string may be searched. @@ -1582,7 +1629,7 @@ public int lastIndexOf(int ch) { * -1 is returned. * @return the index of the last occurrence of the character in the * character sequence represented by this object that is less - * than or equal to fromIndex, or -1 + * than or equal to {@code fromIndex}, or {@code -1} * if the character does not occur before that point. */ public int lastIndexOf(int ch, int fromIndex) { @@ -1590,11 +1637,10 @@ public int lastIndexOf(int ch, int fromIndex) { // handle most cases here (ch is a BMP code point or a // negative value (invalid code point)) final char[] value = this.value; - final int offset = this.offset; - int i = offset + Math.min(fromIndex, count - 1); - for (; i >= offset ; i--) { + int i = Math.min(fromIndex, value.length - 1); + for (; i >= 0; i--) { if (value[i] == ch) { - return i - offset; + return i; } } return -1; @@ -1609,13 +1655,12 @@ public int lastIndexOf(int ch, int fromIndex) { private int lastIndexOfSupplementary(int ch, int fromIndex) { if (Character.isValidCodePoint(ch)) { final char[] value = this.value; - final int offset = this.offset; char hi = Character.highSurrogate(ch); char lo = Character.lowSurrogate(ch); - int i = offset + Math.min(fromIndex, count - 2); - for (; i >= offset; i--) { - if (value[i] == hi && value[i+1] == lo) { - return i - offset; + int i = Math.min(fromIndex, value.length - 2); + for (; i >= 0; i--) { + if (value[i] == hi && value[i + 1] == lo) { + return i; } } } @@ -1646,7 +1691,7 @@ public int indexOf(String str) { * *
      The returned index is the smallest value k for which: *
-     * k >= fromIndex && this.startsWith(str, k)
+     * k >= fromIndex {@code &&} this.startsWith(str, k)
     *
      * If no such value of k exists, then {@code -1} is returned. * @@ -1657,8 +1702,26 @@ public int indexOf(String str) { * or {@code -1} if there is no such occurrence. */ public int indexOf(String str, int fromIndex) { - return indexOf(value, offset, count, - str.value, str.offset, str.count, fromIndex); + return indexOf(value, 0, value.length, + str.value, 0, str.value.length, fromIndex); + } + + /** + * Code shared by String and AbstractStringBuilder to do searches. The + * source is the character array being searched, and the target + * is the string being searched for. + * + * @param source the characters being searched. + * @param sourceOffset offset of the source string. + * @param sourceCount count of the source string. + * @param target the characters being searched for. + * @param fromIndex the index to begin searching from. + */ + static int indexOf(char[] source, int sourceOffset, int sourceCount, + String target, int fromIndex) { + return indexOf(source, sourceOffset, sourceCount, + target.value, 0, target.value.length, + fromIndex); } /** @@ -1675,8 +1738,8 @@ public int indexOf(String str, int fromIndex) { * @param fromIndex the index to begin searching from. */ static int indexOf(char[] source, int sourceOffset, int sourceCount, - char[] target, int targetOffset, int targetCount, - int fromIndex) { + char[] target, int targetOffset, int targetCount, + int fromIndex) { if (fromIndex >= sourceCount) { return (targetCount == 0 ? sourceCount : -1); } @@ -1687,7 +1750,7 @@ static int indexOf(char[] source, int sourceOffset, int sourceCount, return fromIndex; } - char first = target[targetOffset]; + char first = target[targetOffset]; int max = sourceOffset + (sourceCount - targetCount); for (int i = sourceOffset + fromIndex; i <= max; i++) { @@ -1700,8 +1763,8 @@ static int indexOf(char[] source, int sourceOffset, int sourceCount, if (i <= max) { int j = i + 1; int end = j + targetCount - 1; - for (int k = targetOffset + 1; j < end && source[j] == - target[k]; j++, k++); + for (int k = targetOffset + 1; j < end && source[j] + == target[k]; j++, k++); if (j == end) { /* Found whole string. */ @@ -1728,7 +1791,7 @@ static int indexOf(char[] source, int sourceOffset, int sourceCount, * or {@code -1} if there is no such occurrence. */ public int lastIndexOf(String str) { - return lastIndexOf(str, count); + return lastIndexOf(str, value.length); } /** @@ -1737,7 +1800,7 @@ public int lastIndexOf(String str) { * *
      The returned index is the largest value k for which: *
-     * k <= fromIndex && this.startsWith(str, k)
+     * k {@code <=} fromIndex {@code &&} this.startsWith(str, k)
     *
      * If no such value of k exists, then {@code -1} is returned. * @@ -1748,8 +1811,26 @@ public int lastIndexOf(String str) { * or {@code -1} if there is no such occurrence. */ public int lastIndexOf(String str, int fromIndex) { - return lastIndexOf(value, offset, count, - str.value, str.offset, str.count, fromIndex); + return lastIndexOf(value, 0, value.length, + str.value, 0, str.value.length, fromIndex); + } + + /** + * Code shared by String and AbstractStringBuilder to do searches. The + * source is the character array being searched, and the target + * is the string being searched for. + * + * @param source the characters being searched. + * @param sourceOffset offset of the source string. + * @param sourceCount count of the source string. + * @param target the characters being searched for. + * @param fromIndex the index to begin searching from. + */ + static int lastIndexOf(char[] source, int sourceOffset, int sourceCount, + String target, int fromIndex) { + return lastIndexOf(source, sourceOffset, sourceCount, + target.value, 0, target.value.length, + fromIndex); } /** @@ -1766,8 +1847,8 @@ public int lastIndexOf(String str, int fromIndex) { * @param fromIndex the index to begin searching from. */ static int lastIndexOf(char[] source, int sourceOffset, int sourceCount, - char[] target, int targetOffset, int targetCount, - int fromIndex) { + char[] target, int targetOffset, int targetCount, + int fromIndex) { /* * Check arguments; return immediately where possible. For * consistency, don't check for null str. @@ -1812,7 +1893,7 @@ static int lastIndexOf(char[] source, int sourceOffset, int sourceCount, } /** - * Returns a new string that is a substring of this string. The + * Returns a string that is a substring of this string. The * substring begins with the character at the specified index and * extends to the end of this string.
      * Examples: @@ -1825,18 +1906,25 @@ static int lastIndexOf(char[] source, int sourceOffset, int sourceCount, * @param beginIndex the beginning index, inclusive. * @return the specified substring. * @exception IndexOutOfBoundsException if - * beginIndex is negative or larger than the - * length of this String object. + * {@code beginIndex} is negative or larger than the + * length of this {@code String} object. */ public String substring(int beginIndex) { - return substring(beginIndex, count); + if (beginIndex < 0) { + throw new StringIndexOutOfBoundsException(beginIndex); + } + int subLen = value.length - beginIndex; + if (subLen < 0) { + throw new StringIndexOutOfBoundsException(subLen); + } + return (beginIndex == 0) ? this : new String(value, beginIndex, subLen); } /** - * Returns a new string that is a substring of this string. The - * substring begins at the specified beginIndex and - * extends to the character at index endIndex - 1. - * Thus the length of the substring is endIndex-beginIndex. + * Returns a string that is a substring of this string. The + * substring begins at the specified {@code beginIndex} and + * extends to the character at index {@code endIndex - 1}. + * Thus the length of the substring is {@code endIndex-beginIndex}. *
     * Examples:
     *
      @@ -1848,28 +1936,29 @@ public String substring(int beginIndex) {
            * @param      endIndex     the ending index, exclusive.
            * @return     the specified substring.
            * @exception  IndexOutOfBoundsException  if the
      -     *             beginIndex is negative, or
      -     *             endIndex is larger than the length of
      -     *             this String object, or
      -     *             beginIndex is larger than
      -     *             endIndex.
      +     *             {@code beginIndex} is negative, or
      +     *             {@code endIndex} is larger than the length of
      +     *             this {@code String} object, or
      +     *             {@code beginIndex} is larger than
      +     *             {@code endIndex}.
            */
           public String substring(int beginIndex, int endIndex) {
               if (beginIndex < 0) {
                   throw new StringIndexOutOfBoundsException(beginIndex);
               }
      -        if (endIndex > count) {
      +        if (endIndex > value.length) {
                   throw new StringIndexOutOfBoundsException(endIndex);
               }
      -        if (beginIndex > endIndex) {
      -            throw new StringIndexOutOfBoundsException(endIndex - beginIndex);
      +        int subLen = endIndex - beginIndex;
      +        if (subLen < 0) {
      +            throw new StringIndexOutOfBoundsException(subLen);
               }
      -        return ((beginIndex == 0) && (endIndex == count)) ? this :
      -            new String(offset + beginIndex, endIndex - beginIndex, value);
      +        return ((beginIndex == 0) && (endIndex == value.length)) ? this
      +                : new String(value, beginIndex, subLen);
           }
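The substring hunk above is the most user-visible change in this file: with the offset and count fields gone, substring can no longer share the parent's char[] and instead copies the requested range through the public String(char[], int, int) constructor. The old defensive idiom new String(big.substring(a, b)), used under JDK 6/7 to keep a small substring from pinning a large backing array, is redundant against this implementation. A minimal sketch of the consequence, assuming JDK 8 semantics:

    public class SubstringDemo {
        public static void main(String[] args) {
            char[] big = new char[1_000_000];
            java.util.Arrays.fill(big, 'x');
            String parent = new String(big);       // copies big into parent's own array
            String tiny = parent.substring(0, 3);  // copies just 3 chars; no array sharing
            parent = null;                         // the 1M-char array is now collectable
            System.out.println(tiny);              // xxx
        }
    }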
       
           /**
      -     * Returns a new character sequence that is a subsequence of this sequence.
      +     * Returns a character sequence that is a subsequence of this sequence.
            *
     *
      An invocation of this method of the form * @@ -1881,17 +1970,18 @@ public String substring(int beginIndex, int endIndex) { *

            * str.substring(begin, end)
      * - * This method is defined so that the String class can implement - * the {@link CharSequence} interface.
      + * @apiNote + * This method is defined so that the {@code String} class can implement + * the {@link CharSequence} interface. * - * @param beginIndex the begin index, inclusive. - * @param endIndex the end index, exclusive. - * @return the specified subsequence. + * @param beginIndex the begin index, inclusive. + * @param endIndex the end index, exclusive. + * @return the specified subsequence. * * @throws IndexOutOfBoundsException - * if beginIndex or endIndex are negative, - * if endIndex is greater than length(), - * or if beginIndex is greater than startIndex + * if {@code beginIndex} or {@code endIndex} is negative, + * if {@code endIndex} is greater than {@code length()}, + * or if {@code beginIndex} is greater than {@code endIndex} * * @since 1.4 * @spec JSR-51 @@ -1903,11 +1993,11 @@ public CharSequence subSequence(int beginIndex, int endIndex) { /** * Concatenates the specified string to the end of this string. *
      - * If the length of the argument string is 0, then this - * String object is returned. Otherwise, a new - * String object is created, representing a character + * If the length of the argument string is {@code 0}, then this + * {@code String} object is returned. Otherwise, a + * {@code String} object is returned that represents a character * sequence that is the concatenation of the character sequence - * represented by this String object and the character + * represented by this {@code String} object and the character * sequence represented by the argument string.
     * Examples:
     *
      @@ -1915,8 +2005,8 @@ public CharSequence subSequence(int beginIndex, int endIndex) {
            * "to".concat("get").concat("her") returns "together"
            * 
      * - * @param str the String that is concatenated to the end - * of this String. + * @param str the {@code String} that is concatenated to the end + * of this {@code String}. * @return a string that represents the concatenation of this object's * characters followed by the string argument's characters. */ @@ -1925,24 +2015,24 @@ public String concat(String str) { if (otherLen == 0) { return this; } - char buf[] = new char[count + otherLen]; - getChars(0, count, buf, 0); - str.getChars(0, otherLen, buf, count); - return new String(0, count + otherLen, buf); + int len = value.length; + char buf[] = Arrays.copyOf(value, len + otherLen); + str.getChars(buf, len); + return new String(buf, true); } /** - * Returns a new string resulting from replacing all occurrences of - * oldChar in this string with newChar. + * Returns a string resulting from replacing all occurrences of + * {@code oldChar} in this string with {@code newChar}. *
      - * If the character oldChar does not occur in the - * character sequence represented by this String object, - * then a reference to this String object is returned. - * Otherwise, a new String object is created that + * If the character {@code oldChar} does not occur in the + * character sequence represented by this {@code String} object, + * then a reference to this {@code String} object is returned. + * Otherwise, a {@code String} object is returned that * represents a character sequence identical to the character sequence - * represented by this String object, except that every - * occurrence of oldChar is replaced by an occurrence - * of newChar. + * represented by this {@code String} object, except that every + * occurrence of {@code oldChar} is replaced by an occurrence + * of {@code newChar}. *
     * Examples:
     *
      @@ -1958,31 +2048,30 @@ public String concat(String str) {
            * @param   oldChar   the old character.
            * @param   newChar   the new character.
            * @return  a string derived from this string by replacing every
      -     *          occurrence of oldChar with newChar.
      +     *          occurrence of {@code oldChar} with {@code newChar}.
            */
           public String replace(char oldChar, char newChar) {
               if (oldChar != newChar) {
      -            int len = count;
      +            int len = value.length;
                   int i = -1;
                   char[] val = value; /* avoid getfield opcode */
      -            int off = offset;   /* avoid getfield opcode */
       
                   while (++i < len) {
      -                if (val[off + i] == oldChar) {
      +                if (val[i] == oldChar) {
                           break;
                       }
                   }
                   if (i < len) {
                       char buf[] = new char[len];
      -                for (int j = 0 ; j < i ; j++) {
      -                    buf[j] = val[off+j];
      +                for (int j = 0; j < i; j++) {
      +                    buf[j] = val[j];
                       }
                       while (i < len) {
      -                    char c = val[off + i];
      +                    char c = val[i];
                           buf[i] = (c == oldChar) ? newChar : c;
                           i++;
                       }
      -                return new String(0, len, buf);
      +                return new String(buf, true);
                   }
               }
               return this;
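Note the replacement of new String(0, len, buf) with new String(buf, true): buf is freshly allocated and never escapes, so replace can hand it to the package-private sharing constructor String(char[], boolean) and skip a second copy. When oldChar never occurs, the method returns this unchanged. A usage sketch; the identity comparison below is for illustration only:

    public class ReplaceDemo {
        public static void main(String[] args) {
            String s = "mesquite in your cellar";
            System.out.println(s.replace('e', 'o'));      // mosquito in your collar
            String t = "JonL";
            System.out.println(t.replace('q', 'x') == t); // true: no match, same instance returned
        }
    }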
      @@ -1993,17 +2082,18 @@ public String replace(char oldChar, char newChar) {
            * href="../util/regex/Pattern.html#sum">regular expression.
            *
     *
      An invocation of this method of the form - * str.matches(regex) yields exactly the + * str{@code .matches(}regex{@code )} yields exactly the * same result as the expression * - *
      {@link java.util.regex.Pattern}.{@link - * java.util.regex.Pattern#matches(String,CharSequence) - * matches}(regex, str)
      + *
      + * {@link java.util.regex.Pattern}.{@link java.util.regex.Pattern#matches(String,CharSequence) + * matches(regex, str)} + *
      * * @param regex * the regular expression to which this string is to be matched * - * @return true if, and only if, this string matches the + * @return {@code true} if, and only if, this string matches the * given regular expression * * @throws PatternSyntaxException @@ -2023,8 +2113,7 @@ public boolean matches(String regex) { * sequence of char values. * * @param s the sequence to search for - * @return true if this string contains s, false otherwise - * @throws NullPointerException if s is null + * @return true if this string contains {@code s}, false otherwise * @since 1.5 */ public boolean contains(CharSequence s) { @@ -2037,18 +2126,20 @@ public boolean contains(CharSequence s) { * given replacement. * *
      An invocation of this method of the form - * str.replaceFirst(regex, repl) + * str{@code .replaceFirst(}regex{@code ,} repl{@code )} * yields exactly the same result as the expression * - *
      - * {@link java.util.regex.Pattern}.{@link java.util.regex.Pattern#compile - * compile}(regex).{@link - * java.util.regex.Pattern#matcher(java.lang.CharSequence) - * matcher}(str).{@link java.util.regex.Matcher#replaceFirst - * replaceFirst}(repl)
      + *
      + * + * {@link java.util.regex.Pattern}.{@link + * java.util.regex.Pattern#compile compile}(regex).{@link + * java.util.regex.Pattern#matcher(java.lang.CharSequence) matcher}(str).{@link + * java.util.regex.Matcher#replaceFirst replaceFirst}(repl) + * + *
      * *
      - * Note that backslashes (\) and dollar signs ($) in the + * Note that backslashes ({@code \}) and dollar signs ({@code $}) in the * replacement string may cause the results to be different than if it were * being treated as a literal replacement string; see * {@link java.util.regex.Matcher#replaceFirst}. @@ -2060,7 +2151,7 @@ public boolean contains(CharSequence s) { * @param replacement * the string to be substituted for the first match * - * @return The resulting String + * @return The resulting {@code String} * * @throws PatternSyntaxException * if the regular expression's syntax is invalid @@ -2080,18 +2171,20 @@ public String replaceFirst(String regex, String replacement) { * given replacement. * *
      An invocation of this method of the form - * str.replaceAll(regex, repl) + * str{@code .replaceAll(}regex{@code ,} repl{@code )} * yields exactly the same result as the expression * - *
      - * {@link java.util.regex.Pattern}.{@link java.util.regex.Pattern#compile - * compile}(regex).{@link - * java.util.regex.Pattern#matcher(java.lang.CharSequence) - * matcher}(str).{@link java.util.regex.Matcher#replaceAll - * replaceAll}(repl)
      + *
      + * + * {@link java.util.regex.Pattern}.{@link + * java.util.regex.Pattern#compile compile}(regex).{@link + * java.util.regex.Pattern#matcher(java.lang.CharSequence) matcher}(str).{@link + * java.util.regex.Matcher#replaceAll replaceAll}(repl) + * + *
      * *
      - * Note that backslashes (\) and dollar signs ($) in the + * Note that backslashes ({@code \}) and dollar signs ({@code $}) in the * replacement string may cause the results to be different than if it were * being treated as a literal replacement string; see * {@link java.util.regex.Matcher#replaceAll Matcher.replaceAll}. @@ -2103,7 +2196,7 @@ public String replaceFirst(String regex, String replacement) { * @param replacement * the string to be substituted for each match * - * @return The resulting String + * @return The resulting {@code String} * * @throws PatternSyntaxException * if the regular expression's syntax is invalid @@ -2127,13 +2220,11 @@ public String replaceAll(String regex, String replacement) { * @param target The sequence of char values to be replaced * @param replacement The replacement sequence of char values * @return The resulting string - * @throws NullPointerException if target or - * replacement is null. * @since 1.5 */ public String replace(CharSequence target, CharSequence replacement) { return Pattern.compile(target.toString(), Pattern.LITERAL).matcher( - this).replaceAll(Matcher.quoteReplacement(replacement.toString())); + this).replaceAll(Matcher.quoteReplacement(replacement.toString())); } /** @@ -2147,7 +2238,12 @@ public String replace(CharSequence target, CharSequence replacement) { * expression does not match any part of the input then the resulting array * has just one element, namely this string. * - *

      The limit parameter controls the number of times the + *

When there is a positive-width match at the beginning of this + * string then an empty leading substring is included at the beginning + * of the resulting array. A zero-width match at the beginning, however, + * never produces such an empty leading substring. + + *

      The {@code limit} parameter controls the number of times the * pattern is applied and therefore affects the length of the resulting * array. If the limit n is greater than zero then the pattern * will be applied at most n - 1 times, the array's @@ -2158,7 +2254,7 @@ public String replace(CharSequence target, CharSequence replacement) { * the pattern will be applied as many times as possible, the array can * have any length, and trailing empty strings will be discarded. * - *

      The string "boo:and:foo", for example, yields the + *

      The string {@code "boo:and:foo"}, for example, yields the * following results with these parameters: * *

      @@ -2169,33 +2265,34 @@ public String replace(CharSequence target, CharSequence replacement) { * * * - * + * * * - * + * * * - * + * * * - * + * * * - * + * * * - * + * *
Regex    Limit    Result
:        2        { "boo", "and:foo" }
:        2        {@code { "boo", "and:foo" }}
:        5        { "boo", "and", "foo" }
:        5        {@code { "boo", "and", "foo" }}
:        -2       { "boo", "and", "foo" }
:        -2       {@code { "boo", "and", "foo" }}
o        5        { "b", "", ":and:f", "", "" }
o        5        {@code { "b", "", ":and:f", "", "" }}
o        -2       { "b", "", ":and:f", "", "" }
o        -2       {@code { "b", "", ":and:f", "", "" }}
o        0        { "b", "", ":and:f" }
o        0        {@code { "b", "", ":and:f" }}
      * *
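The limit semantics in the table above can be verified by running them; a short standalone sketch (the class name and printing via Arrays.toString are illustrative assumptions):

import java.util.Arrays;

public class SplitLimitDemo {
    public static void main(String[] args) {
        String s = "boo:and:foo";
        // positive limit: the pattern is applied at most limit - 1 times
        System.out.println(Arrays.toString(s.split(":", 2)));  // [boo, and:foo]
        System.out.println(Arrays.toString(s.split(":", 5)));  // [boo, and, foo]
        // negative limit: applied as often as possible, trailing empties kept
        System.out.println(Arrays.toString(s.split("o", -2))); // [b, , :and:f, , ]
        // zero limit: trailing empty strings are discarded
        System.out.println(Arrays.toString(s.split("o", 0)));  // [b, , :and:f]
    }
}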

      An invocation of this method of the form - * str.split(regex, n) + * str.{@code split(}regex{@code ,} n{@code )} * yields the same result as the expression * *

- * {@link java.util.regex.Pattern}.{@link java.util.regex.Pattern#compile - * compile}(regex).{@link - * java.util.regex.Pattern#split(java.lang.CharSequence,int) - * split}(str, n) + * + * {@link java.util.regex.Pattern}.{@link + * java.util.regex.Pattern#compile compile}(regex).{@link + * java.util.regex.Pattern#split(java.lang.CharSequence,int) split}(str, n) + *
      * * @@ -2218,13 +2315,13 @@ public String replace(CharSequence target, CharSequence replacement) { */ public String[] split(String regex, int limit) { /* fastpath if the regex is a - (1)one-char String and this character is not one of the - RegEx's meta characters ".$|()[{^?*+\\", or - (2)two-char String and the first char is the backslash and - the second is not the ascii digit or ascii letter. - */ + (1)one-char String and this character is not one of the + RegEx's meta characters ".$|()[{^?*+\\", or + (2)two-char String and the first char is the backslash and + the second is not the ascii digit or ascii letter. + */ char ch = 0; - if (((regex.count == 1 && + if (((regex.value.length == 1 && ".$|()[{^?*+\\".indexOf(ch = regex.charAt(0)) == -1) || (regex.length() == 2 && regex.charAt(0) == '\\' && @@ -2244,24 +2341,26 @@ public String[] split(String regex, int limit) { off = next + 1; } else { // last one //assert (list.size() == limit - 1); - list.add(substring(off, count)); - off = count; + list.add(substring(off, value.length)); + off = value.length; break; } } // If no match was found, return this if (off == 0) - return new String[] { this }; + return new String[]{this}; // Add remaining segment if (!limited || list.size() < limit) - list.add(substring(off, count)); + list.add(substring(off, value.length)); // Construct result int resultSize = list.size(); - if (limit == 0) - while (resultSize > 0 && list.get(resultSize-1).length() == 0) + if (limit == 0) { + while (resultSize > 0 && list.get(resultSize - 1).length() == 0) { resultSize--; + } + } String[] result = new String[resultSize]; return list.subList(0, resultSize).toArray(result); } @@ -2277,7 +2376,7 @@ public String[] split(String regex, int limit) { * argument of zero. Trailing empty strings are therefore not included in * the resulting array. * - *

      The string "boo:and:foo", for example, yields the following + *

      The string {@code "boo:and:foo"}, for example, yields the following * results with these expressions: * *

      @@ -2286,9 +2385,9 @@ public String[] split(String regex, int limit) { * * * - * + * * - * + * *
Regex    Result
:        { "boo", "and", "foo" }
:        {@code { "boo", "and", "foo" }}
o        { "b", "", ":and:f" }
o        {@code { "b", "", ":and:f" }}
      * * @@ -2311,11 +2410,95 @@ public String[] split(String regex) { } /** - * Converts all of the characters in this String to lower - * case using the rules of the given Locale. Case mapping is based + * Returns a new String composed of copies of the + * {@code CharSequence elements} joined together with a copy of + * the specified {@code delimiter}. + * + *
      For example, + *
      {@code
      +     *     String message = String.join("-", "Java", "is", "cool");
      +     *     // message returned is: "Java-is-cool"
      +     * }
      + * + * Note that if an element is null, then {@code "null"} is added. + * + * @param delimiter the delimiter that separates each element + * @param elements the elements to join together. + * + * @return a new {@code String} that is composed of the {@code elements} + * separated by the {@code delimiter} + * + * @throws NullPointerException If {@code delimiter} or {@code elements} + * is {@code null} + * + * @see java.util.StringJoiner + * @since 1.8 + */ + public static String join(CharSequence delimiter, CharSequence... elements) { + Objects.requireNonNull(delimiter); + Objects.requireNonNull(elements); + // Number of elements not likely worth Arrays.stream overhead. + StringJoiner joiner = new StringJoiner(delimiter); + for (CharSequence cs: elements) { + joiner.add(cs); + } + return joiner.toString(); + } + + /** + * Returns a new {@code String} composed of copies of the + * {@code CharSequence elements} joined together with a copy of the + * specified {@code delimiter}. + * + *
      For example, + *
      {@code
+     *     List<String> strings = new LinkedList<>();
+     *     strings.add("Java"); strings.add("is");
+     *     strings.add("cool");
+     *     String message = String.join(" ", strings);
+     *     // message returned is: "Java is cool"
+     *
+     *     Set<String> strings = new LinkedHashSet<>();
+     *     strings.add("Java"); strings.add("is");
+     *     strings.add("very"); strings.add("cool");
+     *     String message = String.join("-", strings);
+     *     // message returned is: "Java-is-very-cool"
      +     * }
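Both join overloads, including the null handling noted just below, can be exercised with a small standalone sketch (the class name is an illustrative assumption):

import java.util.Arrays;
import java.util.List;

public class JoinDemo {
    public static void main(String[] args) {
        // varargs overload
        System.out.println(String.join("-", "Java", "is", "cool"));  // Java-is-cool

        // Iterable overload
        List<String> words = Arrays.asList("Java", "is", "very", "cool");
        System.out.println(String.join(" ", words));                 // Java is very cool

        // a null element is appended as the four characters "null"
        System.out.println(String.join(",", "a", null, "c"));        // a,null,c
    }
}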
      + * + * Note that if an individual element is {@code null}, then {@code "null"} is added. + * + * @param delimiter a sequence of characters that is used to separate each + * of the {@code elements} in the resulting {@code String} + * @param elements an {@code Iterable} that will have its {@code elements} + * joined together. + * + * @return a new {@code String} that is composed from the {@code elements} + * argument + * + * @throws NullPointerException If {@code delimiter} or {@code elements} + * is {@code null} + * + * @see #join(CharSequence,CharSequence...) + * @see java.util.StringJoiner + * @since 1.8 + */ + public static String join(CharSequence delimiter, + Iterable elements) { + Objects.requireNonNull(delimiter); + Objects.requireNonNull(elements); + StringJoiner joiner = new StringJoiner(delimiter); + for (CharSequence cs: elements) { + joiner.add(cs); + } + return joiner.toString(); + } + + /** + * Converts all of the characters in this {@code String} to lower + * case using the rules of the given {@code Locale}. Case mapping is based * on the Unicode Standard version specified by the {@link java.lang.Character Character} * class. Since case mappings are not always 1:1 char mappings, the resulting - * String may be a different length than the original String. + * {@code String} may be a different length than the original {@code String}. *

      * Examples of lowercase mappings are in the following table: * @@ -2356,7 +2539,7 @@ public String[] split(String regex) { *
      * * @param locale use the case transformation rules for this locale - * @return the String, converted to lowercase. + * @return the {@code String}, converted to lowercase. * @see java.lang.String#toLowerCase() * @see java.lang.String#toUpperCase() * @see java.lang.String#toUpperCase(Locale) @@ -2367,14 +2550,15 @@ public String toLowerCase(Locale locale) { throw new NullPointerException(); } - int firstUpper; + int firstUpper; + final int len = value.length; /* Now check if there are any characters that need to be changed. */ scan: { - for (firstUpper = 0 ; firstUpper < count; ) { - char c = value[offset+firstUpper]; - if ((c >= Character.MIN_HIGH_SURROGATE) && - (c <= Character.MAX_HIGH_SURROGATE)) { + for (firstUpper = 0 ; firstUpper < len; ) { + char c = value[firstUpper]; + if ((c >= Character.MIN_HIGH_SURROGATE) + && (c <= Character.MAX_HIGH_SURROGATE)) { int supplChar = codePointAt(firstUpper); if (supplChar != Character.toLowerCase(supplChar)) { break scan; @@ -2390,46 +2574,41 @@ public String toLowerCase(Locale locale) { return this; } - char[] result = new char[count]; - int resultOffset = 0; /* result may grow, so i+resultOffset - * is the write location in result */ + char[] result = new char[len]; + int resultOffset = 0; /* result may grow, so i+resultOffset + * is the write location in result */ /* Just copy the first few lowerCase characters. */ - System.arraycopy(value, offset, result, 0, firstUpper); + System.arraycopy(value, 0, result, 0, firstUpper); String lang = locale.getLanguage(); boolean localeDependent = - (lang == "tr" || lang == "az" || lang == "lt"); + (lang == "tr" || lang == "az" || lang == "lt"); char[] lowerCharArray; int lowerChar; int srcChar; int srcCount; - for (int i = firstUpper; i < count; i += srcCount) { - srcChar = (int)value[offset+i]; - if ((char)srcChar >= Character.MIN_HIGH_SURROGATE && - (char)srcChar <= Character.MAX_HIGH_SURROGATE) { + for (int i = firstUpper; i < len; i += srcCount) { + srcChar = (int)value[i]; + if ((char)srcChar >= Character.MIN_HIGH_SURROGATE + && (char)srcChar <= Character.MAX_HIGH_SURROGATE) { srcChar = codePointAt(i); srcCount = Character.charCount(srcChar); } else { srcCount = 1; } - if (localeDependent || srcChar == '\u03A3') { // GREEK CAPITAL LETTER SIGMA + if (localeDependent || + srcChar == '\u03A3' || // GREEK CAPITAL LETTER SIGMA + srcChar == '\u0130') { // LATIN CAPITAL LETTER I WITH DOT ABOVE lowerChar = ConditionalSpecialCasing.toLowerCaseEx(this, i, locale); - } else if (srcChar == '\u0130') { // LATIN CAPITAL LETTER I DOT - lowerChar = Character.ERROR; } else { lowerChar = Character.toLowerCase(srcChar); } - if ((lowerChar == Character.ERROR) || - (lowerChar >= Character.MIN_SUPPLEMENTARY_CODE_POINT)) { + if ((lowerChar == Character.ERROR) + || (lowerChar >= Character.MIN_SUPPLEMENTARY_CODE_POINT)) { if (lowerChar == Character.ERROR) { - if (!localeDependent && srcChar == '\u0130') { - lowerCharArray = - ConditionalSpecialCasing.toLowerCaseCharArray(this, i, Locale.ENGLISH); - } else { - lowerCharArray = + lowerCharArray = ConditionalSpecialCasing.toLowerCaseCharArray(this, i, locale); - } } else if (srcCount == 2) { resultOffset += Character.toChars(lowerChar, result, i + resultOffset) - srcCount; continue; @@ -2441,38 +2620,37 @@ public String toLowerCase(Locale locale) { int mapLen = lowerCharArray.length; if (mapLen > srcCount) { char[] result2 = new char[result.length + mapLen - srcCount]; - System.arraycopy(result, 0, result2, 0, - i + resultOffset); + System.arraycopy(result, 0, result2, 0, 
i + resultOffset); result = result2; } - for (int x=0; xString to lower + * Converts all of the characters in this {@code String} to lower * case using the rules of the default locale. This is equivalent to calling - * toLowerCase(Locale.getDefault()). + * {@code toLowerCase(Locale.getDefault())}. *

      * Note: This method is locale sensitive, and may produce unexpected * results if used for strings that are intended to be interpreted locale * independently. * Examples are programming language identifiers, protocol keys, and HTML * tags. - * For instance, "TITLE".toLowerCase() in a Turkish locale - * returns "t\u005Cu0131tle", where '\u005Cu0131' is the + * For instance, {@code "TITLE".toLowerCase()} in a Turkish locale + * returns {@code "t\u005Cu0131tle"}, where '\u005Cu0131' is the * LATIN SMALL LETTER DOTLESS I character. * To obtain correct results for locale insensitive strings, use - * toLowerCase(Locale.ENGLISH). + * {@code toLowerCase(Locale.ROOT)}. *
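The Turkish-locale pitfall is easy to reproduce; a minimal sketch (the class name is an illustrative assumption):

import java.util.Locale;

public class LowerCaseDemo {
    public static void main(String[] args) {
        // under a Turkish locale, 'I' lowercases to dotless i (U+0131)
        System.out.println("TITLE".toLowerCase(new Locale("tr")));  // t\u0131tle
        // Locale.ROOT gives the locale-independent result
        System.out.println("TITLE".toLowerCase(Locale.ROOT));       // title
    }
}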

      - * @return the String, converted to lowercase. + * @return the {@code String}, converted to lowercase. * @see java.lang.String#toLowerCase(Locale) */ public String toLowerCase() { @@ -2480,14 +2658,14 @@ public String toLowerCase() { } /** - * Converts all of the characters in this String to upper - * case using the rules of the given Locale. Case mapping is based + * Converts all of the characters in this {@code String} to upper + * case using the rules of the given {@code Locale}. Case mapping is based * on the Unicode Standard version specified by the {@link java.lang.Character Character} * class. Since case mappings are not always 1:1 char mappings, the resulting - * String may be a different length than the original String. + * {@code String} may be a different length than the original {@code String}. *

      * Examples of locale-sensitive and 1:M case mappings are in the following table. - *

      + * * * * @@ -2521,7 +2699,7 @@ public String toLowerCase() { * *
      Language Code of Locale
      * @param locale use the case transformation rules for this locale - * @return the String, converted to uppercase. + * @return the {@code String}, converted to uppercase. * @see java.lang.String#toUpperCase() * @see java.lang.String#toLowerCase() * @see java.lang.String#toLowerCase(Locale) @@ -2532,23 +2710,24 @@ public String toUpperCase(Locale locale) { throw new NullPointerException(); } - int firstLower; + int firstLower; + final int len = value.length; /* Now check if there are any characters that need to be changed. */ scan: { - for (firstLower = 0 ; firstLower < count; ) { - int c = (int)value[offset+firstLower]; + for (firstLower = 0 ; firstLower < len; ) { + int c = (int)value[firstLower]; int srcCount; - if ((c >= Character.MIN_HIGH_SURROGATE) && - (c <= Character.MAX_HIGH_SURROGATE)) { + if ((c >= Character.MIN_HIGH_SURROGATE) + && (c <= Character.MAX_HIGH_SURROGATE)) { c = codePointAt(firstLower); srcCount = Character.charCount(c); } else { srcCount = 1; } int upperCaseChar = Character.toUpperCaseEx(c); - if ((upperCaseChar == Character.ERROR) || - (c != upperCaseChar)) { + if ((upperCaseChar == Character.ERROR) + || (c != upperCaseChar)) { break scan; } firstLower += srcCount; @@ -2556,22 +2735,22 @@ public String toUpperCase(Locale locale) { return this; } - char[] result = new char[count]; /* may grow */ - int resultOffset = 0; /* result may grow, so i+resultOffset - * is the write location in result */ + /* result may grow, so i+resultOffset is the write location in result */ + int resultOffset = 0; + char[] result = new char[len]; /* may grow */ /* Just copy the first few upperCase characters. */ - System.arraycopy(value, offset, result, 0, firstLower); + System.arraycopy(value, 0, result, 0, firstLower); String lang = locale.getLanguage(); boolean localeDependent = - (lang == "tr" || lang == "az" || lang == "lt"); + (lang == "tr" || lang == "az" || lang == "lt"); char[] upperCharArray; int upperChar; int srcChar; int srcCount; - for (int i = firstLower; i < count; i += srcCount) { - srcChar = (int)value[offset+i]; + for (int i = firstLower; i < len; i += srcCount) { + srcChar = (int)value[i]; if ((char)srcChar >= Character.MIN_HIGH_SURROGATE && (char)srcChar <= Character.MAX_HIGH_SURROGATE) { srcChar = codePointAt(i); @@ -2584,12 +2763,12 @@ public String toUpperCase(Locale locale) { } else { upperChar = Character.toUpperCaseEx(srcChar); } - if ((upperChar == Character.ERROR) || - (upperChar >= Character.MIN_SUPPLEMENTARY_CODE_POINT)) { + if ((upperChar == Character.ERROR) + || (upperChar >= Character.MIN_SUPPLEMENTARY_CODE_POINT)) { if (upperChar == Character.ERROR) { if (localeDependent) { upperCharArray = - ConditionalSpecialCasing.toUpperCaseCharArray(this, i, locale); + ConditionalSpecialCasing.toUpperCaseCharArray(this, i, locale); } else { upperCharArray = Character.toUpperCaseCharArray(srcChar); } @@ -2604,38 +2783,37 @@ public String toUpperCase(Locale locale) { int mapLen = upperCharArray.length; if (mapLen > srcCount) { char[] result2 = new char[result.length + mapLen - srcCount]; - System.arraycopy(result, 0, result2, 0, - i + resultOffset); + System.arraycopy(result, 0, result2, 0, i + resultOffset); result = result2; } - for (int x=0; xString to upper + * Converts all of the characters in this {@code String} to upper * case using the rules of the default locale. This method is equivalent to - * toUpperCase(Locale.getDefault()). + * {@code toUpperCase(Locale.getDefault())}. *

      * Note: This method is locale sensitive, and may produce unexpected * results if used for strings that are intended to be interpreted locale * independently. * Examples are programming language identifiers, protocol keys, and HTML * tags. - * For instance, "title".toUpperCase() in a Turkish locale - * returns "T\u005Cu0130TLE", where '\u005Cu0130' is the + * For instance, {@code "title".toUpperCase()} in a Turkish locale + * returns {@code "T\u005Cu0130TLE"}, where '\u005Cu0130' is the * LATIN CAPITAL LETTER I WITH DOT ABOVE character. * To obtain correct results for locale insensitive strings, use - * toUpperCase(Locale.ENGLISH). + * {@code toUpperCase(Locale.ROOT)}. *

      - * @return the String, converted to uppercase. + * @return the {@code String}, converted to uppercase. * @see java.lang.String#toUpperCase(Locale) */ public String toUpperCase() { @@ -2643,49 +2821,48 @@ public String toUpperCase() { } /** - * Returns a copy of the string, with leading and trailing whitespace - * omitted. + * Returns a string whose value is this string, with any leading and trailing + * whitespace removed. *

      - * If this String object represents an empty character + * If this {@code String} object represents an empty character * sequence, or the first and last characters of character sequence - * represented by this String object both have codes - * greater than '\u0020' (the space character), then a - * reference to this String object is returned. + * represented by this {@code String} object both have codes + * greater than {@code '\u005Cu0020'} (the space character), then a + * reference to this {@code String} object is returned. *

      * Otherwise, if there is no character with a code greater than - * '\u0020' in the string, then a new - * String object representing an empty string is created - * and returned. + * {@code '\u005Cu0020'} in the string, then a + * {@code String} object representing an empty string is + * returned. *

      * Otherwise, let k be the index of the first character in the - * string whose code is greater than '\u0020', and let + * string whose code is greater than {@code '\u005Cu0020'}, and let * m be the index of the last character in the string whose code - * is greater than '\u0020'. A new String - * object is created, representing the substring of this string that + * is greater than {@code '\u005Cu0020'}. A {@code String} + * object is returned, representing the substring of this string that * begins with the character at index k and ends with the * character at index m-that is, the result of - * this.substring(km+1). + * {@code this.substring(k, m + 1)}. *
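Concretely, trim() strips every char at or below U+0020 from both ends and returns this very instance when there is nothing to strip; a minimal sketch (the class name is an illustrative assumption):

public class TrimDemo {
    public static void main(String[] args) {
        System.out.println("[" + "  hello  ".trim() + "]");        // [hello]
        // control characters below U+0020 are stripped as well
        System.out.println("[" + "\t\nhello\u0001".trim() + "]");  // [hello]
        // nothing to trim: the same instance is returned
        String s = "hello";
        System.out.println(s.trim() == s);                         // true
    }
}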

      * This method may be used to trim whitespace (as defined above) from * the beginning and end of a string. * - * @return A copy of this string with leading and trailing white + * @return A string whose value is this string, with any leading and trailing white * space removed, or this string if it has no leading or * trailing white space. */ public String trim() { - int len = count; + int len = value.length; int st = 0; - int off = offset; /* avoid getfield opcode */ char[] val = value; /* avoid getfield opcode */ - while ((st < len) && (val[off + st] <= ' ')) { + while ((st < len) && (val[st] <= ' ')) { st++; } - while ((st < len) && (val[off + len - 1] <= ' ')) { + while ((st < len) && (val[len - 1] <= ' ')) { len--; } - return ((st > 0) || (len < count)) ? substring(st, len) : this; + return ((st > 0) || (len < value.length)) ? substring(st, len) : this; } /** @@ -2705,8 +2882,9 @@ public String toString() { * the character sequence represented by this string. */ public char[] toCharArray() { - char result[] = new char[count]; - getChars(0, count, result, 0); + // Cannot use Arrays.copyOf because of class initialization order issues + char result[] = new char[value.length]; + System.arraycopy(value, 0, result, 0, value.length); return result; } @@ -2728,10 +2906,10 @@ public char[] toCharArray() { * limited by the maximum dimension of a Java array as defined by * The Java™ Virtual Machine Specification. * The behaviour on a - * null argument depends on the conversion. * - * @throws IllegalFormatException + * @throws java.util.IllegalFormatException * If a format string contains an illegal syntax, a format * specifier that is incompatible with the given arguments, * insufficient arguments given the format string, or other @@ -2740,15 +2918,12 @@ public char[] toCharArray() { * href="../util/Formatter.html#detail">Details section of the * formatter class specification. * - * @throws NullPointerException - * If the format is null - * * @return A formatted string * * @see java.util.Formatter * @since 1.5 */ - public static String format(String format, Object ... args) { + public static String format(String format, Object... args) { return new Formatter().format(format, args).toString(); } @@ -2758,7 +2933,7 @@ public static String format(String format, Object ... args) { * * @param l * The {@linkplain java.util.Locale locale} to apply during - * formatting. If l is null then no localization + * formatting. If {@code l} is {@code null} then no localization * is applied. * * @param format @@ -2772,10 +2947,10 @@ public static String format(String format, Object ... args) { * limited by the maximum dimension of a Java array as defined by * The Java™ Virtual Machine Specification. * The behaviour on a - * null argument depends on the conversion. + * {@code null} argument depends on the + * conversion. * - * @throws IllegalFormatException + * @throws java.util.IllegalFormatException * If a format string contains an illegal syntax, a format * specifier that is incompatible with the given arguments, * insufficient arguments given the format string, or other @@ -2784,25 +2959,22 @@ public static String format(String format, Object ... args) { * href="../util/Formatter.html#detail">Details section of the * formatter class specification * - * @throws NullPointerException - * If the format is null - * * @return A formatted string * * @see java.util.Formatter * @since 1.5 */ - public static String format(Locale l, String format, Object ... 
args) { + public static String format(Locale l, String format, Object... args) { return new Formatter(l).format(format, args).toString(); } /** - * Returns the string representation of the Object argument. + * Returns the string representation of the {@code Object} argument. * - * @param obj an Object. - * @return if the argument is null, then a string equal to - * "null"; otherwise, the value of - * obj.toString() is returned. + * @param obj an {@code Object}. + * @return if the argument is {@code null}, then a string equal to + * {@code "null"}; otherwise, the value of + * {@code obj.toString()} is returned. * @see java.lang.Object#toString() */ public static String valueOf(Object obj) { @@ -2810,14 +2982,14 @@ public static String valueOf(Object obj) { } /** - * Returns the string representation of the char array + * Returns the string representation of the {@code char} array * argument. The contents of the character array are copied; subsequent - * modification of the character array does not affect the newly - * created string. + * modification of the character array does not affect the returned + * string. * - * @param data a char array. - * @return a newly allocated string representing the same sequence of - * characters contained in the character array argument. + * @param data the character array. + * @return a {@code String} that contains the characters of the + * character array. */ public static String valueOf(char data[]) { return new String(data); @@ -2825,89 +2997,89 @@ public static String valueOf(char data[]) { /** * Returns the string representation of a specific subarray of the - * char array argument. + * {@code char} array argument. *

      - * The offset argument is the index of the first - * character of the subarray. The count argument + * The {@code offset} argument is the index of the first + * character of the subarray. The {@code count} argument * specifies the length of the subarray. The contents of the subarray * are copied; subsequent modification of the character array does not - * affect the newly created string. + * affect the returned string. * * @param data the character array. - * @param offset the initial offset into the value of the - * String. - * @param count the length of the value of the String. - * @return a string representing the sequence of characters contained - * in the subarray of the character array argument. - * @exception IndexOutOfBoundsException if offset is - * negative, or count is negative, or - * offset+count is larger than - * data.length. + * @param offset initial offset of the subarray. + * @param count length of the subarray. + * @return a {@code String} that contains the characters of the + * specified subarray of the character array. + * @exception IndexOutOfBoundsException if {@code offset} is + * negative, or {@code count} is negative, or + * {@code offset+count} is larger than + * {@code data.length}. */ public static String valueOf(char data[], int offset, int count) { return new String(data, offset, count); } /** - * Returns a String that represents the character sequence in the - * array specified. + * Equivalent to {@link #valueOf(char[], int, int)}. * * @param data the character array. * @param offset initial offset of the subarray. * @param count length of the subarray. - * @return a String that contains the characters of the + * @return a {@code String} that contains the characters of the * specified subarray of the character array. + * @exception IndexOutOfBoundsException if {@code offset} is + * negative, or {@code count} is negative, or + * {@code offset+count} is larger than + * {@code data.length}. */ public static String copyValueOf(char data[], int offset, int count) { - // All public String constructors now copy the data. return new String(data, offset, count); } /** - * Returns a String that represents the character sequence in the - * array specified. + * Equivalent to {@link #valueOf(char[])}. * * @param data the character array. - * @return a String that contains the characters of the + * @return a {@code String} that contains the characters of the * character array. */ public static String copyValueOf(char data[]) { - return copyValueOf(data, 0, data.length); + return new String(data); } /** - * Returns the string representation of the boolean argument. + * Returns the string representation of the {@code boolean} argument. * - * @param b a boolean. - * @return if the argument is true, a string equal to - * "true" is returned; otherwise, a string equal to - * "false" is returned. + * @param b a {@code boolean}. + * @return if the argument is {@code true}, a string equal to + * {@code "true"} is returned; otherwise, a string equal to + * {@code "false"} is returned. */ public static String valueOf(boolean b) { return b ? "true" : "false"; } /** - * Returns the string representation of the char + * Returns the string representation of the {@code char} * argument. * - * @param c a char. - * @return a string of length 1 containing - * as its single character the argument c. + * @param c a {@code char}. + * @return a string of length {@code 1} containing + * as its single character the argument {@code c}. 
*/ public static String valueOf(char c) { char data[] = {c}; - return new String(0, 1, data); + return new String(data, true); } /** - * Returns the string representation of the int argument. + * Returns the string representation of the {@code int} argument. *

      * The representation is exactly the one returned by the - * Integer.toString method of one argument. + * {@code Integer.toString} method of one argument. * - * @param i an int. - * @return a string representation of the int argument. + * @param i an {@code int}. + * @return a string representation of the {@code int} argument. * @see java.lang.Integer#toString(int, int) */ public static String valueOf(int i) { @@ -2915,13 +3087,13 @@ public static String valueOf(int i) { } /** - * Returns the string representation of the long argument. + * Returns the string representation of the {@code long} argument. *

      * The representation is exactly the one returned by the - * Long.toString method of one argument. + * {@code Long.toString} method of one argument. * - * @param l a long. - * @return a string representation of the long argument. + * @param l a {@code long}. + * @return a string representation of the {@code long} argument. * @see java.lang.Long#toString(long) */ public static String valueOf(long l) { @@ -2929,13 +3101,13 @@ public static String valueOf(long l) { } /** - * Returns the string representation of the float argument. + * Returns the string representation of the {@code float} argument. *

      * The representation is exactly the one returned by the - * Float.toString method of one argument. + * {@code Float.toString} method of one argument. * - * @param f a float. - * @return a string representation of the float argument. + * @param f a {@code float}. + * @return a string representation of the {@code float} argument. * @see java.lang.Float#toString(float) */ public static String valueOf(float f) { @@ -2943,13 +3115,13 @@ public static String valueOf(float f) { } /** - * Returns the string representation of the double argument. + * Returns the string representation of the {@code double} argument. *

      * The representation is exactly the one returned by the - * Double.toString method of one argument. + * {@code Double.toString} method of one argument. * - * @param d a double. - * @return a string representation of the double argument. + * @param d a {@code double}. + * @return a string representation of the {@code double} argument. * @see java.lang.Double#toString(double) */ public static String valueOf(double d) { @@ -2960,17 +3132,17 @@ public static String valueOf(double d) { * Returns a canonical representation for the string object. *

      * A pool of strings, initially empty, is maintained privately by the - * class String. + * class {@code String}. *

      * When the intern method is invoked, if the pool already contains a - * string equal to this String object as determined by + * string equal to this {@code String} object as determined by * the {@link #equals(Object)} method, then the string from the pool is - * returned. Otherwise, this String object is added to the - * pool and a reference to this String object is returned. + * returned. Otherwise, this {@code String} object is added to the + * pool and a reference to this {@code String} object is returned. *

      - * It follows that for any two strings s and t, - * s.intern() == t.intern() is true - * if and only if s.equals(t) is true. + * It follows that for any two strings {@code s} and {@code t}, + * {@code s.intern() == t.intern()} is {@code true} + * if and only if {@code s.equals(t)} is {@code true}. *
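A minimal sketch of this pool behaviour (the class name is an illustrative assumption):

public class InternDemo {
    public static void main(String[] args) {
        String a = new String("hello");  // a fresh object, not the pool entry
        String b = "hello";              // a literal, therefore interned
        System.out.println(a == b);           // false: distinct objects
        System.out.println(a.intern() == b);  // true: intern() returns the pooled literal
    }
}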

      * All literal strings and string-valued constant expressions are * interned. String literals are defined in section 3.10.5 of the @@ -2980,5 +3152,4 @@ public static String valueOf(double d) { * guaranteed to be from a pool of unique strings. */ public native String intern(); - } diff --git a/src/StringBuffer.java b/src/StringBuffer.java index b1d3597..4b3e74d 100644 --- a/src/StringBuffer.java +++ b/src/StringBuffer.java @@ -1,12 +1,110 @@ - +/* + * Copyright (c) 1994, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ package java.lang; +import java.util.Arrays; + +/** + * A thread-safe, mutable sequence of characters. + * A string buffer is like a {@link String}, but can be modified. At any + * point in time it contains some particular sequence of characters, but + * the length and content of the sequence can be changed through certain + * method calls. + *

      + * String buffers are safe for use by multiple threads. The methods + * are synchronized where necessary so that all the operations on any + * particular instance behave as if they occur in some serial order + * that is consistent with the order of the method calls made by each of + * the individual threads involved. + *

      + * The principal operations on a {@code StringBuffer} are the + * {@code append} and {@code insert} methods, which are + * overloaded so as to accept data of any type. Each effectively + * converts a given datum to a string and then appends or inserts the + * characters of that string to the string buffer. The + * {@code append} method always adds these characters at the end + * of the buffer; the {@code insert} method adds the characters at + * a specified point. + *

      + * For example, if {@code z} refers to a string buffer object + * whose current contents are {@code "start"}, then + * the method call {@code z.append("le")} would cause the string + * buffer to contain {@code "startle"}, whereas + * {@code z.insert(4, "le")} would alter the string buffer to + * contain {@code "starlet"}. + *
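The append/insert contrast runs exactly as described; a minimal sketch (the class name is an illustrative assumption):

public class StringBufferDemo {
    public static void main(String[] args) {
        StringBuffer z = new StringBuffer("start");
        z.append("le");
        System.out.println(z);  // startle

        z = new StringBuffer("start");
        z.insert(4, "le");
        System.out.println(z);  // starlet
    }
}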

      + * In general, if sb refers to an instance of a {@code StringBuffer}, + * then {@code sb.append(x)} has the same effect as + * {@code sb.insert(sb.length(), x)}. + *

      + * Whenever an operation occurs involving a source sequence (such as + * appending or inserting from a source sequence), this class synchronizes + * only on the string buffer performing the operation, not on the source. + * Note that while {@code StringBuffer} is designed to be safe to use + * concurrently from multiple threads, if the constructor or the + * {@code append} or {@code insert} operation is passed a source sequence + * that is shared across threads, the calling code must ensure + * that the operation has a consistent and unchanging view of the source + * sequence for the duration of the operation. + * This could be satisfied by the caller holding a lock during the + * operation's call, by using an immutable source sequence, or by not + * sharing the source sequence across threads. + *

      + * Every string buffer has a capacity. As long as the length of the + * character sequence contained in the string buffer does not exceed + * the capacity, it is not necessary to allocate a new internal + * buffer array. If the internal buffer overflows, it is + * automatically made larger. + *

      + * Unless otherwise noted, passing a {@code null} argument to a constructor + * or method in this class will cause a {@link NullPointerException} to be + * thrown. + *

      + * As of release JDK 5, this class has been supplemented with an equivalent + * class designed for use by a single thread, {@link StringBuilder}. The + * {@code StringBuilder} class should generally be used in preference to + * this one, as it supports all of the same operations but it is faster, as + * it performs no synchronization. + * + * @author Arthur van Hoff + * @see java.lang.StringBuilder + * @see java.lang.String + * @since JDK1.0 + */ public final class StringBuffer extends AbstractStringBuilder implements java.io.Serializable, CharSequence { + /** + * A cache of the last value returned by toString. Cleared + * whenever the StringBuffer is modified. + */ + private transient char[] toStringCache; + /** use serialVersionUID from JDK 1.0.2 for interoperability */ static final long serialVersionUID = 3388685877147921107L; @@ -23,8 +121,8 @@ public StringBuffer() { * the specified initial capacity. * * @param capacity the initial capacity. - * @exception NegativeArraySizeException if the capacity - * argument is less than 0. + * @exception NegativeArraySizeException if the {@code capacity} + * argument is less than {@code 0}. */ public StringBuffer(int capacity) { super(capacity); @@ -33,10 +131,9 @@ public StringBuffer(int capacity) { /** * Constructs a string buffer initialized to the contents of the * specified string. The initial capacity of the string buffer is - * 16 plus the length of the string argument. + * {@code 16} plus the length of the string argument. * * @param str the initial contents of the buffer. - * @exception NullPointerException if str is null */ public StringBuffer(String str) { super(str.length() + 16); @@ -45,16 +142,15 @@ public StringBuffer(String str) { /** * Constructs a string buffer that contains the same characters - * as the specified CharSequence. The initial capacity of - * the string buffer is 16 plus the length of the - * CharSequence argument. + * as the specified {@code CharSequence}. The initial capacity of + * the string buffer is {@code 16} plus the length of the + * {@code CharSequence} argument. *

      - * If the length of the specified CharSequence is + * If the length of the specified {@code CharSequence} is * less than or equal to zero, then an empty buffer of capacity - * 16 is returned. + * {@code 16} is returned. * * @param seq the sequence to copy. - * @exception NullPointerException if seq is null * @since 1.5 */ public StringBuffer(CharSequence seq) { @@ -62,15 +158,18 @@ public StringBuffer(CharSequence seq) { append(seq); } + @Override public synchronized int length() { return count; } + @Override public synchronized int capacity() { return value.length; } + @Override public synchronized void ensureCapacity(int minimumCapacity) { if (minimumCapacity > value.length) { expandCapacity(minimumCapacity); @@ -80,6 +179,7 @@ public synchronized void ensureCapacity(int minimumCapacity) { /** * @since 1.5 */ + @Override public synchronized void trimToSize() { super.trimToSize(); } @@ -88,7 +188,9 @@ public synchronized void trimToSize() { * @throws IndexOutOfBoundsException {@inheritDoc} * @see #length() */ + @Override public synchronized void setLength(int newLength) { + toStringCache = null; super.setLength(newLength); } @@ -96,6 +198,7 @@ public synchronized void setLength(int newLength) { * @throws IndexOutOfBoundsException {@inheritDoc} * @see #length() */ + @Override public synchronized char charAt(int index) { if ((index < 0) || (index >= count)) throw new StringIndexOutOfBoundsException(index); @@ -105,6 +208,7 @@ public synchronized char charAt(int index) { /** * @since 1.5 */ + @Override public synchronized int codePointAt(int index) { return super.codePointAt(index); } @@ -112,6 +216,7 @@ public synchronized int codePointAt(int index) { /** * @since 1.5 */ + @Override public synchronized int codePointBefore(int index) { return super.codePointBefore(index); } @@ -119,6 +224,7 @@ public synchronized int codePointBefore(int index) { /** * @since 1.5 */ + @Override public synchronized int codePointCount(int beginIndex, int endIndex) { return super.codePointCount(beginIndex, endIndex); } @@ -126,14 +232,15 @@ public synchronized int codePointCount(int beginIndex, int endIndex) { /** * @since 1.5 */ + @Override public synchronized int offsetByCodePoints(int index, int codePointOffset) { return super.offsetByCodePoints(index, codePointOffset); } /** - * @throws NullPointerException {@inheritDoc} * @throws IndexOutOfBoundsException {@inheritDoc} */ + @Override public synchronized void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin) { @@ -144,95 +251,111 @@ public synchronized void getChars(int srcBegin, int srcEnd, char[] dst, * @throws IndexOutOfBoundsException {@inheritDoc} * @see #length() */ + @Override public synchronized void setCharAt(int index, char ch) { if ((index < 0) || (index >= count)) throw new StringIndexOutOfBoundsException(index); + toStringCache = null; value[index] = ch; } + @Override public synchronized StringBuffer append(Object obj) { + toStringCache = null; super.append(String.valueOf(obj)); return this; } + @Override public synchronized StringBuffer append(String str) { + toStringCache = null; super.append(str); return this; } /** - * Appends the specified StringBuffer to this sequence. + * Appends the specified {@code StringBuffer} to this sequence. *

      - * The characters of the StringBuffer argument are appended, - * in order, to the contents of this StringBuffer, increasing the - * length of this StringBuffer by the length of the argument. - * If sb is null, then the four characters - * "null" are appended to this StringBuffer. + * The characters of the {@code StringBuffer} argument are appended, + * in order, to the contents of this {@code StringBuffer}, increasing the + * length of this {@code StringBuffer} by the length of the argument. + * If {@code sb} is {@code null}, then the four characters + * {@code "null"} are appended to this {@code StringBuffer}. *

      * Let n be the length of the old character sequence, the one - * contained in the StringBuffer just prior to execution of the - * append method. Then the character at index k in + * contained in the {@code StringBuffer} just prior to execution of the + * {@code append} method. Then the character at index k in * the new character sequence is equal to the character at index k * in the old character sequence, if k is less than n; * otherwise, it is equal to the character at index k-n in the - * argument sb. + * argument {@code sb}. *

      - * This method synchronizes on this (the destination) - * object but does not synchronize on the source (sb). + * This method synchronizes on {@code this}, the destination + * object, but does not synchronize on the source ({@code sb}). * - * @param sb the StringBuffer to append. + * @param sb the {@code StringBuffer} to append. * @return a reference to this object. * @since 1.4 */ public synchronized StringBuffer append(StringBuffer sb) { + toStringCache = null; super.append(sb); return this; } + /** + * @since 1.8 + */ + @Override + synchronized StringBuffer append(AbstractStringBuilder asb) { + toStringCache = null; + super.append(asb); + return this; + } /** - * Appends the specified CharSequence to this + * Appends the specified {@code CharSequence} to this * sequence. *

      - * The characters of the CharSequence argument are appended, + * The characters of the {@code CharSequence} argument are appended, * in order, increasing the length of this sequence by the length of the * argument. * *

      The result of this method is exactly the same as if it were an * invocation of this.append(s, 0, s.length()); * - *

      This method synchronizes on this (the destination) - * object but does not synchronize on the source (s). + *

      This method synchronizes on {@code this}, the destination + * object, but does not synchronize on the source ({@code s}). * - *

      If s is null, then the four characters - * "null" are appended. + *

      If {@code s} is {@code null}, then the four characters + * {@code "null"} are appended. * - * @param s the CharSequence to append. + * @param s the {@code CharSequence} to append. * @return a reference to this object. * @since 1.5 */ - public StringBuffer append(CharSequence s) { - // Note, synchronization achieved via other invocations - if (s == null) - s = "null"; - if (s instanceof String) - return this.append((String)s); - if (s instanceof StringBuffer) - return this.append((StringBuffer)s); - return this.append(s, 0, s.length()); + @Override + public synchronized StringBuffer append(CharSequence s) { + toStringCache = null; + super.append(s); + return this; } /** * @throws IndexOutOfBoundsException {@inheritDoc} * @since 1.5 */ + @Override public synchronized StringBuffer append(CharSequence s, int start, int end) { + toStringCache = null; super.append(s, start, end); return this; } + @Override public synchronized StringBuffer append(char[] str) { + toStringCache = null; super.append(str); return this; } @@ -240,22 +363,30 @@ public synchronized StringBuffer append(char[] str) { /** * @throws IndexOutOfBoundsException {@inheritDoc} */ + @Override public synchronized StringBuffer append(char[] str, int offset, int len) { + toStringCache = null; super.append(str, offset, len); return this; } + @Override public synchronized StringBuffer append(boolean b) { + toStringCache = null; super.append(b); return this; } + @Override public synchronized StringBuffer append(char c) { + toStringCache = null; super.append(c); return this; } + @Override public synchronized StringBuffer append(int i) { + toStringCache = null; super.append(i); return this; } @@ -263,22 +394,30 @@ public synchronized StringBuffer append(int i) { /** * @since 1.5 */ + @Override public synchronized StringBuffer appendCodePoint(int codePoint) { + toStringCache = null; super.appendCodePoint(codePoint); return this; } + @Override public synchronized StringBuffer append(long lng) { + toStringCache = null; super.append(lng); return this; } + @Override public synchronized StringBuffer append(float f) { + toStringCache = null; super.append(f); return this; } + @Override public synchronized StringBuffer append(double d) { + toStringCache = null; super.append(d); return this; } @@ -287,7 +426,9 @@ public synchronized StringBuffer append(double d) { * @throws StringIndexOutOfBoundsException {@inheritDoc} * @since 1.2 */ + @Override public synchronized StringBuffer delete(int start, int end) { + toStringCache = null; super.delete(start, end); return this; } @@ -296,7 +437,9 @@ public synchronized StringBuffer delete(int start, int end) { * @throws StringIndexOutOfBoundsException {@inheritDoc} * @since 1.2 */ + @Override public synchronized StringBuffer deleteCharAt(int index) { + toStringCache = null; super.deleteCharAt(index); return this; } @@ -305,7 +448,9 @@ public synchronized StringBuffer deleteCharAt(int index) { * @throws StringIndexOutOfBoundsException {@inheritDoc} * @since 1.2 */ + @Override public synchronized StringBuffer replace(int start, int end, String str) { + toStringCache = null; super.replace(start, end, str); return this; } @@ -314,6 +459,7 @@ public synchronized StringBuffer replace(int start, int end, String str) { * @throws StringIndexOutOfBoundsException {@inheritDoc} * @since 1.2 */ + @Override public synchronized String substring(int start) { return substring(start, count); } @@ -322,6 +468,7 @@ public synchronized String substring(int start) { * @throws IndexOutOfBoundsException {@inheritDoc} * 
@since 1.4 */ + @Override public synchronized CharSequence subSequence(int start, int end) { return super.substring(start, end); } @@ -330,6 +477,7 @@ public synchronized CharSequence subSequence(int start, int end) { * @throws StringIndexOutOfBoundsException {@inheritDoc} * @since 1.2 */ + @Override public synchronized String substring(int start, int end) { return super.substring(start, end); } @@ -338,9 +486,11 @@ public synchronized String substring(int start, int end) { * @throws StringIndexOutOfBoundsException {@inheritDoc} * @since 1.2 */ + @Override public synchronized StringBuffer insert(int index, char[] str, int offset, int len) { + toStringCache = null; super.insert(index, str, offset, len); return this; } @@ -348,7 +498,9 @@ public synchronized StringBuffer insert(int index, char[] str, int offset, /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public synchronized StringBuffer insert(int offset, Object obj) { + toStringCache = null; super.insert(offset, String.valueOf(obj)); return this; } @@ -356,7 +508,9 @@ public synchronized StringBuffer insert(int offset, Object obj) { /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public synchronized StringBuffer insert(int offset, String str) { + toStringCache = null; super.insert(offset, str); return this; } @@ -364,7 +518,9 @@ public synchronized StringBuffer insert(int offset, String str) { /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public synchronized StringBuffer insert(int offset, char[] str) { + toStringCache = null; super.insert(offset, str); return this; } @@ -373,22 +529,24 @@ public synchronized StringBuffer insert(int offset, char[] str) { * @throws IndexOutOfBoundsException {@inheritDoc} * @since 1.5 */ + @Override public StringBuffer insert(int dstOffset, CharSequence s) { - // Note, synchronization achieved via other invocations - if (s == null) - s = "null"; - if (s instanceof String) - return this.insert(dstOffset, (String)s); - return this.insert(dstOffset, s, 0, s.length()); + // Note, synchronization achieved via invocations of other StringBuffer methods + // after narrowing of s to specific type + // Ditto for toStringCache clearing + super.insert(dstOffset, s); + return this; } /** * @throws IndexOutOfBoundsException {@inheritDoc} * @since 1.5 */ + @Override public synchronized StringBuffer insert(int dstOffset, CharSequence s, - int start, int end) + int start, int end) { + toStringCache = null; super.insert(dstOffset, s, start, end); return this; } @@ -396,14 +554,21 @@ public synchronized StringBuffer insert(int dstOffset, CharSequence s, /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ - public StringBuffer insert(int offset, boolean b) { - return insert(offset, String.valueOf(b)); + @Override + public StringBuffer insert(int offset, boolean b) { + // Note, synchronization achieved via invocation of StringBuffer insert(int, String) + // after conversion of b to String by super class method + // Ditto for toStringCache clearing + super.insert(offset, b); + return this; } /** * @throws IndexOutOfBoundsException {@inheritDoc} */ + @Override public synchronized StringBuffer insert(int offset, char c) { + toStringCache = null; super.insert(offset, c); return this; } @@ -411,76 +576,101 @@ public synchronized StringBuffer insert(int offset, char c) { /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuffer insert(int offset, int i) { - return insert(offset, String.valueOf(i)); + 
// Note, synchronization achieved via invocation of StringBuffer insert(int, String) + // after conversion of i to String by super class method + // Ditto for toStringCache clearing + super.insert(offset, i); + return this; } /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuffer insert(int offset, long l) { - return insert(offset, String.valueOf(l)); + // Note, synchronization achieved via invocation of StringBuffer insert(int, String) + // after conversion of l to String by super class method + // Ditto for toStringCache clearing + super.insert(offset, l); + return this; } /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuffer insert(int offset, float f) { - return insert(offset, String.valueOf(f)); + // Note, synchronization achieved via invocation of StringBuffer insert(int, String) + // after conversion of f to String by super class method + // Ditto for toStringCache clearing + super.insert(offset, f); + return this; } /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuffer insert(int offset, double d) { - return insert(offset, String.valueOf(d)); + // Note, synchronization achieved via invocation of StringBuffer insert(int, String) + // after conversion of d to String by super class method + // Ditto for toStringCache clearing + super.insert(offset, d); + return this; } /** - * @throws NullPointerException {@inheritDoc} * @since 1.4 */ + @Override public int indexOf(String str) { - return indexOf(str, 0); + // Note, synchronization achieved via invocations of other StringBuffer methods + return super.indexOf(str); } /** - * @throws NullPointerException {@inheritDoc} * @since 1.4 */ + @Override public synchronized int indexOf(String str, int fromIndex) { - return String.indexOf(value, 0, count, - str.toCharArray(), 0, str.length(), fromIndex); + return super.indexOf(str, fromIndex); } /** - * @throws NullPointerException {@inheritDoc} * @since 1.4 */ + @Override public int lastIndexOf(String str) { - // Note, synchronization achieved via other invocations + // Note, synchronization achieved via invocations of other StringBuffer methods return lastIndexOf(str, count); } /** - * @throws NullPointerException {@inheritDoc} * @since 1.4 */ + @Override public synchronized int lastIndexOf(String str, int fromIndex) { - return String.lastIndexOf(value, 0, count, - str.toCharArray(), 0, str.length(), fromIndex); + return super.lastIndexOf(str, fromIndex); } /** * @since JDK1.0.2 */ + @Override public synchronized StringBuffer reverse() { + toStringCache = null; super.reverse(); return this; } + @Override public synchronized String toString() { - return new String(value, 0, count); + if (toStringCache == null) { + toStringCache = Arrays.copyOfRange(value, 0, count); + } + return new String(toStringCache, true); } /** diff --git a/src/StringBuilder.java b/src/StringBuilder.java index 54f3b57..96e3288 100644 --- a/src/StringBuilder.java +++ b/src/StringBuilder.java @@ -1,6 +1,78 @@ +/* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ package java.lang; + +/** + * A mutable sequence of characters. This class provides an API compatible + * with {@code StringBuffer}, but with no guarantee of synchronization. 
+ * This class is designed for use as a drop-in replacement for + * {@code StringBuffer} in places where the string buffer was being + * used by a single thread (as is generally the case). Where possible, + * it is recommended that this class be used in preference to + * {@code StringBuffer} as it will be faster under most implementations. + * + *

      The principal operations on a {@code StringBuilder} are the + * {@code append} and {@code insert} methods, which are + * overloaded so as to accept data of any type. Each effectively + * converts a given datum to a string and then appends or inserts the + * characters of that string to the string builder. The + * {@code append} method always adds these characters at the end + * of the builder; the {@code insert} method adds the characters at + * a specified point. + *

      + * For example, if {@code z} refers to a string builder object + * whose current contents are "{@code start}", then + * the method call {@code z.append("le")} would cause the string + * builder to contain "{@code startle}", whereas + * {@code z.insert(4, "le")} would alter the string builder to + * contain "{@code starlet}". + *

      + * In general, if sb refers to an instance of a {@code StringBuilder}, + * then {@code sb.append(x)} has the same effect as + * {@code sb.insert(sb.length(), x)}. + *

      + * Every string builder has a capacity. As long as the length of the + * character sequence contained in the string builder does not exceed + * the capacity, it is not necessary to allocate a new internal + * buffer. If the internal buffer overflows, it is automatically made larger. + * + *

      Instances of {@code StringBuilder} are not safe for + * use by multiple threads. If such synchronization is required then it is + * recommended that {@link java.lang.StringBuffer} be used. + * + *
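For the common single-threaded case, a short sketch of idiomatic StringBuilder use (the class name is an illustrative assumption):

public class BuilderDemo {
    public static void main(String[] args) {
        // confined to one thread, so StringBuilder is preferable to StringBuffer
        StringBuilder sb = new StringBuilder();
        for (int i = 1; i <= 3; i++) {
            sb.append("item").append(i).append(';');
        }
        System.out.println(sb);  // item1;item2;item3;
    }
}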

      Unless otherwise noted, passing a {@code null} argument to a constructor + * or method in this class will cause a {@link NullPointerException} to be + * thrown. + * + * @author Michael McCloskey + * @see java.lang.StringBuffer + * @see java.lang.String + * @since 1.5 + */ public final class StringBuilder extends AbstractStringBuilder implements java.io.Serializable, CharSequence @@ -19,11 +91,11 @@ public StringBuilder() { /** * Constructs a string builder with no characters in it and an - * initial capacity specified by the capacity argument. + * initial capacity specified by the {@code capacity} argument. * * @param capacity the initial capacity. - * @throws NegativeArraySizeException if the capacity - * argument is less than 0. + * @throws NegativeArraySizeException if the {@code capacity} + * argument is less than {@code 0}. */ public StringBuilder(int capacity) { super(capacity); @@ -32,10 +104,9 @@ public StringBuilder(int capacity) { /** * Constructs a string builder initialized to the contents of the * specified string. The initial capacity of the string builder is - * 16 plus the length of the string argument. + * {@code 16} plus the length of the string argument. * * @param str the initial contents of the buffer. - * @throws NullPointerException if str is null */ public StringBuilder(String str) { super(str.length() + 16); @@ -44,57 +115,45 @@ public StringBuilder(String str) { /** * Constructs a string builder that contains the same characters - * as the specified CharSequence. The initial capacity of - * the string builder is 16 plus the length of the - * CharSequence argument. + * as the specified {@code CharSequence}. The initial capacity of + * the string builder is {@code 16} plus the length of the + * {@code CharSequence} argument. * * @param seq the sequence to copy. - * @throws NullPointerException if seq is null */ public StringBuilder(CharSequence seq) { this(seq.length() + 16); append(seq); } + @Override public StringBuilder append(Object obj) { return append(String.valueOf(obj)); } + @Override public StringBuilder append(String str) { super.append(str); return this; } - // Appends the specified string builder to this sequence. - private StringBuilder append(StringBuilder sb) { - if (sb == null) - return append("null"); - int len = sb.length(); - int newcount = count + len; - if (newcount > value.length) - expandCapacity(newcount); - sb.getChars(0, len, value, count); - count = newcount; - return this; - } - /** - * Appends the specified StringBuffer to this sequence. + * Appends the specified {@code StringBuffer} to this sequence. *
<p>
      - * The characters of the StringBuffer argument are appended, + * The characters of the {@code StringBuffer} argument are appended, * in order, to this sequence, increasing the * length of this sequence by the length of the argument. - * If sb is null, then the four characters - * "null" are appended to this sequence. + * If {@code sb} is {@code null}, then the four characters + * {@code "null"} are appended to this sequence. *
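The {@code null} handling described above, sketched:

    StringBuilder sb = new StringBuilder("x");
    sb.append((StringBuffer) null);
    sb.toString();             // "xnull" -- the four characters "null" were appended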
<p>
      * Let n be the length of this character sequence just prior to - * execution of the append method. Then the character at index + * execution of the {@code append} method. Then the character at index * k in the new character sequence is equal to the character at * index k in the old character sequence, if k is less than * n; otherwise, it is equal to the character at index k-n - * in the argument sb. + * in the argument {@code sb}. * - * @param sb the StringBuffer to append. + * @param sb the {@code StringBuffer} to append. * @return a reference to this object. */ public StringBuilder append(StringBuffer sb) { @@ -102,28 +161,22 @@ public StringBuilder append(StringBuffer sb) { return this; } - /** - */ + @Override public StringBuilder append(CharSequence s) { - if (s == null) - s = "null"; - if (s instanceof String) - return this.append((String)s); - if (s instanceof StringBuffer) - return this.append((StringBuffer)s); - if (s instanceof StringBuilder) - return this.append((StringBuilder)s); - return this.append(s, 0, s.length()); + super.append(s); + return this; } /** * @throws IndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder append(CharSequence s, int start, int end) { super.append(s, start, end); return this; } + @Override public StringBuilder append(char[] str) { super.append(str); return this; @@ -132,36 +185,43 @@ public StringBuilder append(char[] str) { /** * @throws IndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder append(char[] str, int offset, int len) { super.append(str, offset, len); return this; } + @Override public StringBuilder append(boolean b) { super.append(b); return this; } + @Override public StringBuilder append(char c) { super.append(c); return this; } + @Override public StringBuilder append(int i) { super.append(i); return this; } + @Override public StringBuilder append(long lng) { super.append(lng); return this; } + @Override public StringBuilder append(float f) { super.append(f); return this; } + @Override public StringBuilder append(double d) { super.append(d); return this; @@ -170,6 +230,7 @@ public StringBuilder append(double d) { /** * @since 1.5 */ + @Override public StringBuilder appendCodePoint(int codePoint) { super.appendCodePoint(codePoint); return this; @@ -178,6 +239,7 @@ public StringBuilder appendCodePoint(int codePoint) { /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder delete(int start, int end) { super.delete(start, end); return this; @@ -186,6 +248,7 @@ public StringBuilder delete(int start, int end) { /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder deleteCharAt(int index) { super.deleteCharAt(index); return this; @@ -194,6 +257,7 @@ public StringBuilder deleteCharAt(int index) { /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder replace(int start, int end, String str) { super.replace(start, end, str); return this; @@ -202,6 +266,7 @@ public StringBuilder replace(int start, int end, String str) { /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int index, char[] str, int offset, int len) { @@ -212,13 +277,16 @@ public StringBuilder insert(int index, char[] str, int offset, /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int offset, Object obj) { - return insert(offset, String.valueOf(obj)); + super.insert(offset, obj); + return this; } /** * 
@throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int offset, String str) { super.insert(offset, str); return this; @@ -227,6 +295,7 @@ public StringBuilder insert(int offset, String str) { /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int offset, char[] str) { super.insert(offset, str); return this; @@ -235,17 +304,16 @@ public StringBuilder insert(int offset, char[] str) { /** * @throws IndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int dstOffset, CharSequence s) { - if (s == null) - s = "null"; - if (s instanceof String) - return this.insert(dstOffset, (String)s); - return this.insert(dstOffset, s, 0, s.length()); + super.insert(dstOffset, s); + return this; } /** * @throws IndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int dstOffset, CharSequence s, int start, int end) { @@ -256,6 +324,7 @@ public StringBuilder insert(int dstOffset, CharSequence s, /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int offset, boolean b) { super.insert(offset, b); return this; @@ -264,6 +333,7 @@ public StringBuilder insert(int offset, boolean b) { /** * @throws IndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int offset, char c) { super.insert(offset, c); return this; @@ -272,79 +342,79 @@ public StringBuilder insert(int offset, char c) { /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int offset, int i) { - return insert(offset, String.valueOf(i)); + super.insert(offset, i); + return this; } /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int offset, long l) { - return insert(offset, String.valueOf(l)); + super.insert(offset, l); + return this; } /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int offset, float f) { - return insert(offset, String.valueOf(f)); + super.insert(offset, f); + return this; } /** * @throws StringIndexOutOfBoundsException {@inheritDoc} */ + @Override public StringBuilder insert(int offset, double d) { - return insert(offset, String.valueOf(d)); + super.insert(offset, d); + return this; } - /** - * @throws NullPointerException {@inheritDoc} - */ + @Override public int indexOf(String str) { - return indexOf(str, 0); + return super.indexOf(str); } - /** - * @throws NullPointerException {@inheritDoc} - */ + @Override public int indexOf(String str, int fromIndex) { - return String.indexOf(value, 0, count, - str.toCharArray(), 0, str.length(), fromIndex); + return super.indexOf(str, fromIndex); } - /** - * @throws NullPointerException {@inheritDoc} - */ + @Override public int lastIndexOf(String str) { - return lastIndexOf(str, count); + return super.lastIndexOf(str); } - /** - * @throws NullPointerException {@inheritDoc} - */ + @Override public int lastIndexOf(String str, int fromIndex) { - return String.lastIndexOf(value, 0, count, - str.toCharArray(), 0, str.length(), fromIndex); + return super.lastIndexOf(str, fromIndex); } + @Override public StringBuilder reverse() { super.reverse(); return this; } + @Override public String toString() { // Create a copy, don't share the array return new String(value, 0, count); } /** - * Save the state of the StringBuilder instance to a stream + * Save the state of the {@code StringBuilder} instance to a stream * (that is, 
serialize it). * * @serialData the number of characters currently stored in the string - * builder (int), followed by the characters in the - * string builder (char[]). The length of the - * char array may be greater than the number of + * builder ({@code int}), followed by the characters in the + * string builder ({@code char[]}). The length of the + * {@code char} array may be greater than the number of * characters currently stored in the string builder, in which * case extra characters are ignored. */ diff --git a/src/ThreadLocal.java b/src/ThreadLocal.java new file mode 100644 index 0000000..23c6baf --- /dev/null +++ b/src/ThreadLocal.java @@ -0,0 +1,722 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +package java.lang; +import java.lang.ref.*; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; + +/** + * This class provides thread-local variables. These variables differ from + * their normal counterparts in that each thread that accesses one (via its + * {@code get} or {@code set} method) has its own, independently initialized + * copy of the variable. {@code ThreadLocal} instances are typically private + * static fields in classes that wish to associate state with a thread (e.g., + * a user ID or Transaction ID). + * + *
<p>
      For example, the class below generates unique identifiers local to each + * thread. + * A thread's id is assigned the first time it invokes {@code ThreadId.get()} + * and remains unchanged on subsequent calls. + *
<pre>
      + * import java.util.concurrent.atomic.AtomicInteger;
      + *
      + * public class ThreadId {
      + *     // Atomic integer containing the next thread ID to be assigned
      + *     private static final AtomicInteger nextId = new AtomicInteger(0);
      + *
      + *     // Thread local variable containing each thread's ID
      + *     private static final ThreadLocal<Integer> threadId =
      + *         new ThreadLocal<Integer>() {
      + *             @Override protected Integer initialValue() {
      + *                 return nextId.getAndIncrement();
      + *         }
      + *     };
      + *
      + *     // Returns the current thread's unique ID, assigning it if necessary
      + *     public static int get() {
      + *         return threadId.get();
      + *     }
      + * }
+ * </pre>
      + *
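Under JDK 1.8, which this patch moves to, the anonymous subclass above can also be written with the {@code withInitial} factory that appears later in this class; a sketch reusing the {@code nextId} field from the example:

    private static final ThreadLocal<Integer> threadId =
        ThreadLocal.withInitial(nextId::getAndIncrement);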
<p>
      Each thread holds an implicit reference to its copy of a thread-local + * variable as long as the thread is alive and the {@code ThreadLocal} + * instance is accessible; after a thread goes away, all of its copies of + * thread-local instances are subject to garbage collection (unless other + * references to these copies exist). + * + * @author Josh Bloch and Doug Lea + * @since 1.2 + */ +public class ThreadLocal { + /** + * ThreadLocals rely on per-thread linear-probe hash maps attached + * to each thread (Thread.threadLocals and + * inheritableThreadLocals). The ThreadLocal objects act as keys, + * searched via threadLocalHashCode. This is a custom hash code + * (useful only within ThreadLocalMaps) that eliminates collisions + * in the common case where consecutively constructed ThreadLocals + * are used by the same threads, while remaining well-behaved in + * less common cases. + */ + private final int threadLocalHashCode = nextHashCode(); + + /** + * The next hash code to be given out. Updated atomically. Starts at + * zero. + */ + private static AtomicInteger nextHashCode = + new AtomicInteger(); + + /** + * The difference between successively generated hash codes - turns + * implicit sequential thread-local IDs into near-optimally spread + * multiplicative hash values for power-of-two-sized tables. + */ + private static final int HASH_INCREMENT = 0x61c88647; + + /** + * Returns the next hash code. + */ + private static int nextHashCode() { + return nextHashCode.getAndAdd(HASH_INCREMENT); + } + + /** + * Returns the current thread's "initial value" for this + * thread-local variable. This method will be invoked the first + * time a thread accesses the variable with the {@link #get} + * method, unless the thread previously invoked the {@link #set} + * method, in which case the {@code initialValue} method will not + * be invoked for the thread. Normally, this method is invoked at + * most once per thread, but it may be invoked again in case of + * subsequent invocations of {@link #remove} followed by {@link #get}. + * + *
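The remove/get interaction just described, sketched (the supplier stands in for an overridden {@code initialValue}):

    ThreadLocal<Integer> tl = ThreadLocal.withInitial(() -> 42);
    tl.set(7);
    tl.get();      // 7
    tl.remove();
    tl.get();      // 42 -- the initial value is computed again after remove()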
<p>
      This implementation simply returns {@code null}; if the + * programmer desires thread-local variables to have an initial + * value other than {@code null}, {@code ThreadLocal} must be + * subclassed, and this method overridden. Typically, an + * anonymous inner class will be used. + * + * @return the initial value for this thread-local + */ + protected T initialValue() { + return null; + } + + /** + * Creates a thread local variable. The initial value of the variable is + * determined by invoking the {@code get} method on the {@code Supplier}. + * + * @param the type of the thread local's value + * @param supplier the supplier to be used to determine the initial value + * @return a new thread local variable + * @throws NullPointerException if the specified supplier is null + * @since 1.8 + */ + public static ThreadLocal withInitial(Supplier supplier) { + return new SuppliedThreadLocal<>(supplier); + } + + /** + * Creates a thread local variable. + * @see #withInitial(java.util.function.Supplier) + */ + public ThreadLocal() { + } + + /** + * Returns the value in the current thread's copy of this + * thread-local variable. If the variable has no value for the + * current thread, it is first initialized to the value returned + * by an invocation of the {@link #initialValue} method. + * + * @return the current thread's value of this thread-local + */ + public T get() { + Thread t = Thread.currentThread(); + ThreadLocalMap map = getMap(t); + if (map != null) { + ThreadLocalMap.Entry e = map.getEntry(this); + if (e != null) { + @SuppressWarnings("unchecked") + T result = (T)e.value; + return result; + } + } + return setInitialValue(); + } + + /** + * Variant of set() to establish initialValue. Used instead + * of set() in case user has overridden the set() method. + * + * @return the initial value + */ + private T setInitialValue() { + T value = initialValue(); + Thread t = Thread.currentThread(); + ThreadLocalMap map = getMap(t); + if (map != null) + map.set(this, value); + else + createMap(t, value); + return value; + } + + /** + * Sets the current thread's copy of this thread-local variable + * to the specified value. Most subclasses will have no need to + * override this method, relying solely on the {@link #initialValue} + * method to set the values of thread-locals. + * + * @param value the value to be stored in the current thread's copy of + * this thread-local. + */ + public void set(T value) { + Thread t = Thread.currentThread(); + ThreadLocalMap map = getMap(t); + if (map != null) + map.set(this, value); + else + createMap(t, value); + } + + /** + * Removes the current thread's value for this thread-local + * variable. If this thread-local variable is subsequently + * {@linkplain #get read} by the current thread, its value will be + * reinitialized by invoking its {@link #initialValue} method, + * unless its value is {@linkplain #set set} by the current thread + * in the interim. This may result in multiple invocations of the + * {@code initialValue} method in the current thread. + * + * @since 1.5 + */ + public void remove() { + ThreadLocalMap m = getMap(Thread.currentThread()); + if (m != null) + m.remove(this); + } + + /** + * Get the map associated with a ThreadLocal. Overridden in + * InheritableThreadLocal. + * + * @param t the current thread + * @return the map + */ + ThreadLocalMap getMap(Thread t) { + return t.threadLocals; + } + + /** + * Create the map associated with a ThreadLocal. Overridden in + * InheritableThreadLocal. 
+ * + * @param t the current thread + * @param firstValue value for the initial entry of the map + */ + void createMap(Thread t, T firstValue) { + t.threadLocals = new ThreadLocalMap(this, firstValue); + } + + /** + * Factory method to create map of inherited thread locals. + * Designed to be called only from Thread constructor. + * + * @param parentMap the map associated with parent thread + * @return a map containing the parent's inheritable bindings + */ + static ThreadLocalMap createInheritedMap(ThreadLocalMap parentMap) { + return new ThreadLocalMap(parentMap); + } + + /** + * Method childValue is visibly defined in subclass + * InheritableThreadLocal, but is internally defined here for the + * sake of providing createInheritedMap factory method without + * needing to subclass the map class in InheritableThreadLocal. + * This technique is preferable to the alternative of embedding + * instanceof tests in methods. + */ + T childValue(T parentValue) { + throw new UnsupportedOperationException(); + } + + /** + * An extension of ThreadLocal that obtains its initial value from + * the specified {@code Supplier}. + */ + static final class SuppliedThreadLocal extends ThreadLocal { + + private final Supplier supplier; + + SuppliedThreadLocal(Supplier supplier) { + this.supplier = Objects.requireNonNull(supplier); + } + + @Override + protected T initialValue() { + return supplier.get(); + } + } + + /** + * ThreadLocalMap is a customized hash map suitable only for + * maintaining thread local values. No operations are exported + * outside of the ThreadLocal class. The class is package private to + * allow declaration of fields in class Thread. To help deal with + * very large and long-lived usages, the hash table entries use + * WeakReferences for keys. However, since reference queues are not + * used, stale entries are guaranteed to be removed only when + * the table starts running out of space. + */ + static class ThreadLocalMap { + + /** + * The entries in this hash map extend WeakReference, using + * its main ref field as the key (which is always a + * ThreadLocal object). Note that null keys (i.e. entry.get() + * == null) mean that the key is no longer referenced, so the + * entry can be expunged from table. Such entries are referred to + * as "stale entries" in the code that follows. + */ + static class Entry extends WeakReference> { + /** The value associated with this ThreadLocal. */ + Object value; + + Entry(ThreadLocal k, Object v) { + super(k); + value = v; + } + } + + /** + * The initial capacity -- MUST be a power of two. + */ + private static final int INITIAL_CAPACITY = 16; + + /** + * The table, resized as necessary. + * table.length MUST always be a power of two. + */ + private Entry[] table; + + /** + * The number of entries in the table. + */ + private int size = 0; + + /** + * The next size value at which to resize. + */ + private int threshold; // Default to 0 + + /** + * Set the resize threshold to maintain at worst a 2/3 load factor. + */ + private void setThreshold(int len) { + threshold = len * 2 / 3; + } + + /** + * Increment i modulo len. + */ + private static int nextIndex(int i, int len) { + return ((i + 1 < len) ? i + 1 : 0); + } + + /** + * Decrement i modulo len. + */ + private static int prevIndex(int i, int len) { + return ((i - 1 >= 0) ? i - 1 : len - 1); + } + + /** + * Construct a new map initially containing (firstKey, firstValue). + * ThreadLocalMaps are constructed lazily, so we only create + * one when we have at least one entry to put in it. 
+ */ + ThreadLocalMap(ThreadLocal firstKey, Object firstValue) { + table = new Entry[INITIAL_CAPACITY]; + int i = firstKey.threadLocalHashCode & (INITIAL_CAPACITY - 1); + table[i] = new Entry(firstKey, firstValue); + size = 1; + setThreshold(INITIAL_CAPACITY); + } + + /** + * Construct a new map including all Inheritable ThreadLocals + * from given parent map. Called only by createInheritedMap. + * + * @param parentMap the map associated with parent thread. + */ + private ThreadLocalMap(ThreadLocalMap parentMap) { + Entry[] parentTable = parentMap.table; + int len = parentTable.length; + setThreshold(len); + table = new Entry[len]; + + for (int j = 0; j < len; j++) { + Entry e = parentTable[j]; + if (e != null) { + @SuppressWarnings("unchecked") + ThreadLocal key = (ThreadLocal) e.get(); + if (key != null) { + Object value = key.childValue(e.value); + Entry c = new Entry(key, value); + int h = key.threadLocalHashCode & (len - 1); + while (table[h] != null) + h = nextIndex(h, len); + table[h] = c; + size++; + } + } + } + } + + /** + * Get the entry associated with key. This method + * itself handles only the fast path: a direct hit of existing + * key. It otherwise relays to getEntryAfterMiss. This is + * designed to maximize performance for direct hits, in part + * by making this method readily inlinable. + * + * @param key the thread local object + * @return the entry associated with key, or null if no such + */ + private Entry getEntry(ThreadLocal key) { + int i = key.threadLocalHashCode & (table.length - 1); + Entry e = table[i]; + if (e != null && e.get() == key) + return e; + else + return getEntryAfterMiss(key, i, e); + } + + /** + * Version of getEntry method for use when key is not found in + * its direct hash slot. + * + * @param key the thread local object + * @param i the table index for key's hash code + * @param e the entry at table[i] + * @return the entry associated with key, or null if no such + */ + private Entry getEntryAfterMiss(ThreadLocal key, int i, Entry e) { + Entry[] tab = table; + int len = tab.length; + + while (e != null) { + ThreadLocal k = e.get(); + if (k == key) + return e; + if (k == null) + expungeStaleEntry(i); + else + i = nextIndex(i, len); + e = tab[i]; + } + return null; + } + + /** + * Set the value associated with key. + * + * @param key the thread local object + * @param value the value to be set + */ + private void set(ThreadLocal key, Object value) { + + // We don't use a fast path as with get() because it is at + // least as common to use set() to create new entries as + // it is to replace existing ones, in which case, a fast + // path would fail more often than not. + + Entry[] tab = table; + int len = tab.length; + int i = key.threadLocalHashCode & (len-1); + + for (Entry e = tab[i]; + e != null; + e = tab[i = nextIndex(i, len)]) { + ThreadLocal k = e.get(); + + if (k == key) { + e.value = value; + return; + } + + if (k == null) { + replaceStaleEntry(key, value, i); + return; + } + } + + tab[i] = new Entry(key, value); + int sz = ++size; + if (!cleanSomeSlots(i, sz) && sz >= threshold) + rehash(); + } + + /** + * Remove the entry for key. + */ + private void remove(ThreadLocal key) { + Entry[] tab = table; + int len = tab.length; + int i = key.threadLocalHashCode & (len-1); + for (Entry e = tab[i]; + e != null; + e = tab[i = nextIndex(i, len)]) { + if (e.get() == key) { + e.clear(); + expungeStaleEntry(i); + return; + } + } + } + + /** + * Replace a stale entry encountered during a set operation + * with an entry for the specified key. 
The value passed in + * the value parameter is stored in the entry, whether or not + * an entry already exists for the specified key. + * + * As a side effect, this method expunges all stale entries in the + * "run" containing the stale entry. (A run is a sequence of entries + * between two null slots.) + * + * @param key the key + * @param value the value to be associated with key + * @param staleSlot index of the first stale entry encountered while + * searching for key. + */ + private void replaceStaleEntry(ThreadLocal key, Object value, + int staleSlot) { + Entry[] tab = table; + int len = tab.length; + Entry e; + + // Back up to check for prior stale entry in current run. + // We clean out whole runs at a time to avoid continual + // incremental rehashing due to garbage collector freeing + // up refs in bunches (i.e., whenever the collector runs). + int slotToExpunge = staleSlot; + for (int i = prevIndex(staleSlot, len); + (e = tab[i]) != null; + i = prevIndex(i, len)) + if (e.get() == null) + slotToExpunge = i; + + // Find either the key or trailing null slot of run, whichever + // occurs first + for (int i = nextIndex(staleSlot, len); + (e = tab[i]) != null; + i = nextIndex(i, len)) { + ThreadLocal k = e.get(); + + // If we find key, then we need to swap it + // with the stale entry to maintain hash table order. + // The newly stale slot, or any other stale slot + // encountered above it, can then be sent to expungeStaleEntry + // to remove or rehash all of the other entries in run. + if (k == key) { + e.value = value; + + tab[i] = tab[staleSlot]; + tab[staleSlot] = e; + + // Start expunge at preceding stale entry if it exists + if (slotToExpunge == staleSlot) + slotToExpunge = i; + cleanSomeSlots(expungeStaleEntry(slotToExpunge), len); + return; + } + + // If we didn't find stale entry on backward scan, the + // first stale entry seen while scanning for key is the + // first still present in the run. + if (k == null && slotToExpunge == staleSlot) + slotToExpunge = i; + } + + // If key not found, put new entry in stale slot + tab[staleSlot].value = null; + tab[staleSlot] = new Entry(key, value); + + // If there are any other stale entries in run, expunge them + if (slotToExpunge != staleSlot) + cleanSomeSlots(expungeStaleEntry(slotToExpunge), len); + } + + /** + * Expunge a stale entry by rehashing any possibly colliding entries + * lying between staleSlot and the next null slot. This also expunges + * any other stale entries encountered before the trailing null. See + * Knuth, Section 6.4 + * + * @param staleSlot index of slot known to have null key + * @return the index of the next null slot after staleSlot + * (all between staleSlot and this slot will have been checked + * for expunging). + */ + private int expungeStaleEntry(int staleSlot) { + Entry[] tab = table; + int len = tab.length; + + // expunge entry at staleSlot + tab[staleSlot].value = null; + tab[staleSlot] = null; + size--; + + // Rehash until we encounter null + Entry e; + int i; + for (i = nextIndex(staleSlot, len); + (e = tab[i]) != null; + i = nextIndex(i, len)) { + ThreadLocal k = e.get(); + if (k == null) { + e.value = null; + tab[i] = null; + size--; + } else { + int h = k.threadLocalHashCode & (len - 1); + if (h != i) { + tab[i] = null; + + // Unlike Knuth 6.4 Algorithm R, we must scan until + // null because multiple entries could have been stale. + while (tab[h] != null) + h = nextIndex(h, len); + tab[h] = e; + } + } + } + return i; + } + + /** + * Heuristically scan some cells looking for stale entries. 
+ * This is invoked when either a new element is added, or + * another stale one has been expunged. It performs a + * logarithmic number of scans, as a balance between no + * scanning (fast but retains garbage) and a number of scans + * proportional to number of elements, that would find all + * garbage but would cause some insertions to take O(n) time. + * + * @param i a position known NOT to hold a stale entry. The + * scan starts at the element after i. + * + * @param n scan control: {@code log2(n)} cells are scanned, + * unless a stale entry is found, in which case + * {@code log2(table.length)-1} additional cells are scanned. + * When called from insertions, this parameter is the number + * of elements, but when from replaceStaleEntry, it is the + * table length. (Note: all this could be changed to be either + * more or less aggressive by weighting n instead of just + * using straight log n. But this version is simple, fast, and + * seems to work well.) + * + * @return true if any stale entries have been removed. + */ + private boolean cleanSomeSlots(int i, int n) { + boolean removed = false; + Entry[] tab = table; + int len = tab.length; + do { + i = nextIndex(i, len); + Entry e = tab[i]; + if (e != null && e.get() == null) { + n = len; + removed = true; + i = expungeStaleEntry(i); + } + } while ( (n >>>= 1) != 0); + return removed; + } + + /** + * Re-pack and/or re-size the table. First scan the entire + * table removing stale entries. If this doesn't sufficiently + * shrink the size of the table, double the table size. + */ + private void rehash() { + expungeStaleEntries(); + + // Use lower threshold for doubling to avoid hysteresis + if (size >= threshold - threshold / 4) + resize(); + } + + /** + * Double the capacity of the table. + */ + private void resize() { + Entry[] oldTab = table; + int oldLen = oldTab.length; + int newLen = oldLen * 2; + Entry[] newTab = new Entry[newLen]; + int count = 0; + + for (int j = 0; j < oldLen; ++j) { + Entry e = oldTab[j]; + if (e != null) { + ThreadLocal k = e.get(); + if (k == null) { + e.value = null; // Help the GC + } else { + int h = k.threadLocalHashCode & (newLen - 1); + while (newTab[h] != null) + h = nextIndex(h, newLen); + newTab[h] = e; + count++; + } + } + } + + setThreshold(newLen); + size = count; + table = newTab; + } + + /** + * Expunge all stale entries in the table. + */ + private void expungeStaleEntries() { + Entry[] tab = table; + int len = tab.length; + for (int j = 0; j < len; j++) { + Entry e = tab[j]; + if (e != null && e.get() == null) + expungeStaleEntry(j); + } + } + } +} diff --git a/src/TreeMap.java b/src/TreeMap.java index 120b46c..29edb58 100644 --- a/src/TreeMap.java +++ b/src/TreeMap.java @@ -1,6 +1,113 @@ +/* + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ package java.util; +import java.io.Serializable; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Consumer; + +/** + * A Red-Black tree based {@link NavigableMap} implementation. + * The map is sorted according to the {@linkplain Comparable natural + * ordering} of its keys, or by a {@link Comparator} provided at map + * creation time, depending on which constructor is used. + * + *
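The two constructor choices just mentioned, sketched:

    TreeMap<String, Integer> byNaturalOrder = new TreeMap<>();
    TreeMap<String, Integer> byComparator =
        new TreeMap<>(Comparator.reverseOrder());   // explicit ordering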
<p>
      This implementation provides guaranteed log(n) time cost for the + * {@code containsKey}, {@code get}, {@code put} and {@code remove} + * operations. Algorithms are adaptations of those in Cormen, Leiserson, and + * Rivest's Introduction to Algorithms. + * + *
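A sketch of the guaranteed-log(n) operations listed above:

    TreeMap<String, Integer> m = new TreeMap<>();
    m.put("b", 2);             // O(log n)
    m.put("a", 1);
    m.containsKey("a");        // O(log n) -> true
    m.get("b");                // O(log n) -> 2
    m.remove("a");             // O(log n)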
<p>
      Note that the ordering maintained by a tree map, like any sorted map, and + * whether or not an explicit comparator is provided, must be consistent + * with {@code equals} if this sorted map is to correctly implement the + * {@code Map} interface. (See {@code Comparable} or {@code Comparator} for a + * precise definition of consistent with equals.) This is so because + * the {@code Map} interface is defined in terms of the {@code equals} + * operation, but a sorted map performs all key comparisons using its {@code + * compareTo} (or {@code compare}) method, so two keys that are deemed equal by + * this method are, from the standpoint of the sorted map, equal. The behavior + * of a sorted map is well-defined even if its ordering is + * inconsistent with {@code equals}; it just fails to obey the general contract + * of the {@code Map} interface. + * + *
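An ordering inconsistent with equals, as described above, sketched with a case-insensitive comparator:

    TreeMap<String, Integer> ci = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
    ci.put("a", 1);
    ci.put("A", 2);            // deemed equal to "a": overwrites, size stays 1
    ci.get("a");               // 2, even though "a".equals("A") is false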
<p>
      Note that this implementation is not synchronized. + * If multiple threads access a map concurrently, and at least one of the + * threads modifies the map structurally, it must be synchronized + * externally. (A structural modification is any operation that adds or + * deletes one or more mappings; merely changing the value associated + * with an existing key is not a structural modification.) This is + * typically accomplished by synchronizing on some object that naturally + * encapsulates the map. + * If no such object exists, the map should be "wrapped" using the + * {@link Collections#synchronizedSortedMap Collections.synchronizedSortedMap} + * method. This is best done at creation time, to prevent accidental + * unsynchronized access to the map:
<pre>
+ *   SortedMap m = Collections.synchronizedSortedMap(new TreeMap(...));</pre>
      + * + *
<p>
      The iterators returned by the {@code iterator} method of the collections + * returned by all of this class's "collection view methods" are + * fail-fast: if the map is structurally modified at any time after + * the iterator is created, in any way except through the iterator's own + * {@code remove} method, the iterator will throw a {@link + * ConcurrentModificationException}. Thus, in the face of concurrent + * modification, the iterator fails quickly and cleanly, rather than risking + * arbitrary, non-deterministic behavior at an undetermined time in the future. + * + *
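A sketch of the fail-fast behavior just described (best-effort, so not guaranteed):

    TreeMap<Integer, String> fm = new TreeMap<>();
    fm.put(1, "one");
    fm.put(2, "two");
    for (Integer k : fm.keySet()) {   // iterator created here
        fm.remove(2);                 // structural modification outside the iterator
    }
    // the iterator's next step throws ConcurrentModificationException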
<p>
      Note that the fail-fast behavior of an iterator cannot be guaranteed + * as it is, generally speaking, impossible to make any hard guarantees in the + * presence of unsynchronized concurrent modification. Fail-fast iterators + * throw {@code ConcurrentModificationException} on a best-effort basis. + * Therefore, it would be wrong to write a program that depended on this + * exception for its correctness: the fail-fast behavior of iterators + * should be used only to detect bugs. + * + *
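Continuing the sketch above, the supported way to remove during iteration is the iterator's own {@code remove}:

    Iterator<Map.Entry<Integer, String>> it = fm.entrySet().iterator();
    while (it.hasNext()) {
        if (it.next().getKey() % 2 == 0)
            it.remove();              // safe: goes through the iterator itself
    }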
<p>
      All {@code Map.Entry} pairs returned by methods in this class + * and its views represent snapshots of mappings at the time they were + * produced. They do not support the {@code Entry.setValue} + * method. (Note however that it is possible to change mappings in the + * associated map using {@code put}.) + * + *
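The snapshot semantics above, sketched against a populated map:

    Map.Entry<Integer, String> e = fm.firstEntry();   // a snapshot, not a live view
    // e.setValue("uno");         // would throw UnsupportedOperationException
    fm.put(e.getKey(), "uno");    // the supported way to change the mapping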
<p>
      This class is a member of the + * + * Java Collections Framework. + * + * @param the type of keys maintained by this map + * @param the type of mapped values + * + * @author Josh Bloch and Doug Lea + * @see Map + * @see HashMap + * @see Hashtable + * @see Comparable + * @see Comparator + * @see Collection + * @since 1.2 + */ + public class TreeMap extends AbstractMap implements NavigableMap, Cloneable, java.io.Serializable @@ -13,7 +120,7 @@ public class TreeMap */ private final Comparator comparator; - private transient Entry root = null; + private transient Entry root; /** * The number of entries in the tree @@ -205,7 +312,7 @@ public K lastKey() { public void putAll(Map map) { int mapSize = map.size(); if (size==0 && mapSize!=0 && map instanceof SortedMap) { - Comparator c = ((SortedMap)map).comparator(); + Comparator c = ((SortedMap)map).comparator(); if (c == comparator || (c != null && c.equals(comparator))) { ++modCount; try { @@ -238,7 +345,8 @@ final Entry getEntry(Object key) { return getEntryUsingComparator(key); if (key == null) throw new NullPointerException(); - Comparable k = (Comparable) key; + @SuppressWarnings("unchecked") + Comparable k = (Comparable) key; Entry p = root; while (p != null) { int cmp = k.compareTo(p.key); @@ -259,7 +367,8 @@ else if (cmp > 0) * worthwhile here.) */ final Entry getEntryUsingComparator(Object key) { - K k = (K) key; + @SuppressWarnings("unchecked") + K k = (K) key; Comparator cpr = comparator; if (cpr != null) { Entry p = root; @@ -452,7 +561,8 @@ else if (cmp > 0) else { if (key == null) throw new NullPointerException(); - Comparable k = (Comparable) key; + @SuppressWarnings("unchecked") + Comparable k = (Comparable) key; do { parent = t; cmp = k.compareTo(t.key); @@ -516,11 +626,11 @@ public void clear() { * @return a shallow copy of this map */ public Object clone() { - TreeMap clone = null; + TreeMap clone; try { - clone = (TreeMap) super.clone(); + clone = (TreeMap) super.clone(); } catch (CloneNotSupportedException e) { - throw new InternalError(); + throw new InternalError(e); } // Put clone into "virgin" state (except for comparator) @@ -674,14 +784,25 @@ public K higherKey(K key) { * the first time this view is requested. Views are stateless, so * there's no reason to create more than one. */ - private transient EntrySet entrySet = null; - private transient KeySet navigableKeySet = null; - private transient NavigableMap descendingMap = null; + private transient EntrySet entrySet; + private transient KeySet navigableKeySet; + private transient NavigableMap descendingMap; /** * Returns a {@link Set} view of the keys contained in this map. - * The set's iterator returns the keys in ascending order. - * The set is backed by the map, so changes to the map are + * + *
<p>
      The set's iterator returns the keys in ascending order. + * The set's spliterator is + * late-binding, + * fail-fast, and additionally reports {@link Spliterator#SORTED} + * and {@link Spliterator#ORDERED} with an encounter order that is ascending + * key order. The spliterator's comparator (see + * {@link java.util.Spliterator#getComparator()}) is {@code null} if + * the tree map's comparator (see {@link #comparator()}) is {@code null}. + * Otherwise, the spliterator's comparator is the same as or imposes the + * same total ordering as the tree map's comparator. + * + *
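A sketch of the spliterator properties listed above, again with {@code fm}:

    Spliterator<Integer> s = fm.keySet().spliterator();
    s.hasCharacteristics(Spliterator.SORTED);    // true
    s.hasCharacteristics(Spliterator.ORDERED);   // true
    s.getComparator();                           // null here: natural key ordering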
<p>
      The set is backed by the map, so changes to the map are * reflected in the set, and vice-versa. If the map is modified * while an iteration over the set is in progress (except through * the iterator's own {@code remove} operation), the results of @@ -701,7 +822,7 @@ public Set keySet() { */ public NavigableSet navigableKeySet() { KeySet nks = navigableKeySet; - return (nks != null) ? nks : (navigableKeySet = new KeySet(this)); + return (nks != null) ? nks : (navigableKeySet = new KeySet<>(this)); } /** @@ -713,9 +834,15 @@ public NavigableSet descendingKeySet() { /** * Returns a {@link Collection} view of the values contained in this map. - * The collection's iterator returns the values in ascending order - * of the corresponding keys. - * The collection is backed by the map, so changes to the map are + * + *
<p>
      The collection's iterator returns the values in ascending order + * of the corresponding keys. The collection's spliterator is + * late-binding, + * fail-fast, and additionally reports {@link Spliterator#ORDERED} + * with an encounter order that is ascending order of the corresponding + * keys. + * + *
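Continuing the sketch, values are traversed in ascending order of their keys:

    fm.values().forEach(System.out::println);    // prints in key order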
<p>
      The collection is backed by the map, so changes to the map are * reflected in the collection, and vice-versa. If the map is * modified while an iteration over the collection is in progress * (except through the iterator's own {@code remove} operation), @@ -733,8 +860,15 @@ public Collection values() { /** * Returns a {@link Set} view of the mappings contained in this map. - * The set's iterator returns the entries in ascending key order. - * The set is backed by the map, so changes to the map are + * + *
<p>
The set's iterator returns the entries in ascending key order. The + * set's spliterator is + * late-binding, + * fail-fast, and additionally reports {@link Spliterator#SORTED} and + * {@link Spliterator#ORDERED} with an encounter order that is ascending key + * order. + * + *
<p>
      The set is backed by the map, so changes to the map are * reflected in the set, and vice-versa. If the map is modified * while an iteration over the set is in progress (except through * the iterator's own {@code remove} operation, or through the @@ -757,9 +891,9 @@ public Set> entrySet() { public NavigableMap descendingMap() { NavigableMap km = descendingMap; return (km != null) ? km : - (descendingMap = new DescendingSubMap(this, - true, null, true, - true, null, true)); + (descendingMap = new DescendingSubMap<>(this, + true, null, true, + true, null, true)); } /** @@ -772,9 +906,9 @@ public NavigableMap descendingMap() { */ public NavigableMap subMap(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive) { - return new AscendingSubMap(this, - false, fromKey, fromInclusive, - false, toKey, toInclusive); + return new AscendingSubMap<>(this, + false, fromKey, fromInclusive, + false, toKey, toInclusive); } /** @@ -786,9 +920,9 @@ public NavigableMap subMap(K fromKey, boolean fromInclusive, * @since 1.6 */ public NavigableMap headMap(K toKey, boolean inclusive) { - return new AscendingSubMap(this, - true, null, true, - false, toKey, inclusive); + return new AscendingSubMap<>(this, + true, null, true, + false, toKey, inclusive); } /** @@ -800,9 +934,9 @@ public NavigableMap headMap(K toKey, boolean inclusive) { * @since 1.6 */ public NavigableMap tailMap(K fromKey, boolean inclusive) { - return new AscendingSubMap(this, - false, fromKey, inclusive, - true, null, true); + return new AscendingSubMap<>(this, + false, fromKey, inclusive, + true, null, true); } /** @@ -838,6 +972,54 @@ public SortedMap tailMap(K fromKey) { return tailMap(fromKey, true); } + @Override + public boolean replace(K key, V oldValue, V newValue) { + Entry p = getEntry(key); + if (p!=null && Objects.equals(oldValue, p.value)) { + p.value = newValue; + return true; + } + return false; + } + + @Override + public V replace(K key, V value) { + Entry p = getEntry(key); + if (p!=null) { + V oldValue = p.value; + p.value = value; + return oldValue; + } + return null; + } + + @Override + public void forEach(BiConsumer action) { + Objects.requireNonNull(action); + int expectedModCount = modCount; + for (Entry e = getFirstEntry(); e != null; e = successor(e)) { + action.accept(e.key, e.value); + + if (expectedModCount != modCount) { + throw new ConcurrentModificationException(); + } + } + } + + @Override + public void replaceAll(BiFunction function) { + Objects.requireNonNull(function); + int expectedModCount = modCount; + + for (Entry e = getFirstEntry(); e != null; e = successor(e)) { + e.value = function.apply(e.key, e.value); + + if (expectedModCount != modCount) { + throw new ConcurrentModificationException(); + } + } + } + // View class support class Values extends AbstractCollection { @@ -866,6 +1048,10 @@ public boolean remove(Object o) { public void clear() { TreeMap.this.clear(); } + + public Spliterator spliterator() { + return new ValueSpliterator(TreeMap.this, null, null, 0, -1, 0); + } } class EntrySet extends AbstractSet> { @@ -876,8 +1062,8 @@ public Iterator> iterator() { public boolean contains(Object o) { if (!(o instanceof Map.Entry)) return false; - Map.Entry entry = (Map.Entry) o; - V value = entry.getValue(); + Map.Entry entry = (Map.Entry) o; + Object value = entry.getValue(); Entry p = getEntry(entry.getKey()); return p != null && valEquals(p.getValue(), value); } @@ -885,8 +1071,8 @@ public boolean contains(Object o) { public boolean remove(Object o) { if (!(o instanceof Map.Entry)) return 
false; - Map.Entry entry = (Map.Entry) o; - V value = entry.getValue(); + Map.Entry entry = (Map.Entry) o; + Object value = entry.getValue(); Entry p = getEntry(entry.getKey()); if (p != null && valEquals(p.getValue(), value)) { deleteEntry(p); @@ -902,6 +1088,10 @@ public int size() { public void clear() { TreeMap.this.clear(); } + + public Spliterator> spliterator() { + return new EntrySpliterator(TreeMap.this, null, null, 0, -1, 0); + } } /* @@ -921,21 +1111,21 @@ Iterator descendingKeyIterator() { } static final class KeySet extends AbstractSet implements NavigableSet { - private final NavigableMap m; - KeySet(NavigableMap map) { m = map; } + private final NavigableMap m; + KeySet(NavigableMap map) { m = map; } public Iterator iterator() { if (m instanceof TreeMap) - return ((TreeMap)m).keyIterator(); + return ((TreeMap)m).keyIterator(); else - return (Iterator)(((TreeMap.NavigableSubMap)m).keyIterator()); + return ((TreeMap.NavigableSubMap)m).keyIterator(); } public Iterator descendingIterator() { if (m instanceof TreeMap) - return ((TreeMap)m).descendingKeyIterator(); + return ((TreeMap)m).descendingKeyIterator(); else - return (Iterator)(((TreeMap.NavigableSubMap)m).descendingKeyIterator()); + return ((TreeMap.NavigableSubMap)m).descendingKeyIterator(); } public int size() { return m.size(); } @@ -950,11 +1140,11 @@ public Iterator descendingIterator() { public E last() { return m.lastKey(); } public Comparator comparator() { return m.comparator(); } public E pollFirst() { - Map.Entry e = m.pollFirstEntry(); + Map.Entry e = m.pollFirstEntry(); return (e == null) ? null : e.getKey(); } public E pollLast() { - Map.Entry e = m.pollLastEntry(); + Map.Entry e = m.pollLastEntry(); return (e == null) ? null : e.getKey(); } public boolean remove(Object o) { @@ -983,7 +1173,11 @@ public SortedSet tailSet(E fromElement) { return tailSet(fromElement, true); } public NavigableSet descendingSet() { - return new KeySet(m.descendingMap()); + return new KeySet<>(m.descendingMap()); + } + + public Spliterator spliterator() { + return keySpliteratorFor(m); } } @@ -1075,6 +1269,15 @@ final class DescendingKeyIterator extends PrivateEntryIterator { public K next() { return prevEntry().key; } + public void remove() { + if (lastReturned == null) + throw new IllegalStateException(); + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + deleteEntry(lastReturned); + lastReturned = null; + expectedModCount = modCount; + } } // Little utilities @@ -1082,6 +1285,7 @@ public K next() { /** * Compares two keys using the correct comparison method for this TreeMap. */ + @SuppressWarnings("unchecked") final int compare(Object k1, Object k2) { return comparator==null ? ((Comparable)k1).compareTo((K)k2) : comparator.compare((K)k1, (K)k2); @@ -1134,6 +1338,7 @@ static K key(Entry e) { */ abstract static class NavigableSubMap extends AbstractMap implements NavigableMap, java.io.Serializable { + private static final long serialVersionUID = -2102997345730753016L; /** * The backing map. 
*/ @@ -1283,6 +1488,8 @@ final TreeMap.Entry absLowFence() { /** Returns ascending iterator from the perspective of this submap */ abstract Iterator keyIterator(); + abstract Spliterator keySpliterator(); + /** Returns descending iterator from the perspective of this submap */ abstract Iterator descendingKeyIterator(); @@ -1379,14 +1586,14 @@ public final Map.Entry pollLastEntry() { } // Views - transient NavigableMap descendingMapView = null; - transient EntrySetView entrySetView = null; - transient KeySet navigableKeySetView = null; + transient NavigableMap descendingMapView; + transient EntrySetView entrySetView; + transient KeySet navigableKeySetView; public final NavigableSet navigableKeySet() { KeySet nksv = navigableKeySetView; return (nksv != null) ? nksv : - (navigableKeySetView = new TreeMap.KeySet(this)); + (navigableKeySetView = new TreeMap.KeySet<>(this)); } public final Set keySet() { @@ -1420,7 +1627,7 @@ public int size() { if (size == -1 || sizeModCount != m.modCount) { sizeModCount = m.modCount; size = 0; - Iterator i = iterator(); + Iterator i = iterator(); while (i.hasNext()) { size++; i.next(); @@ -1437,11 +1644,11 @@ public boolean isEmpty() { public boolean contains(Object o) { if (!(o instanceof Map.Entry)) return false; - Map.Entry entry = (Map.Entry) o; - K key = entry.getKey(); + Map.Entry entry = (Map.Entry) o; + Object key = entry.getKey(); if (!inRange(key)) return false; - TreeMap.Entry node = m.getEntry(key); + TreeMap.Entry node = m.getEntry(key); return node != null && valEquals(node.getValue(), entry.getValue()); } @@ -1449,8 +1656,8 @@ public boolean contains(Object o) { public boolean remove(Object o) { if (!(o instanceof Map.Entry)) return false; - Map.Entry entry = (Map.Entry) o; - K key = entry.getKey(); + Map.Entry entry = (Map.Entry) o; + Object key = entry.getKey(); if (!inRange(key)) return false; TreeMap.Entry node = m.getEntry(key); @@ -1544,7 +1751,23 @@ public void remove() { } } - final class SubMapKeyIterator extends SubMapIterator { + final class DescendingSubMapEntryIterator extends SubMapIterator> { + DescendingSubMapEntryIterator(TreeMap.Entry last, + TreeMap.Entry fence) { + super(last, fence); + } + + public Map.Entry next() { + return prevEntry(); + } + public void remove() { + removeDescending(); + } + } + + // Implement minimal Spliterator as KeySpliterator backup + final class SubMapKeyIterator extends SubMapIterator + implements Spliterator { SubMapKeyIterator(TreeMap.Entry first, TreeMap.Entry fence) { super(first, fence); @@ -1555,23 +1778,34 @@ public K next() { public void remove() { removeAscending(); } - } - - final class DescendingSubMapEntryIterator extends SubMapIterator> { - DescendingSubMapEntryIterator(TreeMap.Entry last, - TreeMap.Entry fence) { - super(last, fence); + public Spliterator trySplit() { + return null; } - - public Map.Entry next() { - return prevEntry(); + public void forEachRemaining(Consumer action) { + while (hasNext()) + action.accept(next()); } - public void remove() { - removeDescending(); + public boolean tryAdvance(Consumer action) { + if (hasNext()) { + action.accept(next()); + return true; + } + return false; + } + public long estimateSize() { + return Long.MAX_VALUE; + } + public int characteristics() { + return Spliterator.DISTINCT | Spliterator.ORDERED | + Spliterator.SORTED; + } + public final Comparator getComparator() { + return NavigableSubMap.this.comparator(); } } - final class DescendingSubMapKeyIterator extends SubMapIterator { + final class DescendingSubMapKeyIterator extends 
SubMapIterator + implements Spliterator { DescendingSubMapKeyIterator(TreeMap.Entry last, TreeMap.Entry fence) { super(last, fence); @@ -1582,6 +1816,26 @@ public K next() { public void remove() { removeDescending(); } + public Spliterator trySplit() { + return null; + } + public void forEachRemaining(Consumer action) { + while (hasNext()) + action.accept(next()); + } + public boolean tryAdvance(Consumer action) { + if (hasNext()) { + action.accept(next()); + return true; + } + return false; + } + public long estimateSize() { + return Long.MAX_VALUE; + } + public int characteristics() { + return Spliterator.DISTINCT | Spliterator.ORDERED; + } } } @@ -1607,40 +1861,44 @@ public NavigableMap subMap(K fromKey, boolean fromInclusive, throw new IllegalArgumentException("fromKey out of range"); if (!inRange(toKey, toInclusive)) throw new IllegalArgumentException("toKey out of range"); - return new AscendingSubMap(m, - false, fromKey, fromInclusive, - false, toKey, toInclusive); + return new AscendingSubMap<>(m, + false, fromKey, fromInclusive, + false, toKey, toInclusive); } public NavigableMap headMap(K toKey, boolean inclusive) { if (!inRange(toKey, inclusive)) throw new IllegalArgumentException("toKey out of range"); - return new AscendingSubMap(m, - fromStart, lo, loInclusive, - false, toKey, inclusive); + return new AscendingSubMap<>(m, + fromStart, lo, loInclusive, + false, toKey, inclusive); } public NavigableMap tailMap(K fromKey, boolean inclusive) { if (!inRange(fromKey, inclusive)) throw new IllegalArgumentException("fromKey out of range"); - return new AscendingSubMap(m, - false, fromKey, inclusive, - toEnd, hi, hiInclusive); + return new AscendingSubMap<>(m, + false, fromKey, inclusive, + toEnd, hi, hiInclusive); } public NavigableMap descendingMap() { NavigableMap mv = descendingMapView; return (mv != null) ? mv : (descendingMapView = - new DescendingSubMap(m, - fromStart, lo, loInclusive, - toEnd, hi, hiInclusive)); + new DescendingSubMap<>(m, + fromStart, lo, loInclusive, + toEnd, hi, hiInclusive)); } Iterator keyIterator() { return new SubMapKeyIterator(absLowest(), absHighFence()); } + Spliterator keySpliterator() { + return new SubMapKeyIterator(absLowest(), absHighFence()); + } + Iterator descendingKeyIterator() { return new DescendingSubMapKeyIterator(absHighest(), absLowFence()); } @@ -1653,7 +1911,7 @@ public Iterator> iterator() { public Set> entrySet() { EntrySetView es = entrySetView; - return (es != null) ? es : new AscendingEntrySetView(); + return (es != null) ? 
es : (entrySetView = new AscendingEntrySetView()); } TreeMap.Entry subLowest() { return absLowest(); } @@ -1688,40 +1946,44 @@ public NavigableMap subMap(K fromKey, boolean fromInclusive, throw new IllegalArgumentException("fromKey out of range"); if (!inRange(toKey, toInclusive)) throw new IllegalArgumentException("toKey out of range"); - return new DescendingSubMap(m, - false, toKey, toInclusive, - false, fromKey, fromInclusive); + return new DescendingSubMap<>(m, + false, toKey, toInclusive, + false, fromKey, fromInclusive); } public NavigableMap headMap(K toKey, boolean inclusive) { if (!inRange(toKey, inclusive)) throw new IllegalArgumentException("toKey out of range"); - return new DescendingSubMap(m, - false, toKey, inclusive, - toEnd, hi, hiInclusive); + return new DescendingSubMap<>(m, + false, toKey, inclusive, + toEnd, hi, hiInclusive); } public NavigableMap tailMap(K fromKey, boolean inclusive) { if (!inRange(fromKey, inclusive)) throw new IllegalArgumentException("fromKey out of range"); - return new DescendingSubMap(m, - fromStart, lo, loInclusive, - false, fromKey, inclusive); + return new DescendingSubMap<>(m, + fromStart, lo, loInclusive, + false, fromKey, inclusive); } public NavigableMap descendingMap() { NavigableMap mv = descendingMapView; return (mv != null) ? mv : (descendingMapView = - new AscendingSubMap(m, - fromStart, lo, loInclusive, - toEnd, hi, hiInclusive)); + new AscendingSubMap<>(m, + fromStart, lo, loInclusive, + toEnd, hi, hiInclusive)); } Iterator keyIterator() { return new DescendingSubMapKeyIterator(absHighest(), absLowFence()); } + Spliterator keySpliterator() { + return new DescendingSubMapKeyIterator(absHighest(), absLowFence()); + } + Iterator descendingKeyIterator() { return new SubMapKeyIterator(absLowest(), absHighFence()); } @@ -1734,7 +1996,7 @@ public Iterator> iterator() { public Set> entrySet() { EntrySetView es = entrySetView; - return (es != null) ? es : new DescendingEntrySetView(); + return (es != null) ? es : (entrySetView = new DescendingEntrySetView()); } TreeMap.Entry subLowest() { return absHighest(); } @@ -1760,9 +2022,9 @@ private class SubMap extends AbstractMap private boolean fromStart = false, toEnd = false; private K fromKey, toKey; private Object readResolve() { - return new AscendingSubMap(TreeMap.this, - fromStart, fromKey, true, - toEnd, toKey, false); + return new AscendingSubMap<>(TreeMap.this, + fromStart, fromKey, true, + toEnd, toKey, false); } public Set> entrySet() { throw new InternalError(); } public K lastKey() { throw new InternalError(); } @@ -1787,8 +2049,8 @@ private Object readResolve() { static final class Entry implements Map.Entry { K key; V value; - Entry left = null; - Entry right = null; + Entry left; + Entry right; Entry parent; boolean color = BLACK; @@ -2229,12 +2491,12 @@ void addAllForTreeSet(SortedSet set, V defaultVal) { * @param defaultVal if non-null, this default value is used for * each value in the map. If null, each value is read from * iterator or stream, as described above. - * @throws IOException propagated from stream reads. This cannot + * @throws java.io.IOException propagated from stream reads. This cannot * occur if str is null. * @throws ClassNotFoundException propagated from readObject. * This cannot occur if str is null. 
*/ - private void buildFromSorted(int size, Iterator it, + private void buildFromSorted(int size, Iterator it, java.io.ObjectInputStream str, V defaultVal) throws java.io.IOException, ClassNotFoundException { @@ -2257,9 +2519,10 @@ private void buildFromSorted(int size, Iterator it, * @param redLevel the level at which nodes should be red. * Must be equal to computeRedLevel for tree of this size. */ + @SuppressWarnings("unchecked") private final Entry buildFromSorted(int level, int lo, int hi, int redLevel, - Iterator it, + Iterator it, java.io.ObjectInputStream str, V defaultVal) throws java.io.IOException, ClassNotFoundException { @@ -2289,9 +2552,9 @@ private final Entry buildFromSorted(int level, int lo, int hi, V value; if (it != null) { if (defaultVal==null) { - Map.Entry entry = (Map.Entry)it.next(); - key = entry.getKey(); - value = entry.getValue(); + Map.Entry entry = (Map.Entry)it.next(); + key = (K)entry.getKey(); + value = (V)entry.getValue(); } else { key = (K)it.next(); value = defaultVal; @@ -2337,4 +2600,416 @@ private static int computeRedLevel(int sz) { level++; return level; } + + /** + * Currently, we support Spliterator-based versions only for the + * full map, in either plain of descending form, otherwise relying + * on defaults because size estimation for submaps would dominate + * costs. The type tests needed to check these for key views are + * not very nice but avoid disrupting existing class + * structures. Callers must use plain default spliterators if this + * returns null. + */ + static Spliterator keySpliteratorFor(NavigableMap m) { + if (m instanceof TreeMap) { + @SuppressWarnings("unchecked") TreeMap t = + (TreeMap) m; + return t.keySpliterator(); + } + if (m instanceof DescendingSubMap) { + @SuppressWarnings("unchecked") DescendingSubMap dm = + (DescendingSubMap) m; + TreeMap tm = dm.m; + if (dm == tm.descendingMap) { + @SuppressWarnings("unchecked") TreeMap t = + (TreeMap) tm; + return t.descendingKeySpliterator(); + } + } + @SuppressWarnings("unchecked") NavigableSubMap sm = + (NavigableSubMap) m; + return sm.keySpliterator(); + } + + final Spliterator keySpliterator() { + return new KeySpliterator(this, null, null, 0, -1, 0); + } + + final Spliterator descendingKeySpliterator() { + return new DescendingKeySpliterator(this, null, null, 0, -2, 0); + } + + /** + * Base class for spliterators. Iteration starts at a given + * origin and continues up to but not including a given fence (or + * null for end). At top-level, for ascending cases, the first + * split uses the root as left-fence/right-origin. From there, + * right-hand splits replace the current fence with its left + * child, also serving as origin for the split-off spliterator. + * Left-hands are symmetric. Descending versions place the origin + * at the end and invert ascending split rules. This base class + * is non-commital about directionality, or whether the top-level + * spliterator covers the whole tree. This means that the actual + * split mechanics are located in subclasses. Some of the subclass + * trySplit methods are identical (except for return types), but + * not nicely factorable. + * + * Currently, subclass versions exist only for the full map + * (including descending keys via its descendingMap). Others are + * possible but currently not worthwhile because submaps require + * O(n) computations to determine size, which substantially limits + * potential speed-ups of using custom Spliterators versus default + * mechanics. 
+ * + * To boostrap initialization, external constructors use + * negative size estimates: -1 for ascend, -2 for descend. + */ + static class TreeMapSpliterator { + final TreeMap tree; + TreeMap.Entry current; // traverser; initially first node in range + TreeMap.Entry fence; // one past last, or null + int side; // 0: top, -1: is a left split, +1: right + int est; // size estimate (exact only for top-level) + int expectedModCount; // for CME checks + + TreeMapSpliterator(TreeMap tree, + TreeMap.Entry origin, TreeMap.Entry fence, + int side, int est, int expectedModCount) { + this.tree = tree; + this.current = origin; + this.fence = fence; + this.side = side; + this.est = est; + this.expectedModCount = expectedModCount; + } + + final int getEstimate() { // force initialization + int s; TreeMap t; + if ((s = est) < 0) { + if ((t = tree) != null) { + current = (s == -1) ? t.getFirstEntry() : t.getLastEntry(); + s = est = t.size; + expectedModCount = t.modCount; + } + else + s = est = 0; + } + return s; + } + + public final long estimateSize() { + return (long)getEstimate(); + } + } + + static final class KeySpliterator + extends TreeMapSpliterator + implements Spliterator { + KeySpliterator(TreeMap tree, + TreeMap.Entry origin, TreeMap.Entry fence, + int side, int est, int expectedModCount) { + super(tree, origin, fence, side, est, expectedModCount); + } + + public KeySpliterator trySplit() { + if (est < 0) + getEstimate(); // force initialization + int d = side; + TreeMap.Entry e = current, f = fence, + s = ((e == null || e == f) ? null : // empty + (d == 0) ? tree.root : // was top + (d > 0) ? e.right : // was right + (d < 0 && f != null) ? f.left : // was left + null); + if (s != null && s != e && s != f && + tree.compare(e.key, s.key) < 0) { // e not already past s + side = 1; + return new KeySpliterator<> + (tree, e, current = s, -1, est >>>= 1, expectedModCount); + } + return null; + } + + public void forEachRemaining(Consumer action) { + if (action == null) + throw new NullPointerException(); + if (est < 0) + getEstimate(); // force initialization + TreeMap.Entry f = fence, e, p, pl; + if ((e = current) != null && e != f) { + current = f; // exhaust + do { + action.accept(e.key); + if ((p = e.right) != null) { + while ((pl = p.left) != null) + p = pl; + } + else { + while ((p = e.parent) != null && e == p.right) + e = p; + } + } while ((e = p) != null && e != f); + if (tree.modCount != expectedModCount) + throw new ConcurrentModificationException(); + } + } + + public boolean tryAdvance(Consumer action) { + TreeMap.Entry e; + if (action == null) + throw new NullPointerException(); + if (est < 0) + getEstimate(); // force initialization + if ((e = current) == null || e == fence) + return false; + current = successor(e); + action.accept(e.key); + if (tree.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + + public int characteristics() { + return (side == 0 ? 
Spliterator.SIZED : 0) | + Spliterator.DISTINCT | Spliterator.SORTED | Spliterator.ORDERED; + } + + public final Comparator getComparator() { + return tree.comparator; + } + + } + + static final class DescendingKeySpliterator + extends TreeMapSpliterator + implements Spliterator { + DescendingKeySpliterator(TreeMap tree, + TreeMap.Entry origin, TreeMap.Entry fence, + int side, int est, int expectedModCount) { + super(tree, origin, fence, side, est, expectedModCount); + } + + public DescendingKeySpliterator trySplit() { + if (est < 0) + getEstimate(); // force initialization + int d = side; + TreeMap.Entry e = current, f = fence, + s = ((e == null || e == f) ? null : // empty + (d == 0) ? tree.root : // was top + (d < 0) ? e.left : // was left + (d > 0 && f != null) ? f.right : // was right + null); + if (s != null && s != e && s != f && + tree.compare(e.key, s.key) > 0) { // e not already past s + side = 1; + return new DescendingKeySpliterator<> + (tree, e, current = s, -1, est >>>= 1, expectedModCount); + } + return null; + } + + public void forEachRemaining(Consumer action) { + if (action == null) + throw new NullPointerException(); + if (est < 0) + getEstimate(); // force initialization + TreeMap.Entry f = fence, e, p, pr; + if ((e = current) != null && e != f) { + current = f; // exhaust + do { + action.accept(e.key); + if ((p = e.left) != null) { + while ((pr = p.right) != null) + p = pr; + } + else { + while ((p = e.parent) != null && e == p.left) + e = p; + } + } while ((e = p) != null && e != f); + if (tree.modCount != expectedModCount) + throw new ConcurrentModificationException(); + } + } + + public boolean tryAdvance(Consumer action) { + TreeMap.Entry e; + if (action == null) + throw new NullPointerException(); + if (est < 0) + getEstimate(); // force initialization + if ((e = current) == null || e == fence) + return false; + current = predecessor(e); + action.accept(e.key); + if (tree.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + + public int characteristics() { + return (side == 0 ? Spliterator.SIZED : 0) | + Spliterator.DISTINCT | Spliterator.ORDERED; + } + } + + static final class ValueSpliterator + extends TreeMapSpliterator + implements Spliterator { + ValueSpliterator(TreeMap tree, + TreeMap.Entry origin, TreeMap.Entry fence, + int side, int est, int expectedModCount) { + super(tree, origin, fence, side, est, expectedModCount); + } + + public ValueSpliterator trySplit() { + if (est < 0) + getEstimate(); // force initialization + int d = side; + TreeMap.Entry e = current, f = fence, + s = ((e == null || e == f) ? null : // empty + (d == 0) ? tree.root : // was top + (d > 0) ? e.right : // was right + (d < 0 && f != null) ? 
f.left : // was left + null); + if (s != null && s != e && s != f && + tree.compare(e.key, s.key) < 0) { // e not already past s + side = 1; + return new ValueSpliterator<> + (tree, e, current = s, -1, est >>>= 1, expectedModCount); + } + return null; + } + + public void forEachRemaining(Consumer action) { + if (action == null) + throw new NullPointerException(); + if (est < 0) + getEstimate(); // force initialization + TreeMap.Entry f = fence, e, p, pl; + if ((e = current) != null && e != f) { + current = f; // exhaust + do { + action.accept(e.value); + if ((p = e.right) != null) { + while ((pl = p.left) != null) + p = pl; + } + else { + while ((p = e.parent) != null && e == p.right) + e = p; + } + } while ((e = p) != null && e != f); + if (tree.modCount != expectedModCount) + throw new ConcurrentModificationException(); + } + } + + public boolean tryAdvance(Consumer action) { + TreeMap.Entry e; + if (action == null) + throw new NullPointerException(); + if (est < 0) + getEstimate(); // force initialization + if ((e = current) == null || e == fence) + return false; + current = successor(e); + action.accept(e.value); + if (tree.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + + public int characteristics() { + return (side == 0 ? Spliterator.SIZED : 0) | Spliterator.ORDERED; + } + } + + static final class EntrySpliterator + extends TreeMapSpliterator + implements Spliterator> { + EntrySpliterator(TreeMap tree, + TreeMap.Entry origin, TreeMap.Entry fence, + int side, int est, int expectedModCount) { + super(tree, origin, fence, side, est, expectedModCount); + } + + public EntrySpliterator trySplit() { + if (est < 0) + getEstimate(); // force initialization + int d = side; + TreeMap.Entry e = current, f = fence, + s = ((e == null || e == f) ? null : // empty + (d == 0) ? tree.root : // was top + (d > 0) ? e.right : // was right + (d < 0 && f != null) ? f.left : // was left + null); + if (s != null && s != e && s != f && + tree.compare(e.key, s.key) < 0) { // e not already past s + side = 1; + return new EntrySpliterator<> + (tree, e, current = s, -1, est >>>= 1, expectedModCount); + } + return null; + } + + public void forEachRemaining(Consumer> action) { + if (action == null) + throw new NullPointerException(); + if (est < 0) + getEstimate(); // force initialization + TreeMap.Entry f = fence, e, p, pl; + if ((e = current) != null && e != f) { + current = f; // exhaust + do { + action.accept(e); + if ((p = e.right) != null) { + while ((pl = p.left) != null) + p = pl; + } + else { + while ((p = e.parent) != null && e == p.right) + e = p; + } + } while ((e = p) != null && e != f); + if (tree.modCount != expectedModCount) + throw new ConcurrentModificationException(); + } + } + + public boolean tryAdvance(Consumer> action) { + TreeMap.Entry e; + if (action == null) + throw new NullPointerException(); + if (est < 0) + getEstimate(); // force initialization + if ((e = current) == null || e == fence) + return false; + current = successor(e); + action.accept(e); + if (tree.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + + public int characteristics() { + return (side == 0 ? 
Spliterator.SIZED : 0) | + Spliterator.DISTINCT | Spliterator.SORTED | Spliterator.ORDERED; + } + + @Override + public Comparator> getComparator() { + // Adapt or create a key-based comparator + if (tree.comparator != null) { + return Map.Entry.comparingByKey(tree.comparator); + } + else { + return (Comparator> & Serializable) (e1, e2) -> { + @SuppressWarnings("unchecked") + Comparable k1 = (Comparable) e1.getKey(); + return k1.compareTo(e2.getKey()); + }; + } + } + } } diff --git a/src/TreeSet.java b/src/TreeSet.java index 1f0d949..7cd1904 100644 --- a/src/TreeSet.java +++ b/src/TreeSet.java @@ -1,6 +1,94 @@ +/* + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ package java.util; +/** + * A {@link NavigableSet} implementation based on a {@link TreeMap}. + * The elements are ordered using their {@linkplain Comparable natural + * ordering}, or by a {@link Comparator} provided at set creation + * time, depending on which constructor is used. + * + *
<p>
      This implementation provides guaranteed log(n) time cost for the basic + * operations ({@code add}, {@code remove} and {@code contains}). + * + *
<p>
      Note that the ordering maintained by a set (whether or not an explicit + * comparator is provided) must be consistent with equals if it is to + * correctly implement the {@code Set} interface. (See {@code Comparable} + * or {@code Comparator} for a precise definition of consistent with + * equals.) This is so because the {@code Set} interface is defined in + * terms of the {@code equals} operation, but a {@code TreeSet} instance + * performs all element comparisons using its {@code compareTo} (or + * {@code compare}) method, so two elements that are deemed equal by this method + * are, from the standpoint of the set, equal. The behavior of a set + * is well-defined even if its ordering is inconsistent with equals; it + * just fails to obey the general contract of the {@code Set} interface. + * + *
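+ * (Editor's sketch of an ordering inconsistent with equals, as described
+ * above; {@code String.CASE_INSENSITIVE_ORDER} deems strings equal that
+ * {@code equals} does not:)
+ * <pre>{@code
+ * TreeSet<String> s = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
+ * s.add("Java");
+ * s.add("JAVA");                 // compare() == 0, so treated as a duplicate
+ * System.out.println(s.size());  // 1, although "Java".equals("JAVA") is false
+ * }</pre>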
<p>
      Note that this implementation is not synchronized. + * If multiple threads access a tree set concurrently, and at least one + * of the threads modifies the set, it must be synchronized + * externally. This is typically accomplished by synchronizing on some + * object that naturally encapsulates the set. + * If no such object exists, the set should be "wrapped" using the + * {@link Collections#synchronizedSortedSet Collections.synchronizedSortedSet} + * method. This is best done at creation time, to prevent accidental + * unsynchronized access to the set:
<pre>
+ *   SortedSet s = Collections.synchronizedSortedSet(new TreeSet(...));</pre>
      + * + *
<p>
      The iterators returned by this class's {@code iterator} method are + * fail-fast: if the set is modified at any time after the iterator is + * created, in any way except through the iterator's own {@code remove} + * method, the iterator will throw a {@link ConcurrentModificationException}. + * Thus, in the face of concurrent modification, the iterator fails quickly + * and cleanly, rather than risking arbitrary, non-deterministic behavior at + * an undetermined time in the future. + * + *
<p>
      Note that the fail-fast behavior of an iterator cannot be guaranteed + * as it is, generally speaking, impossible to make any hard guarantees in the + * presence of unsynchronized concurrent modification. Fail-fast iterators + * throw {@code ConcurrentModificationException} on a best-effort basis. + * Therefore, it would be wrong to write a program that depended on this + * exception for its correctness: the fail-fast behavior of iterators + * should be used only to detect bugs. + * + *
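+ * (Editor's sketch of the fail-fast behavior described above:)
+ * <pre>{@code
+ * TreeSet<String> s = new TreeSet<>(Arrays.asList("a", "b", "c"));
+ * Iterator<String> it = s.iterator();
+ * s.add("d");  // structural modification after the iterator was created
+ * it.next();   // throws ConcurrentModificationException (best effort)
+ * }</pre>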
<p>
      This class is a member of the + * + * Java Collections Framework. + * + * @param the type of elements maintained by this set + * + * @author Josh Bloch + * @see Collection + * @see Set + * @see HashSet + * @see Comparable + * @see Comparator + * @see TreeMap + * @since 1.2 + */ + public class TreeSet extends AbstractSet implements NavigableSet, Cloneable, java.io.Serializable { @@ -214,7 +302,7 @@ public boolean addAll(Collection c) { m instanceof TreeMap) { SortedSet set = (SortedSet) c; TreeMap map = (TreeMap) m; - Comparator cc = (Comparator) set.comparator(); + Comparator cc = set.comparator(); Comparator mc = map.comparator(); if (cc==mc || (cc != null && cc.equals(mc))) { map.addAllForTreeSet(set, PRESENT); @@ -381,12 +469,13 @@ public E pollLast() { * * @return a shallow copy of this set */ + @SuppressWarnings("unchecked") public Object clone() { - TreeSet clone = null; + TreeSet clone; try { clone = (TreeSet) super.clone(); } catch (CloneNotSupportedException e) { - throw new InternalError(); + throw new InternalError(e); } clone.m = new TreeMap<>(m); @@ -431,14 +520,11 @@ private void readObject(java.io.ObjectInputStream s) s.defaultReadObject(); // Read in Comparator - Comparator c = (Comparator) s.readObject(); + @SuppressWarnings("unchecked") + Comparator c = (Comparator) s.readObject(); // Create backing TreeMap - TreeMap tm; - if (c==null) - tm = new TreeMap<>(); - else - tm = new TreeMap<>(c); + TreeMap tm = new TreeMap<>(c); m = tm; // Read in size @@ -447,5 +533,28 @@ private void readObject(java.io.ObjectInputStream s) tm.readTreeSet(size, s, PRESENT); } + /** + * Creates a late-binding + * and fail-fast {@link Spliterator} over the elements in this + * set. + * + *
<p>
      The {@code Spliterator} reports {@link Spliterator#SIZED}, + * {@link Spliterator#DISTINCT}, {@link Spliterator#SORTED}, and + * {@link Spliterator#ORDERED}. Overriding implementations should document + * the reporting of additional characteristic values. + * + *
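+ * (Editor's sketch: the reported characteristics can be observed directly,
+ * and a natural-ordering set also reports a {@code null} spliterator
+ * comparator, as noted below:)
+ * <pre>{@code
+ * Spliterator<Integer> sp = new TreeSet<Integer>().spliterator();
+ * System.out.println(sp.hasCharacteristics(Spliterator.SORTED));   // true
+ * System.out.println(sp.hasCharacteristics(Spliterator.DISTINCT)); // true
+ * System.out.println(sp.getComparator());                          // null
+ * }</pre>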
<p>
      The spliterator's comparator (see + * {@link java.util.Spliterator#getComparator()}) is {@code null} if + * the tree set's comparator (see {@link #comparator()}) is {@code null}. + * Otherwise, the spliterator's comparator is the same as or imposes the + * same total ordering as the tree set's comparator. + * + * @return a {@code Spliterator} over the elements in this set + * @since 1.8 + */ + public Spliterator spliterator() { + return TreeMap.keySpliteratorFor(m); + } + private static final long serialVersionUID = -2479143000061671589L; } diff --git a/src/Vector.java b/src/Vector.java index 41cc027..a5cf769 100644 --- a/src/Vector.java +++ b/src/Vector.java @@ -1,5 +1,85 @@ +/* + * Copyright (c) 1994, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + package java.util; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.function.UnaryOperator; + +/** + * The {@code Vector} class implements a growable array of + * objects. Like an array, it contains components that can be + * accessed using an integer index. However, the size of a + * {@code Vector} can grow or shrink as needed to accommodate + * adding and removing items after the {@code Vector} has been created. + * + *
<p>
      Each vector tries to optimize storage management by maintaining a + * {@code capacity} and a {@code capacityIncrement}. The + * {@code capacity} is always at least as large as the vector + * size; it is usually larger because as components are added to the + * vector, the vector's storage increases in chunks the size of + * {@code capacityIncrement}. An application can increase the + * capacity of a vector before inserting a large number of + * components; this reduces the amount of incremental reallocation. + * + *
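+ * (Editor's sketch of the growth parameters described above, using the
+ * {@code Vector(int, int)} constructor:)
+ * <pre>{@code
+ * Vector<Integer> v = new Vector<>(10, 5);  // capacity 10, increment 5
+ * for (int i = 0; i <= 10; i++)
+ *     v.add(i);                             // the 11th add exceeds the capacity
+ * System.out.println(v.capacity());         // 15: grew by capacityIncrement
+ * }</pre>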
<p>
      + * The iterators returned by this class's {@link #iterator() iterator} and + * {@link #listIterator(int) listIterator} methods are fail-fast: + * if the vector is structurally modified at any time after the iterator is + * created, in any way except through the iterator's own + * {@link ListIterator#remove() remove} or + * {@link ListIterator#add(Object) add} methods, the iterator will throw a + * {@link ConcurrentModificationException}. Thus, in the face of + * concurrent modification, the iterator fails quickly and cleanly, rather + * than risking arbitrary, non-deterministic behavior at an undetermined + * time in the future. The {@link Enumeration Enumerations} returned by + * the {@link #elements() elements} method are not fail-fast. + * + *
<p>
      Note that the fail-fast behavior of an iterator cannot be guaranteed + * as it is, generally speaking, impossible to make any hard guarantees in the + * presence of unsynchronized concurrent modification. Fail-fast iterators + * throw {@code ConcurrentModificationException} on a best-effort basis. + * Therefore, it would be wrong to write a program that depended on this + * exception for its correctness: the fail-fast behavior of iterators + * should be used only to detect bugs. + * + *
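+ * (Editor's sketch contrasting the two behaviors described above; the
+ * {@code Enumeration} view is not fail-fast:)
+ * <pre>{@code
+ * Vector<String> v = new Vector<>(Arrays.asList("a", "b"));
+ * Enumeration<String> e = v.elements();
+ * v.add("c");                               // no exception from the enumeration
+ * while (e.hasMoreElements())
+ *     System.out.println(e.nextElement());  // prints a, b, c
+ * }</pre>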
<p>
      As of the Java 2 platform v1.2, this class was retrofitted to + * implement the {@link List} interface, making it a member of the + * + * Java Collections Framework. Unlike the new collection + * implementations, {@code Vector} is synchronized. If a thread-safe + * implementation is not needed, it is recommended to use {@link + * ArrayList} in place of {@code Vector}. + * + * @author Lee Boynton + * @author Jonathan Payne + * @see Collection + * @see LinkedList + * @since JDK1.0 + */ public class Vector extends AbstractList implements List, RandomAccess, Cloneable, java.io.Serializable @@ -597,7 +677,7 @@ public synchronized Object clone() { return v; } catch (CloneNotSupportedException e) { // this shouldn't happen, since we are Cloneable - throw new InternalError(); + throw new InternalError(e); } } @@ -1075,6 +1155,30 @@ public void remove() { lastRet = -1; } + @Override + public void forEachRemaining(Consumer action) { + Objects.requireNonNull(action); + synchronized (Vector.this) { + final int size = elementCount; + int i = cursor; + if (i >= size) { + return; + } + @SuppressWarnings("unchecked") + final E[] elementData = (E[]) Vector.this.elementData; + if (i >= elementData.length) { + throw new ConcurrentModificationException(); + } + while (i != size && modCount == expectedModCount) { + action.accept(elementData[i++]); + } + // update once at end of iteration to reduce heap write traffic + cursor = i; + lastRet = i - 1; + checkForComodification(); + } + } + final void checkForComodification() { if (modCount != expectedModCount) throw new ConcurrentModificationException(); @@ -1133,4 +1237,194 @@ public void add(E e) { lastRet = -1; } } + + @Override + public synchronized void forEach(Consumer action) { + Objects.requireNonNull(action); + final int expectedModCount = modCount; + @SuppressWarnings("unchecked") + final E[] elementData = (E[]) this.elementData; + final int elementCount = this.elementCount; + for (int i=0; modCount == expectedModCount && i < elementCount; i++) { + action.accept(elementData[i]); + } + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + } + + @Override + @SuppressWarnings("unchecked") + public synchronized boolean removeIf(Predicate filter) { + Objects.requireNonNull(filter); + // figure out which elements are to be removed + // any exception thrown from the filter predicate at this stage + // will leave the collection unmodified + int removeCount = 0; + final int size = elementCount; + final BitSet removeSet = new BitSet(size); + final int expectedModCount = modCount; + for (int i=0; modCount == expectedModCount && i < size; i++) { + @SuppressWarnings("unchecked") + final E element = (E) elementData[i]; + if (filter.test(element)) { + removeSet.set(i); + removeCount++; + } + } + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + + // shift surviving elements left over the spaces left by removed elements + final boolean anyToRemove = removeCount > 0; + if (anyToRemove) { + final int newSize = size - removeCount; + for (int i=0, j=0; (i < size) && (j < newSize); i++, j++) { + i = removeSet.nextClearBit(i); + elementData[j] = elementData[i]; + } + for (int k=newSize; k < size; k++) { + elementData[k] = null; // Let gc do its work + } + elementCount = newSize; + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + modCount++; + } + + return anyToRemove; + } + + @Override + @SuppressWarnings("unchecked") + public synchronized void 
replaceAll(UnaryOperator operator) { + Objects.requireNonNull(operator); + final int expectedModCount = modCount; + final int size = elementCount; + for (int i=0; modCount == expectedModCount && i < size; i++) { + elementData[i] = operator.apply((E) elementData[i]); + } + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + modCount++; + } + + @SuppressWarnings("unchecked") + @Override + public synchronized void sort(Comparator c) { + final int expectedModCount = modCount; + Arrays.sort((E[]) elementData, 0, elementCount, c); + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + modCount++; + } + + /** + * Creates a late-binding + * and fail-fast {@link Spliterator} over the elements in this + * list. + * + *
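+ * (Editor's sketch exercising the JDK 8 bulk operations added in the hunk
+ * above; each is synchronized on the vector and checks modCount:)
+ * <pre>{@code
+ * Vector<Integer> v = new Vector<>(Arrays.asList(1, 2, 3, 4));
+ * v.removeIf(n -> n % 2 == 0);        // [1, 3]
+ * v.replaceAll(n -> n * 10);          // [10, 30]
+ * v.sort(Comparator.reverseOrder());  // [30, 10]
+ * }</pre>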
<p>
      The {@code Spliterator} reports {@link Spliterator#SIZED}, + * {@link Spliterator#SUBSIZED}, and {@link Spliterator#ORDERED}. + * Overriding implementations should document the reporting of additional + * characteristic values. + * + * @return a {@code Spliterator} over the elements in this list + * @since 1.8 + */ + @Override + public Spliterator spliterator() { + return new VectorSpliterator<>(this, null, 0, -1, 0); + } + + /** Similar to ArrayList Spliterator */ + static final class VectorSpliterator implements Spliterator { + private final Vector list; + private Object[] array; + private int index; // current index, modified on advance/split + private int fence; // -1 until used; then one past last index + private int expectedModCount; // initialized when fence set + + /** Create new spliterator covering the given range */ + VectorSpliterator(Vector list, Object[] array, int origin, int fence, + int expectedModCount) { + this.list = list; + this.array = array; + this.index = origin; + this.fence = fence; + this.expectedModCount = expectedModCount; + } + + private int getFence() { // initialize on first use + int hi; + if ((hi = fence) < 0) { + synchronized(list) { + array = list.elementData; + expectedModCount = list.modCount; + hi = fence = list.elementCount; + } + } + return hi; + } + + public Spliterator trySplit() { + int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; + return (lo >= mid) ? null : + new VectorSpliterator(list, array, lo, index = mid, + expectedModCount); + } + + @SuppressWarnings("unchecked") + public boolean tryAdvance(Consumer action) { + int i; + if (action == null) + throw new NullPointerException(); + if (getFence() > (i = index)) { + index = i + 1; + action.accept((E)array[i]); + if (list.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + return false; + } + + @SuppressWarnings("unchecked") + public void forEachRemaining(Consumer action) { + int i, hi; // hoist accesses and checks from loop + Vector lst; Object[] a; + if (action == null) + throw new NullPointerException(); + if ((lst = list) != null) { + if ((hi = fence) < 0) { + synchronized(lst) { + expectedModCount = lst.modCount; + a = array = lst.elementData; + hi = fence = lst.elementCount; + } + } + else + a = array; + if (a != null && (i = index) >= 0 && (index = hi) <= a.length) { + while (i < hi) + action.accept((E) a[i++]); + if (lst.modCount == expectedModCount) + return; + } + } + throw new ConcurrentModificationException(); + } + + public long estimateSize() { + return (long) (getFence() - index); + } + + public int characteristics() { + return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED; + } + } } diff --git a/src/WeakHashMap.java b/src/WeakHashMap.java new file mode 100644 index 0000000..5b868fe --- /dev/null +++ b/src/WeakHashMap.java @@ -0,0 +1,1331 @@ +/* + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +package java.util; + +import java.lang.ref.WeakReference; +import java.lang.ref.ReferenceQueue; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Consumer; + + +/** + * Hash table based implementation of the Map interface, with + * weak keys. 
+ * An entry in a WeakHashMap will automatically be removed when + * its key is no longer in ordinary use. More precisely, the presence of a + * mapping for a given key will not prevent the key from being discarded by the + * garbage collector, that is, made finalizable, finalized, and then reclaimed. + * When a key has been discarded its entry is effectively removed from the map, + * so this class behaves somewhat differently from other Map + * implementations. + * + *
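+ * (Editor's sketch of the reclamation behavior described above; whether and
+ * when the entry disappears depends on the garbage collector:)
+ * <pre>{@code
+ * Map<Object, String> m = new WeakHashMap<>();
+ * Object key = new Object();
+ * m.put(key, "value");
+ * key = null;    // drop the only strong reference to the key
+ * System.gc();   // a hint only; collection is not guaranteed
+ * // once the key is collected and stale entries expunged, m.size() == 0
+ * }</pre>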
<p>
      Both null values and the null key are supported. This class has + * performance characteristics similar to those of the HashMap + * class, and has the same efficiency parameters of initial capacity + * and load factor. + * + *
<p>
      Like most collection classes, this class is not synchronized. + * A synchronized WeakHashMap may be constructed using the + * {@link Collections#synchronizedMap Collections.synchronizedMap} + * method. + * + *
<p>
      This class is intended primarily for use with key objects whose + * equals methods test for object identity using the + * == operator. Once such a key is discarded it can never be + * recreated, so it is impossible to do a lookup of that key in a + * WeakHashMap at some later time and be surprised that its entry + * has been removed. This class will work perfectly well with key objects + * whose equals methods are not based upon object identity, such + * as String instances. With such recreatable key objects, + * however, the automatic removal of WeakHashMap entries whose + * keys have been discarded may prove to be confusing. + * + *
<p>
      The behavior of the WeakHashMap class depends in part upon + * the actions of the garbage collector, so several familiar (though not + * required) Map invariants do not hold for this class. Because + * the garbage collector may discard keys at any time, a + * WeakHashMap may behave as though an unknown thread is silently + * removing entries. In particular, even if you synchronize on a + * WeakHashMap instance and invoke none of its mutator methods, it + * is possible for the size method to return smaller values over + * time, for the isEmpty method to return false and + * then true, for the containsKey method to return + * true and later false for a given key, for the + * get method to return a value for a given key but later return + * null, for the put method to return + * null and the remove method to return + * false for a key that previously appeared to be in the map, and + * for successive examinations of the key set, the value collection, and + * the entry set to yield successively smaller numbers of elements. + * + *
<p>
      Each key object in a WeakHashMap is stored indirectly as + * the referent of a weak reference. Therefore a key will automatically be + * removed only after the weak references to it, both inside and outside of the + * map, have been cleared by the garbage collector. + * + *
<p>
      Implementation note: The value objects in a + * WeakHashMap are held by ordinary strong references. Thus care + * should be taken to ensure that value objects do not strongly refer to their + * own keys, either directly or indirectly, since that will prevent the keys + * from being discarded. Note that a value object may refer indirectly to its + * key via the WeakHashMap itself; that is, a value object may + * strongly refer to some other key object whose associated value object, in + * turn, strongly refers to the key of the first value object. If the values + * in the map do not rely on the map holding strong references to them, one way + * to deal with this is to wrap values themselves within + * WeakReferences before + * inserting, as in: m.put(key, new WeakReference(value)), + * and then unwrapping upon each get. + * + *
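+ * (Editor's sketch of the wrap/unwrap idiom described above;
+ * {@code computeValue()} is a hypothetical stand-in for the caller's
+ * value producer:)
+ * <pre>{@code
+ * Map<Object, WeakReference<String>> m = new WeakHashMap<>();
+ * Object key = new Object();
+ * String value = computeValue();             // hypothetical producer
+ * m.put(key, new WeakReference<>(value));
+ * WeakReference<String> r = m.get(key);
+ * String v = (r == null) ? null : r.get();   // null once the value is reclaimed
+ * }</pre>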
<p>
      The iterators returned by the iterator method of the collections + * returned by all of this class's "collection view methods" are + * fail-fast: if the map is structurally modified at any time after the + * iterator is created, in any way except through the iterator's own + * remove method, the iterator will throw a {@link + * ConcurrentModificationException}. Thus, in the face of concurrent + * modification, the iterator fails quickly and cleanly, rather than risking + * arbitrary, non-deterministic behavior at an undetermined time in the future. + * + *
<p>
      Note that the fail-fast behavior of an iterator cannot be guaranteed + * as it is, generally speaking, impossible to make any hard guarantees in the + * presence of unsynchronized concurrent modification. Fail-fast iterators + * throw ConcurrentModificationException on a best-effort basis. + * Therefore, it would be wrong to write a program that depended on this + * exception for its correctness: the fail-fast behavior of iterators + * should be used only to detect bugs. + * + *
<p>
      This class is a member of the + * + * Java Collections Framework. + * + * @param the type of keys maintained by this map + * @param the type of mapped values + * + * @author Doug Lea + * @author Josh Bloch + * @author Mark Reinhold + * @since 1.2 + * @see java.util.HashMap + * @see java.lang.ref.WeakReference + */ +public class WeakHashMap + extends AbstractMap + implements Map { + + /** + * The default initial capacity -- MUST be a power of two. + */ + private static final int DEFAULT_INITIAL_CAPACITY = 16; + + /** + * The maximum capacity, used if a higher value is implicitly specified + * by either of the constructors with arguments. + * MUST be a power of two <= 1<<30. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The load factor used when none specified in constructor. + */ + private static final float DEFAULT_LOAD_FACTOR = 0.75f; + + /** + * The table, resized as necessary. Length MUST Always be a power of two. + */ + Entry[] table; + + /** + * The number of key-value mappings contained in this weak hash map. + */ + private int size; + + /** + * The next size value at which to resize (capacity * load factor). + */ + private int threshold; + + /** + * The load factor for the hash table. + */ + private final float loadFactor; + + /** + * Reference queue for cleared WeakEntries + */ + private final ReferenceQueue queue = new ReferenceQueue<>(); + + /** + * The number of times this WeakHashMap has been structurally modified. + * Structural modifications are those that change the number of + * mappings in the map or otherwise modify its internal structure + * (e.g., rehash). This field is used to make iterators on + * Collection-views of the map fail-fast. + * + * @see ConcurrentModificationException + */ + int modCount; + + @SuppressWarnings("unchecked") + private Entry[] newTable(int n) { + return (Entry[]) new Entry[n]; + } + + /** + * Constructs a new, empty WeakHashMap with the given initial + * capacity and the given load factor. + * + * @param initialCapacity The initial capacity of the WeakHashMap + * @param loadFactor The load factor of the WeakHashMap + * @throws IllegalArgumentException if the initial capacity is negative, + * or if the load factor is nonpositive. + */ + public WeakHashMap(int initialCapacity, float loadFactor) { + if (initialCapacity < 0) + throw new IllegalArgumentException("Illegal Initial Capacity: "+ + initialCapacity); + if (initialCapacity > MAXIMUM_CAPACITY) + initialCapacity = MAXIMUM_CAPACITY; + + if (loadFactor <= 0 || Float.isNaN(loadFactor)) + throw new IllegalArgumentException("Illegal Load factor: "+ + loadFactor); + int capacity = 1; + while (capacity < initialCapacity) + capacity <<= 1; + table = newTable(capacity); + this.loadFactor = loadFactor; + threshold = (int)(capacity * loadFactor); + } + + /** + * Constructs a new, empty WeakHashMap with the given initial + * capacity and the default load factor (0.75). + * + * @param initialCapacity The initial capacity of the WeakHashMap + * @throws IllegalArgumentException if the initial capacity is negative + */ + public WeakHashMap(int initialCapacity) { + this(initialCapacity, DEFAULT_LOAD_FACTOR); + } + + /** + * Constructs a new, empty WeakHashMap with the default initial + * capacity (16) and load factor (0.75). + */ + public WeakHashMap() { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + } + + /** + * Constructs a new WeakHashMap with the same mappings as the + * specified map. 
The WeakHashMap is created with the default + * load factor (0.75) and an initial capacity sufficient to hold the + * mappings in the specified map. + * + * @param m the map whose mappings are to be placed in this map + * @throws NullPointerException if the specified map is null + * @since 1.3 + */ + public WeakHashMap(Map m) { + this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, + DEFAULT_INITIAL_CAPACITY), + DEFAULT_LOAD_FACTOR); + putAll(m); + } + + // internal utilities + + /** + * Value representing null keys inside tables. + */ + private static final Object NULL_KEY = new Object(); + + /** + * Use NULL_KEY for key if it is null. + */ + private static Object maskNull(Object key) { + return (key == null) ? NULL_KEY : key; + } + + /** + * Returns internal representation of null key back to caller as null. + */ + static Object unmaskNull(Object key) { + return (key == NULL_KEY) ? null : key; + } + + /** + * Checks for equality of non-null reference x and possibly-null y. By + * default uses Object.equals. + */ + private static boolean eq(Object x, Object y) { + return x == y || x.equals(y); + } + + /** + * Retrieve object hash code and applies a supplemental hash function to the + * result hash, which defends against poor quality hash functions. This is + * critical because HashMap uses power-of-two length hash tables, that + * otherwise encounter collisions for hashCodes that do not differ + * in lower bits. + */ + final int hash(Object k) { + int h = k.hashCode(); + + // This function ensures that hashCodes that differ only by + // constant multiples at each bit position have a bounded + // number of collisions (approximately 8 at default load factor). + h ^= (h >>> 20) ^ (h >>> 12); + return h ^ (h >>> 7) ^ (h >>> 4); + } + + /** + * Returns index for hash code h. + */ + private static int indexFor(int h, int length) { + return h & (length-1); + } + + /** + * Expunges stale entries from the table. + */ + private void expungeStaleEntries() { + for (Object x; (x = queue.poll()) != null; ) { + synchronized (queue) { + @SuppressWarnings("unchecked") + Entry e = (Entry) x; + int i = indexFor(e.hash, table.length); + + Entry prev = table[i]; + Entry p = prev; + while (p != null) { + Entry next = p.next; + if (p == e) { + if (prev == e) + table[i] = next; + else + prev.next = next; + // Must not null out e.next; + // stale entries may be in use by a HashIterator + e.value = null; // Help GC + size--; + break; + } + prev = p; + p = next; + } + } + } + } + + /** + * Returns the table after first expunging stale entries. + */ + private Entry[] getTable() { + expungeStaleEntries(); + return table; + } + + /** + * Returns the number of key-value mappings in this map. + * This result is a snapshot, and may not reflect unprocessed + * entries that will be removed before next attempted access + * because they are no longer referenced. + */ + public int size() { + if (size == 0) + return 0; + expungeStaleEntries(); + return size; + } + + /** + * Returns true if this map contains no key-value mappings. + * This result is a snapshot, and may not reflect unprocessed + * entries that will be removed before next attempted access + * because they are no longer referenced. + */ + public boolean isEmpty() { + return size() == 0; + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *
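+ * (Editor's note: as the {@code NULL_KEY}/{@code maskNull} helpers above
+ * show, a null key is stored under an internal sentinel, so lookups work
+ * uniformly:)
+ * <pre>{@code
+ * Map<String, Integer> m = new WeakHashMap<>();
+ * m.put(null, 1);                   // stored under NULL_KEY internally
+ * System.out.println(m.get(null));  // 1
+ * }</pre>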
<p>
      More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code (key==null ? k==null : + * key.equals(k))}, then this method returns {@code v}; otherwise + * it returns {@code null}. (There can be at most one such mapping.) + * + *
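+ * (Editor's sketch: as the next paragraph notes, {@code containsKey}
+ * distinguishes a key mapped to null from an absent key:)
+ * <pre>{@code
+ * Map<String, String> m = new WeakHashMap<>();
+ * m.put("k", null);
+ * System.out.println(m.get("k"));          // null
+ * System.out.println(m.containsKey("k"));  // true: mapped to null
+ * System.out.println(m.containsKey("x"));  // false: genuinely absent
+ * }</pre>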
<p>
      A return value of {@code null} does not necessarily + * indicate that the map contains no mapping for the key; it's also + * possible that the map explicitly maps the key to {@code null}. + * The {@link #containsKey containsKey} operation may be used to + * distinguish these two cases. + * + * @see #put(Object, Object) + */ + public V get(Object key) { + Object k = maskNull(key); + int h = hash(k); + Entry[] tab = getTable(); + int index = indexFor(h, tab.length); + Entry e = tab[index]; + while (e != null) { + if (e.hash == h && eq(k, e.get())) + return e.value; + e = e.next; + } + return null; + } + + /** + * Returns true if this map contains a mapping for the + * specified key. + * + * @param key The key whose presence in this map is to be tested + * @return true if there is a mapping for key; + * false otherwise + */ + public boolean containsKey(Object key) { + return getEntry(key) != null; + } + + /** + * Returns the entry associated with the specified key in this map. + * Returns null if the map contains no mapping for this key. + */ + Entry getEntry(Object key) { + Object k = maskNull(key); + int h = hash(k); + Entry[] tab = getTable(); + int index = indexFor(h, tab.length); + Entry e = tab[index]; + while (e != null && !(e.hash == h && eq(k, e.get()))) + e = e.next; + return e; + } + + /** + * Associates the specified value with the specified key in this map. + * If the map previously contained a mapping for this key, the old + * value is replaced. + * + * @param key key with which the specified value is to be associated. + * @param value value to be associated with the specified key. + * @return the previous value associated with key, or + * null if there was no mapping for key. + * (A null return can also indicate that the map + * previously associated null with key.) + */ + public V put(K key, V value) { + Object k = maskNull(key); + int h = hash(k); + Entry[] tab = getTable(); + int i = indexFor(h, tab.length); + + for (Entry e = tab[i]; e != null; e = e.next) { + if (h == e.hash && eq(k, e.get())) { + V oldValue = e.value; + if (value != oldValue) + e.value = value; + return oldValue; + } + } + + modCount++; + Entry e = tab[i]; + tab[i] = new Entry<>(k, value, queue, h, e); + if (++size >= threshold) + resize(tab.length * 2); + return null; + } + + /** + * Rehashes the contents of this map into a new array with a + * larger capacity. This method is called automatically when the + * number of keys in this map reaches its threshold. + * + * If current capacity is MAXIMUM_CAPACITY, this method does not + * resize the map, but sets threshold to Integer.MAX_VALUE. + * This has the effect of preventing future calls. + * + * @param newCapacity the new capacity, MUST be a power of two; + * must be greater than current capacity unless current + * capacity is MAXIMUM_CAPACITY (in which case value + * is irrelevant). + */ + void resize(int newCapacity) { + Entry[] oldTable = getTable(); + int oldCapacity = oldTable.length; + if (oldCapacity == MAXIMUM_CAPACITY) { + threshold = Integer.MAX_VALUE; + return; + } + + Entry[] newTable = newTable(newCapacity); + transfer(oldTable, newTable); + table = newTable; + + /* + * If ignoring null elements and processing ref queue caused massive + * shrinkage, then restore old table. This should be rare, but avoids + * unbounded expansion of garbage-filled tables. 
+ */ + if (size >= threshold / 2) { + threshold = (int)(newCapacity * loadFactor); + } else { + expungeStaleEntries(); + transfer(newTable, oldTable); + table = oldTable; + } + } + + /** Transfers all entries from src to dest tables */ + private void transfer(Entry[] src, Entry[] dest) { + for (int j = 0; j < src.length; ++j) { + Entry e = src[j]; + src[j] = null; + while (e != null) { + Entry next = e.next; + Object key = e.get(); + if (key == null) { + e.next = null; // Help GC + e.value = null; // " " + size--; + } else { + int i = indexFor(e.hash, dest.length); + e.next = dest[i]; + dest[i] = e; + } + e = next; + } + } + } + + /** + * Copies all of the mappings from the specified map to this map. + * These mappings will replace any mappings that this map had for any + * of the keys currently in the specified map. + * + * @param m mappings to be stored in this map. + * @throws NullPointerException if the specified map is null. + */ + public void putAll(Map m) { + int numKeysToBeAdded = m.size(); + if (numKeysToBeAdded == 0) + return; + + /* + * Expand the map if the map if the number of mappings to be added + * is greater than or equal to threshold. This is conservative; the + * obvious condition is (m.size() + size) >= threshold, but this + * condition could result in a map with twice the appropriate capacity, + * if the keys to be added overlap with the keys already in this map. + * By using the conservative calculation, we subject ourself + * to at most one extra resize. + */ + if (numKeysToBeAdded > threshold) { + int targetCapacity = (int)(numKeysToBeAdded / loadFactor + 1); + if (targetCapacity > MAXIMUM_CAPACITY) + targetCapacity = MAXIMUM_CAPACITY; + int newCapacity = table.length; + while (newCapacity < targetCapacity) + newCapacity <<= 1; + if (newCapacity > table.length) + resize(newCapacity); + } + + for (Map.Entry e : m.entrySet()) + put(e.getKey(), e.getValue()); + } + + /** + * Removes the mapping for a key from this weak hash map if it is present. + * More formally, if this map contains a mapping from key k to + * value v such that (key==null ? k==null : + * key.equals(k)), that mapping is removed. (The map can contain + * at most one such mapping.) + * + *
<p>
      Returns the value to which this map previously associated the key, + * or null if the map contained no mapping for the key. A + * return value of null does not necessarily indicate + * that the map contained no mapping for the key; it's also possible + * that the map explicitly mapped the key to null. + * + *
<p>
      The map will not contain a mapping for the specified key once the + * call returns. + * + * @param key key whose mapping is to be removed from the map + * @return the previous value associated with key, or + * null if there was no mapping for key + */ + public V remove(Object key) { + Object k = maskNull(key); + int h = hash(k); + Entry[] tab = getTable(); + int i = indexFor(h, tab.length); + Entry prev = tab[i]; + Entry e = prev; + + while (e != null) { + Entry next = e.next; + if (h == e.hash && eq(k, e.get())) { + modCount++; + size--; + if (prev == e) + tab[i] = next; + else + prev.next = next; + return e.value; + } + prev = e; + e = next; + } + + return null; + } + + /** Special version of remove needed by Entry set */ + boolean removeMapping(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Entry[] tab = getTable(); + Map.Entry entry = (Map.Entry)o; + Object k = maskNull(entry.getKey()); + int h = hash(k); + int i = indexFor(h, tab.length); + Entry prev = tab[i]; + Entry e = prev; + + while (e != null) { + Entry next = e.next; + if (h == e.hash && e.equals(entry)) { + modCount++; + size--; + if (prev == e) + tab[i] = next; + else + prev.next = next; + return true; + } + prev = e; + e = next; + } + + return false; + } + + /** + * Removes all of the mappings from this map. + * The map will be empty after this call returns. + */ + public void clear() { + // clear out ref queue. We don't need to expunge entries + // since table is getting cleared. + while (queue.poll() != null) + ; + + modCount++; + Arrays.fill(table, null); + size = 0; + + // Allocation of array may have caused GC, which may have caused + // additional entries to go stale. Removing these entries from the + // reference queue will make them eligible for reclamation. + while (queue.poll() != null) + ; + } + + /** + * Returns true if this map maps one or more keys to the + * specified value. + * + * @param value value whose presence in this map is to be tested + * @return true if this map maps one or more keys to the + * specified value + */ + public boolean containsValue(Object value) { + if (value==null) + return containsNullValue(); + + Entry[] tab = getTable(); + for (int i = tab.length; i-- > 0;) + for (Entry e = tab[i]; e != null; e = e.next) + if (value.equals(e.value)) + return true; + return false; + } + + /** + * Special-case code for containsValue with null argument + */ + private boolean containsNullValue() { + Entry[] tab = getTable(); + for (int i = tab.length; i-- > 0;) + for (Entry e = tab[i]; e != null; e = e.next) + if (e.value==null) + return true; + return false; + } + + /** + * The entries in this hash table extend WeakReference, using its main ref + * field as the key. + */ + private static class Entry extends WeakReference implements Map.Entry { + V value; + final int hash; + Entry next; + + /** + * Creates new entry. 
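+ * (Editor's note: an illustrative mini-version of the pattern used here,
+ * with hypothetical names; the referent of the weak reference is the key,
+ * and the queue lets the map learn when keys are collected. Assumes
+ * {@code java.lang.ref} imports.)
+ * <pre>{@code
+ * class Node<V> extends WeakReference<Object> {
+ *     V value;
+ *     Node(Object key, V value, ReferenceQueue<Object> q) {
+ *         super(key, q);  // key becomes the referent, registered with q
+ *         this.value = value;
+ *     }
+ * }
+ * }</pre>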
+ */ + Entry(Object key, V value, + ReferenceQueue queue, + int hash, Entry next) { + super(key, queue); + this.value = value; + this.hash = hash; + this.next = next; + } + + @SuppressWarnings("unchecked") + public K getKey() { + return (K) WeakHashMap.unmaskNull(get()); + } + + public V getValue() { + return value; + } + + public V setValue(V newValue) { + V oldValue = value; + value = newValue; + return oldValue; + } + + public boolean equals(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Map.Entry e = (Map.Entry)o; + K k1 = getKey(); + Object k2 = e.getKey(); + if (k1 == k2 || (k1 != null && k1.equals(k2))) { + V v1 = getValue(); + Object v2 = e.getValue(); + if (v1 == v2 || (v1 != null && v1.equals(v2))) + return true; + } + return false; + } + + public int hashCode() { + K k = getKey(); + V v = getValue(); + return Objects.hashCode(k) ^ Objects.hashCode(v); + } + + public String toString() { + return getKey() + "=" + getValue(); + } + } + + private abstract class HashIterator implements Iterator { + private int index; + private Entry entry; + private Entry lastReturned; + private int expectedModCount = modCount; + + /** + * Strong reference needed to avoid disappearance of key + * between hasNext and next + */ + private Object nextKey; + + /** + * Strong reference needed to avoid disappearance of key + * between nextEntry() and any use of the entry + */ + private Object currentKey; + + HashIterator() { + index = isEmpty() ? 0 : table.length; + } + + public boolean hasNext() { + Entry[] t = table; + + while (nextKey == null) { + Entry e = entry; + int i = index; + while (e == null && i > 0) + e = t[--i]; + entry = e; + index = i; + if (e == null) { + currentKey = null; + return false; + } + nextKey = e.get(); // hold on to key in strong ref + if (nextKey == null) + entry = entry.next; + } + return true; + } + + /** The common parts of next() across different types of iterators */ + protected Entry nextEntry() { + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + if (nextKey == null && !hasNext()) + throw new NoSuchElementException(); + + lastReturned = entry; + entry = entry.next; + currentKey = nextKey; + nextKey = null; + return lastReturned; + } + + public void remove() { + if (lastReturned == null) + throw new IllegalStateException(); + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + + WeakHashMap.this.remove(currentKey); + expectedModCount = modCount; + lastReturned = null; + currentKey = null; + } + + } + + private class ValueIterator extends HashIterator { + public V next() { + return nextEntry().value; + } + } + + private class KeyIterator extends HashIterator { + public K next() { + return nextEntry().getKey(); + } + } + + private class EntryIterator extends HashIterator> { + public Map.Entry next() { + return nextEntry(); + } + } + + // Views + + private transient Set> entrySet; + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation), the results of + * the iteration are undefined. The set supports element removal, + * which removes the corresponding mapping from the map, via the + * Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. It does not support the add or addAll + * operations. 
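+ * (Editor's sketch of the view write-through described above:)
+ * <pre>{@code
+ * Map<String, Integer> m = new WeakHashMap<>();
+ * m.put("a", 1);
+ * m.put("b", 2);
+ * m.keySet().remove("a");  // removes the mapping from the map itself
+ * System.out.println(m);   // {b=2}
+ * }</pre>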
+ */ + public Set keySet() { + Set ks = keySet; + return (ks != null ? ks : (keySet = new KeySet())); + } + + private class KeySet extends AbstractSet { + public Iterator iterator() { + return new KeyIterator(); + } + + public int size() { + return WeakHashMap.this.size(); + } + + public boolean contains(Object o) { + return containsKey(o); + } + + public boolean remove(Object o) { + if (containsKey(o)) { + WeakHashMap.this.remove(o); + return true; + } + else + return false; + } + + public void clear() { + WeakHashMap.this.clear(); + } + + public Spliterator spliterator() { + return new KeySpliterator<>(WeakHashMap.this, 0, -1, 0, 0); + } + } + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. If the map is + * modified while an iteration over the collection is in progress + * (except through the iterator's own remove operation), + * the results of the iteration are undefined. The collection + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Collection.remove, removeAll, + * retainAll and clear operations. It does not + * support the add or addAll operations. + */ + public Collection values() { + Collection vs = values; + return (vs != null) ? vs : (values = new Values()); + } + + private class Values extends AbstractCollection { + public Iterator iterator() { + return new ValueIterator(); + } + + public int size() { + return WeakHashMap.this.size(); + } + + public boolean contains(Object o) { + return containsValue(o); + } + + public void clear() { + WeakHashMap.this.clear(); + } + + public Spliterator spliterator() { + return new ValueSpliterator<>(WeakHashMap.this, 0, -1, 0, 0); + } + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation, or through the + * setValue operation on a map entry returned by the + * iterator) the results of the iteration are undefined. The set + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Set.remove, removeAll, retainAll and + * clear operations. It does not support the + * add or addAll operations. + */ + public Set> entrySet() { + Set> es = entrySet; + return es != null ? 
es : (entrySet = new EntrySet()); + } + + private class EntrySet extends AbstractSet> { + public Iterator> iterator() { + return new EntryIterator(); + } + + public boolean contains(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Map.Entry e = (Map.Entry)o; + Entry candidate = getEntry(e.getKey()); + return candidate != null && candidate.equals(e); + } + + public boolean remove(Object o) { + return removeMapping(o); + } + + public int size() { + return WeakHashMap.this.size(); + } + + public void clear() { + WeakHashMap.this.clear(); + } + + private List> deepCopy() { + List> list = new ArrayList<>(size()); + for (Map.Entry e : this) + list.add(new AbstractMap.SimpleEntry<>(e)); + return list; + } + + public Object[] toArray() { + return deepCopy().toArray(); + } + + public T[] toArray(T[] a) { + return deepCopy().toArray(a); + } + + public Spliterator> spliterator() { + return new EntrySpliterator<>(WeakHashMap.this, 0, -1, 0, 0); + } + } + + @SuppressWarnings("unchecked") + @Override + public void forEach(BiConsumer action) { + Objects.requireNonNull(action); + int expectedModCount = modCount; + + Entry[] tab = getTable(); + for (Entry entry : tab) { + while (entry != null) { + Object key = entry.get(); + if (key != null) { + action.accept((K)WeakHashMap.unmaskNull(key), entry.value); + } + entry = entry.next; + + if (expectedModCount != modCount) { + throw new ConcurrentModificationException(); + } + } + } + } + + @SuppressWarnings("unchecked") + @Override + public void replaceAll(BiFunction function) { + Objects.requireNonNull(function); + int expectedModCount = modCount; + + Entry[] tab = getTable();; + for (Entry entry : tab) { + while (entry != null) { + Object key = entry.get(); + if (key != null) { + entry.value = function.apply((K)WeakHashMap.unmaskNull(key), entry.value); + } + entry = entry.next; + + if (expectedModCount != modCount) { + throw new ConcurrentModificationException(); + } + } + } + } + + /** + * Similar form as other hash Spliterators, but skips dead + * elements. + */ + static class WeakHashMapSpliterator { + final WeakHashMap map; + WeakHashMap.Entry current; // current node + int index; // current index, modified on advance/split + int fence; // -1 until first use; then one past last index + int est; // size estimate + int expectedModCount; // for comodification checks + + WeakHashMapSpliterator(WeakHashMap m, int origin, + int fence, int est, + int expectedModCount) { + this.map = m; + this.index = origin; + this.fence = fence; + this.est = est; + this.expectedModCount = expectedModCount; + } + + final int getFence() { // initialize fence and size on first use + int hi; + if ((hi = fence) < 0) { + WeakHashMap m = map; + est = m.size(); + expectedModCount = m.modCount; + hi = fence = m.table.length; + } + return hi; + } + + public final long estimateSize() { + getFence(); // force init + return (long) est; + } + } + + static final class KeySpliterator + extends WeakHashMapSpliterator + implements Spliterator { + KeySpliterator(WeakHashMap m, int origin, int fence, int est, + int expectedModCount) { + super(m, origin, fence, est, expectedModCount); + } + + public KeySpliterator trySplit() { + int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; + return (lo >= mid) ? 
null : + new KeySpliterator(map, lo, index = mid, est >>>= 1, + expectedModCount); + } + + public void forEachRemaining(Consumer action) { + int i, hi, mc; + if (action == null) + throw new NullPointerException(); + WeakHashMap m = map; + WeakHashMap.Entry[] tab = m.table; + if ((hi = fence) < 0) { + mc = expectedModCount = m.modCount; + hi = fence = tab.length; + } + else + mc = expectedModCount; + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { + WeakHashMap.Entry p = current; + current = null; // exhaust + do { + if (p == null) + p = tab[i++]; + else { + Object x = p.get(); + p = p.next; + if (x != null) { + @SuppressWarnings("unchecked") K k = + (K) WeakHashMap.unmaskNull(x); + action.accept(k); + } + } + } while (p != null || i < hi); + } + if (m.modCount != mc) + throw new ConcurrentModificationException(); + } + + public boolean tryAdvance(Consumer action) { + int hi; + if (action == null) + throw new NullPointerException(); + WeakHashMap.Entry[] tab = map.table; + if (tab.length >= (hi = getFence()) && index >= 0) { + while (current != null || index < hi) { + if (current == null) + current = tab[index++]; + else { + Object x = current.get(); + current = current.next; + if (x != null) { + @SuppressWarnings("unchecked") K k = + (K) WeakHashMap.unmaskNull(x); + action.accept(k); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + } + } + return false; + } + + public int characteristics() { + return Spliterator.DISTINCT; + } + } + + static final class ValueSpliterator + extends WeakHashMapSpliterator + implements Spliterator { + ValueSpliterator(WeakHashMap m, int origin, int fence, int est, + int expectedModCount) { + super(m, origin, fence, est, expectedModCount); + } + + public ValueSpliterator trySplit() { + int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; + return (lo >= mid) ? 
null : + new ValueSpliterator(map, lo, index = mid, est >>>= 1, + expectedModCount); + } + + public void forEachRemaining(Consumer action) { + int i, hi, mc; + if (action == null) + throw new NullPointerException(); + WeakHashMap m = map; + WeakHashMap.Entry[] tab = m.table; + if ((hi = fence) < 0) { + mc = expectedModCount = m.modCount; + hi = fence = tab.length; + } + else + mc = expectedModCount; + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { + WeakHashMap.Entry p = current; + current = null; // exhaust + do { + if (p == null) + p = tab[i++]; + else { + Object x = p.get(); + V v = p.value; + p = p.next; + if (x != null) + action.accept(v); + } + } while (p != null || i < hi); + } + if (m.modCount != mc) + throw new ConcurrentModificationException(); + } + + public boolean tryAdvance(Consumer action) { + int hi; + if (action == null) + throw new NullPointerException(); + WeakHashMap.Entry[] tab = map.table; + if (tab.length >= (hi = getFence()) && index >= 0) { + while (current != null || index < hi) { + if (current == null) + current = tab[index++]; + else { + Object x = current.get(); + V v = current.value; + current = current.next; + if (x != null) { + action.accept(v); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + } + } + return false; + } + + public int characteristics() { + return 0; + } + } + + static final class EntrySpliterator + extends WeakHashMapSpliterator + implements Spliterator> { + EntrySpliterator(WeakHashMap m, int origin, int fence, int est, + int expectedModCount) { + super(m, origin, fence, est, expectedModCount); + } + + public EntrySpliterator trySplit() { + int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; + return (lo >= mid) ? 
null : + new EntrySpliterator(map, lo, index = mid, est >>>= 1, + expectedModCount); + } + + + public void forEachRemaining(Consumer> action) { + int i, hi, mc; + if (action == null) + throw new NullPointerException(); + WeakHashMap m = map; + WeakHashMap.Entry[] tab = m.table; + if ((hi = fence) < 0) { + mc = expectedModCount = m.modCount; + hi = fence = tab.length; + } + else + mc = expectedModCount; + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { + WeakHashMap.Entry p = current; + current = null; // exhaust + do { + if (p == null) + p = tab[i++]; + else { + Object x = p.get(); + V v = p.value; + p = p.next; + if (x != null) { + @SuppressWarnings("unchecked") K k = + (K) WeakHashMap.unmaskNull(x); + action.accept + (new AbstractMap.SimpleImmutableEntry(k, v)); + } + } + } while (p != null || i < hi); + } + if (m.modCount != mc) + throw new ConcurrentModificationException(); + } + + public boolean tryAdvance(Consumer> action) { + int hi; + if (action == null) + throw new NullPointerException(); + WeakHashMap.Entry[] tab = map.table; + if (tab.length >= (hi = getFence()) && index >= 0) { + while (current != null || index < hi) { + if (current == null) + current = tab[index++]; + else { + Object x = current.get(); + V v = current.value; + current = current.next; + if (x != null) { + @SuppressWarnings("unchecked") K k = + (K) WeakHashMap.unmaskNull(x); + action.accept + (new AbstractMap.SimpleImmutableEntry(k, v)); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + } + } + return false; + } + + public int characteristics() { + return Spliterator.DISTINCT; + } + } + +} From 3e7386a7c7284cfea0c8d26ef4231faf594eea00 Mon Sep 17 00:00:00 2001 From: CyC2018 <1029579233@qq.com> Date: Sat, 24 Mar 2018 15:20:48 +0800 Subject: [PATCH 2/2] add jdk 1.7 ConcurrentHashMap --- src/1.7/ConcurrentHashMap.java | 1522 ++++++++++++++++++++++++++++++++ 1 file changed, 1522 insertions(+) create mode 100644 src/1.7/ConcurrentHashMap.java diff --git a/src/1.7/ConcurrentHashMap.java b/src/1.7/ConcurrentHashMap.java new file mode 100644 index 0000000..e821480 --- /dev/null +++ b/src/1.7/ConcurrentHashMap.java @@ -0,0 +1,1522 @@ +/* + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * This file is available under and governed by the GNU General Public + * License version 2 only, as published by the Free Software Foundation. 
+ * However, the following notice accompanied the original version of this + * file: + * + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package java.util.concurrent; +import java.util.concurrent.locks.*; +import java.util.*; +import java.io.Serializable; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +/** + * A hash table supporting full concurrency of retrievals and + * adjustable expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * Hashtable. However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with Hashtable in programs that rely on its + * thread safety but not on its synchronization details. + * + *
<p>
      Retrieval operations (including get) generally do not + * block, so may overlap with update operations (including + * put and remove). Retrievals reflect the results + * of the most recently completed update operations holding + * upon their onset. For aggregate operations such as putAll + * and clear, concurrent retrievals may reflect insertion or + * removal of only some entries. Similarly, Iterators and + * Enumerations return elements reflecting the state of the hash table + * at some point at or since the creation of the iterator/enumeration. + * They do not throw {@link ConcurrentModificationException}. + * However, iterators are designed to be used by only one thread at a time. + * + *
<p>
      The allowed concurrency among update operations is guided by + * the optional concurrencyLevel constructor argument + * (default 16), which is used as a hint for internal sizing. The + * table is internally partitioned to try to permit the indicated + * number of concurrent updates without contention. Because placement + * in hash tables is essentially random, the actual concurrency will + * vary. Ideally, you should choose a value to accommodate as many + * threads as will ever concurrently modify the table. Using a + * significantly higher value than you need can waste space and time, + * and a significantly lower value can lead to thread contention. But + * overestimates and underestimates within an order of magnitude do + * not usually have much noticeable impact. A value of one is + * appropriate when it is known that only one thread will modify and + * all others will only read. Also, resizing this or any other kind of + * hash table is a relatively slow operation, so, when possible, it is + * a good idea to provide estimates of expected table sizes in + * constructors. + * + *
<p>
      This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *
<p>
      Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow null to be used as a key or value. + * + *
<p>
      This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class ConcurrentHashMap extends AbstractMap + implements ConcurrentMap, Serializable { + private static final long serialVersionUID = 7249069246763182397L; + + /* + * The basic strategy is to subdivide the table among Segments, + * each of which itself is a concurrently readable hash table. To + * reduce footprint, all but one segments are constructed only + * when first needed (see ensureSegment). To maintain visibility + * in the presence of lazy construction, accesses to segments as + * well as elements of segment's table must use volatile access, + * which is done via Unsafe within methods segmentAt etc + * below. These provide the functionality of AtomicReferenceArrays + * but reduce the levels of indirection. Additionally, + * volatile-writes of table elements and entry "next" fields + * within locked operations use the cheaper "lazySet" forms of + * writes (via putOrderedObject) because these writes are always + * followed by lock releases that maintain sequential consistency + * of table updates. + * + * Historical note: The previous version of this class relied + * heavily on "final" fields, which avoided some volatile reads at + * the expense of a large initial footprint. Some remnants of + * that design (including forced construction of segment 0) exist + * to ensure serialization compatibility. + */ + + /* ---------------- Constants -------------- */ + + /** + * The default initial capacity for this table, + * used when not otherwise specified in a constructor. + */ + static final int DEFAULT_INITIAL_CAPACITY = 16; + + /** + * The default load factor for this table, used when not + * otherwise specified in a constructor. + */ + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + /** + * The default concurrency level for this table, used when not + * otherwise specified in a constructor. + */ + static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The maximum capacity, used if a higher value is implicitly + * specified by either of the constructors with arguments. MUST + * be a power of two <= 1<<30 to ensure that entries are indexable + * using ints. + */ + static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The minimum capacity for per-segment tables. Must be a power + * of two, at least two to avoid immediate resizing on next use + * after lazy construction. + */ + static final int MIN_SEGMENT_TABLE_CAPACITY = 2; + + /** + * The maximum number of segments to allow; used to bound + * constructor arguments. Must be power of two less than 1 << 24. + */ + static final int MAX_SEGMENTS = 1 << 16; // slightly conservative + + /** + * Number of unsynchronized retries in size and containsValue + * methods before resorting to locking. This is used to avoid + * unbounded retries if tables undergo continuous modification + * which would make it impossible to obtain an accurate result. + */ + static final int RETRIES_BEFORE_LOCK = 2; + + /* ---------------- Fields -------------- */ + + /** + * Mask value for indexing into segments. The upper bits of a + * key's hash code are used to choose the segment. + */ + final int segmentMask; + + /** + * Shift value for indexing within segments. + */ + final int segmentShift; + + /** + * The segments, each of which is a specialized hash table. 
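+     * Each segment extends ReentrantLock and guards its own table, so
+     * updates in different segments can proceed in parallel while reads
+     * never block on a lock.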
+ */ + final Segment[] segments; + + transient Set keySet; + transient Set> entrySet; + transient Collection values; + + /** + * ConcurrentHashMap list entry. Note that this is never exported + * out as a user-visible Map.Entry. + */ + static final class HashEntry { + final int hash; + final K key; + volatile V value; + volatile HashEntry next; + + HashEntry(int hash, K key, V value, HashEntry next) { + this.hash = hash; + this.key = key; + this.value = value; + this.next = next; + } + + /** + * Sets next field with volatile write semantics. (See above + * about use of putOrderedObject.) + */ + final void setNext(HashEntry n) { + UNSAFE.putOrderedObject(this, nextOffset, n); + } + + // Unsafe mechanics + static final sun.misc.Unsafe UNSAFE; + static final long nextOffset; + static { + try { + UNSAFE = sun.misc.Unsafe.getUnsafe(); + Class k = HashEntry.class; + nextOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("next")); + } catch (Exception e) { + throw new Error(e); + } + } + } + + /** + * Gets the ith element of given table (if nonnull) with volatile + * read semantics. Note: This is manually integrated into a few + * performance-sensitive methods to reduce call overhead. + */ + @SuppressWarnings("unchecked") + static final HashEntry entryAt(HashEntry[] tab, int i) { + return (tab == null) ? null : + (HashEntry) UNSAFE.getObjectVolatile + (tab, ((long)i << TSHIFT) + TBASE); + } + + /** + * Sets the ith element of given table, with volatile write + * semantics. (See above about use of putOrderedObject.) + */ + static final void setEntryAt(HashEntry[] tab, int i, + HashEntry e) { + UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e); + } + + /** + * Applies a supplemental hash function to a given hashCode, which + * defends against poor quality hash functions. This is critical + * because ConcurrentHashMap uses power-of-two length hash tables, + * that otherwise encounter collisions for hashCodes that do not + * differ in lower or upper bits. + */ + private static int hash(int h) { + // Spread bits to regularize both segment and index locations, + // using variant of single-word Wang/Jenkins hash. + h += (h << 15) ^ 0xffffcd7d; + h ^= (h >>> 10); + h += (h << 3); + h ^= (h >>> 6); + h += (h << 2) + (h << 14); + return h ^ (h >>> 16); + } + + /** + * Segments are specialized versions of hash tables. This + * subclasses from ReentrantLock opportunistically, just to + * simplify some locking and avoid separate construction. + */ + static final class Segment extends ReentrantLock implements Serializable { + /* + * Segments maintain a table of entry lists that are always + * kept in a consistent state, so can be read (via volatile + * reads of segments and tables) without locking. This + * requires replicating nodes when necessary during table + * resizing, so the old lists can be traversed by readers + * still using old version of table. + * + * This class defines only mutative methods requiring locking. + * Except as noted, the methods of this class perform the + * per-segment versions of ConcurrentHashMap methods. (Other + * methods are integrated directly into ConcurrentHashMap + * methods.) These mutative methods use a form of controlled + * spinning on contention via methods scanAndLock and + * scanAndLockForPut. These intersperse tryLocks with + * traversals to locate nodes. The main benefit is to absorb + * cache misses (which are very common for hash tables) while + * obtaining locks so that traversal is faster once + * acquired. 
We do not actually use the found nodes since they + * must be re-acquired under lock anyway to ensure sequential + * consistency of updates (and in any case may be undetectably + * stale), but they will normally be much faster to re-locate. + * Also, scanAndLockForPut speculatively creates a fresh node + * to use in put if no node is found. + */ + + private static final long serialVersionUID = 2249069246763182397L; + + /** + * The maximum number of times to tryLock in a prescan before + * possibly blocking on acquire in preparation for a locked + * segment operation. On multiprocessors, using a bounded + * number of retries maintains cache acquired while locating + * nodes. + */ + static final int MAX_SCAN_RETRIES = + Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; + + /** + * The per-segment table. Elements are accessed via + * entryAt/setEntryAt providing volatile semantics. + */ + transient volatile HashEntry[] table; + + /** + * The number of elements. Accessed only either within locks + * or among other volatile reads that maintain visibility. + */ + transient int count; + + /** + * The total number of mutative operations in this segment. + * Even though this may overflows 32 bits, it provides + * sufficient accuracy for stability checks in CHM isEmpty() + * and size() methods. Accessed only either within locks or + * among other volatile reads that maintain visibility. + */ + transient int modCount; + + /** + * The table is rehashed when its size exceeds this threshold. + * (The value of this field is always (int)(capacity * + * loadFactor).) + */ + transient int threshold; + + /** + * The load factor for the hash table. Even though this value + * is same for all segments, it is replicated to avoid needing + * links to outer object. + * @serial + */ + final float loadFactor; + + Segment(float lf, int threshold, HashEntry[] tab) { + this.loadFactor = lf; + this.threshold = threshold; + this.table = tab; + } + + final V put(K key, int hash, V value, boolean onlyIfAbsent) { + HashEntry node = tryLock() ? null : + scanAndLockForPut(key, hash, value); + V oldValue; + try { + HashEntry[] tab = table; + int index = (tab.length - 1) & hash; + HashEntry first = entryAt(tab, index); + for (HashEntry e = first;;) { + if (e != null) { + K k; + if ((k = e.key) == key || + (e.hash == hash && key.equals(k))) { + oldValue = e.value; + if (!onlyIfAbsent) { + e.value = value; + ++modCount; + } + break; + } + e = e.next; + } + else { + if (node != null) + node.setNext(first); + else + node = new HashEntry(hash, key, value, first); + int c = count + 1; + if (c > threshold && tab.length < MAXIMUM_CAPACITY) + rehash(node); + else + setEntryAt(tab, index, node); + ++modCount; + count = c; + oldValue = null; + break; + } + } + } finally { + unlock(); + } + return oldValue; + } + + /** + * Doubles size of table and repacks entries, also adding the + * given node to new table + */ + @SuppressWarnings("unchecked") + private void rehash(HashEntry node) { + /* + * Reclassify nodes in each list to new table. Because we + * are using power-of-two expansion, the elements from + * each bin must either stay at same index, or move with a + * power of two offset. We eliminate unnecessary node + * creation by catching cases where old nodes can be + * reused because their next fields won't change. + * Statistically, at the default threshold, only about + * one-sixth of them need cloning when a table + * doubles. 
The nodes they replace will be garbage + * collectable as soon as they are no longer referenced by + * any reader thread that may be in the midst of + * concurrently traversing table. Entry accesses use plain + * array indexing because they are followed by volatile + * table write. + */ + HashEntry[] oldTable = table; + int oldCapacity = oldTable.length; + int newCapacity = oldCapacity << 1; + threshold = (int)(newCapacity * loadFactor); + HashEntry[] newTable = + (HashEntry[]) new HashEntry[newCapacity]; + int sizeMask = newCapacity - 1; + for (int i = 0; i < oldCapacity ; i++) { + HashEntry e = oldTable[i]; + if (e != null) { + HashEntry next = e.next; + int idx = e.hash & sizeMask; + if (next == null) // Single node on list + newTable[idx] = e; + else { // Reuse consecutive sequence at same slot + HashEntry lastRun = e; + int lastIdx = idx; + for (HashEntry last = next; + last != null; + last = last.next) { + int k = last.hash & sizeMask; + if (k != lastIdx) { + lastIdx = k; + lastRun = last; + } + } + newTable[lastIdx] = lastRun; + // Clone remaining nodes + for (HashEntry p = e; p != lastRun; p = p.next) { + V v = p.value; + int h = p.hash; + int k = h & sizeMask; + HashEntry n = newTable[k]; + newTable[k] = new HashEntry(h, p.key, v, n); + } + } + } + } + int nodeIndex = node.hash & sizeMask; // add the new node + node.setNext(newTable[nodeIndex]); + newTable[nodeIndex] = node; + table = newTable; + } + + /** + * Scans for a node containing given key while trying to + * acquire lock, creating and returning one if not found. Upon + * return, guarantees that lock is held. UNlike in most + * methods, calls to method equals are not screened: Since + * traversal speed doesn't matter, we might as well help warm + * up the associated code and accesses as well. + * + * @return a new node if key not found, else null + */ + private HashEntry scanAndLockForPut(K key, int hash, V value) { + HashEntry first = entryForHash(this, hash); + HashEntry e = first; + HashEntry node = null; + int retries = -1; // negative while locating node + while (!tryLock()) { + HashEntry f; // to recheck first below + if (retries < 0) { + if (e == null) { + if (node == null) // speculatively create node + node = new HashEntry(hash, key, value, null); + retries = 0; + } + else if (key.equals(e.key)) + retries = 0; + else + e = e.next; + } + else if (++retries > MAX_SCAN_RETRIES) { + lock(); + break; + } + else if ((retries & 1) == 0 && + (f = entryForHash(this, hash)) != first) { + e = first = f; // re-traverse if entry changed + retries = -1; + } + } + return node; + } + + /** + * Scans for a node containing the given key while trying to + * acquire lock for a remove or replace operation. Upon + * return, guarantees that lock is held. Note that we must + * lock even if the key is not found, to ensure sequential + * consistency of updates. + */ + private void scanAndLock(Object key, int hash) { + // similar to but simpler than scanAndLockForPut + HashEntry first = entryForHash(this, hash); + HashEntry e = first; + int retries = -1; + while (!tryLock()) { + HashEntry f; + if (retries < 0) { + if (e == null || key.equals(e.key)) + retries = 0; + else + e = e.next; + } + else if (++retries > MAX_SCAN_RETRIES) { + lock(); + break; + } + else if ((retries & 1) == 0 && + (f = entryForHash(this, hash)) != first) { + e = first = f; + retries = -1; + } + } + } + + /** + * Remove; match on key only if value null, else match both. 
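+         * For example, remove(key, hash, null) backs Map.remove(key), while a
+         * non-null value argument backs the conditional remove(key, value).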
+ */ + final V remove(Object key, int hash, Object value) { + if (!tryLock()) + scanAndLock(key, hash); + V oldValue = null; + try { + HashEntry[] tab = table; + int index = (tab.length - 1) & hash; + HashEntry e = entryAt(tab, index); + HashEntry pred = null; + while (e != null) { + K k; + HashEntry next = e.next; + if ((k = e.key) == key || + (e.hash == hash && key.equals(k))) { + V v = e.value; + if (value == null || value == v || value.equals(v)) { + if (pred == null) + setEntryAt(tab, index, next); + else + pred.setNext(next); + ++modCount; + --count; + oldValue = v; + } + break; + } + pred = e; + e = next; + } + } finally { + unlock(); + } + return oldValue; + } + + final boolean replace(K key, int hash, V oldValue, V newValue) { + if (!tryLock()) + scanAndLock(key, hash); + boolean replaced = false; + try { + HashEntry e; + for (e = entryForHash(this, hash); e != null; e = e.next) { + K k; + if ((k = e.key) == key || + (e.hash == hash && key.equals(k))) { + if (oldValue.equals(e.value)) { + e.value = newValue; + ++modCount; + replaced = true; + } + break; + } + } + } finally { + unlock(); + } + return replaced; + } + + final V replace(K key, int hash, V value) { + if (!tryLock()) + scanAndLock(key, hash); + V oldValue = null; + try { + HashEntry e; + for (e = entryForHash(this, hash); e != null; e = e.next) { + K k; + if ((k = e.key) == key || + (e.hash == hash && key.equals(k))) { + oldValue = e.value; + e.value = value; + ++modCount; + break; + } + } + } finally { + unlock(); + } + return oldValue; + } + + final void clear() { + lock(); + try { + HashEntry[] tab = table; + for (int i = 0; i < tab.length ; i++) + setEntryAt(tab, i, null); + ++modCount; + count = 0; + } finally { + unlock(); + } + } + } + + // Accessing segments + + /** + * Gets the jth element of given segment array (if nonnull) with + * volatile element access semantics via Unsafe. (The null check + * can trigger harmlessly only during deserialization.) Note: + * because each element of segments array is set only once (using + * fully ordered writes), some performance-sensitive methods rely + * on this method only as a recheck upon null reads. + */ + @SuppressWarnings("unchecked") + static final Segment segmentAt(Segment[] ss, int j) { + long u = (j << SSHIFT) + SBASE; + return ss == null ? null : + (Segment) UNSAFE.getObjectVolatile(ss, u); + } + + /** + * Returns the segment for the given index, creating it and + * recording in segment table (via CAS) if not already present. 
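+     * Segment 0 is used as a sizing prototype, and the new segment is
+     * published with a CAS so that racing creators agree on one instance.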
+ * + * @param k the index + * @return the segment + */ + @SuppressWarnings("unchecked") + private Segment ensureSegment(int k) { + final Segment[] ss = this.segments; + long u = (k << SSHIFT) + SBASE; // raw offset + Segment seg; + if ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) == null) { + Segment proto = ss[0]; // use segment 0 as prototype + int cap = proto.table.length; + float lf = proto.loadFactor; + int threshold = (int)(cap * lf); + HashEntry[] tab = (HashEntry[])new HashEntry[cap]; + if ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) + == null) { // recheck + Segment s = new Segment(lf, threshold, tab); + while ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) + == null) { + if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s)) + break; + } + } + } + return seg; + } + + // Hash-based segment and entry accesses + + /** + * Get the segment for the given hash + */ + @SuppressWarnings("unchecked") + private Segment segmentForHash(int h) { + long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; + return (Segment) UNSAFE.getObjectVolatile(segments, u); + } + + /** + * Gets the table entry for the given segment and hash + */ + @SuppressWarnings("unchecked") + static final HashEntry entryForHash(Segment seg, int h) { + HashEntry[] tab; + return (seg == null || (tab = seg.table) == null) ? null : + (HashEntry) UNSAFE.getObjectVolatile + (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the specified initial + * capacity, load factor and concurrency level. + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements. + * @param loadFactor the load factor threshold, used to control resizing. + * Resizing may be performed when the average number of elements per + * bin exceeds this threshold. + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation performs internal sizing + * to try to accommodate this many threads. + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive. + */ + @SuppressWarnings("unchecked") + public ConcurrentHashMap(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (concurrencyLevel > MAX_SEGMENTS) + concurrencyLevel = MAX_SEGMENTS; + // Find power-of-two sizes best matching arguments + int sshift = 0; + int ssize = 1; + while (ssize < concurrencyLevel) { + ++sshift; + ssize <<= 1; + } + this.segmentShift = 32 - sshift; + this.segmentMask = ssize - 1; + if (initialCapacity > MAXIMUM_CAPACITY) + initialCapacity = MAXIMUM_CAPACITY; + int c = initialCapacity / ssize; + if (c * ssize < initialCapacity) + ++c; + int cap = MIN_SEGMENT_TABLE_CAPACITY; + while (cap < c) + cap <<= 1; + // create segments and segments[0] + Segment s0 = + new Segment(loadFactor, (int)(cap * loadFactor), + (HashEntry[])new HashEntry[cap]); + Segment[] ss = (Segment[])new Segment[ssize]; + UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0] + this.segments = ss; + } + + /** + * Creates a new, empty map with the specified initial capacity + * and load factor and with the default concurrencyLevel (16). + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. 
+ * @param loadFactor the load factor threshold, used to control resizing. + * Resizing may be performed when the average number of elements per + * bin exceeds this threshold. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMap(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL); + } + + /** + * Creates a new, empty map with the specified initial capacity, + * and with default load factor (0.75) and concurrencyLevel (16). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative. + */ + public ConcurrentHashMap(int initialCapacity) { + this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); + } + + /** + * Creates a new, empty map with a default initial capacity (16), + * load factor (0.75) and concurrencyLevel (16). + */ + public ConcurrentHashMap() { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); + } + + /** + * Creates a new map with the same mappings as the given map. + * The map is created with a capacity of 1.5 times the number + * of mappings in the given map or 16 (whichever is greater), + * and a default load factor (0.75) and concurrencyLevel (16). + * + * @param m the map + */ + public ConcurrentHashMap(Map m) { + this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, + DEFAULT_INITIAL_CAPACITY), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); + putAll(m); + } + + /** + * Returns true if this map contains no key-value mappings. + * + * @return true if this map contains no key-value mappings + */ + public boolean isEmpty() { + /* + * Sum per-segment modCounts to avoid mis-reporting when + * elements are concurrently added and removed in one segment + * while checking another, in which case the table was never + * actually empty at any point. (The sum ensures accuracy up + * through at least 1<<31 per-segment modifications before + * recheck.) Methods size() and containsValue() use similar + * constructions for stability checks. + */ + long sum = 0L; + final Segment[] segments = this.segments; + for (int j = 0; j < segments.length; ++j) { + Segment seg = segmentAt(segments, j); + if (seg != null) { + if (seg.count != 0) + return false; + sum += seg.modCount; + } + } + if (sum != 0L) { // recheck unless no modifications + for (int j = 0; j < segments.length; ++j) { + Segment seg = segmentAt(segments, j); + if (seg != null) { + if (seg.count != 0) + return false; + sum -= seg.modCount; + } + } + if (sum != 0L) + return false; + } + return true; + } + + /** + * Returns the number of key-value mappings in this map. If the + * map contains more than Integer.MAX_VALUE elements, returns + * Integer.MAX_VALUE. + * + * @return the number of key-value mappings in this map + */ + public int size() { + // Try a few times to get accurate count. On failure due to + // continuous async changes in table, resort to locking. 
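+        // A pass is trusted when two consecutive iterations observe the same
+        // sum of per-segment modCounts; after RETRIES_BEFORE_LOCK (2) unstable
+        // passes, all segments are locked so the final count is exact.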
+ final Segment[] segments = this.segments; + int size; + boolean overflow; // true if size overflows 32 bits + long sum; // sum of modCounts + long last = 0L; // previous sum + int retries = -1; // first iteration isn't retry + try { + for (;;) { + if (retries++ == RETRIES_BEFORE_LOCK) { + for (int j = 0; j < segments.length; ++j) + ensureSegment(j).lock(); // force creation + } + sum = 0L; + size = 0; + overflow = false; + for (int j = 0; j < segments.length; ++j) { + Segment seg = segmentAt(segments, j); + if (seg != null) { + sum += seg.modCount; + int c = seg.count; + if (c < 0 || (size += c) < 0) + overflow = true; + } + } + if (sum == last) + break; + last = sum; + } + } finally { + if (retries > RETRIES_BEFORE_LOCK) { + for (int j = 0; j < segments.length; ++j) + segmentAt(segments, j).unlock(); + } + } + return overflow ? Integer.MAX_VALUE : size; + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *
<p>
      More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + public V get(Object key) { + Segment s; // manually integrate access methods to reduce overhead + HashEntry[] tab; + int h = hash(key.hashCode()); + long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; + if ((s = (Segment)UNSAFE.getObjectVolatile(segments, u)) != null && + (tab = s.table) != null) { + for (HashEntry e = (HashEntry) UNSAFE.getObjectVolatile + (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); + e != null; e = e.next) { + K k; + if ((k = e.key) == key || (e.hash == h && key.equals(k))) + return e.value; + } + } + return null; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return true if and only if the specified object + * is a key in this table, as determined by the + * equals method; false otherwise. + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") + public boolean containsKey(Object key) { + Segment s; // same as get() except no need for volatile value read + HashEntry[] tab; + int h = hash(key.hashCode()); + long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; + if ((s = (Segment)UNSAFE.getObjectVolatile(segments, u)) != null && + (tab = s.table) != null) { + for (HashEntry e = (HashEntry) UNSAFE.getObjectVolatile + (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); + e != null; e = e.next) { + K k; + if ((k = e.key) == key || (e.hash == h && key.equals(k))) + return true; + } + } + return false; + } + + /** + * Returns true if this map maps one or more keys to the + * specified value. Note: This method requires a full internal + * traversal of the hash table, and so is much slower than + * method containsKey. + * + * @param value value whose presence in this map is to be tested + * @return true if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + // Same idea as size() + if (value == null) + throw new NullPointerException(); + final Segment[] segments = this.segments; + boolean found = false; + long last = 0; + int retries = -1; + try { + outer: for (;;) { + if (retries++ == RETRIES_BEFORE_LOCK) { + for (int j = 0; j < segments.length; ++j) + ensureSegment(j).lock(); // force creation + } + long hashSum = 0L; + int sum = 0; + for (int j = 0; j < segments.length; ++j) { + HashEntry[] tab; + Segment seg = segmentAt(segments, j); + if (seg != null && (tab = seg.table) != null) { + for (int i = 0 ; i < tab.length; i++) { + HashEntry e; + for (e = entryAt(tab, i); e != null; e = e.next) { + V v = e.value; + if (v != null && value.equals(v)) { + found = true; + break outer; + } + } + } + sum += seg.modCount; + } + } + if (retries > 0 && sum == last) + break; + last = sum; + } + } finally { + if (retries > RETRIES_BEFORE_LOCK) { + for (int j = 0; j < segments.length; ++j) + segmentAt(segments, j).unlock(); + } + } + return found; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. 
This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + + * @param value a value to search for + * @return true if and only if some key maps to the + * value argument in this table as + * determined by the equals method; + * false otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *
<p>
      The value can be retrieved by calling the get method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") + public V put(K key, V value) { + Segment s; + if (value == null) + throw new NullPointerException(); + int hash = hash(key.hashCode()); + int j = (hash >>> segmentShift) & segmentMask; + if ((s = (Segment)UNSAFE.getObject // nonvolatile; recheck + (segments, (j << SSHIFT) + SBASE)) == null) // in ensureSegment + s = ensureSegment(j); + return s.put(key, hash, value, false); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") + public V putIfAbsent(K key, V value) { + Segment s; + if (value == null) + throw new NullPointerException(); + int hash = hash(key.hashCode()); + int j = (hash >>> segmentShift) & segmentMask; + if ((s = (Segment)UNSAFE.getObject + (segments, (j << SSHIFT) + SBASE)) == null) + s = ensureSegment(j); + return s.put(key, hash, value, true); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + for (Map.Entry e : m.entrySet()) + put(e.getKey(), e.getValue()); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key is null + */ + public V remove(Object key) { + int hash = hash(key.hashCode()); + Segment s = segmentForHash(hash); + return s == null ? null : s.remove(key, hash, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + int hash = hash(key.hashCode()); + Segment s; + return value != null && (s = segmentForHash(hash)) != null && + s.remove(key, hash, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + int hash = hash(key.hashCode()); + if (oldValue == null || newValue == null) + throw new NullPointerException(); + Segment s = segmentForHash(hash); + return s != null && s.replace(key, hash, oldValue, newValue); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + public V replace(K key, V value) { + int hash = hash(key.hashCode()); + if (value == null) + throw new NullPointerException(); + Segment s = segmentForHash(hash); + return s == null ? null : s.replace(key, hash, value); + } + + /** + * Removes all of the mappings from this map. 
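+     * Segments are cleared one at a time, each under its own lock, so the
+     * operation is not atomic: a concurrent reader may observe a partially
+     * cleared map.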
+ */ + public void clear() { + final Segment[] segments = this.segments; + for (int j = 0; j < segments.length; ++j) { + Segment s = segmentAt(segments, j); + if (s != null) + s.clear(); + } + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from this map, + * via the Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. It does not support the add or + * addAll operations. + * + *
<p>
      The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set keySet() { + Set ks = keySet; + return (ks != null) ? ks : (keySet = new KeySet()); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. The collection + * supports element removal, which removes the corresponding + * mapping from this map, via the Iterator.remove, + * Collection.remove, removeAll, + * retainAll, and clear operations. It does not + * support the add or addAll operations. + * + *
<p>
      The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Collection values() { + Collection vs = values; + return (vs != null) ? vs : (values = new Values()); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. It does not support the add or + * addAll operations. + * + *
<p>
      The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + Set> es = entrySet; + return (es != null) ? es : (entrySet = new EntrySet()); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(); + } + + /* ---------------- Iterator Support -------------- */ + + abstract class HashIterator { + int nextSegmentIndex; + int nextTableIndex; + HashEntry[] currentTable; + HashEntry nextEntry; + HashEntry lastReturned; + + HashIterator() { + nextSegmentIndex = segments.length - 1; + nextTableIndex = -1; + advance(); + } + + /** + * Set nextEntry to first node of next non-empty table + * (in backwards order, to simplify checks). + */ + final void advance() { + for (;;) { + if (nextTableIndex >= 0) { + if ((nextEntry = entryAt(currentTable, + nextTableIndex--)) != null) + break; + } + else if (nextSegmentIndex >= 0) { + Segment seg = segmentAt(segments, nextSegmentIndex--); + if (seg != null && (currentTable = seg.table) != null) + nextTableIndex = currentTable.length - 1; + } + else + break; + } + } + + final HashEntry nextEntry() { + HashEntry e = nextEntry; + if (e == null) + throw new NoSuchElementException(); + lastReturned = e; // cannot assign until after null check + if ((nextEntry = e.next) == null) + advance(); + return e; + } + + public final boolean hasNext() { return nextEntry != null; } + public final boolean hasMoreElements() { return nextEntry != null; } + + public final void remove() { + if (lastReturned == null) + throw new IllegalStateException(); + ConcurrentHashMap.this.remove(lastReturned.key); + lastReturned = null; + } + } + + final class KeyIterator + extends HashIterator + implements Iterator, Enumeration + { + public final K next() { return super.nextEntry().key; } + public final K nextElement() { return super.nextEntry().key; } + } + + final class ValueIterator + extends HashIterator + implements Iterator, Enumeration + { + public final V next() { return super.nextEntry().value; } + public final V nextElement() { return super.nextEntry().value; } + } + + /** + * Custom Entry class used by EntryIterator.next(), that relays + * setValue changes to the underlying map. + */ + final class WriteThroughEntry + extends AbstractMap.SimpleEntry + { + WriteThroughEntry(K k, V v) { + super(k,v); + } + + /** + * Set our entry's value and write through to the map. The + * value to return is somewhat arbitrary here. Since a + * WriteThroughEntry does not necessarily track asynchronous + * changes, the most recent "previous" value could be + * different from what we return (or could even have been + * removed in which case the put will re-establish). We do not + * and cannot guarantee more. 
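+         * In effect, entry.setValue(v) behaves like
+         * ConcurrentHashMap.this.put(entry.getKey(), v).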
+ */ + public V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = super.setValue(value); + ConcurrentHashMap.this.put(getKey(), value); + return v; + } + } + + final class EntryIterator + extends HashIterator + implements Iterator> + { + public Map.Entry next() { + HashEntry e = super.nextEntry(); + return new WriteThroughEntry(e.key, e.value); + } + } + + final class KeySet extends AbstractSet { + public Iterator iterator() { + return new KeyIterator(); + } + public int size() { + return ConcurrentHashMap.this.size(); + } + public boolean isEmpty() { + return ConcurrentHashMap.this.isEmpty(); + } + public boolean contains(Object o) { + return ConcurrentHashMap.this.containsKey(o); + } + public boolean remove(Object o) { + return ConcurrentHashMap.this.remove(o) != null; + } + public void clear() { + ConcurrentHashMap.this.clear(); + } + } + + final class Values extends AbstractCollection { + public Iterator iterator() { + return new ValueIterator(); + } + public int size() { + return ConcurrentHashMap.this.size(); + } + public boolean isEmpty() { + return ConcurrentHashMap.this.isEmpty(); + } + public boolean contains(Object o) { + return ConcurrentHashMap.this.containsValue(o); + } + public void clear() { + ConcurrentHashMap.this.clear(); + } + } + + final class EntrySet extends AbstractSet> { + public Iterator> iterator() { + return new EntryIterator(); + } + public boolean contains(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Map.Entry e = (Map.Entry)o; + V v = ConcurrentHashMap.this.get(e.getKey()); + return v != null && v.equals(e.getValue()); + } + public boolean remove(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Map.Entry e = (Map.Entry)o; + return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()); + } + public int size() { + return ConcurrentHashMap.this.size(); + } + public boolean isEmpty() { + return ConcurrentHashMap.this.isEmpty(); + } + public void clear() { + ConcurrentHashMap.this.clear(); + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Save the state of the ConcurrentHashMap instance to a + * stream (i.e., serialize it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + private void writeObject(java.io.ObjectOutputStream s) throws IOException { + // force all segments for serialization compatibility + for (int k = 0; k < segments.length; ++k) + ensureSegment(k); + s.defaultWriteObject(); + + final Segment[] segments = this.segments; + for (int k = 0; k < segments.length; ++k) { + Segment seg = segmentAt(segments, k); + seg.lock(); + try { + HashEntry[] tab = seg.table; + for (int i = 0; i < tab.length; ++i) { + HashEntry e; + for (e = entryAt(tab, i); e != null; e = e.next) { + s.writeObject(e.key); + s.writeObject(e.value); + } + } + } finally { + seg.unlock(); + } + } + s.writeObject(null); + s.writeObject(null); + } + + /** + * Reconstitute the ConcurrentHashMap instance from a + * stream (i.e., deserialize it). + * @param s the stream + */ + @SuppressWarnings("unchecked") + private void readObject(java.io.ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + + // Re-initialize segments to be minimally sized, and let grow. 
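+        // Each segment restarts at MIN_SEGMENT_TABLE_CAPACITY (2); the put()
+        // calls below rehash segments as needed while entries stream back in.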
+        int cap = MIN_SEGMENT_TABLE_CAPACITY;
+        final Segment<K,V>[] segments = this.segments;
+        for (int k = 0; k < segments.length; ++k) {
+            Segment<K,V> seg = segments[k];
+            if (seg != null) {
+                seg.threshold = (int)(cap * seg.loadFactor);
+                seg.table = (HashEntry<K,V>[]) new HashEntry[cap];
+            }
+        }
+
+        // Read the keys and values, and put the mappings in the table
+        for (;;) {
+            K key = (K) s.readObject();
+            V value = (V) s.readObject();
+            if (key == null)
+                break;
+            put(key, value);
+        }
+    }
+
+    // Unsafe mechanics
+    private static final sun.misc.Unsafe UNSAFE;
+    private static final long SBASE;
+    private static final int SSHIFT;
+    private static final long TBASE;
+    private static final int TSHIFT;
+
+    static {
+        int ss, ts;
+        try {
+            UNSAFE = sun.misc.Unsafe.getUnsafe();
+            Class tc = HashEntry[].class;
+            Class sc = Segment[].class;
+            TBASE = UNSAFE.arrayBaseOffset(tc);
+            SBASE = UNSAFE.arrayBaseOffset(sc);
+            ts = UNSAFE.arrayIndexScale(tc);
+            ss = UNSAFE.arrayIndexScale(sc);
+        } catch (Exception e) {
+            throw new Error(e);
+        }
+        if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0)
+            throw new Error("data type scale not a power of two");
+        SSHIFT = 31 - Integer.numberOfLeadingZeros(ss);
+        TSHIFT = 31 - Integer.numberOfLeadingZeros(ts);
+    }
+
+}
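
A minimal usage sketch of the segmented map added by this patch; the demo
class name is arbitrary, and only constructors and methods declared above
are used:

    import java.util.concurrent.ConcurrentHashMap;

    public class CHMDemo {                        // arbitrary demo class name
        public static void main(String[] args) {
            // concurrencyLevel 16 partitions the table into 16 lockable segments
            ConcurrentHashMap<String, Integer> map =
                new ConcurrentHashMap<String, Integer>(16, 0.75f, 16);
            map.put("a", 1);                      // locks only the segment for "a"
            map.putIfAbsent("a", 2);              // no-op: "a" already mapped, returns 1
            System.out.println(map.get("a"));     // lock-free read -> 1
            map.replace("a", 1, 3);               // conditional replace under segment lock
            map.remove("a");                      // unconditional remove
        }
    }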