/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.internal.concurrent;
/*
* CompactConcurrentHashSet2 is derived from the JSR 166 ConcurrentHashMap class version 1.43
* (current as of 27 March 2015). The modifications made for GemFire turn it into a Set instead of a
* Map by removing the map's storage for values and removing the methods that are associated with
* values.
*
* JSR 166 interest web site: http://gee.cs.oswego.edu/dl/concurrency-interest/
*
* File download location:
* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jdk7/java/util/concurrent/
* CompactConcurrentHashSet2.java?revision=1.43
*
* Original licensing notice: Written by Doug Lea with assistance from members of JCP JSR-166 Expert
* Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
import java.io.ObjectStreamField;
import java.io.Serializable;
import java.lang.reflect.Field;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.geode.annotations.Immutable;
import org.apache.geode.annotations.internal.MakeNotStatic;
/**
* <p>
* This is the original javadoc describing ConcurrentHashMap. This class is actually a Set based on
* Doug Lea's CHM implementation (see the source file for full attribution).
* </p>
* A hash map supporting full concurrency of retrievals and high expected concurrency for updates.
* This class obeys the same functional specification as {@link java.util.HashMap}, and includes
* versions of methods corresponding to each method of {@code HashMap}. However, even though all
* operations are thread-safe, retrieval operations do <em>not</em> entail locking, and there is
* <em>not</em> any support for locking the entire set in a way that prevents all access. This class
* is fully interoperable with {@code HashMap} in programs that rely on its thread safety but not on
* its synchronization details.
*
*
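* <p>
* A minimal usage sketch (illustrative only):
* </p>
*
* <pre>{@code
* Set<String> set = new CompactConcurrentHashSet2<String>();
* set.add("a");
* boolean present = set.contains("a"); // retrieval without locking
* set.remove("a");
* }</pre>
*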
* @since Geode 1.0
* @author Originally Doug Lea
* @param <V> the type of values held in the set
*/
public class CompactConcurrentHashSet2<V> extends AbstractSet<V> implements Set<V>, Serializable {
private static final long serialVersionUID = 7249069246763182397L;
/*
* Overview:
*
* The primary design goal of this hash table is to maintain concurrent readability (typically
* method get(), but also iterators and related methods) while minimizing update contention.
* Secondary goals are to keep space consumption about the same or better than java.util.HashMap,
* and to support high initial insertion rates on an empty table by many threads.
*
* This set usually acts as a binned (bucketed) hash table. Each element is held in a Node. Most
* nodes are instances of the basic Node class with hash, key, and next fields.
* However, various subclasses exist: TreeNodes are arranged in balanced trees, not lists.
* TreeBins hold the roots of sets of TreeNodes. ForwardingNodes are placed at the heads of bins
* during resizing. ReservationNodes are used as placeholders while establishing values in
* computeIfAbsent and related methods. The types TreeBin, ForwardingNode, and ReservationNode do
* not hold normal user keys or hashes, and are readily distinguishable during search etc. because
* they have negative hash fields and null key fields. (These special nodes are
* either uncommon or transient, so the impact of carrying around some unused fields is
* insignificant.)
*
* The table is lazily initialized to a power-of-two size upon the first insertion. Each bin in
* the table normally contains a list of Nodes (most often, the list has only zero or one Node).
* Table accesses require volatile/atomic reads, writes, and CASes. Because there is no other way
* to arrange this without adding further indirections, we use intrinsics (sun.misc.Unsafe)
* operations.
*
* We use the top (sign) bit of Node hash fields for control purposes -- it is available anyway
* because of addressing constraints. Nodes with negative hash fields are specially handled or
* ignored in map methods.
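*
* For instance (illustrative): spread(), defined below, masks hashes with HASH_BITS
* (0x7fffffff), so every normal node hash is non-negative, while the control encodings MOVED,
* TREEBIN, and RESERVED all have the sign bit set and so can never match a user hash.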
*
* Insertion (via put or its variants) of the first node in an empty bin is performed by just
* CASing it to the bin. This is by far the most common case for put operations under most
* key/hash distributions. Other update operations (insert, delete, and replace) require locks. We
* do not want to waste the space required to associate a distinct lock object with each bin, so
* instead use the first node of a bin list itself as a lock. Locking support for these locks
* relies on builtin "synchronized" monitors.
*
* Using the first node of a list as a lock does not by itself suffice though: When a node is
* locked, any update must first validate that it is still the first node after locking it, and
* retry if not. Because new nodes are always appended to lists, once a node is first in a bin, it
* remains first until deleted or the bin becomes invalidated (upon resizing).
*
* The main disadvantage of per-bin locks is that other update operations on other nodes in a bin
* list protected by the same lock can stall, for example when user equals() or mapping functions
* take a long time. However, statistically, under random hash codes, this is not a common
* problem. Ideally, the frequency of nodes in bins follows a Poisson distribution
* (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of about 0.5 on average,
* given the resizing threshold of 0.75, although with a large variance because of resizing
* granularity. Ignoring variance, the expected occurrences of list size k are (exp(-0.5) *
* pow(0.5, k) / factorial(k)). The first values are:
*
* 0: 0.60653066
* 1: 0.30326533
* 2: 0.07581633
* 3: 0.01263606
* 4: 0.00157952
* 5: 0.00015795
* 6: 0.00001316
* 7: 0.00000094
* 8: 0.00000006
* more: less than 1 in ten million
*
* Lock contention probability for two threads accessing distinct elements is roughly 1 / (8 *
* #elements) under random hashes.
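*
* A quick numeric check of this table (illustrative only, not part of the class):
*
* double p = Math.exp(-0.5); // k = 0: 0.60653066
* for (int k = 1; k <= 8; k++) {
* p = p * 0.5 / k; // exp(-0.5) * pow(0.5, k) / factorial(k)
* System.out.printf("%d: %.8f%n", k, p);
* }
*
* which reproduces the values above (0.30326533 for k = 1, and so on).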
*
* Actual hash code distributions encountered in practice sometimes deviate significantly from
* uniform randomness. This includes the case when N > (1<<30), so some keys MUST collide.
* Similarly for dumb or hostile usages in which multiple keys are designed to have identical hash
* codes or ones that differ only in masked-out high bits. So we use a secondary strategy that
* applies when the number of nodes in a bin exceeds a threshold. These TreeBins use a balanced
* tree to hold nodes (a specialized form of red-black trees), bounding search time to O(log N).
* Each search step in a TreeBin is at least twice as slow as in a regular list, but given that N
* cannot exceed (1<<64) (before running out of addresses) this bounds search steps, lock hold
* times, etc, to reasonable constants (roughly 100 nodes inspected per operation worst case) so
* long as keys are Comparable (which is very common -- String, Long, etc). TreeBin nodes
* (TreeNodes) also maintain the same "next" traversal pointers as regular nodes, so can be
* traversed in iterators in the same way.
*
* The table is resized when occupancy exceeds a percentage threshold (nominally, 0.75, but see
* below). Any thread noticing an overfull bin may assist in resizing after the initiating thread
* allocates and sets up the replacement array. However, rather than stalling, these other threads
* may proceed with insertions etc. The use of TreeBins shields us from the worst case effects of
* overfilling while resizes are in progress. Resizing proceeds by transferring bins, one by one,
* from the table to the next table. However, threads claim small blocks of indices to transfer
* (via field transferIndex) before doing so, reducing contention. A generation stamp in field
* sizeCtl ensures that resizings do not overlap. Because we are using power-of-two expansion, the
* elements from each bin must either stay at same index, or move with a power of two offset. We
* eliminate unnecessary node creation by catching cases where old nodes can be reused because
* their next fields won't change. On average, only about one-sixth of them need cloning when a
* table doubles. The nodes they replace will be garbage collectable as soon as they are no longer
* referenced by any reader thread that may be in the midst of concurrently traversing the table.
* Upon
* transfer, the old table bin contains only a special forwarding node (with hash field "MOVED")
* that contains the next table as its key. On encountering a forwarding node, access and update
* operations restart, using the new table.
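*
* As a concrete illustration of the power-of-two split (not part of the class): a node with
* hash h sits at index (h & (n - 1)) in a table of length n; after doubling it stays at that
* index if (h & n) == 0, or moves to (h & (n - 1)) + n otherwise. For n = 16, a node at index 5
* ends up at index 5 or index 21 of the 32-slot table, depending on bit 4 of its hash.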
*
* Each bin transfer requires its bin lock, which can stall waiting for locks while resizing.
* However, because other threads can join in and help resize rather than contend for locks,
* average aggregate waits become shorter as resizing progresses. The transfer operation must also
* ensure that all accessible bins in both the old and new table are usable by any traversal. This
* is arranged in part by proceeding from the last bin (table.length - 1) up towards the first.
* Upon seeing a forwarding node, traversals (see class Traverser) arrange to move to the new
* table without revisiting nodes. To ensure that no intervening nodes are skipped even when moved
* out of order, a stack (see class TableStack) is created on first encounter of a forwarding node
* during a traversal, to maintain its place if later processing the current table. The need for
* these save/restore mechanics is relatively rare, but when one forwarding node is encountered,
* typically many more will be. So Traversers use a simple caching scheme to avoid creating so
* many new TableStack nodes. (Thanks to Peter Levart for suggesting use of a stack here.)
*
* The traversal scheme also applies to partial traversals of ranges of bins (via an alternate
* Traverser constructor) to support partitioned aggregate operations. Also, read-only operations
* give up if ever forwarded to a null table, which provides support for shutdown-style clearing
* (not currently implemented).
*
* Lazy table initialization minimizes footprint until first use, and also avoids resizings when
* the first operation is from a putAll, constructor with map argument, or deserialization. These
* cases attempt to override the initial capacity settings, but harmlessly fail to take effect in
* cases of races.
*
* The element count is maintained using a specialization of LongAdder. We need to incorporate a
* specialization rather than just use a LongAdder in order to access implicit contention-sensing
* that leads to creation of multiple CounterCells. The counter mechanics avoid contention on
* updates but can encounter cache thrashing if read too frequently during concurrent access. To
* avoid reading so often, resizing under contention is attempted only upon adding to a bin
* already holding two or more nodes. Under uniform hash distributions, the probability of this
* occurring at threshold is around 13%, meaning that only about 1 in 8 puts check threshold (and
* after resizing, many fewer do so).
*
* TreeBins use a special form of comparison for search and related operations (which is the main
* reason we cannot use existing collections such as TreeMaps). TreeBins contain Comparable
* elements, but may contain others, as well as elements that are Comparable but not necessarily
* Comparable for the same T, so we cannot invoke compareTo among them. To handle this, the tree
* is ordered primarily by hash value, then by Comparable.compareTo order if applicable. On lookup
* at a node, if elements are not comparable or compare as 0 then both left and right children may
* need to be searched in the case of tied hash values. (This corresponds to the full list search
* that would be necessary if all elements were non-Comparable and had tied hashes.) On insertion,
* to keep a total ordering (or as close as is required here) across rebalancings, we compare
* classes and identityHashCodes as tie-breakers. The red-black balancing code is updated from
* pre-jdk-collections (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) based in turn
* on Cormen, Leiserson, and Rivest "Introduction to Algorithms" (CLR).
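*
* For example (illustrative): two distinct keys a and b with equal hash codes and no usable
* compareTo are ordered first by comparing a.getClass().getName() with b.getClass().getName(),
* and, if still tied, by comparing System.identityHashCode(a) with System.identityHashCode(b)
* (see tieBreakOrder in TreeBin below).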
*
* TreeBins also require an additional locking mechanism. While list traversal is always possible
* by readers even during updates, tree traversal is not, mainly because of tree-rotations that
* may change the root node and/or its linkages. TreeBins include a simple read-write lock
* mechanism parasitic on the main bin-synchronization strategy: Structural adjustments associated
* with an insertion or removal are already bin-locked (and so cannot conflict with other writers)
* but must wait for ongoing readers to finish. Since there can be only one such waiter, we use a
* simple scheme using a single "waiter" field to block writers. However, readers need never
* block. If the root lock is held, they proceed along the slow traversal path (via next-pointers)
* until the lock becomes available or the list is exhausted, whichever comes first. These cases
* are not fast, but maximize aggregate expected throughput.
*
* Maintaining API and serialization compatibility with previous versions of this class introduces
* several oddities. Mainly: We leave untouched but unused constructor arguments referring to
* concurrencyLevel. We accept a loadFactor constructor argument, but apply it only to initial
* table capacity (which is the only time that we can guarantee to honor it.) We also declare an
* unused "Segment" class that is instantiated in minimal form only when serializing.
*
* Also, solely for compatibility with previous versions, the original map class extended
* AbstractMap even though all of its methods were overridden, making it useless baggage; this
* Set adaptation extends AbstractSet instead.
*
* This file is organized to make things a little easier to follow while reading than they might
* otherwise: First the main static declarations and utilities, then fields, then main public
* methods (with a few factorings of multiple public methods into internal ones), then sizing
* methods, trees, traversers, and bulk operations.
*/
/* ---------------- Constants -------------- */
/**
* The largest possible table capacity. This value must be exactly 1<<30 to stay within Java array
* allocation and indexing bounds for power of two table sizes, and is further required because
* the top two bits of 32bit hash fields are used for control purposes.
*/
private static final int MAXIMUM_CAPACITY = 1 << 30;
/**
* The default initial table capacity. Must be a power of 2 (i.e., at least 1) and at most
* MAXIMUM_CAPACITY.
*/
private static final int DEFAULT_CAPACITY = 16;
/**
* The largest possible (non-power of two) array size. Needed by toArray and related methods.
*/
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
/**
* The default concurrency level for this table. Unused but defined for compatibility with
* previous versions of this class.
*/
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
/**
* The load factor for this table. Overrides of this value in constructors affect only the initial
* table capacity. The actual floating point value isn't normally used -- it is simpler to use
* expressions such as {@code n - (n >>> 2)} for the associated resizing threshold.
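* For example (illustrative): with n = 16 bins the resizing threshold is
* {@code 16 - (16 >>> 2) == 12}, i.e. 0.75 * 16.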
*/
private static final float LOAD_FACTOR = 0.75f;
/**
* The bin count threshold for using a tree rather than list for a bin. Bins are converted to
* trees when adding an element to a bin with at least this many nodes. The value must be greater
* than 2, and should be at least 8 to mesh with assumptions in tree removal about conversion back
* to plain bins upon shrinkage.
*/
static final int TREEIFY_THRESHOLD = 8;
/**
* The bin count threshold for untreeifying a (split) bin during a resize operation. Should be
* less than TREEIFY_THRESHOLD, and at most 6 to mesh with shrinkage detection under removal.
*/
static final int UNTREEIFY_THRESHOLD = 6;
/**
* The smallest table capacity for which bins may be treeified. (Otherwise the table is resized if
* too many nodes in a bin.) The value should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
* between resizing and treeification thresholds.
*/
static final int MIN_TREEIFY_CAPACITY = 64;
/**
* Minimum number of rebinnings per transfer step. Ranges are subdivided to allow multiple resizer
* threads. This value serves as a lower bound to avoid resizers encountering excessive memory
* contention. The value should be at least DEFAULT_CAPACITY.
*/
private static final int MIN_TRANSFER_STRIDE = 16;
/**
* The number of bits used for generation stamp in sizeCtl. Must be at least 6 for 32bit arrays.
*/
private static final int RESIZE_STAMP_BITS = 16;
/**
* The maximum number of threads that can help resize. Must fit in 32 - RESIZE_STAMP_BITS bits.
*/
private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
/**
* The bit shift for recording size stamp in sizeCtl.
*/
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
/*
* Encodings for Node hash fields. See above for explanation.
*/
static final int MOVED = 0x8fffffff; // (-1) hash for forwarding nodes
static final int TREEBIN = 0x80000000; // hash for roots of trees
static final int RESERVED = 0x80000001; // hash for transient reservations
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
/** Number of CPUS, to place bounds on some sizings */
static final int NCPU = Runtime.getRuntime().availableProcessors();
/** For serialization compatibility. */
@Immutable
private static final ObjectStreamField[] serialPersistentFields =
{new ObjectStreamField("segments", Segment[].class),
new ObjectStreamField("segmentMask", Integer.TYPE),
new ObjectStreamField("segmentShift", Integer.TYPE)};
/* ---------------- Nodes -------------- */
/**
* Key entry. In this Set adaptation a Node holds only a key (the map's value storage was
* removed), and is never exported out to users, but can be used for read-only traversals in bulk
* tasks. Subclasses of Node with a negative hash field are special, and contain null keys (but
* are never exported). Otherwise, keys are never null.
*/
static class Node<K> {
final int hash;
final K key;
Node<K> next;
Node(int hash, K key, Node<K> next) {
this.hash = hash;
this.key = key;
this.next = next;
}
public K getKey() {
return key;
}
public int hashCode() {
return key.hashCode();
}
public String toString() {
return key.toString();
}
public boolean equals(Object o) {
Object k;
// compare the other node's key with this node's key, not the node object itself
return ((o instanceof Node)
&& ((k = ((Node<?>) o).key) == key || (k != null && k.equals(key))));
}
/**
* Virtualized support for map.get(); overridden in subclasses.
*/
Node<K> find(int h, Object k) {
Node<K> e = this;
if (k != null) {
do {
K ek;
if (e.hash == h && ((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
} while ((e = e.next) != null);
}
return null;
}
}
/* ---------------- Static utilities -------------- */
/**
* Spreads (XORs) higher bits of hash to lower and also forces top bit to 0. Because the table
* uses power-of-two masking, sets of hashes that vary only in bits above the current mask will
* always collide. (Among known examples are sets of Float keys holding consecutive whole numbers
* in small tables.) So we apply a transform that spreads the impact of higher bits downward.
* There is a tradeoff between speed, utility, and quality of bit-spreading. Because many common
* sets of hashes are already reasonably distributed (so don't benefit from spreading), and
* because we use trees to handle large sets of collisions in bins, we just XOR some shifted bits
* in the cheapest possible way to reduce systematic lossage, as well as to incorporate impact of
* the highest bits that would otherwise never be used in index calculations because of table
* bounds.
*/
static int spread(int h) {
return (h ^ (h >>> 16)) & HASH_BITS;
}
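/*
* Example (illustrative only): hashes 0x10000 and 0x20000 differ only above the mask of a
* 16-bin table, so (h & 15) is 0 for both and they would always collide. After spreading,
* spread(0x10000) == 0x10001 and spread(0x20000) == 0x20002, which index bins 1 and 2.
*/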
/**
* Returns a power of two table size for the given desired capacity. See Hacker's Delight, sec.
* 3.2.
*/
private static int tableSizeFor(int c) {
int n = c - 1;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
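/*
* Example (illustrative only): tableSizeFor(10) == 16, tableSizeFor(16) == 16, and
* tableSizeFor(17) == 32 -- the smallest power of two >= the requested capacity.
*/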
/**
* Returns x's Class if it is of the form "class C implements Comparable<C>", else null.
*/
static Class<?> comparableClassFor(Object x) {
if (x instanceof Comparable) {
Class<?> c;
Type[] ts, as;
Type t;
ParameterizedType p;
if ((c = x.getClass()) == String.class) // bypass checks
return c;
if ((ts = c.getGenericInterfaces()) != null) {
for (int i = 0; i < ts.length; ++i) {
if (((t = ts[i]) instanceof ParameterizedType)
&& ((p = (ParameterizedType) t).getRawType() == Comparable.class)
&& (as = p.getActualTypeArguments()) != null && as.length == 1 && as[0] == c) // type arg is c
return c;
}
}
}
return null;
}
/**
* Returns k.compareTo(x) if x matches kc (k's screened comparable class), else 0.
*/
@SuppressWarnings({"rawtypes", "unchecked"}) // for cast to Comparable
static int compareComparables(Class<?> kc, Object k, Object x) {
return (x == null || x.getClass() != kc ? 0 : ((Comparable) k).compareTo(x));
}
/* ---------------- Table element access -------------- */
/*
* Volatile access methods are used for table elements as well as elements of in-progress next
* table while resizing. All uses of the tab arguments must be null checked by callers. All
* callers also paranoically precheck that tab's length is not zero (or an equivalent check), thus
* ensuring that any index argument taking the form of a hash value anded with (length - 1) is a
* valid index. Note that, to be correct wrt arbitrary concurrency errors by users, these checks
* must operate on local variables, which accounts for some odd-looking inline assignments below.
* Note that calls to setTabAt always occur within locked regions, and so do not need full
* volatile semantics, but still require ordering to maintain concurrent readability.
*/
@SuppressWarnings("unchecked")
static <K> Node<K> tabAt(Node<K>[] tab, int i) {
return (Node<K>) U.getObjectVolatile(tab, ((long) i << ASHIFT) + ABASE);
}
static <K> boolean casTabAt(Node<K>[] tab, int i, Node<K> c, Node<K> v) {
return U.compareAndSwapObject(tab, ((long) i << ASHIFT) + ABASE, c, v);
}
static <K> void setTabAt(Node<K>[] tab, int i, Node<K> v) {
U.putOrderedObject(tab, ((long) i << ASHIFT) + ABASE, v);
}
/* ---------------- Fields -------------- */
/**
* The array of bins. Lazily initialized upon first insertion. Size is always a power of two.
* Accessed directly by iterators.
*/
transient volatile Node<V>[] table;
/**
* The next table to use; non-null only while resizing.
*/
private transient volatile Node<V>[] nextTable;
/**
* Base counter value, used mainly when there is no contention, but also as a fallback during
* table initialization races. Updated via CAS.
*/
private transient volatile long baseCount;
/**
* Table initialization and resizing control. When negative, the table is being initialized or
* resized: -1 for initialization, else -(1 + the number of active resizing threads). Otherwise,
* when table is null, holds the initial table size to use upon creation, or 0 for default. After
* initialization, holds the next element count value upon which to resize the table.
*/
private transient volatile int sizeCtl;
/**
* The next table index (plus one) to split while resizing.
*/
private transient volatile int transferIndex;
/**
* Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
*/
private transient volatile int cellsBusy;
/**
* Table of counter cells. When non-null, size is a power of 2.
*/
private transient volatile CounterCell[] counterCells;
/* ---------------- Public operations -------------- */
/**
* Creates a new, empty set with the default initial table size (16).
*/
public CompactConcurrentHashSet2() {}
/**
* Creates a new, empty set with an initial table size accommodating the specified number of
* elements without the need to dynamically resize.
*
* @param initialCapacity The implementation performs internal sizing to accommodate this many
* elements.
* @throws IllegalArgumentException if the initial capacity of elements is negative
*/
public CompactConcurrentHashSet2(int initialCapacity) {
if (initialCapacity < 0)
throw new IllegalArgumentException();
int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY
: tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
this.sizeCtl = cap;
}
/**
* Creates a new set containing the elements of the given collection.
*
* @param m the collection
*/
public CompactConcurrentHashSet2(Collection<? extends V> m) {
this.sizeCtl = DEFAULT_CAPACITY;
addAll(m);
}
/**
* Creates a new, empty set with an initial table size based on the given number of elements
* ({@code initialCapacity}) and initial table density ({@code loadFactor}).
*
* @param initialCapacity the initial capacity. The implementation performs internal sizing to
* accommodate this many elements, given the specified load factor.
* @param loadFactor the load factor (table density) for establishing the initial table size
* @throws IllegalArgumentException if the initial capacity of elements is negative or the load
* factor is nonpositive
*
* @since Geode 1.0
*/
public CompactConcurrentHashSet2(int initialCapacity, float loadFactor) {
this(initialCapacity, loadFactor, 1);
}
/**
* Creates a new, empty set with an initial table size based on the given number of elements
* ({@code initialCapacity}), table density ({@code loadFactor}), and number of concurrently
* updating threads ({@code concurrencyLevel}).
*
* @param initialCapacity the initial capacity. The implementation performs internal sizing to
* accommodate this many elements, given the specified load factor.
* @param loadFactor the load factor (table density) for establishing the initial table size
* @param concurrencyLevel the estimated number of concurrently updating threads. The
* implementation may use this value as a sizing hint.
* @throws IllegalArgumentException if the initial capacity is negative or the load factor or
* concurrencyLevel are nonpositive
*/
public CompactConcurrentHashSet2(int initialCapacity, float loadFactor, int concurrencyLevel) {
if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
if (initialCapacity < concurrencyLevel) // Use at least as many bins
initialCapacity = concurrencyLevel; // as estimated threads
long size = (long) (1.0 + (long) initialCapacity / loadFactor);
int cap = (size >= (long) MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : tableSizeFor((int) size);
this.sizeCtl = cap;
}
// Original (since JDK1.2) Map methods
/**
* {@inheritDoc}
*/
@Override
public int size() {
long n = sumCount();
return ((n < 0L) ? 0 : (n > (long) Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int) n);
}
/**
* {@inheritDoc}
*/
@Override
public boolean isEmpty() {
return sumCount() <= 0L; // ignore transient negative values
}
boolean _contains(Object key) {
Node<V>[] tab;
Node<V> e, p;
int n, eh;
V ek;
if (key == null) {
return false;
}
int h = spread(key.hashCode());
if ((tab = table) != null && (n = tab.length) > 0 && (e = tabAt(tab, (n - 1) & h)) != null) {
if ((eh = e.hash) == h) {
if ((ek = e.key) == key || (ek != null && key.equals(ek)))
return true;
} else if (eh < 0)
return ((p = e.find(h, key)) != null);
while ((e = e.next) != null) {
if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek))))
return true;
}
}
return false;
}
@Override
public boolean add(V value) {
return putKey(value, false);
}
/** Implementation for add */
boolean putKey(V key, boolean onlyIfAbsent) {
if (key == null)
throw new NullPointerException();
int hash = spread(key.hashCode());
int binCount = 0;
for (Node<V>[] tab = table;;) {
Node<V> f;
int n, i, fh;
if (tab == null || (n = tab.length) == 0)
tab = initTable();
else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
if (casTabAt(tab, i, null, new Node<V>(hash, key, null)))
break; // no lock when adding to empty bin
} else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
boolean wasPresent = false;
synchronized (f) {
if (tabAt(tab, i) == f) {
if (fh >= 0) {
binCount = 1;
for (Node<V> e = f;; ++binCount) {
V ek;
if (e.hash == hash && ((ek = e.key) == key || (ek != null && key.equals(ek)))) {
wasPresent = true;
break;
}
Node<V> pred = e;
if ((e = e.next) == null) {
pred.next = new Node<V>(hash, key, null);
break;
}
}
} else if (f instanceof TreeBin) {
Node<V> p;
binCount = 2;
if ((p = ((TreeBin<V>) f).putTreeVal(hash, key)) != null) {
wasPresent = true;
}
}
}
}
if (binCount != 0) {
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i);
if (wasPresent)
return true;
break;
}
}
}
addCount(1L, binCount);
return false;
}
/**
* Adds all of the elements in the specified set to this set.
*
* @param m elements to be added to this set
*/
public void addAll(Set<? extends V> m) {
tryPresize(m.size());
Iterator<? extends V> it = m.iterator();
while (it.hasNext()) {
putKey(it.next(), false);
}
}
@Override
public boolean remove(Object key) {
return removeNode(key);
}
boolean removeNode(Object key) {
int hash = spread(key.hashCode());
for (Node<V>[] tab = table;;) {
Node<V> f;
int n, i, fh;
if (tab == null || (n = tab.length) == 0 || (f = tabAt(tab, i = (n - 1) & hash)) == null)
break;
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
boolean wasPresent = false;
boolean validated = false;
synchronized (f) {
if (tabAt(tab, i) == f) {
if (fh >= 0) {
validated = true;
for (Node<V> e = f, pred = null;;) {
V ek;
if (e.hash == hash && ((ek = e.key) == key || (ek != null && key.equals(ek)))) {
wasPresent = true;
if (pred != null)
pred.next = e.next;
else
setTabAt(tab, i, e.next);
break;
}
pred = e;
if ((e = e.next) == null)
break;
}
} else if (f instanceof TreeBin) {
validated = true;
TreeBin<V> t = (TreeBin<V>) f;
TreeNode<V> r, p;
if ((r = t.root) != null && (p = r.findTreeNode(hash, key, null)) != null) {
wasPresent = true;
if (t.removeTreeNode(p))
setTabAt(tab, i, untreeify(t.first));
}
}
}
}
if (validated) {
if (wasPresent) {
addCount(-1L, -1);
return true;
}
break;
}
}
}
return false;
}
/**
* Removes all of the elements from this set.
*/
@Override
public void clear() {
long delta = 0L; // negative number of deletions
int i = 0;
Node<V>[] tab = table;
while (tab != null && i < tab.length) {
int fh;
Node<V> f = tabAt(tab, i);
if (f == null)
++i;
else if ((fh = f.hash) == MOVED) {
tab = helpTransfer(tab, f);
i = 0; // restart
} else {
synchronized (f) {
if (tabAt(tab, i) == f) {
Node<V> p = (fh >= 0 ? f : (f instanceof TreeBin) ? ((TreeBin<V>) f).first : null);
while (p != null) {
--delta;
p = p.next;
}
setTabAt(tab, i++, null);
}
}
}
}
if (delta != 0L)
addCount(delta, -1);
}
// public int hashCode() {
// int h = 0;
// Node<V>[] t;
// if ((t = table) != null) {
// Traverser<V> it = new Traverser<V>(t, t.length, 0, t.length);
// for (Node<V> p; (p = it.advance()) != null; )
// h += p.key.hashCode();
// }
// return h;
// }
/**
* Returns a string representation of this set. The string representation consists of a list of
* the set's elements (in no particular order) enclosed in braces (<code>{}</code>). Adjacent
* elements are separated by the characters <code>", "</code> (comma and space).
*
* @return a string representation of this set
*/
public String toString() {
Node<V>[] t;
int f = (t = table) == null ? 0 : t.length;
Traverser<V> it = new Traverser<V>(t, f, 0, f);
StringBuilder sb = new StringBuilder();
sb.append('{');
Node<V> p;
if ((p = it.advance()) != null) {
for (;;) {
V k = p.key;
sb.append(k == this ? "(this Set)" : k);
if ((p = it.advance()) == null)
break;
sb.append(',').append(' ');
}
}
return sb.append('}').toString();
}
// public boolean equals(Object o) {
// if (o != this) {
// if (!(o instanceof Set))
// return false;
// Set<?> m = (Set<?>) o;
// Node<V>[] t;
// int f = (t = table) == null ? 0 : t.length;
// Traverser<V> it = new Traverser<V>(t, f, 0, f);
// for (Node<V> p; (p = it.advance()) != null; ) {
// if (!m.contains(p.key)) {
// return false;
// }
// }
// Iterator<?> itr = m.iterator();
// while (itr.hasNext()) {
// if (!contains(itr.next())) {
// return false;
// }
// }
// }
// return true;
// }
/**
* Stripped-down version of helper class used in previous version, declared for the sake of
* serialization compatibility
*/
static class Segment<K> extends ReentrantLock implements Serializable {
private static final long serialVersionUID = 2249069246763182397L;
final float loadFactor;
Segment(float lf) {
this.loadFactor = lf;
}
}
/**
* Saves the state of the {@code CompactConcurrentHashSet2} instance to a stream (i.e., serializes
* it).
*
* @param s the stream
* @serialData each element (an Object), followed by two null values that emulate the null
*             key-value pair terminating the stream format of earlier versions. The elements are
*             emitted in no particular order.
*/
private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException {
// For serialization compatibility
// Emulate segment calculation from previous version of this class
int sshift = 0;
int ssize = 1;
while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
++sshift;
ssize <<= 1;
}
int segmentShift = 32 - sshift;
int segmentMask = ssize - 1;
@SuppressWarnings("unchecked")
Segment<V>[] segments = (Segment<V>[]) new Segment<?>[DEFAULT_CONCURRENCY_LEVEL];
for (int i = 0; i < segments.length; ++i)
segments[i] = new Segment<V>(LOAD_FACTOR);
java.io.ObjectOutputStream.PutField streamFields = s.putFields();
streamFields.put("segments", segments);
streamFields.put("segmentShift", segmentShift);
streamFields.put("segmentMask", segmentMask);
s.writeFields();
Node<V>[] t;
if ((t = table) != null) {
Traverser<V> it = new Traverser<V>(t, t.length, 0, t.length);
for (Node<V> p; (p = it.advance()) != null;) {
s.writeObject(p.key);
}
}
s.writeObject(null);
s.writeObject(null);
segments = null; // throw away
}
/**
* Reconstitutes the instance from a stream (that is, deserializes it).
*
* @param s the stream
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
/*
* To improve performance in typical cases, we create nodes while reading, then place in table
* once size is known. However, we must also validate uniqueness and deal with overpopulated
* bins while doing so, which requires specialized versions of putVal mechanics.
*/
sizeCtl = -1; // force exclusion for table construction
s.defaultReadObject();
long size = 0L;
Node<V> p = null;
for (;;) {
@SuppressWarnings("unchecked")
V k = (V) s.readObject();
if (k != null) {
p = new Node<V>(spread(k.hashCode()), k, p);
++size;
} else
break;
}
if (size == 0L)
sizeCtl = 0;
else {
int n;
if (size >= (long) (MAXIMUM_CAPACITY >>> 1))
n = MAXIMUM_CAPACITY;
else {
int sz = (int) size;
n = tableSizeFor(sz + (sz >>> 1) + 1);
}
@SuppressWarnings("unchecked")
Node<V>[] tab = (Node<V>[]) new Node<?>[n];
int mask = n - 1;
long added = 0L;
while (p != null) {
boolean insertAtFront;
Node<V> next = p.next, first;
int h = p.hash, j = h & mask;
if ((first = tabAt(tab, j)) == null)
insertAtFront = true;
else {
V k = p.key;
if (first.hash < 0) {
TreeBin<V> t = (TreeBin<V>) first;
if (t.putTreeVal(h, k) == null)
++added;
insertAtFront = false;
} else {
int binCount = 0;
insertAtFront = true;
Node<V> q;
V qk;
for (q = first; q != null; q = q.next) {
if (q.hash == h && ((qk = q.key) == k || (qk != null && k.equals(qk)))) {
insertAtFront = false;
break;
}
++binCount;
}
if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
insertAtFront = false;
++added;
p.next = first;
TreeNode<V> hd = null, tl = null;
for (q = p; q != null; q = q.next) {
TreeNode<V> t = new TreeNode<V>(q.hash, q.key, null, null);
if ((t.prev = tl) == null)
hd = t;
else
tl.next = t;
tl = t;
}
setTabAt(tab, j, new TreeBin<V>(hd));
}
}
}
if (insertAtFront) {
++added;
p.next = first;
setTabAt(tab, j, p);
}
p = next;
}
table = tab;
sizeCtl = n - (n >>> 2);
baseCount = added;
}
}
@Override
public boolean contains(Object value) {
return _contains(value);
}
@Override
public Iterator<V> iterator() {
Node<V>[] t;
CompactConcurrentHashSet2<V> m = this;
int f = (t = m.table) == null ? 0 : t.length;
return new KeyIterator<V>(t, f, 0, f, m);
}
// CompactConcurrentHashSet2-only methods
/**
* Returns the number of elements. This method should be used instead of {@link #size} because a
* CompactConcurrentHashSet2 may contain more elements than can be represented as an int. The
* value returned is an estimate; the actual count may differ if there are concurrent insertions
* or removals.
*
* @return the number of elements
* @since Geode 1.0
*/
public long mappingCount() {
long n = sumCount();
return (n < 0L) ? 0L : n; // ignore transient negative values
}
/* ---------------- Special Nodes -------------- */
/**
* A node inserted at head of bins during transfer operations.
*/
static class ForwardingNode<K> extends Node<K> {
final Node<K>[] nextTable;
ForwardingNode(Node<K>[] tab) {
super(MOVED, null, null);
this.nextTable = tab;
}
@Override
Node<K> find(int h, Object k) {
Node<K> e;
int n;
Node<K>[] tab = nextTable;
if (k != null && tab != null && (n = tab.length) > 0
&& (e = tabAt(tab, (n - 1) & h)) != null) {
do {
int eh;
K ek;
if ((eh = e.hash) == h && ((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
if (eh < 0)
return e.find(h, k);
} while ((e = e.next) != null);
}
return null;
}
}
/**
* A place-holder node used in computeIfAbsent and compute
*/
static class ReservationNode<K> extends Node<K> {
ReservationNode() {
super(RESERVED, null, null);
}
@Override
Node<K> find(int h, Object k) {
return null;
}
}
/* ---------------- Table Initialization and Resizing -------------- */
/**
* Returns the stamp bits for resizing a table of size n. Must be negative when shifted left by
* RESIZE_STAMP_SHIFT.
*/
static int resizeStamp(int n) {
return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
}
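/*
* Example (illustrative only): resizeStamp(16) == Integer.numberOfLeadingZeros(16) | (1 << 15)
* == 27 | 0x8000 == 0x801b; shifted left by RESIZE_STAMP_SHIFT (16) it sets the sign bit
* (0x801b0000), so sizeCtl stays negative while a resize of a 16-bin table is in progress.
*/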
/**
* Initializes table, using the size recorded in sizeCtl.
*/
private Node<V>[] initTable() {
Node<V>[] tab;
int sc;
while ((tab = table) == null || tab.length == 0) {
if ((sc = sizeCtl) < 0)
Thread.yield(); // lost initialization race; just spin
else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if ((tab = table) == null || tab.length == 0) {
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
@SuppressWarnings("unchecked")
Node<V>[] nt = (Node<V>[]) new Node<?>[n];
table = tab = nt;
sc = n - (n >>> 2);
}
} finally {
sizeCtl = sc;
}
break;
}
}
return tab;
}
/**
* Adds to count, and if table is too small and not already resizing, initiates transfer. If
* already resizing, helps perform transfer if work is available. Rechecks occupancy after a
* transfer to see if another resize is already needed because resizings are lagging additions.
*
* @param x the count to add
* @param check if <0, don't check resize, if <= 1 only check if uncontended
*/
private void addCount(long x, int check) {
CounterCell[] as;
long b, s;
if ((as = counterCells) != null
|| !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
CounterHashCode hc;
CounterCell a;
long v;
int m;
boolean uncontended = true;
if ((hc = threadCounterHashCode.get()) == null || as == null || (m = as.length - 1) < 0
|| (a = as[m & hc.code]) == null
|| !(uncontended = U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
fullAddCount(x, hc, uncontended);
return;
}
if (check <= 1)
return;
s = sumCount();
}
if (check >= 0) {
Node<V>[] tab, nt;
int n, sc;
while (s >= (long) (sc = sizeCtl) && (tab = table) != null
&& (n = tab.length) < MAXIMUM_CAPACITY) {
int rs = resizeStamp(n);
if (sc < 0) {
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || sc == rs + MAX_RESIZERS
|| (nt = nextTable) == null || transferIndex <= 0)
break;
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
} else if (U.compareAndSwapInt(this, SIZECTL, sc, (rs << RESIZE_STAMP_SHIFT) + 2))
transfer(tab, null);
s = sumCount();
}
}
}
/**
* Helps transfer if a resize is in progress.
*/
Node<V>[] helpTransfer(Node<V>[] tab, Node<V> f) {
Node<V>[] nextTab;
int sc;
if (tab != null && (f instanceof ForwardingNode)
&& (nextTab = ((ForwardingNode<V>) f).nextTable) != null) {
int rs = resizeStamp(tab.length);
while (nextTab == nextTable && table == tab && (sc = sizeCtl) < 0) {
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || sc == rs + MAX_RESIZERS
|| transferIndex <= 0)
break;
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
transfer(tab, nextTab);
break;
}
}
return nextTab;
}
return table;
}
/**
* Tries to presize table to accommodate the given number of elements.
*
* @param size number of elements (doesn't need to be perfectly accurate)
*/
private void tryPresize(int size) {
int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY
: tableSizeFor(size + (size >>> 1) + 1);
int sc;
while ((sc = sizeCtl) >= 0) {
Node<V>[] tab = table;
int n;
if (tab == null || (n = tab.length) == 0) {
n = (sc > c) ? sc : c;
if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if (table == tab) {
@SuppressWarnings("unchecked")
Node<V>[] nt = (Node<V>[]) new Node<?>[n];
table = nt;
sc = n - (n >>> 2);
}
} finally {
sizeCtl = sc;
}
}
} else if (c <= sc || n >= MAXIMUM_CAPACITY)
break;
else if (tab == table) {
int rs = resizeStamp(n);
if (U.compareAndSwapInt(this, SIZECTL, sc, (rs << RESIZE_STAMP_SHIFT) + 2))
transfer(tab, null);
}
}
}
/**
* Moves and/or copies the nodes in each bin to new table. See above for explanation.
*/
private void transfer(Node<V>[] tab, Node<V>[] nextTab) {
int n = tab.length, stride;
if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
stride = MIN_TRANSFER_STRIDE; // subdivide range
if (nextTab == null) { // initiating
try {
@SuppressWarnings("unchecked")
Node<V>[] nt = (Node<V>[]) new Node<?>[n << 1];
nextTab = nt;
} catch (Throwable ex) { // try to cope with OOME
sizeCtl = Integer.MAX_VALUE;
return;
}
nextTable = nextTab;
transferIndex = n;
}
int nextn = nextTab.length;
ForwardingNode<V> fwd = new ForwardingNode<V>(nextTab);
boolean advance = true;
boolean finishing = false; // to ensure sweep before committing nextTab
for (int i = 0, bound = 0;;) {
Node<V> f;
int fh;
while (advance) {
int nextIndex, nextBound;
if (--i >= bound || finishing)
advance = false;
else if ((nextIndex = transferIndex) <= 0) {
i = -1;
advance = false;
} else if (U.compareAndSwapInt(this, TRANSFERINDEX, nextIndex,
nextBound = (nextIndex > stride ? nextIndex - stride : 0))) {
bound = nextBound;
i = nextIndex - 1;
advance = false;
}
}
if (i < 0 || i >= n || i + n >= nextn) {
int sc;
if (finishing) {
nextTable = null;
table = nextTab;
sizeCtl = (n << 1) - (n >>> 1);
return;
}
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
return;
finishing = advance = true;
i = n; // recheck before commit
}
} else if ((f = tabAt(tab, i)) == null)
advance = casTabAt(tab, i, null, fwd);
else if ((fh = f.hash) == MOVED)
advance = true; // already processed
else {
synchronized (f) {
if (tabAt(tab, i) == f) {
Node<V> ln, hn;
if (fh >= 0) {
int runBit = fh & n;
Node<V> lastRun = f;
for (Node<V> p = f.next; p != null; p = p.next) {
int b = p.hash & n;
if (b != runBit) {
runBit = b;
lastRun = p;
}
}
if (runBit == 0) {
ln = lastRun;
hn = null;
} else {
hn = lastRun;
ln = null;
}
for (Node<V> p = f; p != lastRun; p = p.next) {
int ph = p.hash;
V pk = p.key;
if ((ph & n) == 0)
ln = new Node<V>(ph, pk, ln);
else
hn = new Node<V>(ph, pk, hn);
}
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd);
advance = true;
} else if (f instanceof TreeBin) {
TreeBin<V> t = (TreeBin<V>) f;
TreeNode<V> lo = null, loTail = null;
TreeNode<V> hi = null, hiTail = null;
int lc = 0, hc = 0;
for (Node<V> e = t.first; e != null; e = e.next) {
int h = e.hash;
TreeNode<V> p = new TreeNode<V>(h, e.key, null, null);
if ((h & n) == 0) {
if ((p.prev = loTail) == null)
lo = p;
else
loTail.next = p;
loTail = p;
++lc;
} else {
if ((p.prev = hiTail) == null)
hi = p;
else
hiTail.next = p;
hiTail = p;
++hc;
}
}
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) : (hc != 0) ? new TreeBin<V>(lo) : t;
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) : (lc != 0) ? new TreeBin<V>(hi) : t;
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd);
advance = true;
}
}
}
}
}
}
/* ---------------- Conversion from/to TreeBins -------------- */
/**
* Replaces all linked nodes in bin at given index unless table is too small, in which case
* resizes instead.
*/
private void treeifyBin(Node<V>[] tab, int index) {
Node<V> b;
int n, sc;
if (tab != null) {
if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
tryPresize(n << 1);
else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
synchronized (b) {
if (tabAt(tab, index) == b) {
TreeNode<V> hd = null, tl = null;
for (Node<V> e = b; e != null; e = e.next) {
TreeNode<V> p = new TreeNode<V>(e.hash, e.key, null, null);
if ((p.prev = tl) == null)
hd = p;
else
tl.next = p;
tl = p;
}
setTabAt(tab, index, new TreeBin<V>(hd));
}
}
}
}
}
/**
* Returns a list on non-TreeNodes replacing those in given list.
*/
static <K> Node<K> untreeify(Node<K> b) {
Node<K> hd = null, tl = null;
for (Node<K> q = b; q != null; q = q.next) {
Node<K> p = new Node<K>(q.hash, q.key, null);
if (tl == null)
hd = p;
else
tl.next = p;
tl = p;
}
return hd;
}
/* ---------------- TreeNodes -------------- */
/**
* Nodes for use in TreeBins
*/
static class TreeNode<K> extends Node<K> {
TreeNode<K> parent; // red-black tree links
TreeNode<K> left;
TreeNode<K> right;
TreeNode<K> prev; // needed to unlink next upon deletion
boolean red;
TreeNode(int hash, K key, Node<K> next, TreeNode<K> parent) {
super(hash, key, next);
this.parent = parent;
}
@Override
Node<K> find(int h, Object k) {
return findTreeNode(h, k, null);
}
/**
* Returns the TreeNode (or null if not found) for the given key starting at given root.
*/
TreeNode<K> findTreeNode(int h, Object k, Class<?> kc) {
if (k != null) {
TreeNode<K> p = this;
do {
int ph, dir;
K pk;
TreeNode<K> q;
TreeNode<K> pl = p.left, pr = p.right;
if ((ph = p.hash) > h)
p = pl;
else if (ph < h)
p = pr;
else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
return p;
else if (pl == null)
p = pr;
else if (pr == null)
p = pl;
else if ((kc != null || (kc = comparableClassFor(k)) != null)
&& (dir = compareComparables(kc, k, pk)) != 0)
p = (dir < 0) ? pl : pr;
else if ((q = pr.findTreeNode(h, k, kc)) != null)
return q;
else
p = pl;
} while (p != null);
}
return null;
}
}
/* ---------------- TreeBins -------------- */
/**
* TreeNodes used at the heads of bins. TreeBins do not hold user keys or values, but instead
* point to list of TreeNodes and their root. They also maintain a parasitic read-write lock
* forcing writers (who hold bin lock) to wait for readers (who do not) to complete before tree
* restructuring operations.
*/
static class TreeBin<K> extends Node<K> {
TreeNode<K> root;
volatile TreeNode<K> first;
volatile Thread waiter;
volatile int lockState;
// values for lockState
static final int WRITER = 1; // set while holding write lock
static final int WAITER = 2; // set when waiting for write lock
static final int READER = 4; // increment value for setting read lock
/**
* Tie-breaking utility for ordering insertions when equal hashCodes and non-comparable. We
* don't require a total order, just a consistent insertion rule to maintain equivalence across
* rebalancings. Tie-breaking further than necessary simplifies testing a bit.
*/
static int tieBreakOrder(Object a, Object b) {
int d;
if (a == null || b == null
|| (d = a.getClass().getName().compareTo(b.getClass().getName())) == 0)
d = (System.identityHashCode(a) <= System.identityHashCode(b) ? -1 : 1);
return d;
}
/**
* Creates bin with initial set of nodes headed by b.
*/
TreeBin(TreeNode<K> b) {
super(TREEBIN, null, null);
this.first = b;
TreeNode<K> r = null;
for (TreeNode<K> x = b, next; x != null; x = next) {
next = (TreeNode<K>) x.next;
x.left = x.right = null;
if (r == null) {
x.parent = null;
x.red = false;
r = x;
} else {
K k = x.key;
int h = x.hash;
Class<?> kc = null;
for (TreeNode<K> p = r;;) {
int dir, ph;
K pk = p.key;
if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((kc == null && (kc = comparableClassFor(k)) == null)
|| (dir = compareComparables(kc, k, pk)) == 0)
dir = tieBreakOrder(k, pk);
TreeNode<K> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
x.parent = xp;
if (dir <= 0)
xp.left = x;
else
xp.right = x;
r = balanceInsertion(r, x);
break;
}
}
}
}
this.root = r;
assert checkInvariants(root);
}
/**
* Acquires write lock for tree restructuring.
*/
private void lockRoot() {
if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
contendedLock(); // offload to separate method
}
/**
* Releases write lock for tree restructuring.
*/
private void unlockRoot() {
lockState = 0;
}
/**
* Possibly blocks awaiting root lock.
*/
private void contendedLock() {
boolean waiting = false;
for (int s;;) {
if (((s = lockState) & ~WAITER) == 0) {
if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
if (waiting)
waiter = null;
return;
}
} else if ((s & WAITER) == 0) {
if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
waiting = true;
waiter = Thread.currentThread();
}
} else if (waiting)
LockSupport.park(this);
}
}
/**
* Returns matching node or null if none. Tries to search using tree comparisons from root, but
* continues linear search when lock not available.
*/
@Override
Node<K> find(int h, Object k) {
if (k != null) {
for (Node<K> e = first; e != null;) {
int s;
K ek;
if (((s = lockState) & (WAITER | WRITER)) != 0) {
if (e.hash == h && ((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
e = e.next;
} else if (U.compareAndSwapInt(this, LOCKSTATE, s, s + READER)) {
TreeNode<K> r, p;
try {
p = ((r = root) == null ? null : r.findTreeNode(h, k, null));
} finally {
Thread w;
int ls;
do {
} while (!U.compareAndSwapInt(this, LOCKSTATE, ls = lockState, ls - READER));
if (ls == (READER | WAITER) && (w = waiter) != null)
LockSupport.unpark(w);
}
return p;
}
}
}
return null;
}
/**
* Finds or adds a node.
*
* @return null if added
*/
TreeNode<K> putTreeVal(int h, K k) {
Class<?> kc = null;
boolean searched = false;
for (TreeNode<K> p = root;;) {
int dir, ph;
K pk;
if (p == null) {
first = root = new TreeNode<K>(h, k, null, null);
break;
} else if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
return p;
else if ((kc == null && (kc = comparableClassFor(k)) == null)
|| (dir = compareComparables(kc, k, pk)) == 0) {
if (!searched) {
TreeNode<K> q, ch;
searched = true;
if (((ch = p.left) != null && (q = ch.findTreeNode(h, k, kc)) != null)
|| ((ch = p.right) != null && (q = ch.findTreeNode(h, k, kc)) != null))
return q;
}
dir = tieBreakOrder(k, pk);
}
TreeNode<K> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
TreeNode<K> x, f = first;
first = x = new TreeNode<K>(h, k, f, xp);
if (f != null)
f.prev = x;
if (dir <= 0)
xp.left = x;
else
xp.right = x;
if (!xp.red)
x.red = true;
else {
lockRoot();
try {
root = balanceInsertion(root, x);
} finally {
unlockRoot();
}
}
break;
}
}
assert checkInvariants(root);
return null;
}
/**
* Removes the given node, that must be present before this call. This is messier than typical
* red-black deletion code because we cannot swap the contents of an interior node with a leaf
* successor that is pinned by "next" pointers that are accessible independently of lock. So
* instead we swap the tree linkages.
*
* @return true if now too small, so should be untreeified
*/
boolean removeTreeNode(TreeNode<K> p) {
TreeNode<K> next = (TreeNode<K>) p.next;
TreeNode<K> pred = p.prev; // unlink traversal pointers
TreeNode<K> r, rl;
if (pred == null)
first = next;
else
pred.next = next;
if (next != null)
next.prev = pred;
if (first == null) {
root = null;
return true;
}
if ((r = root) == null || r.right == null || // too small
(rl = r.left) == null || rl.left == null)
return true;
lockRoot();
try {
TreeNode<K> replacement;
TreeNode<K> pl = p.left;
TreeNode<K> pr = p.right;
if (pl != null && pr != null) {
TreeNode<K> s = pr, sl;
while ((sl = s.left) != null) // find successor
s = sl;
boolean c = s.red;
s.red = p.red;
p.red = c; // swap colors
TreeNode<K> sr = s.right;
TreeNode<K> pp = p.parent;
if (s == pr) { // p was s's direct parent
p.parent = s;
s.right = p;
} else {
TreeNode<K> sp = s.parent;
if ((p.parent = sp) != null) {
if (s == sp.left)
sp.left = p;
else
sp.right = p;
}
s.right = pr;
pr.parent = s;
}
p.left = null;
if ((p.right = sr) != null)
sr.parent = p;
s.left = pl;
pl.parent = s;
if ((s.parent = pp) == null)
r = s;
else if (p == pp.left)
pp.left = s;
else
pp.right = s;
if (sr != null)
replacement = sr;
else
replacement = p;
} else if (pl != null)
replacement = pl;
else if (pr != null)
replacement = pr;
else
replacement = p;
if (replacement != p) {
TreeNode<K> pp = replacement.parent = p.parent;
if (pp == null)
r = replacement;
else if (p == pp.left)
pp.left = replacement;
else
pp.right = replacement;
p.left = p.right = p.parent = null;
}
root = (p.red) ? r : balanceDeletion(r, replacement);
if (p == replacement) { // detach pointers
TreeNode<K> pp;
if ((pp = p.parent) != null) {
if (p == pp.left)
pp.left = null;
else if (p == pp.right)
pp.right = null;
p.parent = null;
}
}
} finally {
unlockRoot();
}
assert checkInvariants(root);
return false;
}
/* ------------------------------------------------------------ */
// Red-black tree methods, all adapted from CLR
static <K> TreeNode<K> rotateLeft(TreeNode<K> root, TreeNode<K> p) {
TreeNode<K> r, pp, rl;
if (p != null && (r = p.right) != null) {
if ((rl = p.right = r.left) != null)
rl.parent = p;
if ((pp = r.parent = p.parent) == null)
(root = r).red = false;
else if (pp.left == p)
pp.left = r;
else
pp.right = r;
r.left = p;
p.parent = r;
}
return root;
}
static <K> TreeNode<K> rotateRight(TreeNode<K> root, TreeNode<K> p) {
TreeNode<K> l, pp, lr;
if (p != null && (l = p.left) != null) {
if ((lr = p.left = l.right) != null)
lr.parent = p;
if ((pp = l.parent = p.parent) == null)
(root = l).red = false;
else if (pp.right == p)
pp.right = l;
else
pp.left = l;
l.right = p;
p.parent = l;
}
return root;
}
static <K> TreeNode<K> balanceInsertion(TreeNode<K> root, TreeNode<K> x) {
x.red = true;
for (TreeNode<K> xp, xpp, xppl, xppr;;) {
if ((xp = x.parent) == null) {
x.red = false;
return x;
} else if (!xp.red || (xpp = xp.parent) == null)
return root;
if (xp == (xppl = xpp.left)) {
if ((xppr = xpp.right) != null && xppr.red) {
xppr.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
} else {
if (x == xp.right) {
root = rotateLeft(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateRight(root, xpp);
}
}
}
} else {
if (xppl != null && xppl.red) {
xppl.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
} else {
if (x == xp.left) {
root = rotateRight(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateLeft(root, xpp);
}
}
}
}
}
}
static <K> TreeNode<K> balanceDeletion(TreeNode<K> root, TreeNode<K> x) {
for (TreeNode<K> xp, xpl, xpr;;) {
if (x == null || x == root)
return root;
else if ((xp = x.parent) == null) {
x.red = false;
return x;
} else if (x.red) {
x.red = false;
return root;
} else if ((xpl = xp.left) == x) {
if ((xpr = xp.right) != null && xpr.red) {
xpr.red = false;
xp.red = true;
root = rotateLeft(root, xp);
xpr = (xp = x.parent) == null ? null : xp.right;
}
if (xpr == null)
x = xp;
else {
TreeNode<K> sl = xpr.left, sr = xpr.right;
if ((sr == null || !sr.red) && (sl == null || !sl.red)) {
xpr.red = true;
x = xp;
} else {
if (sr == null || !sr.red) {
if (sl != null)
sl.red = false;
xpr.red = true;
root = rotateRight(root, xpr);
xpr = (xp = x.parent) == null ? null : xp.right;
}
if (xpr != null) {
xpr.red = (xp == null) ? false : xp.red;
if ((sr = xpr.right) != null)
sr.red = false;
}
if (xp != null) {
xp.red = false;
root = rotateLeft(root, xp);
}
x = root;
}
}
} else { // symmetric
if (xpl != null && xpl.red) {
xpl.red = false;
xp.red = true;
root = rotateRight(root, xp);
xpl = (xp = x.parent) == null ? null : xp.left;
}
if (xpl == null)
x = xp;
else {
TreeNode<K> sl = xpl.left, sr = xpl.right;
if ((sl == null || !sl.red) && (sr == null || !sr.red)) {
xpl.red = true;
x = xp;
} else {
if (sl == null || !sl.red) {
if (sr != null)
sr.red = false;
xpl.red = true;
root = rotateLeft(root, xpl);
xpl = (xp = x.parent) == null ? null : xp.left;
}
if (xpl != null) {
xpl.red = (xp == null) ? false : xp.red;
if ((sl = xpl.left) != null)
sl.red = false;
}
if (xp != null) {
xp.red = false;
root = rotateRight(root, xp);
}
x = root;
}
}
}
}
}
/**
* Recursive invariant check: parent/child and prev/next links must be mutually consistent,
* hashes must be BST-ordered, and no red node may have two red children.
*/
static <K> boolean checkInvariants(TreeNode<K> t) {
TreeNode<K> tp = t.parent, tl = t.left, tr = t.right, tb = t.prev, tn = (TreeNode<K>) t.next;
if (tb != null && tb.next != t)
return false;
if (tn != null && tn.prev != t)
return false;
if (tp != null && t != tp.left && t != tp.right)
return false;
if (tl != null && (tl.parent != t || tl.hash > t.hash))
return false;
if (tr != null && (tr.parent != t || tr.hash < t.hash))
return false;
if (t.red && tl != null && tl.red && tr != null && tr.red)
return false;
if (tl != null && !checkInvariants(tl))
return false;
if (tr != null && !checkInvariants(tr))
return false;
return true;
}
@Immutable
private static final sun.misc.Unsafe U;
private static final long LOCKSTATE;
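// theUnsafe is read reflectively because Unsafe.getUnsafe() throws a SecurityException for
// callers that were not loaded by the bootstrap classloader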
static {
try {
Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
U = (sun.misc.Unsafe) f.get(null);
Class<?> k = TreeBin.class;
LOCKSTATE = U.objectFieldOffset(k.getDeclaredField("lockState"));
} catch (Exception e) {
throw new Error(e);
}
}
}
/* ----------------Table Traversal -------------- */
/**
* Records the table, its length, and the current traversal index for a traverser that must
* process a region of a forwarded table before proceeding with the current table.
*/
static class TableStack<K> {
int length;
int index;
Node<K>[] tab;
TableStack<K> next;
}
/**
* Encapsulates table traversal; serves as a base class for the set's iterators.
*
* Method advance visits each still-valid node that was reachable upon iterator construction
* exactly once. It might miss nodes added to a bin after that bin was visited, which is
* acceptable with respect to the weak-consistency guarantees. Maintaining this property in the
* face of possible ongoing resizes requires a fair amount of bookkeeping state that is
* difficult to optimize away amidst volatile accesses. Even so, traversal maintains reasonable
* throughput.
*
* Normally, iteration proceeds bin-by-bin traversing lists. However, if the table has been
* resized, then all future steps must traverse both the bin at the current index as well as at
* (index + baseSize); and so on for further resizings. To cope, somewhat paranoically, with
* potential sharing of iterators across threads, iteration terminates if a bounds check fails
* for a table read.
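* <p>
* For example, if a table of size 16 is doubled while a traverser is at bin i, the traverser
* must also visit bin i + 16 in the new table (and i + 32 after a further doubling, and so
* on).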
*/
static class Traverser<K> {
Node<K>[] tab; // current table; updated if resized
Node<K> next; // the next entry to use
TableStack<K> stack, spare; // to save/restore on ForwardingNodes
int index; // index of bin to use next
int baseIndex; // current index of initial table
int baseLimit; // index bound for initial table
final int baseSize; // initial table size
Traverser(Node<K>[] tab, int size, int index, int limit) {
this.tab = tab;
this.baseSize = size;
this.baseIndex = this.index = index;
this.baseLimit = limit;
this.next = null;
}
/**
* Advances if possible, returning next valid node, or null if none.
*/
Node<K> advance() {
Node<K> e;
if ((e = next) != null)
e = e.next;
for (;;) {
Node<K>[] t;
int i, n; // must use locals in checks
if (e != null)
return next = e;
if (baseIndex >= baseLimit || (t = tab) == null || (n = t.length) <= (i = index) || i < 0)
return next = null;
if ((e = tabAt(t, i)) != null && e.hash < 0) {
if (e instanceof ForwardingNode) {
tab = ((ForwardingNode<K>) e).nextTable;
e = null;
pushState(t, i, n);
continue;
} else if (e instanceof TreeBin)
e = ((TreeBin<K>) e).first;
else
e = null;
}
if (stack != null)
recoverState(n);
else if ((index = i + baseSize) >= n)
index = ++baseIndex; // visit upper slots if present
}
}
/**
* Saves traversal state upon encountering a forwarding node.
*/
private void pushState(Node<K>[] t, int i, int n) {
TableStack<K> s = spare; // reuse if possible
if (s != null)
spare = s.next;
else
s = new TableStack<K>();
s.tab = t;
s.length = n;
s.index = i;
s.next = stack;
stack = s;
}
/**
* Possibly pops traversal state.
*
* @param n length of current table
*/
private void recoverState(int n) {
TableStack<K> s;
int len;
while ((s = stack) != null && (index += (len = s.length)) >= n) {
n = len;
index = s.index;
tab = s.tab;
s.tab = null;
TableStack<K> next = s.next;
s.next = spare; // save for reuse
stack = next;
spare = s;
}
if (s == null && (index += baseSize) >= n)
index = ++baseIndex;
}
}
/**
* Base of this set's iterators (the original map's value and entry iterators were removed).
* Adds fields to Traverser to support iterator.remove.
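* <p>
* A minimal, illustrative sketch of the resulting weakly consistent iteration semantics:
*
* <pre>
* CompactConcurrentHashSet2&lt;String&gt; set = new CompactConcurrentHashSet2&lt;&gt;();
* set.add("a");
* Iterator&lt;String&gt; it = set.iterator();
* set.add("b"); // concurrent add; may or may not be seen by it
* while (it.hasNext()) {
*   it.next();
*   it.remove(); // supported; delegates to removeNode
* }
* </pre>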
*/
static class BaseIterator<K> extends Traverser<K> {
final CompactConcurrentHashSet2<K> set;
Node<K> lastReturned;
BaseIterator(Node<K>[] tab, int size, int index, int limit, CompactConcurrentHashSet2<K> set) {
super(tab, size, index, limit);
this.set = set;
advance();
}
public boolean hasNext() {
return next != null;
}
public boolean hasMoreElements() {
return next != null;
}
public void remove() {
Node<K> p;
if ((p = lastReturned) == null)
throw new IllegalStateException();
lastReturned = null;
set.removeNode(p.key);
}
}
static class KeyIterator<K> extends BaseIterator<K> implements Iterator<K>, Enumeration<K> {
KeyIterator(Node<K>[] tab, int size, int index, int limit, CompactConcurrentHashSet2<K> set) {
super(tab, size, index, limit, set);
}
@Override
public K next() {
Node<K> p;
if ((p = next) == null)
throw new NoSuchElementException();
K k = p.key;
lastReturned = p;
advance();
return k;
}
@Override
public K nextElement() {
return next();
}
}
/* ---------------- Counters -------------- */
// Adapted from LongAdder and Striped64.
// See their internal docs for explanation.
// A padded cell for distributing counts
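// the p0..p6 and q0..q6 longs pad the value field on both sides so that hot cells do not
// share a cache line, avoiding false sharing between threads updating adjacent cells
// (this predates @sun.misc.Contended)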
static class CounterCell {
volatile long p0, p1, p2, p3, p4, p5, p6;
volatile long value;
volatile long q0, q1, q2, q3, q4, q5, q6;
CounterCell(long x) {
value = x;
}
}
/**
* Holder for the thread-local hash code determining which CounterCell to use. The code is
* initialized via the counterHashCodeGenerator, but may be moved upon collisions.
*/
static class CounterHashCode {
int code;
}
/**
* Generates initial value for per-thread CounterHashCodes.
*/
@MakeNotStatic("Possibly OK as a singleton?")
static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();
/**
* Increment for counterHashCodeGenerator. See class ThreadLocal for explanation.
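* The value 0x61c88647 is the golden-ratio-based increment also used by ThreadLocal; it
* spreads successive seeds evenly across the 32-bit hash range.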
*/
static final int SEED_INCREMENT = 0x61c88647;
/**
* Per-thread counter hash codes. Shared across all instances.
*/
static final ThreadLocal<CounterHashCode> threadCounterHashCode =
new ThreadLocal<CounterHashCode>();
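// Returns baseCount plus all counter cell values. As with LongAdder.sum(), the result is only
// a moment-in-time estimate if counts are concurrently updated.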
long sumCount() {
CounterCell[] as = counterCells;
CounterCell a;
long sum = baseCount;
if (as != null) {
for (int i = 0; i < as.length; ++i) {
if ((a = as[i]) != null)
sum += a.value;
}
}
return sum;
}
// See LongAdder version for explanation
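// Strategy: probe a CounterCell keyed by the thread's hash code and CAS its value. On
// contention, rehash the probe; on repeated collisions, double the cell table (bounded by
// NCPU); if the cell table is absent or busy, fall back to a CAS on baseCount.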
private void fullAddCount(long x, CounterHashCode hc, boolean wasUncontended) {
int h;
if (hc == null) {
hc = new CounterHashCode();
int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
h = hc.code = (s == 0) ? 1 : s; // Avoid zero
threadCounterHashCode.set(hc);
} else
h = hc.code;
boolean collide = false; // True if last slot nonempty
for (;;) {
CounterCell[] as;
CounterCell a;
int n;
long v;
if ((as = counterCells) != null && (n = as.length) > 0) {
if ((a = as[(n - 1) & h]) == null) {
if (cellsBusy == 0) { // Try to attach new Cell
CounterCell r = new CounterCell(x); // Optimistic create
if (cellsBusy == 0 && U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
boolean created = false;
try { // Recheck under lock
CounterCell[] rs;
int m, j;
if ((rs = counterCells) != null && (m = rs.length) > 0
&& rs[j = (m - 1) & h] == null) {
rs[j] = r;
created = true;
}
} finally {
cellsBusy = 0;
}
if (created)
break;
continue; // Slot is now non-empty
}
}
collide = false;
} else if (!wasUncontended) // CAS already known to fail
wasUncontended = true; // Continue after rehash
else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
break;
else if (counterCells != as || n >= NCPU)
collide = false; // At max size or stale
else if (!collide)
collide = true;
else if (cellsBusy == 0 && U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
try {
if (counterCells == as) {// Expand table unless stale
CounterCell[] rs = new CounterCell[n << 1];
for (int i = 0; i < n; ++i)
rs[i] = as[i];
counterCells = rs;
}
} finally {
cellsBusy = 0;
}
collide = false;
continue; // Retry with expanded table
}
h ^= h << 13; // Rehash
h ^= h >>> 17;
h ^= h << 5;
} else if (cellsBusy == 0 && counterCells == as
&& U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
boolean init = false;
try { // Initialize table
if (counterCells == as) {
CounterCell[] rs = new CounterCell[2];
rs[h & 1] = new CounterCell(x);
counterCells = rs;
init = true;
}
} finally {
cellsBusy = 0;
}
if (init)
break;
} else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
break; // Fall back on using base
}
hc.code = h; // Record index for next time
}
// Unsafe mechanics
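// As in TreeBin, theUnsafe is read reflectively because Unsafe.getUnsafe() rejects callers
// that were not loaded by the bootstrap classloader.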
@Immutable
private static final sun.misc.Unsafe U;
private static final long SIZECTL;
private static final long TRANSFERINDEX;
private static final long BASECOUNT;
private static final long CELLSBUSY;
private static final long CELLVALUE;
private static final long ABASE;
private static final int ASHIFT;
static {
try {
Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
U = (sun.misc.Unsafe) f.get(null);
Class<?> k = CompactConcurrentHashSet2.class;
SIZECTL = U.objectFieldOffset(k.getDeclaredField("sizeCtl"));
TRANSFERINDEX = U.objectFieldOffset(k.getDeclaredField("transferIndex"));
BASECOUNT = U.objectFieldOffset(k.getDeclaredField("baseCount"));
CELLSBUSY = U.objectFieldOffset(k.getDeclaredField("cellsBusy"));
Class<?> ck = CounterCell.class;
CELLVALUE = U.objectFieldOffset(ck.getDeclaredField("value"));
Class<?> ak = Node[].class;
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
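// element i of a Node[] is addressed at ABASE + ((long) i << ASHIFT); the volatile array
// accessors (such as tabAt, used by Traverser.advance above) rely on these values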
} catch (Exception e) {
throw new Error(e);
}
// Reduce the risk of rare disastrous classloading in first call to
// LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
Class<?> ensureLoaded = LockSupport.class;
}
}