HashMap
JDK1.7 HashMap
In JDK 1.7, data is stored as an array plus linked lists. The array is made up of Entry objects; the key is hashed and the result is used as the array index. When different keys hash to the same index, their Entry objects are chained into a linked list stored in that array slot.
Fields
/**
 * Default initial capacity.
 */
static final int DEFAULT_INITIAL_CAPACITY = 1 << 4;

/**
 * Maximum capacity.
 */
static final int MAXIMUM_CAPACITY = 1 << 30;

/**
 * Default load factor.
 */
static final float DEFAULT_LOAD_FACTOR = 0.75f;

/**
 * Shared empty table (the bucket array before it is inflated).
 */
static final Entry<?,?>[] EMPTY_TABLE = {};

/**
 * The bucket array, resized as necessary. Length MUST always be a power of two.
 */
transient Entry<K,V>[] table = (Entry<K,V>[]) EMPTY_TABLE;

/**
 * The number of key-value mappings contained in this map.
 */
transient int size;

/**
 * The next size value at which to resize (capacity * load factor).
 * Can be set explicitly at construction time.
 */
int threshold;

/**
 * The load factor for the hash table.
 * Can be set explicitly at construction time; defaults to 0.75.
 */
final float loadFactor;

/**
 * Makes HashMap fail-fast: structurally modifying the map (adding or
 * removing entries) while iterating over it causes a
 * ConcurrentModificationException.
 */
transient int modCount;
Initialization
public HashMap() {
    this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
}

public HashMap(int initialCapacity, float loadFactor) {
    if (initialCapacity < 0)
        throw new IllegalArgumentException("Illegal initial capacity: " +
                                           initialCapacity);
    if (initialCapacity > MAXIMUM_CAPACITY)
        initialCapacity = MAXIMUM_CAPACITY;
    if (loadFactor <= 0 || Float.isNaN(loadFactor))
        throw new IllegalArgumentException("Illegal load factor: " +
                                           loadFactor);

    this.loadFactor = loadFactor;
    threshold = initialCapacity;
    init();
}
When a HashMap is created with the no-argument constructor, the default capacity of 16 is used (stored in threshold until the table is inflated) together with the default load factor of 0.75. Once the number of entries reaches 16 * 0.75 = 12, the 16-slot table is resized. Resizing involves rehashing and copying every entry, which is expensive, so it is best to estimate the map's size in advance and avoid resizing altogether.
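As a rough illustration, the usual trick is to divide the expected entry count by the load factor when choosing the initial capacity, so the threshold is never crossed while filling the map. A minimal sketch (class name and numbers are invented for the example):

import java.util.HashMap;
import java.util.Map;

public class PresizeDemo {
    public static void main(String[] args) {
        int expectedEntries = 1000;
        // Pick a capacity such that expectedEntries never exceeds
        // capacity * 0.75, so no resize happens while filling the map.
        int initialCapacity = (int) (expectedEntries / 0.75f) + 1;
        Map<Integer, String> map = new HashMap<>(initialCapacity);
        for (int i = 0; i < expectedEntries; i++) {
            map.put(i, "value-" + i);
        }
        System.out.println(map.size()); // 1000, with no intermediate resize
    }
}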
The put method
public V put(K key, V value) {
    if (table == EMPTY_TABLE) {
        inflateTable(threshold);
    }
    if (key == null)
        return putForNullKey(value);
    int hash = hash(key);
    int i = indexFor(hash, table.length);
    for (Entry<K,V> e = table[i]; e != null; e = e.next) {
        Object k;
        if (e.hash == hash && ((k = e.key) == key || key.equals(k))) {
            V oldValue = e.value;
            e.value = value;
            e.recordAccess(this);
            return oldValue;
        }
    }

    modCount++;
    addEntry(hash, key, value, i);
    return null;
}
The flow of put:
1. If table is still EMPTY_TABLE, inflate it: the table array is lazily allocated, so creating the map does not create the table.
2. If the key is null, store the entry via putForNullKey.
3. Hash the key and derive the array index from the hash.
4. Walk the linked list at that index; several entries there mean a hash collision. If an entry with an equal key already exists, overwrite its value and return the old one; otherwise fall out of the loop.
5. Increment modCount.
6. Call addEntry to add the data to the Entry array.
/**
 * Inflates the table.
 */
private void inflateTable(int toSize) {
    int capacity = roundUpToPowerOf2(toSize);

    threshold = (int) Math.min(capacity * loadFactor, MAXIMUM_CAPACITY + 1);
    table = new Entry[capacity];
    initHashSeedAsNeeded(capacity);
}

private static int roundUpToPowerOf2(int number) {
    return number >= MAXIMUM_CAPACITY
            ? MAXIMUM_CAPACITY
            : (number > 1) ? Integer.highestOneBit((number - 1) << 1) : 1;
}

final boolean initHashSeedAsNeeded(int capacity) {
    boolean currentAltHashing = hashSeed != 0;
    boolean useAltHashing = sun.misc.VM.isBooted() &&
            (capacity >= Holder.ALTERNATIVE_HASHING_THRESHOLD);
    boolean switching = currentAltHashing ^ useAltHashing;
    if (switching) {
        hashSeed = useAltHashing
            ? sun.misc.Hashing.randomHashSeed(this)
            : 0;
    }
    return switching;
}
roundUpToPowerOf2 computes the smallest power of two greater than or equal to toSize. If the requested capacity is 18, the result is 32; Math.min() then yields the resize threshold, threshold = 32 * 0.75 = 24, and the new Entry array has length 32. initHashSeedAsNeeded is responsible for initializing the hashSeed.
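To sanity-check that arithmetic, here is a throwaway snippet repeating the JDK expression outside the class (the MAXIMUM_CAPACITY clamp is omitted for brevity):

public class PowerOf2Demo {
    // Same expression as roundUpToPowerOf2 in the source above.
    static int roundUpToPowerOf2(int number) {
        return (number > 1) ? Integer.highestOneBit((number - 1) << 1) : 1;
    }

    public static void main(String[] args) {
        System.out.println(roundUpToPowerOf2(18));               // 32
        System.out.println((int) Math.min(32 * 0.75f, 1 << 30)); // threshold: 24
    }
}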
final int hash(Object k) {
    int h = hashSeed;
    if (0 != h && k instanceof String) {
        return sun.misc.Hashing.stringHash32((String) k);
    }

    h ^= k.hashCode();
    h ^= (h >>> 20) ^ (h >>> 12);
    return h ^ (h >>> 7) ^ (h >>> 4);
}
Note that the alternative hash algorithm is used only when the key is a String and hashSeed is non-zero; sun.misc.Hashing.stringHash32 actually delegates to the String.hash32 method.
void addEntry(int hash, K key, V value, int bucketIndex) {
    if ((size >= threshold) && (null != table[bucketIndex])) {
        resize(2 * table.length);
        hash = (null != key) ? hash(key) : 0;
        bucketIndex = indexFor(hash, table.length);
    }

    createEntry(hash, key, value, bucketIndex);
}

void resize(int newCapacity) {
    Entry[] oldTable = table;
    int oldCapacity = oldTable.length;
    if (oldCapacity == MAXIMUM_CAPACITY) {
        threshold = Integer.MAX_VALUE;
        return;
    }

    Entry[] newTable = new Entry[newCapacity];
    transfer(newTable, initHashSeedAsNeeded(newCapacity));
    table = newTable;
    threshold = (int) Math.min(newCapacity * loadFactor, MAXIMUM_CAPACITY + 1);
}

void createEntry(int hash, K key, V value, int bucketIndex) {
    Entry<K,V> e = table[bucketIndex];
    table[bucketIndex] = new Entry<>(hash, key, value, e);
    size++;
}
If size has reached the threshold and the target bucket is already occupied, the table has to grow: resize doubles the capacity, creating a new array inside the method. Each key's hash is recomputed (the hashSeed may have changed) and the entries are transferred to the new array, after which a new resize threshold is computed. Once any resize is done, createEntry runs: it reads the current head of the target bucket, puts a new Entry at the head of the list, and links the previous head in as the new entry's next.
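Since indexFor(h, length) is just h & (length - 1), doubling the capacity means a single extra hash bit decides each entry's new slot: the entry either stays at the same index or moves by exactly oldCapacity. A small standalone check with two hypothetical hash values (assuming the hashSeed did not change, so the hashes themselves stay the same):

public class ResizeIndexDemo {
    public static void main(String[] args) {
        int oldLen = 16, newLen = 32;
        int[] hashes = {65, 17}; // hypothetical entry hashes
        for (int h : hashes) {
            // indexFor(h, length) in JDK 1.7 is h & (length - 1)
            System.out.println(h + ": " + (h & (oldLen - 1))
                               + " -> " + (h & (newLen - 1)));
        }
        // 65: 1 -> 1    (the extra bit is 0: same index)
        // 17: 1 -> 17   (the extra bit is 1: index + oldLen)
    }
}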
The get method
public V get(Object key) {
    if (key == null)
        return getForNullKey();
    Entry<K,V> entry = getEntry(key);

    return null == entry ? null : entry.getValue();
}
getForNullKey: entries with a null key always live in bucket 0, so the lookup simply walks the list at index 0 and, if a null-key entry is there, returns its value. For a non-null key, getEntry(Object key) hashes the key with hash(key), computes the index, fetches the linked list at that index, and compares keys with equals until the matching Entry is found and returned.
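A quick usage check of both paths (this runs against any JDK HashMap; the bucket mechanics in the comments are the 1.7 internals described above):

import java.util.HashMap;
import java.util.Map;

public class NullKeyDemo {
    public static void main(String[] args) {
        Map<String, Integer> m = new HashMap<>();
        m.put(null, 1);  // stored via putForNullKey (bucket 0)
        m.put("A", 2);   // stored via hash(key) + indexFor(...)
        System.out.println(m.get(null)); // 1, found via getForNullKey
        System.out.println(m.get("A"));  // 2, found via getEntry
    }
}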
JDK1.8 HashMap
The 1.8 implementation differs from 1.7 in many ways. Data is still stored as an array plus linked lists, except that once a list grows past a threshold it is converted into a red-black tree. In 1.7, if every record lands in the same bucket, an average lookup traverses half the list, so the time complexity is O(n); in 1.8 the tree brings it down to O(log n).
Basic parameters
/**
 * Default capacity: 16.
 */
static final int DEFAULT_INITIAL_CAPACITY = 1 << 4;

/**
 * Maximum capacity.
 */
static final int MAXIMUM_CAPACITY = 1 << 30;

/**
 * Default load factor.
 */
static final float DEFAULT_LOAD_FACTOR = 0.75f;

/**
 * Threshold for converting a linked list to a red-black tree:
 * more than 8 nodes.
 */
static final int TREEIFY_THRESHOLD = 8;

/**
 * Threshold for converting a red-black tree back to a linked list:
 * 6 nodes or fewer.
 */
static final int UNTREEIFY_THRESHOLD = 6;

/**
 * Minimum table length before any bin may be treeified.
 */
static final int MIN_TREEIFY_CAPACITY = 64;
Hash and index computation
static final int hash(Object key) {
    int h;
    return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
}

// Locating the bucket:
int n = tab.length;
int index = (n - 1) & hash;  // the bucket's head node is tab[index]
Whether inserting, deleting, or looking up, locating the right bucket is the critical step. HashMap is a combination of array, linked lists, and red-black trees, and the way to keep it efficient is to spread the hashes as evenly as possible.
If the key is null, the entry goes straight into the first array slot. Otherwise the key's hashCode is taken and its high 16 bits are mixed in with >>> (the unsigned right shift: the sign bit is ignored and vacated positions are filled with zeros).
The hashCode itself comes from the key's own hashCode method.
Finally, ANDing (table length - 1) with the resulting hash gives the final array index.
For example, with the string "A" as the key, String.hashCode returns 65, and 65 ^ (65 >>> 16) = 65.
Then 16 (the default table length) - 1, ANDed with 65, gives 1:
15 in binary is 0000 1111
65 in binary is 0100 0001
ANDing them gives 0000 0001, which equals 1.
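The same computation can be verified with a throwaway snippet that reproduces the spread function shown above:

public class IndexDemo {
    // Reproduces the JDK 1.8 hash spread shown above.
    static int hash(Object key) {
        int h;
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    public static void main(String[] args) {
        int h = hash("A");                // "A".hashCode() is 65; 65 ^ (65 >>> 16) == 65
        System.out.println((16 - 1) & h); // 1
    }
}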
The put method
final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
               boolean evict) {
    Node<K,V>[] tab; Node<K,V> p; int n, i;
    // The table is allocated lazily, on the first put.
    if ((tab = table) == null || (n = tab.length) == 0)
        n = (tab = resize()).length;
    // Empty bucket: place the new node directly.
    if ((p = tab[i = (n - 1) & hash]) == null)
        tab[i] = newNode(hash, key, value, null);
    else {
        Node<K,V> e; K k;
        // Head node matches the key.
        if (p.hash == hash &&
            ((k = p.key) == key || (key != null && key.equals(k))))
            e = p;
        // Bucket is already a red-black tree.
        else if (p instanceof TreeNode)
            e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
        else {
            // Walk the list; append at the tail if the key is absent.
            for (int binCount = 0; ; ++binCount) {
                if ((e = p.next) == null) {
                    p.next = newNode(hash, key, value, null);
                    if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for first node
                        treeifyBin(tab, hash);
                    break;
                }
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k))))
                    break;
                p = e;
            }
        }
        if (e != null) { // existing mapping for key
            V oldValue = e.value;
            if (!onlyIfAbsent || oldValue == null)
                e.value = value;
            afterNodeAccess(e);
            return oldValue;
        }
    }
    ++modCount;
    if (++size > threshold)
        resize();
    afterNodeInsertion(evict);
    return null;
}
The get method
final HashMap.Node<K,V> getNode(int hash, Object key) {
    HashMap.Node<K,V>[] tab;
    HashMap.Node<K,V> first, e;
    int n; K k;
    if ((tab = table) != null && (n = tab.length) > 0 &&
        (first = tab[(n - 1) & hash]) != null) {
        if (first.hash == hash &&
            ((k = first.key) == key || (key != null && key.equals(k))))
            return first;
        if ((e = first.next) != null) {
            if (first instanceof HashMap.TreeNode)
                return ((HashMap.TreeNode<K,V>)first).getTreeNode(hash, key);
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k))))
                    return e;
            } while ((e = e.next) != null);
        }
    }
    return null;
}
The getTreeNode(hash, key) method
/**
 * Calls find for root node.
 */
final TreeNode<K,V> getTreeNode(int h, Object k) {
    return ((parent != null) ? root() : this).find(h, k, null);
}

/**
 * Returns root of tree containing this node.
 */
final TreeNode<K,V> root() {
    for (TreeNode<K,V> r = this, p;;) {
        if ((p = r.parent) == null)
            return r;
        r = p;
    }
}
/**
 * Finds the node starting at root p with the given hash and key.
 * The kc argument caches comparableClassFor(key) upon first use
 * comparing keys.
 * This is a red-black tree traversal; a red-black tree is a special
 * self-balancing binary search tree, so left child < parent < right child.
 */
final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
    TreeNode<K,V> p = this;
    do {
        int ph, dir; K pk;
        TreeNode<K,V> pl = p.left, pr = p.right, q;
        if ((ph = p.hash) > h)
            p = pl;
        else if (ph < h)
            p = pr;
        else if ((pk = p.key) == k || (k != null && k.equals(pk)))
            return p;
        else if (pl == null)
            p = pr;
        else if (pr == null)
            p = pl;
        else if ((kc != null ||
                  (kc = comparableClassFor(k)) != null) &&
                 (dir = compareComparables(kc, k, pk)) != 0)
            p = (dir < 0) ? pl : pr;
        else if ((q = pr.find(h, k, kc)) != null)
            return q;
        else
            p = pl;
    } while (p != null);
    return null;
}
ConcurrentHashMap
Why use ConcurrentHashMap?
1. HashMap is not thread-safe
In a multi-threaded environment, HashMap's put can cause an infinite loop that drives CPU utilization to nearly 100%: concurrent puts can turn an Entry chain into a circular structure during a resize, and once a cycle exists, an Entry's next is never null, so traversing that bucket loops forever.
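A minimal sketch of the unsafe pattern (a hypothetical demo class; on JDK 7 the concurrent transfer during resize can link an Entry chain into a cycle, though the failure is timing-dependent and not guaranteed to reproduce):

import java.util.HashMap;
import java.util.Map;

public class UnsafeHashMapDemo {
    public static void main(String[] args) {
        Map<Integer, Integer> map = new HashMap<>(); // not thread-safe
        Runnable writer = () -> {
            for (int i = 0; i < 100_000; i++) {
                // Concurrent puts race on the resize of the shared table.
                map.put((int) (Math.random() * 100_000), i);
            }
        };
        new Thread(writer).start();
        new Thread(writer).start();
        // If a cycle forms in an Entry chain, a later get()/put() that
        // walks that bucket never sees next == null and spins forever.
    }
}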
2. Hashtable is inefficient
Most of Hashtable's methods are marked synchronized to guarantee thread safety, so under thread contention Hashtable performs very poorly.
3. ConcurrentHashMap's segment locking
ConcurrentHashMap splits the data into segments and gives each segment its own lock. While one thread holds the lock to access one segment, the data in the other segments remains accessible to other threads.
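The idea can be sketched independently of the JDK source. Below is a hypothetical, minimal lock-striping illustration, not the real Segment implementation (which extends ReentrantLock and relies on volatile reads and Unsafe):

import java.util.HashMap;
import java.util.Map;

// Hypothetical illustration of lock striping: one lock and one map per segment.
public class StripedMap<K, V> {
    private static final int SEGMENTS = 16;
    private final Object[] locks = new Object[SEGMENTS];
    @SuppressWarnings("unchecked")
    private final Map<K, V>[] segments = new Map[SEGMENTS];

    public StripedMap() {
        for (int i = 0; i < SEGMENTS; i++) {
            locks[i] = new Object();
            segments[i] = new HashMap<>();
        }
    }

    // High bits of the hash pick the segment (null keys not supported).
    private int segmentFor(Object key) {
        return (key.hashCode() >>> 28) & (SEGMENTS - 1);
    }

    public V put(K key, V value) {
        int s = segmentFor(key);
        synchronized (locks[s]) {               // lock only this segment;
            return segments[s].put(key, value); // the other 15 stay available
        }
    }

    public V get(Object key) {
        int s = segmentFor(key);
        synchronized (locks[s]) {
            return segments[s].get(key);
        }
    }
}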
The structure of ConcurrentHashMap
1.7 ConcurrentHashMap
ConcurrentHashMap uses segment locking: only data inside the same segment competes for a lock, so an operation never locks the whole map.
The segments form an array, and each segment holds a table array of its own. The key's hash determines the position in that table, and entries with the same hash but different keys form a linked list.
Basic parameters
/**
 * Default table capacity.
 */
static final int DEFAULT_INITIAL_CAPACITY = 16;

/**
 * Default load factor.
 */
static final float DEFAULT_LOAD_FACTOR = 0.75f;

/**
 * Default concurrency level.
 */
static final int DEFAULT_CONCURRENCY_LEVEL = 16;

/**
 * Maximum table capacity.
 */
static final int MAXIMUM_CAPACITY = 1 << 30;

/**
 * Minimum per-segment table capacity.
 */
static final int MIN_SEGMENT_TABLE_CAPACITY = 2;

/**
 * Maximum number of segments.
 */
static final int MAX_SEGMENTS = 1 << 16;

/**
 * Number of unsynchronized retries before locking; this avoids
 * unbounded retries, which could never get an accurate result if the
 * table is being modified continuously.
 */
static final int RETRIES_BEFORE_LOCK = 2;
ConcurrentHashMap initialization
public ConcurrentHashMap(int initialCapacity, float loadFactor,
                         int concurrencyLevel) {
    if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
        throw new IllegalArgumentException();
    if (concurrencyLevel > MAX_SEGMENTS)
        concurrencyLevel = MAX_SEGMENTS;
    // Find power-of-two sizes best matching arguments
    int sshift = 0;
    int ssize = 1;
    while (ssize < concurrencyLevel) {
        ++sshift;
        ssize <<= 1;
    }
    this.segmentShift = 32 - sshift;
    this.segmentMask = ssize - 1;
    if (initialCapacity > MAXIMUM_CAPACITY)
        initialCapacity = MAXIMUM_CAPACITY;
    int c = initialCapacity / ssize;
    if (c * ssize < initialCapacity)
        ++c;
    int cap = MIN_SEGMENT_TABLE_CAPACITY;
    while (cap < c)
        cap <<= 1;
    // Create segments and segments[0]
    Segment<K,V> s0 =
        new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
                         (HashEntry<K,V>[])new HashEntry[cap]);
    Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
    UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
    this.segments = ss;
}
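For the default concurrencyLevel = 16, the loop stops at ssize = 16 with sshift = 4, so segmentShift = 32 - 4 = 28 and segmentMask = 15. A quick standalone check of that loop:

public class SegmentSizingDemo {
    public static void main(String[] args) {
        int concurrencyLevel = 16;
        int sshift = 0, ssize = 1;
        while (ssize < concurrencyLevel) { // same loop as the constructor above
            ++sshift;
            ssize <<= 1;
        }
        System.out.println("segmentShift = " + (32 - sshift)); // 28
        System.out.println("segmentMask  = " + (ssize - 1));   // 15
    }
}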
The get method
public V get(Object key) {
    Segment<K,V> s;
    HashEntry<K,V>[] tab;
    int h = hash(key);
    long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
    if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
        (tab = s.table) != null) {
        for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
                 (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
             e != null; e = e.next) {
            K k;
            if ((k = e.key) == key || (e.hash == h && key.equals(k)))
                return e.value;
        }
    }
    return null;
}
The put method
public V put(K key, V value) {
    Segment<K,V> s;
    if (value == null)
        throw new NullPointerException();
    int hash = hash(key);
    int j = (hash >>> segmentShift) & segmentMask;
    if ((s = (Segment<K,V>)UNSAFE.getObject          // nonvolatile; recheck
         (segments, (j << SSHIFT) + SBASE)) == null) //  in ensureSegment
        s = ensureSegment(j);
    return s.put(key, hash, value, false);
}

private Segment<K,V> ensureSegment(int k) {
    final Segment<K,V>[] ss = this.segments;
    long u = (k << SSHIFT) + SBASE; // raw offset
    Segment<K,V> seg;
    if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
        Segment<K,V> proto = ss[0]; // use segment 0 as prototype
        int cap = proto.table.length;
        float lf = proto.loadFactor;
        int threshold = (int)(cap * lf);
        HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
            == null) { // recheck
            Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
            while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
                   == null) {
                if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
                    break;
            }
        }
    }
    return seg;
}
final V put(K key, int hash, V value, boolean onlyIfAbsent) {
    HashEntry<K,V> node = tryLock() ? null :
        scanAndLockForPut(key, hash, value);
    V oldValue;
    try {
        HashEntry<K,V>[] tab = table;
        // hash is the key's hash; locate the key's slot in the HashEntry table
        int index = (tab.length - 1) & hash;
        HashEntry<K,V> first = entryAt(tab, index);
        for (HashEntry<K,V> e = first;;) {
            if (e != null) {
                K k;
                if ((k = e.key) == key ||
                    (e.hash == hash && key.equals(k))) {
                    oldValue = e.value;
                    if (!onlyIfAbsent) {
                        e.value = value;
                        ++modCount;
                    }
                    break;
                }
                e = e.next;
            }
            else {
                if (node != null)
                    node.setNext(first);
                else
                    node = new HashEntry<K,V>(hash, key, value, first);
                int c = count + 1;
                if (c > threshold && tab.length < MAXIMUM_CAPACITY)
                    rehash(node);
                else
                    setEntryAt(tab, index, node);
                ++modCount;
                count = c;
                oldValue = null;
                break;
            }
        }
    } finally {
        unlock();
    }
    return oldValue;
}
rehash()
A put may trigger a resize of the segment's table.
private void rehash(HashEntry<K,V> node) {
    HashEntry<K,V>[] oldTable = table;
    int oldCapacity = oldTable.length;
    int newCapacity = oldCapacity << 1;
    threshold = (int)(newCapacity * loadFactor);
    HashEntry<K,V>[] newTable =
        (HashEntry<K,V>[]) new HashEntry[newCapacity];
    int sizeMask = newCapacity - 1;
    for (int i = 0; i < oldCapacity; i++) {
        HashEntry<K,V> e = oldTable[i];
        if (e != null) {
            HashEntry<K,V> next = e.next;
            int idx = e.hash & sizeMask;
            if (next == null)   // Single node on list
                newTable[idx] = e;
            else {              // Reuse consecutive sequence at same slot
                HashEntry<K,V> lastRun = e;
                int lastIdx = idx;
                for (HashEntry<K,V> last = next;
                     last != null;
                     last = last.next) {
                    int k = last.hash & sizeMask;
                    if (k != lastIdx) {
                        lastIdx = k;
                        lastRun = last;
                    }
                }
                newTable[lastIdx] = lastRun;
                // Clone remaining nodes
                for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
                    V v = p.value;
                    int h = p.hash;
                    int k = h & sizeMask;
                    HashEntry<K,V> n = newTable[k];
                    newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
                }
            }
        }
    }
    int nodeIndex = node.hash & sizeMask; // add the new node
    node.setNext(newTable[nodeIndex]);
    newTable[nodeIndex] = node;
    table = newTable;
}
1.8 ConcurrentHashMap
The JDK 1.8 ConcurrentHashMap is almost a complete rewrite of 1.7. The Segment locking structure is gone, replaced by an array plus linked lists (and red-black trees), and the locking granularity is reduced to a single array element (Node). The hash that locates a node was also simplified, which worsens hash collisions; to compensate, a linked list with more than 8 nodes is converted to a red-black tree, bringing lookup complexity from O(n) down to O(log n).
/**
 * Maximum table capacity.
 */
private static final int MAXIMUM_CAPACITY = 1 << 30;

/**
 * Default table capacity.
 */
private static final int DEFAULT_CAPACITY = 16;

/**
 * Largest array size when converting the keys to an array,
 * e.g. concurrentHashMap.keySet().toArray().
 */
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

/**
 * Default concurrency level (in 1.8 retained mainly for
 * compatibility with previous versions).
 */
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

/**
 * Default load factor.
 */
private static final float LOAD_FACTOR = 0.75f;

/**
 * Threshold for converting a list to a red-black tree: the bucket at
 * table[i] is treeified once its list grows longer than 8 nodes.
 */
static final int TREEIFY_THRESHOLD = 8;

/**
 * Threshold for converting a tree back to a list (during resize):
 * 6 nodes or fewer.
 */
static final int UNTREEIFY_THRESHOLD = 6;
Initialization
private transient volatile int sizeCtl;
sizeCtl controls the initialization and resizing of table[]; its value encodes the state:
- -1: the table is being initialized, and other threads should yield their CPU time slice.
- -N: a resize is in progress (the high 16 bits are a stamp derived from the table length, the low 16 bits track the number of resizing threads); N-1 threads are currently helping.
- greater than 0: if the table is already initialized, the next resize threshold (0.75 of the table size); if not yet initialized, the size to allocate on initialization.
public ConcurrentHashMap(int initialCapacity, float loadFactor,
                         int concurrencyLevel) {
    if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
        throw new IllegalArgumentException();
    if (initialCapacity < concurrencyLevel)
        initialCapacity = concurrencyLevel;
    long size = (long)(1.0 + (long)initialCapacity / loadFactor);
    int cap = (size >= (long)MAXIMUM_CAPACITY) ?
        MAXIMUM_CAPACITY : tableSizeFor((int)size);
    this.sizeCtl = cap;
}
tableSizeFor computes the size of the table array:
int cap = (size >= (long)MAXIMUM_CAPACITY) ?
    MAXIMUM_CAPACITY : tableSizeFor((int)size);

private static final int tableSizeFor(int c) {
    int n = c - 1;
    n |= n >>> 1;
    n |= n >>> 2;
    n |= n >>> 4;
    n |= n >>> 8;
    n |= n >>> 16;
    return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
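The bit-smearing ORs the highest set bit of c - 1 into every lower position, so n + 1 is the next power of two. A throwaway copy of the method confirms the values (the MAXIMUM_CAPACITY clamp is simplified away):

public class TableSizeForDemo {
    // Same bit-smearing as tableSizeFor in the source above.
    static int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : n + 1;
    }

    public static void main(String[] args) {
        System.out.println(tableSizeFor(18)); // 32
        System.out.println(tableSizeFor(16)); // 16 (already a power of two)
    }
}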
put
public V put(K key, V value) {
    return putVal(key, value, false);
}

/** Implementation for put and putIfAbsent */
final V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null)
        throw new NullPointerException();
    int hash = spread(key.hashCode());
    int binCount = 0;
    for (ConcurrentHashMap.Node<K,V>[] tab = table;;) {
        ConcurrentHashMap.Node<K,V> f;
        int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            // Empty bucket: CAS in a new node, no lock needed.
            if (casTabAt(tab, i, null,
                         new ConcurrentHashMap.Node<K,V>(hash, key, value, null)))
                break;
        }
        else if ((fh = f.hash) == MOVED)
            // The table is being resized: help with the transfer.
            tab = helpTransfer(tab, f);
        else {
            V oldVal = null;
            synchronized (f) { // lock only this bucket's head node
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) { // ordinary linked list
                        binCount = 1;
                        for (ConcurrentHashMap.Node<K,V> e = f;; ++binCount) {
                            K ek;
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            ConcurrentHashMap.Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                pred.next = new ConcurrentHashMap.Node<K,V>(hash, key,
                                                                            value, null);
                                break;
                            }
                        }
                    }
                    else if (f instanceof ConcurrentHashMap.TreeBin) { // red-black tree
                        ConcurrentHashMap.Node<K,V> p;
                        binCount = 2;
                        if ((p = ((ConcurrentHashMap.TreeBin<K,V>)f).putTreeVal(hash, key,
                                                                                value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    addCount(1L, binCount);
    return null;
}
Helping with a resize: helpTransfer
/**
 * Helps transfer if a resize is in progress.
 * Continuing from the put code above: reaching this method means the
 * bucket this put hashes to is a ForwardingNode, i.e. the table is
 * being resized, so the current thread helps with the transfer.
 * tab: the node array; f: the head node of the target bucket.
 */
final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
    Node<K,V>[] nextTab;
    int sc;
    if (tab != null && (f instanceof ForwardingNode) &&
        (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
        int rs = resizeStamp(tab.length);
        while (nextTab == nextTable && table == tab &&
               (sc = sizeCtl) < 0) {
            if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                sc == rs + MAX_RESIZERS || transferIndex <= 0)
                break;
            if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
                transfer(tab, nextTab);
                break;
            }
        }
        return nextTab;
    }
    return table;
}
treeifyBin
When a list's length reaches the threshold of 8, it is converted into a red-black tree; the conversion happens in treeifyBin (unless the table is still smaller than MIN_TREEIFY_CAPACITY, in which case the table is resized instead).
/**
 * Replaces all linked nodes in bin at given index unless table is
 * too small, in which case resizes instead.
 */
private final void treeifyBin(ConcurrentHashMap.Node<K,V>[] tab, int index) {
    ConcurrentHashMap.Node<K,V> b;
    int n, sc;
    if (tab != null) {
        if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
            tryPresize(n << 1);
        else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
            synchronized (b) {
                if (tabAt(tab, index) == b) {
                    // Copy the list into a doubly linked list of TreeNodes...
                    ConcurrentHashMap.TreeNode<K,V> hd = null, tl = null;
                    for (ConcurrentHashMap.Node<K,V> e = b; e != null; e = e.next) {
                        ConcurrentHashMap.TreeNode<K,V> p =
                            new ConcurrentHashMap.TreeNode<K,V>(e.hash, e.key, e.val,
                                                                null, null);
                        if ((p.prev = tl) == null)
                            hd = p;
                        else
                            tl.next = p;
                        tl = p;
                    }
                    // ...then let the TreeBin constructor build the tree.
                    setTabAt(tab, index, new ConcurrentHashMap.TreeBin<K,V>(hd));
                }
            }
        }
    }
}
Red-black tree initialization: TreeBin
The red-black tree invariants (https://zh.wikipedia.org/wiki/红黑树):
- Every node is either red or black.
- The root is black.
- All leaves are black (the leaves are NIL nodes).
- Every red node has two black children (no path from a leaf to the root may contain two consecutive red nodes).
- Every simple path from a node down to each of its leaves contains the same number of black nodes.
tryPresize
/**
 * Called from putAll and when converting a bin to a red-black tree;
 * tries to resize the table to hold the given number of elements.
 */
private final void tryPresize(int size) {
    int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
        tableSizeFor(size + (size >>> 1) + 1);
    int sc;
    while ((sc = sizeCtl) >= 0) {
        ConcurrentHashMap.Node<K,V>[] tab = table;
        int n;
        if (tab == null || (n = tab.length) == 0) {
            n = (sc > c) ? sc : c;
            if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    if (table == tab) {
                        @SuppressWarnings("unchecked")
                        ConcurrentHashMap.Node<K,V>[] nt = (ConcurrentHashMap.Node<K,V>[])
                            new ConcurrentHashMap.Node<?,?>[n];
                        table = nt;
                        sc = n - (n >>> 2);
                    }
                } finally {
                    sizeCtl = sc;
                }
            }
        }
        else if (c <= sc || n >= MAXIMUM_CAPACITY)
            break;
        else if (tab == table) {
            int rs = resizeStamp(n);
            if (sc < 0) {
                ConcurrentHashMap.Node<K,V>[] nt;
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                    transferIndex <= 0)
                    break;
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                    transfer(tab, nt);
            }
            else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                         (rs << RESIZE_STAMP_SHIFT) + 2))
                transfer(tab, null);
        }
    }
}
TreeBin
/**
 * Creates bin with initial set of nodes headed by b.
 * This is where the red-black tree is built.
 */
TreeBin(ConcurrentHashMap.TreeNode<K,V> b) {
    super(TREEBIN, null, null, null);
    this.first = b;
    ConcurrentHashMap.TreeNode<K,V> r = null;
    for (TreeNode<K,V> x = b, next; x != null; x = next) {
        next = (TreeNode<K,V>)x.next;
        x.left = x.right = null;
        if (r == null) { // the first node becomes the (black) root
            x.parent = null;
            x.red = false;
            r = x;
        }
        else {
            K k = x.key;
            int h = x.hash;
            Class<?> kc = null;
            for (TreeNode<K,V> p = r;;) {
                int dir, ph;
                K pk = p.key;
                if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0)
                    dir = tieBreakOrder(k, pk);
                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    x.parent = xp;
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    r = balanceInsertion(r, x);
                    break;
                }
            }
        }
    }
    this.root = r;
    assert checkInvariants(root);
}