Understanding ConcurrentHashMap
HashMap comes up constantly at work, but it is not thread-safe under multi-threaded, high-concurrency access.
That is why ConcurrentHashMap exists: it is the thread-safe counterpart of HashMap, using segment-based locking to guarantee thread safety while still performing well under high concurrency.
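A minimal usage sketch (the class name Demo and the key "hits" are made up for illustration): two threads bump a shared counter. With a plain HashMap this could lose updates or corrupt the table; ConcurrentHashMap's atomic putIfAbsent keeps it safe.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class Demo {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentMap<String, AtomicInteger> counts = new ConcurrentHashMap<>();
        Runnable task = () -> {
            for (int i = 0; i < 1000; i++) {
                // putIfAbsent is atomic, so the two threads never install competing counters
                counts.putIfAbsent("hits", new AtomicInteger());
                counts.get("hits").incrementAndGet();
            }
        };
        Thread t1 = new Thread(task), t2 = new Thread(task);
        t1.start(); t2.start();
        t1.join(); t2.join();
        System.out.println(counts.get("hits").get()); // always 2000, no lost updates
    }
}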
Structure of ConcurrentHashMap
ConcurrentHashMap (in JDK 1.7) is built on segment locking: the map holds an array of Segments (16 by default), and each Segment maintains its own HashEntry table. Operating on a HashEntry only requires locking the owning Segment, which is why it performs well under high concurrency.
Let's start with a quick look at its components:
Fields
// default initial capacity of the whole table (not per-segment)
static final int DEFAULT_INITIAL_CAPACITY = 16;
// default load factor used when resizing
static final float DEFAULT_LOAD_FACTOR = 0.75f;
// default concurrency level, i.e. the number of Segments
static final int DEFAULT_CONCURRENCY_LEVEL = 16;
// maximum capacity of the table; must be a power of two
static final int MAXIMUM_CAPACITY = 1 << 30;
// minimum capacity of a single segment's table
static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
// maximum number of segments
static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
// unsynchronized retries in size() and containsValue() before falling back to locking
static final int RETRIES_BEFORE_LOCK = 2;
// mask and shift used to index into the segments array
final int segmentMask;
final int segmentShift;
// the segments array
final Segment<K,V>[] segments;
// lazily created view collections
transient Set<K> keySet;
transient Set<Map.Entry<K,V>> entrySet;
transient Collection<V> values;
Segment
// the segment's HashEntry table, where the data actually lives
transient volatile HashEntry<K,V>[] table;
// number of elements in this segment
transient int count;
// number of structural modifications
transient int modCount;
// resize (rehash) threshold
transient int threshold;
// load factor
final float loadFactor;
Note that a Segment is itself a lock: it extends ReentrantLock, so mutating operations on the same segment are mutually exclusive.
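For reference, the declaration in the JDK 1.7 source makes this inheritance explicit:

static final class Segment<K,V> extends ReentrantLock implements Serializable {
    ...
}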
HashEntry
HashEntry is the smallest-granularity data structure in ConcurrentHashMap:
final int hash;               // hash of the key, fixed at creation
final K key;
volatile V value;             // volatile, so get() can read it without locking
volatile HashEntry<K,V> next; // next entry in the bucket's linked list
Source Deep Dive
Construction
// all other constructors ultimately delegate to this one
@SuppressWarnings("unchecked")
public ConcurrentHashMap(int initialCapacity,
                         float loadFactor, int concurrencyLevel) {
    if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
        throw new IllegalArgumentException();
    if (concurrencyLevel > MAX_SEGMENTS)
        concurrencyLevel = MAX_SEGMENTS;
    // Find power-of-two sizes best matching arguments
    int sshift = 0;
    int ssize = 1;
    // keep ssize a power of two so segment indexing later only needs shifts and masks
    while (ssize < concurrencyLevel) {
        ++sshift;
        ssize <<= 1;
    }
    this.segmentShift = 32 - sshift;
    this.segmentMask = ssize - 1;
    if (initialCapacity > MAXIMUM_CAPACITY)
        initialCapacity = MAXIMUM_CAPACITY;
    int c = initialCapacity / ssize;
    if (c * ssize < initialCapacity)
        ++c;
    int cap = MIN_SEGMENT_TABLE_CAPACITY;
    // cap must be a power of two as well, for the same reason as ssize
    while (cap < c)
        cap <<= 1;
    // create segments and segments[0]
    // only segments[0] is initialized here; the rest are created lazily on first use
    Segment<K,V> s0 =
        new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
                         (HashEntry<K,V>[])new HashEntry[cap]);
    Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
    UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
    this.segments = ss;
}
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
    this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
}
public ConcurrentHashMap(int initialCapacity) {
    this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
}
public ConcurrentHashMap() {
    this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
}
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
    this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
                  DEFAULT_INITIAL_CAPACITY),
         DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
    putAll(m);
}
put
public V put(K key, V value) {
    Segment<K,V> s;
    if (value == null)
        throw new NullPointerException();
    int hash = hash(key);
    // the high bits of the hash pick the segment index
    int j = (hash >>> segmentShift) & segmentMask;
    if ((s = (Segment<K,V>)UNSAFE.getObject          // nonvolatile; recheck
         (segments, (j << SSHIFT) + SBASE)) == null) //  in ensureSegment
        // lazily initialize the segment if it does not exist yet
        s = ensureSegment(j);
    // delegate to the segment's put
    return s.put(key, hash, value, false);
}
Next, the put method inside Segment:
final V put(K key, int hash, V value, boolean onlyIfAbsent) {
    // try the lock first; on success node is null, otherwise scanAndLockForPut
    // spins (pre-scanning the bucket) and returns a speculatively created node
    HashEntry<K,V> node = tryLock() ? null :
        scanAndLockForPut(key, hash, value);
    V oldValue;
    try {
        HashEntry<K,V>[] tab = table;
        int index = (tab.length - 1) & hash;
        // head of the bucket's linked list
        HashEntry<K,V> first = entryAt(tab, index);
        for (HashEntry<K,V> e = first;;) {
            // the bucket is non-empty: search the list
            if (e != null) {
                K k;
                // match on reference equality, or on equal hash plus equals()
                if ((k = e.key) == key ||
                    (e.hash == hash && key.equals(k))) {
                    oldValue = e.value;
                    // with onlyIfAbsent just return the old value; otherwise overwrite
                    if (!onlyIfAbsent) {
                        e.value = value;
                        ++modCount;
                    }
                    // found it, done
                    break;
                }
                // not this one, move to the next entry
                e = e.next;
            }
            else {
                // link the new node in at the head of the bucket
                if (node != null)
                    node.setNext(first);
                else
                    node = new HashEntry<K,V>(hash, key, value, first);
                int c = count + 1;
                // rehash if the new count exceeds the threshold
                if (c > threshold && tab.length < MAXIMUM_CAPACITY)
                    rehash(node);
                else
                    setEntryAt(tab, index, node);
                ++modCount;
                count = c;
                oldValue = null;
                break;
            }
        }
    } finally {
        unlock();
    }
    return oldValue;
}
Now let's look at scanAndLockForPut. Its main job is to warm things up while waiting for the lock: it walks the bucket (pulling it into cache) and speculatively creates the new entry.
private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
    HashEntry<K,V> first = entryForHash(this, hash);
    HashEntry<K,V> e = first;
    HashEntry<K,V> node = null;
    int retries = -1; // negative while locating node
    // spin while the lock cannot be acquired
    while (!tryLock()) {
        HashEntry<K,V> f; // to recheck first below
        if (retries < 0) {
            if (e == null) {
                // reached the end of the list: speculatively create the node
                if (node == null) // speculatively create node
                    node = new HashEntry<K,V>(hash, key, value, null);
                retries = 0;
            }
            // the key already exists, nothing to pre-create
            else if (key.equals(e.key))
                retries = 0;
            else
                // keep walking the list
                e = e.next;
        }
        // spun too many times: block on the lock instead
        else if (++retries > MAX_SCAN_RETRIES) {
            lock();
            break;
        }
        // another thread may have changed the bucket head while we were
        // spinning, so re-check it every other iteration and rescan if it moved
        else if ((retries & 1) == 0 &&
                 (f = entryForHash(this, hash)) != first) {
            e = first = f; // re-traverse if entry changed
            retries = -1;
        }
    }
    return node;
}
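The spin limit MAX_SCAN_RETRIES is defined in the JDK 1.7 source to adapt to the machine:

// spin longer on multiprocessors, where the lock holder can make progress
// in parallel; on a single core, spinning only steals the holder's CPU time
static final int MAX_SCAN_RETRIES =
    Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;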
rehash
During put, if the count exceeds the threshold the segment is rehashed. Here is the implementation:
private void rehash(HashEntry<K,V> node) {
    HashEntry<K,V>[] oldTable = table;
    int oldCapacity = oldTable.length;
    int newCapacity = oldCapacity << 1;
    threshold = (int)(newCapacity * loadFactor);
    HashEntry<K,V>[] newTable =
        (HashEntry<K,V>[]) new HashEntry[newCapacity];
    int sizeMask = newCapacity - 1;
    for (int i = 0; i < oldCapacity ; i++) {
        HashEntry<K,V> e = oldTable[i];
        if (e != null) {
            HashEntry<K,V> next = e.next;
            int idx = e.hash & sizeMask;
            if (next == null)      // Single node on list
                newTable[idx] = e; // puzzle point 1 (see below)
            else { // Reuse consecutive sequence at same slot
                HashEntry<K,V> lastRun = e;
                int lastIdx = idx;
                for (HashEntry<K,V> last = next;
                     last != null;
                     last = last.next) {
                    int k = last.hash & sizeMask;
                    if (k != lastIdx) {
                        lastIdx = k;
                        lastRun = last;
                    }
                }
                // rehash has to redistribute every entry to its new bucket; as an
                // optimization, find the tail run of nodes that all map to the same
                // new bucket and move that whole run in one assignment
                newTable[lastIdx] = lastRun; // puzzle point 2 (see below)
                // Clone remaining nodes
                // the nodes before lastRun are cloned one by one into their buckets
                for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
                    V v = p.value;
                    int h = p.hash;
                    int k = h & sizeMask;
                    HashEntry<K,V> n = newTable[k];
                    newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
                }
            }
        }
    }
    int nodeIndex = node.hash & sizeMask; // add the new node
    node.setNext(newTable[nodeIndex]);
    newTable[nodeIndex] = node;
    table = newTable;
}
At first the lines newTable[idx] = e; and newTable[lastIdx] = lastRun; puzzled me; I assumed a later bucket meeting the same condition could overwrite data placed earlier.
After reading some other posts it clicked: the table always doubles in size, so an entry at index i in the old table can only land at i or i + oldCapacity in the new one. Entries from different old buckets therefore can never collide in the new table, and nothing gets overwritten.
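A quick check of that index math, assuming an old capacity of 4 (mask 3) doubling to 8 (mask 7): the new index differs from the old one only in the extra high bit.

int h1 = 0b0110;                  // old index h1 & 3 = 2, bit 2 set
int h2 = 0b0010;                  // old index h2 & 3 = 2, bit 2 clear
assert (h1 & 7) == (h1 & 3) + 4;  // moves to 2 + 4 = 6
assert (h2 & 7) == (h2 & 3);      // stays at 2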
ensureSegment
This is what put calls to lazily initialize a segment that has not been created yet:
private Segment<K,V> ensureSegment(int k) {
    final Segment<K,V>[] ss = this.segments;
    long u = (k << SSHIFT) + SBASE; // raw offset
    Segment<K,V> seg;
    if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
        Segment<K,V> proto = ss[0]; // use segment 0 as prototype
        int cap = proto.table.length;
        float lf = proto.loadFactor;
        int threshold = (int)(cap * lf);
        HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
            == null) { // recheck
            // build the new segment using segments[0]'s parameters as the prototype
            Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
            // publish with CAS, so if another thread got there first its segment wins
            while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
                   == null) {
                if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
                    break;
            }
        }
    }
    return seg;
}
get
The get method is self-explanatory. Note that it takes no lock at all: it relies on volatile reads (UNSAFE.getObjectVolatile plus HashEntry's volatile value field) to see up-to-date data.
public V get(Object key) {
    Segment<K,V> s; // manually integrate access methods to reduce overhead
    HashEntry<K,V>[] tab;
    int h = hash(key);
    long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
    if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
        (tab = s.table) != null) {
        for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
                 (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
             e != null; e = e.next) {
            K k;
            if ((k = e.key) == key || (e.hash == h && key.equals(k)))
                return e.value;
        }
    }
    return null;
}
size
// size() has to visit every segment. Locking them all up front would hurt
// performance, so it first does lock-free passes: if two consecutive passes
// see the same total modCount, nothing changed in between and the size can be
// returned directly. If the sums keep differing after RETRIES_BEFORE_LOCK
// attempts, it falls back to locking every segment.
public int size() {
    // Try a few times to get accurate count. On failure due to
    // continuous async changes in table, resort to locking.
    final Segment<K,V>[] segments = this.segments;
    int size;
    boolean overflow; // true if size overflows 32 bits
    long sum;         // sum of modCounts
    long last = 0L;   // previous sum
    int retries = -1; // first iteration isn't retry
    try {
        for (;;) {
            if (retries++ == RETRIES_BEFORE_LOCK) {
                for (int j = 0; j < segments.length; ++j)
                    ensureSegment(j).lock(); // force creation
            }
            sum = 0L;
            size = 0;
            overflow = false;
            for (int j = 0; j < segments.length; ++j) {
                Segment<K,V> seg = segmentAt(segments, j);
                if (seg != null) {
                    sum += seg.modCount;
                    int c = seg.count;
                    if (c < 0 || (size += c) < 0)
                        overflow = true;
                }
            }
            if (sum == last)
                break;
            last = sum;
        }
    } finally {
        if (retries > RETRIES_BEFORE_LOCK) {
            for (int j = 0; j < segments.length; ++j)
                segmentAt(segments, j).unlock();
        }
    }
    return overflow ? Integer.MAX_VALUE : size;
}
Differences Between 1.7 and 1.8
The differences between 1.7 and 1.8 are summarized below. The 1.8 code is considerably longer, so it is not pasted here; read the source yourself if you are interested. A simplified sketch of the 1.8 locking idea follows the list.
1. 1.7 uses Segment-based locking, where each Segment guards its own HashEntry table.
   1.8 locks individual bucket head nodes (Node) instead; the finer granularity further reduces contention and gives better concurrency.
2. 1.7 is structured as Segment + array + linked list.
   1.8 uses array + linked list + red-black tree, which keeps lookups fast when hash collisions pile up.
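To make difference 1 concrete, here is a heavily simplified sketch of the 1.8 locking idea. This is not the actual JDK code (which also handles resizing, size counters, and tree bins): an empty bucket is claimed with a lock-free CAS, and everything else synchronizes on the bucket's head node.

import java.util.concurrent.atomic.AtomicReferenceArray;

class BinLockedMap<K, V> {
    static final class Node<K, V> {
        final K key;
        volatile V value;
        volatile Node<K, V> next;
        Node(K key, V value) { this.key = key; this.value = value; }
    }

    final AtomicReferenceArray<Node<K, V>> table = new AtomicReferenceArray<>(16);

    V put(K key, V value) {
        int i = (table.length() - 1) & key.hashCode();
        for (;;) {
            Node<K, V> head = table.get(i);
            if (head == null) {
                // empty bucket: install the first node with CAS, no lock at all
                if (table.compareAndSet(i, null, new Node<>(key, value)))
                    return null;
            } else {
                synchronized (head) {           // lock just this bucket's head
                    if (table.get(i) != head)   // head changed while waiting
                        continue;               // (defensive recheck, as in the JDK): retry
                    for (Node<K, V> e = head;; e = e.next) {
                        if (e.key.equals(key)) {
                            V old = e.value; e.value = value; return old;
                        }
                        if (e.next == null) {
                            e.next = new Node<>(key, value); return null;
                        }
                    }
                }
            }
        }
    }
}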