HashMap的源码分析

HashMap的源码分析

1.关键变量

 
  1. //初始化容量
  2. static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16
  3. //负载因子
  4. static final float DEFAULT_LOAD_FACTOR = 0.75f;
  5. //阈值
  6. int threshold;
  7. //修改记录,迭代map时,快速失败
  8. transient int modCount;

2.数据结构

 
  1. static class Entry<K,V> implements Map.Entry<K,V> {
  2. final K key;
  3. V value;
  4. Entry<K,V> next;
  5. int hash;
  6. ......
  7. }

3.添加数据put方法

 
  1. public V put(K key, V value) {
  2. if (table == EMPTY_TABLE) {
  3. //初始化表,重新计算表容量,阈值
  4. inflateTable(threshold);
  5. }
  6. //key为null,直接插入到0的位置
  7. if (key == null)
  8. return putForNullKey(value);
  9. //对key的hashcode,进行了二次hash,尽量减少hash冲突
  10. int hash = hash(key);
  11. //为什么表容量是2次幂,原因就在这了,在查找数组索引时,用的位运算,不是mod
  12. int i = indexFor(hash, table.length);
  13. for (Entry<K,V> e = table[i]; e != null; e = e.next) {
  14. Object k;
  15. if (e.hash == hash && ((k = e.key) == key || key.equals(k))) {
  16. V oldValue = e.value;
  17. e.value = value;
  18. e.recordAccess(this);
  19. return oldValue;
  20. }
  21. }
  22.  
  23. modCount++;
  24. //添加元素
  25. addEntry(hash, key, value, i);
  26. return null;
  27. }
 
  1. static int indexFor(int h, int length) {
  2. // assert Integer.bitCount(length) == 1 : "length must be a non-zero power of 2";
  3. return h & (length-1);
  4. }
 
  1. final int hash(Object k) {
  2. int h = hashSeed;
  3. if (0 != h && k instanceof String) {
  4. return sun.misc.Hashing.stringHash32((String) k);
  5. }
  6.  
  7. h ^= k.hashCode();
  8.  
  9. // This function ensures that hashCodes that differ only by
  10. // constant multiples at each bit position have a bounded
  11. // number of collisions (approximately 8 at default load factor).
  12. h ^= (h >>> 20) ^ (h >>> 12);
  13. return h ^ (h >>> 7) ^ (h >>> 4);
  14. }

4.自动扩容:当实际容量大于等于阈值(size >= threshold),并且新元素要插入的桶已被占用(出现hash冲突)时,才会扩容,扩容到原表的2倍。

 
  1. void addEntry(int hash, K key, V value, int bucketIndex) {
  2. //当table的size大于阈值,并且出现hash冲突时,才会自动扩容
  3. if ((size >= threshold) && (null != table[bucketIndex])) {
  4. resize(2 * table.length);
  5. hash = (null != key) ? hash(key) : 0;
  6. bucketIndex = indexFor(hash, table.length);
  7. }
  8.  
  9. createEntry(hash, key, value, bucketIndex);
  10. }

5.扩容是最耗时的操作,因为要把原表中的所有元素重新映射到新表。

 
  1. void transfer(Entry[] newTable, boolean rehash) {
  2. int newCapacity = newTable.length;
  3. for (Entry<K,V> e : table) {
  4. while(null != e) {
  5. Entry<K,V> next = e.next;
  6. if (rehash) {
  7. e.hash = null == e.key ? 0 : hash(e.key);
  8. }
  9. int i = indexFor(e.hash, newCapacity);
  10. e.next = newTable[i];
  11. newTable[i] = e;
  12. e = next;
  13. }
  14. }
  15. }

6.对map迭代时的快速失败(fail-fast):通过比较modCount与expectedModCount是否相等来实现,不相等则抛出ConcurrentModificationException。

 
  1. final Entry<K,V> nextEntry() {
  2. //当modCount != expectedModCount不等时,直接快速失败!
  3. if (modCount != expectedModCount)
  4. throw new ConcurrentModificationException();
  5. Entry<K,V> e = next;
  6. if (e == null)
  7. throw new NoSuchElementException();
  8.  
  9. if ((next = e.next) == null) {
  10. Entry[] t = table;
  11. while (index < t.length && (next = t[index++]) == null)
  12. ;
  13. }
  14. current = e;
  15. return e;
  16. }

7.多线程并发rehash时,由于transfer采用头插法转移元素(链表顺序被反转),链表容易形成环,导致后续get等操作陷入死循环。

猜你喜欢

转载自blog.csdn.net/TuxedoLinux/article/details/83150085