Ethereum Fetcher: Handling a Received Block Hash

When an Ethereum node broadcasts a block, some peers receive the entire block while the remaining peers only receive the block's hash.

This post analyzes what a node does after receiving such a block hash.
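As a quick aside, the split between full-block propagation and hash-only announcement in go-ethereum of this era is roughly: the square root of the eligible peers get the whole block, and the rest only see the hash. Below is a minimal, self-contained toy illustration of that split (my own example, not the real BroadcastBlock from eth/handler.go):

package main

import (
	"fmt"
	"math"
)

func main() {
	// Peers that do not yet have the block
	peers := []string{"p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9"}

	// Roughly sqrt(N) of them receive the full block (NewBlockMsg)...
	fullBlockPeers := peers[:int(math.Sqrt(float64(len(peers))))]
	// ...and the rest only receive the hash (NewBlockHashesMsg)
	hashOnlyPeers := peers[len(fullBlockPeers):]

	fmt.Println("full block to:", fullBlockPeers) // full block to: [p1 p2 p3]
	fmt.Println("hash only to:", hashOnlyPeers)   // hash only to: [p4 p5 p6 p7 p8 p9]
}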

Receiving NewBlockHashesMsg

When a NewBlockHashesMsg arrives in eth/handler.go, here is how the code handles it:

case msg.Code == NewBlockHashesMsg:
	var announces newBlockHashesData
	if err := msg.Decode(&announces); err != nil {
		return errResp(ErrDecode, "%v: %v", msg, err)
	}
	// Mark the hashes as present at the remote node
	for _, block := range announces {
		p.MarkBlock(block.Hash)
	}
	// Schedule all the unknown hashes for retrieval
	unknown := make(newBlockHashesData, 0, len(announces))
	for _, block := range announces {
		if !pm.blockchain.HasBlock(block.Hash, block.Number) {
			unknown = append(unknown, block)
		}
	}
	for _, block := range unknown {
		pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
	}

After receiving the message:

1. Iterate over the received block hashes and mark the remote peer as having each block.

2. Iterate over the hashes again and check whether the local chain already has each block; any block not present locally is appended to unknown.

3. Iterate over unknown and call the fetcher's Notify for each entry, passing the hash, the block number, and the header- and body-request functions.
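For reference, the announces value being decoded is a newBlockHashesData, which in eth/protocol.go of roughly this go-ethereum version looks like the sketch below (quoted from memory, so treat the exact shape as approximate):

// newBlockHashesData is the network packet for the block announcements.
type newBlockHashesData []struct {
	Hash   common.Hash // hash of one particular block being announced
	Number uint64      // number of one particular block being announced
}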

func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
	block := &announce{
		hash:        hash,
		number:      number,
		time:        time,
		origin:      peer,
		fetchHeader: headerFetcher,
		fetchBodies: bodyFetcher,
	}
	select {
	case f.notify <- block:
		return nil
	case <-f.quit:
		return errTerminated
	}
}

Notify is straightforward: it builds an announce struct and writes it to the notify channel. That only makes sense if something is listening on the notify channel and doing the actual work, so let's find where that channel is consumed.

Fetcher: fetching the header and body

(The code is fairly long; it is pasted in one piece here, since splitting it into fragments would not read any better.)

func (f *Fetcher) loop() {
	// Iterate the block fetching until a quit is requested
	fetchTimer := time.NewTimer(0)
	completeTimer := time.NewTimer(0)

	for {
		// Clean up any expired block fetches
		for hash, announce := range f.fetching {
			if time.Since(announce.time) > fetchTimeout {
				f.forgetHash(hash)
			}
		}
		// Import any queued blocks that could potentially fit
		height := f.chainHeight()
		for !f.queue.Empty() {
			op := f.queue.PopItem().(*inject)
			if f.queueChangeHook != nil {
				f.queueChangeHook(op.block.Hash(), false)
			}
			// If too high up the chain or phase, continue later
			number := op.block.NumberU64()
			if number > height+1 {
				f.queue.Push(op, -float32(op.block.NumberU64()))
				if f.queueChangeHook != nil {
					f.queueChangeHook(op.block.Hash(), true)
				}
				break
			}
			// Otherwise if fresh and still unknown, try and import
			hash := op.block.Hash()
			if number+maxUncleDist < height || f.getBlock(hash) != nil {
				f.forgetBlock(hash)
				continue
			}
			f.insert(op.origin, op.block)
		}
		// Wait for an outside event to occur
		select {
		case <-f.quit:
			// Fetcher terminating, abort all operations
			return

		case notification := <-f.notify:
			// A block was announced, make sure the peer isn't DOSing us
			propAnnounceInMeter.Mark(1)

			count := f.announces[notification.origin] + 1
			if count > hashLimit {
				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
				propAnnounceDOSMeter.Mark(1)
				break
			}
			// If we have a valid block number, check that it's potentially useful
			if notification.number > 0 {
				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
					propAnnounceDropMeter.Mark(1)
					break
				}
			}
			// All is well, schedule the announce if block's not yet downloading
			if _, ok := f.fetching[notification.hash]; ok {
				break
			}
			if _, ok := f.completing[notification.hash]; ok {
				break
			}
			f.announces[notification.origin] = count
			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
				f.announceChangeHook(notification.hash, true)
			}
			if len(f.announced) == 1 {
				f.rescheduleFetch(fetchTimer)
			}

		case op := <-f.inject:
			// A direct block insertion was requested, try and fill any pending gaps
			propBroadcastInMeter.Mark(1)
			f.enqueue(op.origin, op.block)

		case hash := <-f.done:
			// A pending import finished, remove all traces of the notification
			f.forgetHash(hash)
			f.forgetBlock(hash)

		case <-fetchTimer.C:
			// At least one block's timer ran out, check for needing retrieval
			request := make(map[string][]common.Hash)

			for hash, announces := range f.announced {
				if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
					// Pick a random peer to retrieve from, reset all others
					announce := announces[rand.Intn(len(announces))]
					f.forgetHash(hash)

					// If the block still didn't arrive, queue for fetching
					if f.getBlock(hash) == nil {
						request[announce.origin] = append(request[announce.origin], hash)
						f.fetching[hash] = announce
					}
				}
			}
			// Send out all block header requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)

				// Create a closure of the fetch and schedule in on a new thread
				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
				go func() {
					if f.fetchingHook != nil {
						f.fetchingHook(hashes)
					}
					for _, hash := range hashes {
						headerFetchMeter.Mark(1)
						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
					}
				}()
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleFetch(fetchTimer)

		case <-completeTimer.C:
			// At least one header's timer ran out, retrieve everything
			request := make(map[string][]common.Hash)

			for hash, announces := range f.fetched {
				// Pick a random peer to retrieve from, reset all others
				announce := announces[rand.Intn(len(announces))]
				f.forgetHash(hash)

				// If the block still didn't arrive, queue for completion
				if f.getBlock(hash) == nil {
					request[announce.origin] = append(request[announce.origin], hash)
					f.completing[hash] = announce
				}
			}
			// Send out all block body requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)

				// Create a closure of the fetch and schedule in on a new thread
				if f.completingHook != nil {
					f.completingHook(hashes)
				}
				bodyFetchMeter.Mark(int64(len(hashes)))
				go f.completing[hashes[0]].fetchBodies(hashes)
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleComplete(completeTimer)

		case filter := <-f.headerFilter:
			// Headers arrived from a remote peer. Extract those that were explicitly
			// requested by the fetcher, and return everything else so it's delivered
			// to other parts of the system.
			var task *headerFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			headerFilterInMeter.Mark(int64(len(task.headers)))

			// Split the batch of headers into unknown ones (to return to the caller),
			// known incomplete ones (requiring body retrievals) and completed blocks.
			unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
			for _, header := range task.headers {
				hash := header.Hash()

				// Filter fetcher-requested headers from other synchronisation algorithms
				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
					// If the delivered header does not match the promised number, drop the announcer
					if header.Number.Uint64() != announce.number {
						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
						f.dropPeer(announce.origin)
						f.forgetHash(hash)
						continue
					}
					// Only keep if not imported by other means
					if f.getBlock(hash) == nil {
						announce.header = header
						announce.time = task.time

						// If the block is empty (header only), short circuit into the final import queue
						if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())

							block := types.NewBlockWithHeader(header)
							block.ReceivedAt = task.time

							complete = append(complete, block)
							f.completing[hash] = announce
							continue
						}
						// Otherwise add to the list of blocks needing completion
						incomplete = append(incomplete, announce)
					} else {
						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
						f.forgetHash(hash)
					}
				} else {
					// Fetcher doesn't know about it, add to the return list
					unknown = append(unknown, header)
				}
			}
			headerFilterOutMeter.Mark(int64(len(unknown)))
			select {
			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
			case <-f.quit:
				return
			}
			// Schedule the retrieved headers for body completion
			for _, announce := range incomplete {
				hash := announce.header.Hash()
				if _, ok := f.completing[hash]; ok {
					continue
				}
				f.fetched[hash] = append(f.fetched[hash], announce)
				if len(f.fetched) == 1 {
					f.rescheduleComplete(completeTimer)
				}
			}
			// Schedule the header-only blocks for import
			for _, block := range complete {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}

		case filter := <-f.bodyFilter:
			// Block bodies arrived, extract any explicitly requested blocks, return the rest
			var task *bodyFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			bodyFilterInMeter.Mark(int64(len(task.transactions)))

			blocks := []*types.Block{}
			for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
				// Match up a body to any possible completion request
				matched := false

				for hash, announce := range f.completing {
					if f.queued[hash] == nil {
						txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
						uncleHash := types.CalcUncleHash(task.uncles[i])

						if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash && announce.origin == task.peer {
							// Mark the body matched, reassemble if still unknown
							matched = true

							if f.getBlock(hash) == nil {
								block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
								block.ReceivedAt = task.time

								blocks = append(blocks, block)
							} else {
								f.forgetHash(hash)
							}
						}
					}
				}
				if matched {
					task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
					task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
					i--
					continue
				}
			}

			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
			select {
			case filter <- task:
			case <-f.quit:
				return
			}
			// Schedule the retrieved blocks for ordered import
			for _, block := range blocks {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}
		}
	}
}

Adding the hash to announced

Sure enough, there is a loop() whose body is a for loop listening on and reading from several channels.
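Before walking through the cases, it helps to keep the Fetcher's bookkeeping in mind. Below is a simplified model of the state that loop() manipulates, assuming the field names from this version of fetcher.go (my own abridged sketch: hooks, callbacks and channels are omitted; announce is the struct built in Notify above, and inject simply pairs an origin peer with a block):

type fetcherState struct {
	announces  map[string]int              // per-peer announce count (the DoS check in step 1 below)
	announced  map[common.Hash][]*announce // announced hashes waiting for fetchTimer
	fetching   map[common.Hash]*announce   // header request in flight
	fetched    map[common.Hash][]*announce // header received, body still needed
	completing map[common.Hash]*announce   // body request in flight
	queues     map[string]int              // per-peer queued-block count (DoS check in enqueue)
	queued     map[common.Hash]*inject     // blocks already scheduled for import
	// plus queue, a priority queue of *inject entries ordered by block number
}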

1. Check how many hashes from this peer are already pending in the f.announces map; if the count exceeds hashLimit (256), the peer may be mounting a DoS attack, so break.

2. Compute the distance between the announced block number and the local chain height; if it is below -maxUncleDist (7) or above maxQueueDist (32), break as well.

3. If the hash is already in f.fetching, break.

4. If the hash is already in f.completing, break.

5. Otherwise add the announce to f.announced and increment f.announces (this counter is what the DoS check in step 1 uses).

6. As soon as f.announced gains its first entry, rescheduleFetch(fetchTimer) is called. It scans the entries in announced for the earliest announce time and resets fetchTimer to arriveTimeout (500ms) minus the time already elapsed since that earliest announce, so the timer fires as soon as it is useful without busy-spinning; see the sketch below.
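rescheduleFetch itself is short. A sketch of it, based on the same fetcher.go (the exact code may differ slightly); rescheduleComplete, used later for bodies, is the analogous function over f.fetched with gatherSlack as the timeout:

// rescheduleFetch resets the fetch timer so it fires when the earliest
// outstanding announce becomes arriveTimeout (500ms) old.
func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
	// Short circuit if no blocks are announced
	if len(f.announced) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.announced {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	fetch.Reset(arriveTimeout - time.Since(earliest))
}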

Fetching headers

So next, let's see where f.announced is processed.

Still in the same function, under case <-fetchTimer.C:

This timer keeps running and is reset after each firing; its scheduling was explained above.

1. Iterate over the announces in f.announced; the same block hash may have been announced by several peers.

    if time.Since(announces[0].time) > arriveTimeout (500ms) - gatherSlack (100ms)

A hash is only processed once at least 400ms (500 - 100) have passed since its earliest announce. The point of waiting is to gather announcements of the same block from several peers and then pick one of them at random to fetch from, instead of every node hammering the original broadcaster and congesting it.

The hashes to be requested are collected into the request map,

and at the same time each hash is removed from f.announced and moved into the f.fetching map.

2. Iterate over the request map and start one goroutine per peer; inside it, a separate header request is issued for each hash (the protocol does not allow batched header retrievals here).

Then we wait for the peer to return the headers.

Headers returned

Still inside loop(): returned headers are delivered through the f.headerFilter channel.

1. Iterate over the received headers. For each one, look up its announce in f.fetching (where the previous step put it); the header is accepted only if it was delivered by the peer we requested it from and the hash is not already in fetched, completing, or queued. Otherwise the header is appended to the unknown list.

2. If the delivered header's number does not match the announced number, the announcing peer is dropped, the hash is forgotten, and processing continues with the next header.

3. If the block is empty (no transactions and no uncles), the block is assembled directly from the header, appended to the complete list, and its announce is added to f.completing; otherwise the announce is appended to the incomplete list.

4. The unknown headers are written back into the filter channel so other parts of the system can consume them; the handler-side sketch after this list shows where that happens.

5. Iterate over incomplete and add each announce to the f.fetched map, meaning the header is in hand but the body is still needed. When the first entry is added, rescheduleComplete() is called; like rescheduleFetch above, it resets the timer so the body fetch fires as soon as possible, within gatherSlack (100ms).

6. Iterate over complete and call f.enqueue() for each block, which places the finished block in the import queue (f.queue / f.queued); queue handling is discussed below together with the body path.
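For completeness, the headers reach the fetcher through eth/handler.go: when a BlockHeadersMsg comes in, the handler first lets the fetcher filter out the headers it explicitly requested, and whatever remains is handed to the downloader. A rough sketch of that case (abridged; the real handler also deals with the DAO fork header check):

case msg.Code == BlockHeadersMsg:
	// Decode the incoming header batch
	var headers []*types.Header
	if err := msg.Decode(&headers); err != nil {
		return errResp(ErrDecode, "msg %v: %v", msg, err)
	}
	// Single-header responses may be fetcher requests; let the fetcher claim them
	filter := len(headers) == 1
	if filter {
		headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
	}
	// Anything left over belongs to the downloader
	if len(headers) > 0 || !filter {
		if err := pm.downloader.DeliverHeaders(p.id, headers); err != nil {
			log.Debug("Failed to deliver headers", "err", err)
		}
	}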

Fetching bodies

Once a header has arrived, its hash sits in the f.fetched map; the body comes next.

Still inside loop(), when completeTimer fires: case <-completeTimer.C

1. Iterate over f.fetched and, for each hash, pick one of the announcing peers at random to fetch the body from.

2. Add the hash to the f.completing map and to request.

3. Iterate over request and start a goroutine per peer to fetch the body contents from it.

Bodies returned

When the remote peer responds with the corresponding bodies, they are again delivered into loop() through the f.bodyFilter channel:

1. Iterate over the received transaction lists and uncle lists.

2. For each entry in f.completing whose hash is not yet in f.queued (i.e. not already finished), if the derived transaction hash, the uncle hash, and the delivering peer all match the announced header, assemble the block and append it to the blocks list.

3. Iterate over blocks and call f.enqueue() to add them to f.queue.
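f.enqueue has come up several times without being shown. Roughly, it runs its own DoS and distance checks and then pushes the block onto the import priority queue. A sketch based on the same fetcher.go, with logging, metrics and test hooks stripped out:

// enqueue schedules a completed block for import (abridged sketch).
func (f *Fetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Per-peer DoS protection: cap how many blocks one peer may have queued
	count := f.queues[peer] + 1
	if count > blockLimit {
		f.forgetHash(hash)
		return
	}
	// Discard blocks too far behind or ahead of the local chain head
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		f.forgetHash(hash)
		return
	}
	// Schedule the block: negative number as priority so lower blocks pop first
	if _, ok := f.queued[hash]; !ok {
		op := &inject{origin: peer, block: block}
		f.queues[peer] = count
		f.queued[hash] = op
		f.queue.Push(op, -float32(block.NumberU64()))
	}
}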

Importing the completed block

Next comes f.queue (a priority queue ordered by block number), which is drained at the top of every loop() iteration.

While f.queue is not empty, an item is popped with f.queue.PopItem(); after a few simple checks (not too far ahead of the chain head, not too stale, not already known locally), f.insert() imports it:

func (f *Fetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
	go func() {
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
			return
		}
		// Quickly validate the header and propagate the block if it passes
		switch err := f.verifyHeader(block.Header()); err {
		case nil:
			// All ok, quickly propagate to our peers
			propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
			go f.broadcastBlock(block, true)

		case consensus.ErrFutureBlock:
			// Weird future block, don't fail, but neither propagate

		default:
			// Something went very wrong, drop the peer
			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			f.dropPeer(peer)
			return
		}
		// Run the actual import and log any issues
		if _, err := f.insertChain(types.Blocks{block}); err != nil {
			log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			return
		}
		// If import succeeded, broadcast the block
		propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(block)
		}
	}()
}

insert() starts a goroutine to import the block locally:

1. First verifyHeader is run; if it passes, the full block is immediately propagated to a subset of peers.

2. f.insertChain() writes the block into the local chain (backed by leveldb).

3. After a successful import, the block's hash is announced to the other peers.

One might ask: why announce the hash last? To spread the block across the network as fast as possible, couldn't it be done in step 1?

Step 1 propagates the whole block, while step 3 only announces its hash. Only after step 2 has imported the block locally do we have something to serve when a peer that saw the hash announcement comes back to fetch it; that is why step 3 comes after step 2.

It could in principle be moved to step 1, but that would require extra handling and hurt the uniformity of the code.

Summary

That completes the walkthrough of how a node handles a received block hash. Roughly:

1. The hash is handed to the fetcher, which first fetches the header.

2. Once the header arrives, if the body is empty the block is assembled directly from the header; otherwise the body is fetched next.

3. Once the body arrives it is combined with the header into a block, which is then imported into the local chain (leveldb).

A few details along the way:

1. Before fetching a header or a body, the fetcher waits a short while (400ms for headers, 100ms for bodies) to collect announcements of the same hash from several peers,

then picks one of them at random to fetch from. This spreads the load across the network; the cost is that syncing a block takes a little longer.

2. DoS protection: the fetcher tracks how many hashes from the same peer are being processed at once; if the count exceeds the limit of 256, the peer is treated as mounting a DoS attack and further announcements from it are dropped.




Reposted from blog.csdn.net/csds319/article/details/80595962