go-ethereum source notes: the eth ProtocolManager
NewProtocolManager
During the creation of the eth service a protocolManager member is initialized. The name suggests a "protocol manager"; in practice it manages Ethereum's sub-protocols and is the component that dispatches upper-layer messages to their handlers. Since it is closely tied to the p2p layer covered in the previous post, it is worth analysing on its own.
// go-ethereum\eth\handler.go
func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database, whitelist map[uint64]common.Hash) (*ProtocolManager, error) {
manager := &ProtocolManager{
networkID: networkID,
eventMux: mux,
txpool: txpool,
blockchain: blockchain,
chainconfig: config,
peers: newPeerSet(),
whitelist: whitelist,
newPeerCh: make(chan *peer),
noMorePeers: make(chan struct{}),
txsyncCh: make(chan *txsync),
quitSync: make(chan struct{}),
}
if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
log.Warn("Blockchain not empty, fast sync disabled")
mode = downloader.FullSync
}
if mode == downloader.FastSync {
manager.fastSync = uint32(1)
}
manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
for i, version := range ProtocolVersions {
if mode == downloader.FastSync && version < eth63 {
continue
}
version := version
manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
Name: ProtocolName,
Version: version,
Length: ProtocolLengths[i],
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(int(version), p, rw)
select {
case manager.newPeerCh <- peer:
manager.wg.Add(1)
defer manager.wg.Done()
return manager.handle(peer)
case <-manager.quitSync:
return p2p.DiscQuitting
}
},
NodeInfo: func() interface{} {
return manager.NodeInfo()
},
PeerInfo: func(id enode.ID) interface{} {
if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
return p.Info()
}
return nil
},
})
}
if len(manager.SubProtocols) == 0 {
return nil, errIncompatibleConfig
}
manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer)
validator := func(header *types.Header) error {
return engine.VerifyHeader(blockchain, header, true)
}
heighter := func() uint64 {
return blockchain.CurrentBlock().NumberU64()
}
inserter := func(blocks types.Blocks) (int, error) {
if atomic.LoadUint32(&manager.fastSync) == 1 {
log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
return 0, nil
}
atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
return manager.blockchain.InsertChain(blocks)
}
manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
return manager, nil
}
After the object is initialized, the sync mode is checked first: if the mode is FastSync but the current block number is not 0, it is switched to FullSync — this is why fast sync only takes effect on a node's first sync. The manager's fastSync flag is then set accordingly. Next we can see that the sub-protocols it manages are really just p2p protocols: a slice of p2p.Protocol, SubProtocols, is created right here, and ProtocolVersions, the list of supported protocol versions, is iterated over. In the version I am reading this is {eth63, eth62}; for more on eth63, eth62 and the Ethereum wire protocol in general, see here.
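For reference, the protocol constants referenced here look roughly as follows (go-ethereum\eth\protocol.go of this era, reproduced from memory — treat it as a sketch and verify against your checkout). The message codes also explain the Length values used below: eth62 uses codes 0x00–0x07 (8 of them), while eth63 adds four more up to 0x10 = 16, giving 17.
// go-ethereum\eth\protocol.go (sketch)
const (
    eth62 = 62
    eth63 = 63
)

var ProtocolName = "eth"
var ProtocolVersions = []uint{eth63, eth62}
var ProtocolLengths = []uint64{17, 8}

const ProtocolMaxMsgSize = 10 * 1024 * 1024 // maximum cap on the size of a protocol message

const (
    // Protocol messages belonging to eth/62
    StatusMsg          = 0x00
    NewBlockHashesMsg  = 0x01
    TxMsg              = 0x02
    GetBlockHeadersMsg = 0x03
    BlockHeadersMsg    = 0x04
    GetBlockBodiesMsg  = 0x05
    BlockBodiesMsg     = 0x06
    NewBlockMsg        = 0x07

    // Protocol messages belonging to eth/63
    GetNodeDataMsg = 0x0d
    NodeDataMsg    = 0x0e
    GetReceiptsMsg = 0x0f
    ReceiptsMsg    = 0x10
)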
For each version a Protocol object is appended to SubProtocols. Its name is "eth", and its Length is taken from ProtocolLengths: 17 for eth63, 8 for eth62. A Run function is then defined; as discussed in the earlier p2p peer analysis, every protocol agreed with a peer is started in its own goroutine, and that goroutine executes this Run function. Run first wraps the p2p peer into an eth peer via newPeer:
func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
return newPeer(pv, p, newMeteredMsgWriter(rw))
}
// go-ethereum\eth\peer.go
func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
return &peer{
Peer: p,
rw: rw,
version: version,
id: fmt.Sprintf("%x", p.ID().Bytes()[:8]),
knownTxs: mapset.NewSet(),
knownBlocks: mapset.NewSet(),
queuedTxs: make(chan []*types.Transaction, maxQueuedTxs),
queuedProps: make(chan *propEvent, maxQueuedProps),
queuedAnns: make(chan *types.Block, maxQueuedAnns),
term: make(chan struct{}),
}
}
handle
The new peer is then pushed into the manager's newPeerCh channel (consumed by the syncer, as we will see later), and handle is called to service the connection:
func (pm *ProtocolManager) handle(p *peer) error {
if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
return p2p.DiscTooManyPeers
}
p.Log().Debug("Ethereum peer connected", "name", p.Name())
var (
genesis = pm.blockchain.Genesis()
head = pm.blockchain.CurrentHeader()
hash = head.Hash()
number = head.Number.Uint64()
td = pm.blockchain.GetTd(hash, number)
)
if err := p.Handshake(pm.networkID, td, hash, genesis.Hash()); err != nil {
p.Log().Debug("Ethereum handshake failed", "err", err)
return err
}
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
rw.Init(p.version)
}
if err := pm.peers.Register(p); err != nil {
p.Log().Error("Ethereum peer registration failed", "err", err)
return err
}
defer pm.removePeer(p.id)
if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
return err
}
pm.syncTransactions(p)
if daoBlock := pm.chainconfig.DAOForkBlock; daoBlock != nil {
if err := p.RequestHeadersByNumber(daoBlock.Uint64(), 1, 0, false); err != nil {
return err
}
p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() {
p.Log().Debug("Timed out DAO fork-check, dropping")
pm.removePeer(p.id)
})
defer func() {
if p.forkDrop != nil {
p.forkDrop.Stop()
p.forkDrop = nil
}
}()
}
for number := range pm.whitelist {
if err := p.RequestHeadersByNumber(number, 1, 0, false); err != nil {
return err
}
}
for {
if err := pm.handleMsg(p); err != nil {
p.Log().Debug("Ethereum message handling failed", "err", err)
return err
}
}
}
Handshake
handle first rejects the connection if the peer count has already reached maxPeers and the peer is not a trusted node; otherwise it proceeds with the Ethereum handshake.
// go-ethereum\eth\peer.go
func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash) error {
errc := make(chan error, 2)
var status statusData
go func() {
errc <- p2p.Send(p.rw, StatusMsg, &statusData{
ProtocolVersion: uint32(p.version),
NetworkId: network,
TD: td,
CurrentBlock: head,
GenesisBlock: genesis,
})
}()
go func() {
errc <- p.readStatus(network, &status, genesis)
}()
timeout := time.NewTimer(handshakeTimeout)
defer timeout.Stop()
for i := 0; i < 2; i++ {
select {
case err := <-errc:
if err != nil {
return err
}
case <-timeout.C:
return p2p.DiscReadTimeout
}
}
p.td, p.head = status.TD, status.CurrentBlock
return nil
}
The handshake starts a goroutine that sends an initial message with Send. Send lives in message.go of the p2p package, but it simply delegates to the MsgWriter it is given; that MsgWriter is the one passed in by p2p's peer.go when the protocol was started, i.e. protoRW's WriteMsg, and the actual transport work is ultimately done by rlpx. One detail worth noting: when sending, an offset is added to the original message code. The eth protocol's codes start at 0, and without the offset the message would be treated as a base-protocol message and filtered out on the receiving side (see the p2p peer analysis); the offset is subtracted again when the message is read, restoring the original code.
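A simplified sketch of that offset handling in p2p's protoRW (illustrative only, not the exact upstream code): the write path shifts the protocol-local code by the protocol's offset, while the read path blocks on the channel filled by the peer's read loop and shifts it back.
// Simplified sketch of p2p's protoRW (not the exact upstream implementation).
func (rw *protoRW) WriteMsg(msg Msg) error {
    msg.Code += rw.offset     // eth codes start at 0; move them past the base protocol's range
    return rw.w.WriteMsg(msg) // ultimately ends up in the rlpx frame writer
}

func (rw *protoRW) ReadMsg() (Msg, error) {
    select {
    case msg := <-rw.in:      // filled by the peer's read loop for this protocol
        msg.Code -= rw.offset // restore the protocol-local code
        return msg, nil
    case <-rw.closed:
        return Msg{}, io.EOF
    }
}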
The message sent has code StatusMsg, i.e. 0, and carries the protocol version, network ID, total difficulty, current head block hash and genesis block hash. A second goroutine is started to read the peer's status message:
func (p *peer) readStatus(network uint64, status *statusData, genesis common.Hash) (err error) {
msg, err := p.rw.ReadMsg()
if err != nil {
return err
}
if msg.Code != StatusMsg {
return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
}
if msg.Size > ProtocolMaxMsgSize {
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
}
// Decode the handshake and make sure everything matches
if err := msg.Decode(&status); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
if status.GenesisBlock != genesis {
return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8])
}
if status.NetworkId != network {
return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, network)
}
if int(status.ProtocolVersion) != p.version {
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
}
return nil
}
readStatus first calls ReadMsg. This ReadMsg belongs to protoRW in p2p's peer.go: recall from the peer analysis that when a peer receives a message that is not a base-protocol message, it is pushed into protoRW's in channel, and ReadMsg blocks reading from that channel and returns the msg. Back in readStatus, the code is checked to be StatusMsg and the size is checked against the limit. The message is then decoded (RLP decoding) into statusData, after which the two sides' genesis block, network ID and protocol version are compared; any mismatch returns the corresponding error.
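For reference, the statusData being encoded and decoded on the two sides is roughly defined as (go-ethereum\eth\protocol.go, sketch):
// statusData is the network packet for the status message (sketch).
type statusData struct {
    ProtocolVersion uint32
    NetworkId       uint64
    TD              *big.Int
    CurrentBlock    common.Hash
    GenesisBlock    common.Hash
}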
Back in Handshake: alongside the send and receive goroutines a 5-second timer (handshakeTimeout) is started to detect a handshake timeout, and the same loop also collects any error from the two goroutines. On success the peer's total difficulty and current head are recorded. Note that there is no ordering between the two goroutines: the underlying p2p peer was established by a handshake after one side dialed the other, and once it exists both sides independently instantiate their own peer object, start the protocols and execute each protocol's Run, so these steps run independently on both ends.
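The td and head recorded here are later read back with Head and updated with SetHead when the peer announces better blocks; these are simple lock-protected accessors, roughly (go-ethereum\eth\peer.go, sketch):
// Head retrieves a copy of the peer's current head hash and total difficulty (sketch).
func (p *peer) Head() (hash common.Hash, td *big.Int) {
    p.lock.RLock()
    defer p.lock.RUnlock()

    copy(hash[:], p.head[:])
    return hash, new(big.Int).Set(p.td)
}

// SetHead updates the peer's head hash and total difficulty (sketch).
func (p *peer) SetHead(hash common.Hash, td *big.Int) {
    p.lock.Lock()
    defer p.lock.Unlock()

    copy(p.head[:], hash[:])
    p.td.Set(td)
}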
Register & broadcast
Back in handle: after the handshake, if the chain information of the two sides also matches, the peer is registered in the ProtocolManager's peer set (and unregistered again by the deferred removePeer when handle returns). The registration looks like this:
func (ps *peerSet) Register(p *peer) error {
ps.lock.Lock()
defer ps.lock.Unlock()
if ps.closed {
return errClosed
}
if _, ok := ps.peers[p.id]; ok {
return errAlreadyRegistered
}
ps.peers[p.id] = p
go p.broadcast()
return nil
}
Its main job is to add the peer to the manager's peers map for central bookkeeping, and to start a goroutine running broadcast:
func (p *peer) broadcast() {
for {
select {
case txs := <-p.queuedTxs:
if err := p.SendTransactions(txs); err != nil {
return
}
p.Log().Trace("Broadcast transactions", "count", len(txs))
case prop := <-p.queuedProps:
if err := p.SendNewBlock(prop.block, prop.td); err != nil {
return
}
p.Log().Trace("Propagated block", "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td)
case block := <-p.queuedAnns:
if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil {
return
}
p.Log().Trace("Announced block", "number", block.Number(), "hash", block.Hash())
case <-p.term:
return
}
}
}
This is again an endless loop that services the peer's send queues; the individual cases are covered below.
handle then synchronises transactions, sending the pending transactions from our own pool to the peer. Next the DAO hard fork is checked, and if a whitelist is configured those blocks are requested as well — just like the DAO check, both are header requests by block number.
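The deferred removePeer seen above (also handed to downloader.New and fetcher.New as the drop callback) undoes the registration; it looks roughly like this (go-ethereum\eth\handler.go, sketch):
func (pm *ProtocolManager) removePeer(id string) {
    // Short circuit if the peer was already removed
    peer := pm.peers.Peer(id)
    if peer == nil {
        return
    }
    log.Debug("Removing Ethereum peer", "peer", id)

    // Unregister the peer from the downloader and the eth peer set, then
    // hard-disconnect it at the networking layer.
    pm.downloader.UnregisterPeer(id)
    if err := pm.peers.Unregister(id); err != nil {
        log.Error("Peer removal failed", "peer", id, "err", err)
    }
    peer.Peer.Disconnect(p2p.DiscUselessPeer)
}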
handleMsg
Finally we reach the main loop of handle, which calls handleMsg for every incoming message:
func (pm *ProtocolManager) handleMsg(p *peer) error {
// Read the next message from the remote peer, and ensure it's fully consumed
msg, err := p.rw.ReadMsg()
if err != nil {
return err
}
if msg.Size > ProtocolMaxMsgSize {
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
}
defer msg.Discard()
// Handle the message depending on its contents
switch {
case msg.Code == StatusMsg:
return errResp(ErrExtraStatusMsg, "uncontrolled status message")
...
}
return nil
}
GetBlockHeadersMsg
This part of the code is large: it is the upper layer of the p2p communication, so there are many cases to handle. Let's look at just a few, starting with RequestHeadersByNumber, the method used above for the DAO fork check and the whitelist requests (the other cases come up when later flows are covered). It sends the following request:
func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
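The request payload is getBlockHeadersData, roughly defined as (go-ethereum\eth\protocol.go, sketch):
// getBlockHeadersData is the payload of a GetBlockHeadersMsg query (sketch).
type getBlockHeadersData struct {
    Origin  hashOrNumber // block from which to retrieve headers
    Amount  uint64       // maximum number of headers to retrieve
    Skip    uint64       // blocks to skip between consecutive headers
    Reverse bool         // query direction (false = rising towards latest block)
}

// hashOrNumber identifies the origin either by hash or by number; exactly one
// of the two fields is set (a custom RLP encoding handles this).
type hashOrNumber struct {
    Hash   common.Hash
    Number uint64
}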
Again a message is sent, this time with code GetBlockHeadersMsg. The corresponding branch in handleMsg is:
case msg.Code == GetBlockHeadersMsg:
var query getBlockHeadersData
if err := msg.Decode(&query); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
hashMode := query.Origin.Hash != (common.Hash{})
first := true
maxNonCanonical := uint64(100)
var (
bytes common.StorageSize
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
var origin *types.Header
if hashMode {
if first {
first = false
origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
if origin != nil {
query.Origin.Number = origin.Number.Uint64()
}
} else {
origin = pm.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
}
} else {
origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
}
if origin == nil {
break
}
headers = append(headers, origin)
bytes += estHeaderRlpSize
switch {
case hashMode && query.Reverse:
ancestor := query.Skip + 1
if ancestor == 0 {
unknown = true
} else {
query.Origin.Hash, query.Origin.Number = pm.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
unknown = (query.Origin.Hash == common.Hash{})
}
case hashMode && !query.Reverse:
var (
current = origin.Number.Uint64()
next = current + query.Skip + 1
)
if next <= current {
infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ")
p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
unknown = true
} else {
if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
nextHash := header.Hash()
expOldHash, _ := pm.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
if expOldHash == query.Origin.Hash {
query.Origin.Hash, query.Origin.Number = nextHash, next
} else {
unknown = true
}
} else {
unknown = true
}
}
case query.Reverse:
if query.Origin.Number >= query.Skip+1 {
query.Origin.Number -= query.Skip + 1
} else {
unknown = true
}
case !query.Reverse:
query.Origin.Number += query.Skip + 1
}
}
return p.SendBlockHeaders(headers)
The first step is decoding the request into getBlockHeadersData, which carries the number or hash of the starting block, the number of headers wanted, how many blocks to skip between consecutive headers, and whether to walk backwards. The code then determines whether the lookup is by number or by hash and, in a loop, fetches headers according to the query and appends them to headers — for example, with Origin.Number = 100, Amount = 3, Skip = 4 and Reverse = false the reply contains headers 100, 105 and 110, since each step advances by Skip + 1. Finally the collected headers are sent back with SendBlockHeaders:
func (p *peer) SendBlockHeaders(headers []*types.Header) error {
return p2p.Send(p.rw, BlockHeadersMsg, headers)
}
BlockHeadersMsg
As we can see, the reply's code is BlockHeadersMsg. Back on the requesting side, the corresponding branch is:
case msg.Code == BlockHeadersMsg:
var headers []*types.Header
if err := msg.Decode(&headers); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
if len(headers) == 0 && p.forkDrop != nil {
// Possibly an empty reply to the fork header checks, sanity check TDs
verifyDAO := true
if daoHeader := pm.blockchain.GetHeaderByNumber(pm.chainconfig.DAOForkBlock.Uint64()); daoHeader != nil {
if _, td := p.Head(); td.Cmp(pm.blockchain.GetTd(daoHeader.Hash(), daoHeader.Number.Uint64())) >= 0 {
verifyDAO = false
}
}
if verifyDAO {
p.Log().Debug("Seems to be on the same side of the DAO fork")
p.forkDrop.Stop()
p.forkDrop = nil
return nil
}
}
filter := len(headers) == 1
if filter {
if p.forkDrop != nil && pm.chainconfig.DAOForkBlock.Cmp(headers[0].Number) == 0 {
p.forkDrop.Stop()
p.forkDrop = nil
if err := misc.VerifyDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
p.Log().Debug("Verified to be on the other side of the DAO fork, dropping")
return err
}
p.Log().Debug("Verified to be on the same side of the DAO fork")
return nil
}
if want, ok := pm.whitelist[headers[0].Number.Uint64()]; ok {
if hash := headers[0].Hash(); want != hash {
p.Log().Info("Whitelist mismatch, dropping peer", "number", headers[0].Number.Uint64(), "hash", hash, "want", want)
return errors.New("whitelist block mismatch")
}
p.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want)
}
headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
}
if len(headers) > 0 || !filter {
err := pm.downloader.DeliverHeaders(p.id, headers)
if err != nil {
log.Debug("Failed to deliver headers", "err", err)
}
}
Here too the reply is decoded first. If the header list is empty while a DAO fork check is pending, the peer has presumably not reached the fork block yet; a sanity check on total difficulty is performed (if the peer's TD is already at or above our TD at the DAO header, the empty reply is suspicious and the drop timer is left running), and if the check passes, the fork-check timer set when the request was made is cancelled — otherwise that timer will eventually drop the connection. If exactly one header was returned, the DAO fork is checked first: when the header is the fork block, the timer is stopped and the extra-data is verified with VerifyDAOHeaderExtraData, dropping peers on the other side of the fork. If it is not a fork check, it may be one of the whitelist blocks requested earlier, which is verified by comparing hashes. The single header is also run through the fetcher's filter. Finally, any headers that remain (or any reply that was not a single header) are handed to the downloader via DeliverHeaders.
That essentially completes handle. Returning to NewProtocolManager: so far we have mainly looked at the Run function defined for each protocol, but NodeInfo and PeerInfo are also defined, both provided for the p2p layer to call. After the managed protocols are assembled, their count is checked; if it is 0 the constructor bails out with errIncompatibleConfig. Otherwise downloader.New creates a Downloader. A validator function is then set up to verify headers, heighter returns the current chain height, and inserter inserts blocks into the chain (it refuses to do so while fast sync is still active, discarding propagated blocks instead). Finally a fetcher is created, which gathers block announcements from the individual peers.
Start
With that, the ProtocolManager is created and initialized. Later, as the Ethereum service starts, the ProtocolManager's Start method is called as well:
func (pm *ProtocolManager) Start(maxPeers int) {
pm.maxPeers = maxPeers
pm.txsCh = make(chan core.NewTxsEvent, txChanSize)
pm.txsSub = pm.txpool.SubscribeNewTxsEvent(pm.txsCh)
go pm.txBroadcastLoop()
pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
go pm.minedBroadcastLoop()
go pm.syncer()
go pm.txsyncLoop()
}
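The two subscriptions set up here deliver very small event types; roughly (go-ethereum\core\events.go, sketch):
// NewTxsEvent is posted when a batch of transactions enters the transaction pool (sketch).
type NewTxsEvent struct{ Txs []*types.Transaction }

// NewMinedBlockEvent is posted when a new block has been mined locally (sketch).
type NewMinedBlockEvent struct{ Block *types.Block }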
txBroadcastLoop
Start mainly launches several goroutines: transaction broadcasting, mined-block broadcasting, chain synchronisation and transaction synchronisation. For transaction broadcasting:
func (pm *ProtocolManager) txBroadcastLoop() {
for {
select {
case event := <-pm.txsCh:
pm.BroadcastTxs(event.Txs)
case <-pm.txsSub.Err():
return
}
}
}
func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
var txset = make(map[*peer]types.Transactions)
for _, tx := range txs {
peers := pm.peers.PeersWithoutTx(tx.Hash())
for _, peer := range peers {
txset[peer] = append(txset[peer], tx)
}
log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
}
for peer, txs := range txset {
peer.AsyncSendTransactions(txs)
}
}
func (ps *peerSet) PeersWithoutTx(hash common.Hash) []*peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
list := make([]*peer, 0, len(ps.peers))
for _, p := range ps.peers {
if !p.knownTxs.Contains(hash) {
list = append(list, p)
}
}
return list
}
Whenever valid transactions arrive, BroadcastTxs is called. For every transaction it first determines which peers do not know it yet, by checking the peer's knownTxs set (entries are added both when we receive a TxMsg from that peer and when we send transactions to it, so it tracks all transactions associated with the peer). The peers lacking the transaction are collected into txset, and AsyncSendTransactions is called for each of them:
func (p *peer) AsyncSendTransactions(txs []*types.Transaction) {
select {
case p.queuedTxs <- txs:
for _, tx := range txs {
p.knownTxs.Add(tx.Hash())
}
default:
p.Log().Debug("Dropping transaction propagation", "count", len(txs))
}
}
This is an asynchronous send: the peer is marked as knowing these transactions, and because the batch is pushed into queuedTxs, the corresponding case in the broadcast loop fires:
case txs := <-p.queuedTxs:
if err := p.SendTransactions(txs); err != nil {
return
}
p.Log().Trace("Broadcast transactions", "count", len(txs))
func (p *peer) SendTransactions(txs types.Transactions) error {
for _, tx := range txs {
p.knownTxs.Add(tx.Hash())
}
return p2p.Send(p.rw, TxMsg, txs)
}
In the end SendTransactions sends the batch via Send with message code TxMsg. While we are here, let's also look at the receiving side of that message, again in handleMsg:
case msg.Code == TxMsg:
if atomic.LoadUint32(&pm.acceptTxs) == 0 {
break
}
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
for i, tx := range txs {
if tx == nil {
return errResp(ErrDecode, "transaction %d is nil", i)
}
p.MarkTransaction(tx.Hash())
}
pm.txpool.AddRemotes(txs)
Since the ProtocolManager is mostly about dispatching, the handler simply decodes the transactions, marks the sending peer as knowing them, and hands them to the txpool.
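The MarkTransaction used here (and MarkBlock, used later for blocks) just records the hash in the peer's bounded known-set, evicting old entries once the set is full; roughly (go-ethereum\eth\peer.go, sketch):
// MarkTransaction marks a transaction as known to the peer, keeping the
// known-set below maxKnownTxs (sketch).
func (p *peer) MarkTransaction(hash common.Hash) {
    for p.knownTxs.Cardinality() >= maxKnownTxs {
        p.knownTxs.Pop()
    }
    p.knownTxs.Add(hash)
}

// MarkBlock does the same for blocks, bounded by maxKnownBlocks (sketch).
func (p *peer) MarkBlock(hash common.Hash) {
    for p.knownBlocks.Cardinality() >= maxKnownBlocks {
        p.knownBlocks.Pop()
    }
    p.knownBlocks.Add(hash)
}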
minedBroadcastLoop
Another loop started in Start is minedBroadcastLoop:
func (pm *ProtocolManager) minedBroadcastLoop() {
for obj := range pm.minedBlockSub.Chan() {
if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok {
pm.BroadcastBlock(ev.Block, true) // First propagate block to peers
pm.BroadcastBlock(ev.Block, false) // Only then announce to the rest
}
}
}
When the subscription delivers a mined-block event, BroadcastBlock is called to broadcast the block:
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
hash := block.Hash()
peers := pm.peers.PeersWithoutBlock(hash)
if propagate {
var td *big.Int
if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
} else {
log.Error("Propagating dangling block", "number", block.Number(), "hash", hash)
return
}
transferLen := int(math.Sqrt(float64(len(peers))))
if transferLen < minBroadcastPeers {
transferLen = minBroadcastPeers
}
if transferLen > len(peers) {
transferLen = len(peers)
}
transfer := peers[:transferLen]
for _, peer := range transfer {
peer.AsyncSendNewBlock(block, td)
}
log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
return
}
if pm.blockchain.HasBlock(hash, block.NumberU64()) {
for _, peer := range peers {
peer.AsyncSendNewBlockHash(block)
}
log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
}
}
This method has two different behaviours depending on its second argument. It first finds the peers that do not yet have the block. If propagate is true, it computes the total difficulty including this block and pushes the full block plus TD to a subset of those peers — roughly the square root of their number, but at least minBroadcastPeers and at most all of them. If propagate is false, and we actually have the block locally, only the block's hash is announced to all of those peers. The two paths call AsyncSendNewBlock and AsyncSendNewBlockHash respectively:
func (p *peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
select {
case p.queuedProps <- &propEvent{block: block, td: td}:
p.knownBlocks.Add(block.Hash())
default:
p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
}
}
func (p *peer) AsyncSendNewBlockHash(block *types.Block) {
select {
case p.queuedAnns <- block:
p.knownBlocks.Add(block.Hash())
default:
p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash())
}
}
AsyncSendNewBlock pushes the event into queuedProps and marks the peer as knowing the block. That wakes up the corresponding case in broadcast:
case prop := <-p.queuedProps:
if err := p.SendNewBlock(prop.block, prop.td); err != nil {
return
}
p.Log().Trace("Propagated block", "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td)
func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
p.knownBlocks.Add(block.Hash())
return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td})
}
Finally SendNewBlock sends the block and total difficulty over p2p, with message code NewBlockMsg.
AsyncSendNewBlockHash works the same way: it pushes the block into queuedAnns, marks the peer as knowing the block, and triggers the corresponding case in broadcast:
case block := <-p.queuedAnns:
if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil {
return
}
p.Log().Trace("Announced block", "number", block.Number(), "hash", block.Hash())
func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
for _, hash := range hashes {
p.knownBlocks.Add(hash)
}
request := make(newBlockHashesData, len(hashes))
for i := 0; i < len(hashes); i++ {
request[i].Hash = hashes[i]
request[i].Number = numbers[i]
}
return p2p.Send(p.rw, NewBlockHashesMsg, request)
}
Here SendNewBlockHashes sends the block hashes and numbers, with message code NewBlockHashesMsg.
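For reference, the two payload types used for block propagation are roughly (go-ethereum\eth\protocol.go, sketch):
// newBlockHashesData is the payload of NewBlockHashesMsg: hash/number pairs
// announcing blocks the sender has available (sketch).
type newBlockHashesData []struct {
    Hash   common.Hash // hash of one particular block being announced
    Number uint64      // number of that block
}

// newBlockData is the payload of NewBlockMsg: the full block plus the sender's
// total difficulty including that block (sketch).
type newBlockData struct {
    Block *types.Block
    TD    *big.Int
}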
Let's also look at the receiving side. For NewBlockMsg, back in handleMsg:
case msg.Code == NewBlockMsg:
var request newBlockData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
request.Block.ReceivedAt = msg.ReceivedAt
request.Block.ReceivedFrom = p
p.MarkBlock(request.Block.Hash())
pm.fetcher.Enqueue(p.id, request.Block)
var (
trueHead = request.Block.ParentHash()
trueTD = new(big.Int).Sub(request.TD, request.Block.Difficulty())
)
if _, td := p.Head(); trueTD.Cmp(td) > 0 {
p.SetHead(trueHead, trueTD)
currentBlock := pm.blockchain.CurrentBlock()
if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
go pm.synchronise(p)
}
}
The handler decodes the message, records the time it was received and the peer it came from, marks that peer as knowing the block, and hands the block to the fetcher. It then derives the peer's "true" head and total difficulty: the announced TD minus the block's own difficulty, i.e. the TD at the block's parent. If that exceeds what we have recorded for the peer, the peer's head and TD are updated; and if it also exceeds the total difficulty of our own chain, a synchronisation with that peer is started.
For the other message, NewBlockHashesMsg, the logic is:
case msg.Code == NewBlockHashesMsg:
var announces newBlockHashesData
if err := msg.Decode(&announces); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
for _, block := range announces {
p.MarkBlock(block.Hash)
}
unknown := make(newBlockHashesData, 0, len(announces))
for _, block := range announces {
if !pm.blockchain.HasBlock(block.Hash, block.Number) {
unknown = append(unknown, block)
}
}
for _, block := range unknown {
pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
}
Again the announcements are decoded first, the peer is marked as knowing each announced block, the blocks we do not have yet are collected, and those are handed to the fetcher.
syncer
Start also launches the syncer:
func (pm *ProtocolManager) syncer() {
pm.fetcher.Start()
defer pm.fetcher.Stop()
defer pm.downloader.Terminate()
forceSync := time.NewTicker(forceSyncCycle)
defer forceSync.Stop()
for {
select {
case <-pm.newPeerCh:
if pm.peers.Len() < minDesiredPeerCount {
break
}
go pm.synchronise(pm.peers.BestPeer())
case <-forceSync.C:
go pm.synchronise(pm.peers.BestPeer())
case <-pm.noMorePeers:
return
}
}
}
syncer first starts the fetcher (covered later) and a ticker firing every forceSyncCycle (10 seconds). The loop then blocks on events: when a new peer arrives it synchronises with the best peer once at least minDesiredPeerCount peers are connected, the force-sync tick unconditionally synchronises with the best peer, and noMorePeers terminates the loop.
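The BestPeer used here selects the registered peer with the highest total difficulty; roughly (go-ethereum\eth\peer.go, sketch):
// BestPeer retrieves the known peer with the currently highest total difficulty (sketch).
func (ps *peerSet) BestPeer() *peer {
    ps.lock.RLock()
    defer ps.lock.RUnlock()

    var (
        bestPeer *peer
        bestTd   *big.Int
    )
    for _, p := range ps.peers {
        if _, td := p.Head(); bestPeer == nil || td.Cmp(bestTd) > 0 {
            bestPeer, bestTd = p, td
        }
    }
    return bestPeer
}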
txsyncLoop
The last goroutine started in Start runs txsyncLoop:
func (pm *ProtocolManager) txsyncLoop() {
....
for {
select {
case s := <-pm.txsyncCh:
pending[s.p.ID()] = s
if !sending {
send(s)
}
case err := <-done:
sending = false
if err != nil {
pack.p.Log().Debug("Transaction send failed", "err", err)
delete(pending, pack.p.ID())
}
if s := pick(); s != nil {
send(s)
}
case <-pm.quitSync:
return
}
}
}
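The elided part at the top of txsyncLoop declares the loop's local state and the txsync type it works with; roughly (go-ethereum\eth\sync.go, sketch):
// txsync pairs a peer with the transactions we still want to push to it (sketch).
type txsync struct {
    p   *peer
    txs []*types.Transaction
}

// Local state of txsyncLoop (sketch): at most one pack is in flight at a time;
// pending tracks peers that still have transactions queued for the initial sync.
var (
    pending = make(map[enode.ID]*txsync)
    sending = false               // whether a send is active
    pack    = new(txsync)         // the pack that is being sent
    done    = make(chan error, 1) // result of the send
)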
The loop first defines two helper functions (send and pick, shown below) and then processes events. The first event is emitted by syncTransactions, which is called from the ProtocolManager's handle; since handle runs for every newly established peer, syncTransactions is executed for each new connection:
func (pm *ProtocolManager) syncTransactions(p *peer) {
var txs types.Transactions
pending, _ := pm.txpool.Pending()
for _, batch := range pending {
txs = append(txs, batch...)
}
if len(txs) == 0 {
return
}
select {
case pm.txsyncCh <- &txsync{p, txs}:
case <-pm.quitSync:
}
}
syncTransactions grabs all pending transactions from the pool and, if there are any, packs them into a txsync and pushes it onto txsyncCh, triggering the logic in txsyncLoop — mainly the send helper defined at its top:
send := func(s *txsync) {
size := common.StorageSize(0)
pack.p = s.p
pack.txs = pack.txs[:0]
for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
pack.txs = append(pack.txs, s.txs[i])
size += s.txs[i].Size()
}
s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
if len(s.txs) == 0 {
delete(pending, s.p.ID())
}
s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
sending = true
go func() { done <- pack.p.SendTransactions(pack.txs) }()
}
send ships only part of the transactions at a time, keeping each message below txsyncPackSize (100KB) in total size. If that exhausts the peer's queue, the peer is removed from pending, meaning nothing is left to send; otherwise the remaining transactions are kept for later. A goroutine then sends the pack using SendTransactions (introduced earlier) and forwards the result to done, which triggers the other select case in txsyncLoop: if the send succeeded, pick is called:
pick := func() *txsync {
if len(pending) == 0 {
return nil
}
n := rand.Intn(len(pending)) + 1
for _, s := range pending {
if n--; n == 0 {
return s
}
}
return nil
}
pick chooses a random peer that still has transactions waiting (represented by a txsync, which pairs a peer with its remaining transactions), and send is called for it. This send–pick cycle continues until everything queued has been sent; a txsync that was only partially sent stays in pending and will be picked again later.
Header image from Unsplash: https://unsplash.com/photos/fR9U2S31Exs