Diffstat (limited to 'core/headerchain.go')
-rw-r--r-- | core/headerchain.go | 161
1 file changed, 110 insertions, 51 deletions
diff --git a/core/headerchain.go b/core/headerchain.go
index d956226..20f6bd3 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -30,9 +30,9 @@ import (
 	"github.com/ava-labs/coreth/core/rawdb"
 	"github.com/ava-labs/coreth/core/types"
 	"github.com/ava-labs/coreth/params"
-	"github.com/ava-labs/go-ethereum/common"
-	"github.com/ava-labs/go-ethereum/ethdb"
-	"github.com/ava-labs/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/log"
 	lru "github.com/hashicorp/golang-lru"
 )
 
@@ -45,6 +45,14 @@ const (
 // HeaderChain implements the basic block header chain logic that is shared by
 // core.BlockChain and light.LightChain. It is not usable in itself, only as
 // a part of either structure.
+//
+// HeaderChain is responsible for maintaining the header chain, including
+// header querying and updating.
+//
+// The components maintained by headerchain include: (1) total difficulty,
+// (2) header, (3) block hash -> number mapping, (4) canonical number -> hash mapping,
+// and (5) head header flag.
+//
 // It is not thread safe either, the encapsulating chain structures should do
 // the necessary mutex locking/unlocking.
 type HeaderChain struct {
@@ -66,10 +74,8 @@ type HeaderChain struct {
 	engine consensus.Engine
 }
 
-// NewHeaderChain creates a new HeaderChain structure.
-// getValidator should return the parent's validator
-// procInterrupt points to the parent's interrupt semaphore
-// wg points to the parent's shutdown wait group
+// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
+// to the parent's interrupt semaphore.
 func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
 	headerCache, _ := lru.New(headerCacheLimit)
 	tdCache, _ := lru.New(tdCacheLimit)
@@ -143,29 +149,46 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 	if ptd == nil {
 		return NonStatTy, consensus.ErrUnknownAncestor
 	}
-	localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
+	head := hc.CurrentHeader().Number.Uint64()
+	localTd := hc.GetTd(hc.currentHeaderHash, head)
 	externTd := new(big.Int).Add(header.Difficulty, ptd)
 
 	// Irrelevant of the canonical status, write the td and header to the database
-	if err := hc.WriteTd(hash, number, externTd); err != nil {
-		log.Crit("Failed to write header total difficulty", "err", err)
+	//
+	// Note all the components of header (td, hash->number index and header) should
+	// be written atomically.
+	headerBatch := hc.chainDb.NewBatch()
+	rawdb.WriteTd(headerBatch, hash, number, externTd)
+	rawdb.WriteHeader(headerBatch, header)
+	if err := headerBatch.Write(); err != nil {
+		log.Crit("Failed to write header into disk", "err", err)
 	}
-	rawdb.WriteHeader(hc.chainDb, header)
-
 	// If the total difficulty is higher than our known, add it to the canonical chain
 	// Second clause in the if statement reduces the vulnerability to selfish mining.
 	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
-	if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
+	reorg := externTd.Cmp(localTd) > 0
+	if !reorg && externTd.Cmp(localTd) == 0 {
+		if header.Number.Uint64() < head {
+			reorg = true
+		} else if header.Number.Uint64() == head {
+			reorg = mrand.Float64() < 0.5
+		}
+	}
+	if reorg {
+		// If the header can be added into canonical chain, adjust the
+		// header chain markers(canonical indexes and head header flag).
+		//
+		// Note all markers should be written atomically.
+
 		// Delete any canonical number assignments above the new head
-		batch := hc.chainDb.NewBatch()
+		markerBatch := hc.chainDb.NewBatch()
 		for i := number + 1; ; i++ {
 			hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
 			if hash == (common.Hash{}) {
 				break
 			}
-			rawdb.DeleteCanonicalHash(batch, i)
+			rawdb.DeleteCanonicalHash(markerBatch, i)
 		}
-		batch.Write()
 
 		// Overwrite any stale canonical number assignments
 		var (
@@ -174,16 +197,19 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 			headHeader = hc.GetHeader(headHash, headNumber)
 		)
 		for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
-			rawdb.WriteCanonicalHash(hc.chainDb, headHash, headNumber)
+			rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber)
 
 			headHash = headHeader.ParentHash
 			headNumber = headHeader.Number.Uint64() - 1
 			headHeader = hc.GetHeader(headHash, headNumber)
 		}
 		// Extend the canonical chain with the new header
-		rawdb.WriteCanonicalHash(hc.chainDb, hash, number)
-		rawdb.WriteHeadHeaderHash(hc.chainDb, hash)
-
+		rawdb.WriteCanonicalHash(markerBatch, hash, number)
+		rawdb.WriteHeadHeaderHash(markerBatch, hash)
+		if err := markerBatch.Write(); err != nil {
+			log.Crit("Failed to write header markers into disk", "err", err)
+		}
+		// Last step update all in-memory head header markers
 		hc.currentHeaderHash = hash
 		hc.currentHeader.Store(types.CopyHeader(header))
 		headHeaderGauge.Update(header.Number.Int64())
@@ -192,9 +218,9 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 	} else {
 		status = SideStatTy
 	}
+	hc.tdCache.Add(hash, externTd)
 	hc.headerCache.Add(hash, header)
 	hc.numberCache.Add(hash, number)
-
 	return
 }
 
@@ -349,8 +375,11 @@ func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, ma
 	}
 	for ancestor != 0 {
 		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
-			number -= ancestor
-			return rawdb.ReadCanonicalHash(hc.chainDb, number), number
+			ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
+			if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
+				number -= ancestor
+				return ancestorHash, number
+			}
 		}
 		if *maxNonCanonical == 0 {
 			return common.Hash{}, 0
@@ -393,14 +422,6 @@ func (hc *HeaderChain) GetTdByHash(hash common.Hash) *big.Int {
 	return hc.GetTd(hash, *number)
 }
 
-// WriteTd stores a block's total difficulty into the database, also caching it
-// along the way.
-func (hc *HeaderChain) WriteTd(hash common.Hash, number uint64, td *big.Int) error {
-	rawdb.WriteTd(hc.chainDb, hash, number, td)
-	hc.tdCache.Add(hash, new(big.Int).Set(td))
-	return nil
-}
-
 // GetHeader retrieves a block header from the database by hash and number,
 // caching it if found.
 func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
@@ -428,6 +449,8 @@ func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
 }
 
 // HasHeader checks if a block header is present in the database or not.
+// In theory, if the header is present in the database, all related components
+// like td and hash->number should be present too.
 func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
 	if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
 		return true
@@ -445,16 +468,19 @@ func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
 	return hc.GetHeader(hash, number)
 }
 
+func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
+	return rawdb.ReadCanonicalHash(hc.chainDb, number)
+}
+
 // CurrentHeader retrieves the current head header of the canonical chain. The
 // header is retrieved from the HeaderChain's internal cache.
 func (hc *HeaderChain) CurrentHeader() *types.Header {
 	return hc.currentHeader.Load().(*types.Header)
 }
 
-// SetCurrentHeader sets the current head header of the canonical chain.
+// SetCurrentHeader sets the in-memory head header marker of the canonical chain
+// as the given header.
 func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
-	rawdb.WriteHeadHeaderHash(hc.chainDb, head.Hash())
-
 	hc.currentHeader.Store(head)
 	hc.currentHeaderHash = head.Hash()
 	headHeaderGauge.Update(head.Number.Int64())
@@ -462,8 +488,10 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
 
 type (
 	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
-	// before head header is updated.
-	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header)
+	// before head header is updated. The method will return the actual block it
+	// updated the head to (in case the state is missing) and a flag whether setHead
+	// should continue rewinding to it forcefully (exceeded ancient limits).
+	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool)
 
 	// DeleteBlockContentCallback is a callback function that is called by SetHead
 	// before each header is deleted.
@@ -476,9 +504,10 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
 	var (
 		parentHash common.Hash
 		batch      = hc.chainDb.NewBatch()
+		origin     = true
 	)
 	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
-		hash, num := hdr.Hash(), hdr.Number.Uint64()
+		num := hdr.Number.Uint64()
 
 		// Rewind block chain to new head.
 		parent := hc.GetHeader(hdr.ParentHash, num-1)
@@ -486,34 +515,64 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
 			parent = hc.genesisHeader
 		}
 		parentHash = hdr.ParentHash
+
 		// Notably, since geth has the possibility for setting the head to a low
 		// height which is even lower than ancient head.
 		// In order to ensure that the head is always no higher than the data in
-		// the database(ancient store or active store), we need to update head
+		// the database (ancient store or active store), we need to update head
 		// first then remove the relative data from the database.
 		//
 		// Update head first(head fast block, head full block) before deleting the data.
+		markerBatch := hc.chainDb.NewBatch()
 		if updateFn != nil {
-			updateFn(hc.chainDb, parent)
+			newHead, force := updateFn(markerBatch, parent)
+			if force && newHead < head {
+				log.Warn("Force rewinding till ancient limit", "head", newHead)
+				head = newHead
+			}
 		}
 		// Update head header then.
-		rawdb.WriteHeadHeaderHash(hc.chainDb, parentHash)
-
-		// Remove the relative data from the database.
-		if delFn != nil {
-			delFn(batch, hash, num)
+		rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
+		if err := markerBatch.Write(); err != nil {
+			log.Crit("Failed to update chain markers", "error", err)
 		}
-		// Rewind header chain to new head.
-		rawdb.DeleteHeader(batch, hash, num)
-		rawdb.DeleteTd(batch, hash, num)
-		rawdb.DeleteCanonicalHash(batch, num)
-
 		hc.currentHeader.Store(parent)
 		hc.currentHeaderHash = parentHash
 		headHeaderGauge.Update(parent.Number.Int64())
-	}
-	batch.Write()
 
+		// If this is the first iteration, wipe any leftover data upwards too so
+		// we don't end up with dangling data in the database
+		var nums []uint64
+		if origin {
+			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
+				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
+			}
+			origin = false
+		}
+		nums = append(nums, num)
+
+		// Remove the related data from the database on all sidechains
+		for _, num := range nums {
+			// Gather all the side fork hashes
+			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
+			if len(hashes) == 0 {
+				// No hashes in the database whatsoever, probably frozen already
+				hashes = append(hashes, hdr.Hash())
+			}
+			for _, hash := range hashes {
+				if delFn != nil {
+					delFn(batch, hash, num)
+				}
+				rawdb.DeleteHeader(batch, hash, num)
+				rawdb.DeleteTd(batch, hash, num)
			}
+			rawdb.DeleteCanonicalHash(batch, num)
+		}
+	}
+	// Flush all accumulated deletions.
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to rewind block", "error", err)
+	}
 	// Clear out any stale content from the caches
 	hc.headerCache.Purge()
 	hc.tdCache.Purge()
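
The two ideas the patch keeps returning to can be read in isolation from the surrounding code. First, the new fork-choice check in WriteHeader: a strictly higher total difficulty always wins, an equal difficulty prefers the lower block number, and only a full tie falls back to the random coin flip that mitigates selfish mining. A minimal, self-contained sketch of that rule (the shouldReorg helper and its arguments are illustrative, not part of the patch):

package main

import (
	"fmt"
	"math/big"
	mrand "math/rand"
)

// shouldReorg mirrors the decision introduced in WriteHeader: a strictly
// higher total difficulty wins outright; on a difficulty tie the lower
// height wins; on a full tie a coin flip decides (selfish-mining mitigation).
func shouldReorg(localTd, externTd *big.Int, localHeight, externHeight uint64) bool {
	if externTd.Cmp(localTd) > 0 {
		return true
	}
	if externTd.Cmp(localTd) < 0 {
		return false
	}
	switch {
	case externHeight < localHeight:
		return true
	case externHeight == localHeight:
		return mrand.Float64() < 0.5
	default:
		return false
	}
}

func main() {
	fmt.Println(shouldReorg(big.NewInt(100), big.NewInt(101), 7, 9)) // true: more total difficulty
	fmt.Println(shouldReorg(big.NewInt(100), big.NewInt(100), 9, 7)) // true: same difficulty, lower height
	fmt.Println(shouldReorg(big.NewInt(100), big.NewInt(99), 7, 7))  // false: less total difficulty
}

Second, the batching pattern: each group of related writes (td plus header, or the canonical-hash markers plus the head-header hash) is staged in an ethdb batch and flushed with a single Write call, so a crash cannot persist only part of the group. A rough usage sketch against go-ethereum's in-memory database follows; the keys and values are made up for illustration and do not correspond to the real rawdb schema.

package main

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/log"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// Stage related writes together; nothing is persisted until Write is called.
	batch := db.NewBatch()
	batch.Put([]byte("header-0xabc"), []byte("rlp-encoded-header"))
	batch.Put([]byte("td-0xabc"), []byte("total-difficulty"))

	// Flush the whole group in one go, mirroring headerBatch/markerBatch above.
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write batch", "err", err)
	}
}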