authorDeterminant <[email protected]>2019-10-21 14:47:51 -0400
committerDeterminant <[email protected]>2019-10-21 14:47:51 -0400
commit79b1169a9ff0b54ddf3b520a70a79c78ba5c988d (patch)
tree13fc62be3ebf344544547eeb9979450a3c6ecd40 /core/blockchain.go
parent913e9439a7c7883881895ee597a0cc464fb92353 (diff)
customize Blockchain code
Diffstat (limited to 'core/blockchain.go')
-rw-r--r--core/blockchain.go2235
1 files changed, 2235 insertions, 0 deletions
diff --git a/core/blockchain.go b/core/blockchain.go
new file mode 100644
index 0000000..174d403
--- /dev/null
+++ b/core/blockchain.go
@@ -0,0 +1,2235 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package core implements the Ethereum consensus protocol.
+package core
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ mrand "math/rand"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ava-labs/go-ethereum/common"
+ "github.com/ava-labs/go-ethereum/common/mclock"
+ "github.com/ava-labs/go-ethereum/common/prque"
+ "github.com/ava-labs/go-ethereum/consensus"
+ "github.com/ava-labs/go-ethereum/core/rawdb"
+ "github.com/ava-labs/go-ethereum/core/state"
+ "github.com/ava-labs/go-ethereum/core/types"
+ "github.com/ava-labs/go-ethereum/core/vm"
+ "github.com/ava-labs/go-ethereum/ethdb"
+ "github.com/ava-labs/go-ethereum/event"
+ "github.com/ava-labs/go-ethereum/log"
+ "github.com/ava-labs/go-ethereum/metrics"
+ "github.com/ava-labs/go-ethereum/params"
+ "github.com/ava-labs/go-ethereum/rlp"
+ "github.com/ava-labs/go-ethereum/trie"
+ "github.com/hashicorp/golang-lru"
+)
+
+var (
+ headBlockGauge = metrics.NewRegisteredGauge("chain/head/block", nil)
+ headHeaderGauge = metrics.NewRegisteredGauge("chain/head/header", nil)
+ headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)
+
+ accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil)
+ accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil)
+ accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
+ accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)
+
+ storageReadTimer = metrics.NewRegisteredTimer("chain/storage/reads", nil)
+ storageHashTimer = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
+ storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
+ storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
+
+ blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+ blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
+ blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
+ blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
+ blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
+ blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
+
+ blockPrefetchExecuteTimer = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
+ blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
+
+ errInsertionInterrupted = errors.New("insertion is interrupted")
+)
+
+const (
+ bodyCacheLimit = 256
+ blockCacheLimit = 256
+ receiptsCacheLimit = 32
+ txLookupCacheLimit = 1024
+ maxFutureBlocks = 256
+ maxTimeFutureBlocks = 30
+ badBlockLimit = 10
+ TriesInMemory = 128
+
+ // BlockChainVersion ensures that an incompatible database forces a resync from scratch.
+ //
+ // Changelog:
+ //
+ // - Version 4
+ // The following incompatible database changes were added:
+ // * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
+ // * the `Bloom` field of receipt is deleted
+ // * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
+ // - Version 5
+ // The following incompatible database changes were added:
+ // * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
+ // * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
+ // receipts' corresponding block
+ // - Version 6
+ // The following incompatible database changes were added:
+ // * Transaction lookup information stores the corresponding block number instead of block hash
+ // - Version 7
+ // The following incompatible database changes were added:
+ // * Use freezer as the ancient database to maintain all ancient data
+ BlockChainVersion uint64 = 7
+)
+
+// CacheConfig contains the configuration values for the trie caching/pruning
+// that's resident in a blockchain.
+type CacheConfig struct {
+ TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
+ TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks
+ TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
+ TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node)
+ TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
+}
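+
+// A hypothetical configuration sketch (hedged): these values mirror the
+// defaults that NewBlockChain falls back to below when cacheConfig is nil;
+// illustrative only, not a recommendation.
+//
+//   cfg := &CacheConfig{
+//       TrieCleanLimit: 256,             // MB of clean trie cache
+//       TrieDirtyLimit: 256,             // MB of dirty trie nodes before flushing
+//       TrieTimeLimit:  5 * time.Minute, // max time between in-memory trie flushes
+//   }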
+
+// BlockChain represents the canonical chain given a database with a genesis
+// block. The BlockChain manages chain imports, reverts and chain reorganisations.
+//
+// Importing blocks into the block chain happens according to the set of rules
+// defined by the two-stage Validator. Blocks are processed using the Processor,
+// which executes the included transactions. The validation of the state is done
+// in the second part of the Validator. Any failure aborts the import.
+//
+// The BlockChain also helps in returning blocks from **any** chain included
+// in the database as well as blocks that represent the canonical chain. It is
+// important to note that GetBlock can return any block, which need not be part
+// of the canonical chain, whereas GetBlockByNumber always refers to the
+// canonical chain.
+type BlockChain struct {
+ chainConfig *params.ChainConfig // Chain & network configuration
+ cacheConfig *CacheConfig // Cache configuration for pruning
+
+ db ethdb.Database // Low level persistent database to store final content in
+ triegc *prque.Prque // Priority queue mapping block numbers to tries to gc
+ gcproc time.Duration // Accumulates canonical block processing for trie dumping
+
+ hc *HeaderChain
+ rmLogsFeed event.Feed
+ chainFeed event.Feed
+ chainSideFeed event.Feed
+ chainHeadFeed event.Feed
+ logsFeed event.Feed
+ blockProcFeed event.Feed
+ scope event.SubscriptionScope
+ genesisBlock *types.Block
+
+ chainmu sync.RWMutex // blockchain insertion lock
+
+ currentBlock atomic.Value // Current head of the block chain
+ currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
+
+ stateCache state.Database // State database to reuse between imports (contains state cache)
+ bodyCache *lru.Cache // Cache for the most recent block bodies
+ bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
+ receiptsCache *lru.Cache // Cache for the most recent receipts per block
+ blockCache *lru.Cache // Cache for the most recent entire blocks
+ txLookupCache *lru.Cache // Cache for the most recent transaction lookup data.
+ futureBlocks *lru.Cache // future blocks are blocks added for later processing
+
+ quit chan struct{} // blockchain quit channel
+ running int32 // running must be accessed atomically
+ procInterrupt int32 // interrupt signaler for block processing; must be accessed atomically
+ wg sync.WaitGroup // chain processing wait group for shutting down
+
+ engine consensus.Engine
+ validator Validator // Block and state validator interface
+ prefetcher Prefetcher // Block state prefetcher interface
+ processor Processor // Block transaction processor interface
+ vmConfig vm.Config
+
+ badBlocks *lru.Cache // Bad block cache
+ shouldPreserve func(*types.Block) bool // Function used to determine whether to preserve the given block.
+ terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
+ manualCanonical bool
+}
+
+// NewBlockChain returns a fully initialised block chain using information
+// available in the database. It initialises the default Ethereum Validator and
+// Processor.
+func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, manualCanonical bool) (*BlockChain, error) {
+ if cacheConfig == nil {
+ cacheConfig = &CacheConfig{
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
+ }
+ }
+ bodyCache, _ := lru.New(bodyCacheLimit)
+ bodyRLPCache, _ := lru.New(bodyCacheLimit)
+ receiptsCache, _ := lru.New(receiptsCacheLimit)
+ blockCache, _ := lru.New(blockCacheLimit)
+ txLookupCache, _ := lru.New(txLookupCacheLimit)
+ futureBlocks, _ := lru.New(maxFutureBlocks)
+ badBlocks, _ := lru.New(badBlockLimit)
+
+ bc := &BlockChain{
+ chainConfig: chainConfig,
+ cacheConfig: cacheConfig,
+ db: db,
+ triegc: prque.New(nil),
+ stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
+ quit: make(chan struct{}),
+ shouldPreserve: shouldPreserve,
+ bodyCache: bodyCache,
+ bodyRLPCache: bodyRLPCache,
+ receiptsCache: receiptsCache,
+ blockCache: blockCache,
+ txLookupCache: txLookupCache,
+ futureBlocks: futureBlocks,
+ engine: engine,
+ vmConfig: vmConfig,
+ badBlocks: badBlocks,
+ manualCanonical: manualCanonical,
+ }
+ bc.validator = NewBlockValidator(chainConfig, bc, engine)
+ bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
+ bc.processor = NewStateProcessor(chainConfig, bc, engine)
+
+ var err error
+ bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
+ if err != nil {
+ return nil, err
+ }
+ bc.genesisBlock = bc.GetBlockByNumber(0)
+ if bc.genesisBlock == nil {
+ return nil, ErrNoGenesis
+ }
+ // If the node's chain is empty, initialize it from any data present in the ancient store (freezer).
+ if bc.empty() {
+ rawdb.InitDatabaseFromFreezer(bc.db)
+ }
+ if err := bc.loadLastState(); err != nil {
+ return nil, err
+ }
+ // The first thing the node will do is reconstruct the verification data for
+ // the head block (ethash cache or clique voting snapshot). Might as well do
+ // it in advance.
+ bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)
+
+ if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
+ var (
+ needRewind bool
+ low uint64
+ )
+ // The head full block may be rolled back to a very low height due to
+ // blockchain repair. If the head full block is even lower than the ancient
+ // chain, truncate the ancient store.
+ fullBlock := bc.CurrentBlock()
+ if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 {
+ needRewind = true
+ low = fullBlock.NumberU64()
+ }
+ // In fast sync, it may happen that ancient data has been written to the
+ // ancient store, but the LastFastBlock has not been updated, truncate the
+ // extra data here.
+ fastBlock := bc.CurrentFastBlock()
+ if fastBlock != nil && fastBlock.NumberU64() < frozen-1 {
+ needRewind = true
+ if fastBlock.NumberU64() < low || low == 0 {
+ low = fastBlock.NumberU64()
+ }
+ }
+ if needRewind {
+ var hashes []common.Hash
+ previous := bc.CurrentHeader().Number.Uint64()
+ for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ {
+ hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i))
+ }
+ bc.Rollback(hashes)
+ log.Warn("Truncate ancient chain", "from", previous, "to", low)
+ }
+ }
+ // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
+ for hash := range BadHashes {
+ if header := bc.GetHeaderByHash(hash); header != nil {
+ // get the canonical block corresponding to the offending header's number
+ headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
+ // make sure the headerByNumber (if present) is in our current canonical chain
+ if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
+ log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
+ bc.SetHead(header.Number.Uint64() - 1)
+ log.Error("Chain rewind was successful, resuming normal operation")
+ }
+ }
+ }
+ // Take ownership of this particular state
+ go bc.update()
+ return bc, nil
+}
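+
+// A minimal usage sketch (hedged; `engine` stands for a pre-built consensus
+// engine and is not defined in this file, and rawdb.NewMemoryDatabase is used
+// purely for illustration). Note that a genesis block must already be committed
+// to db, otherwise ErrNoGenesis is returned:
+//
+//   db := rawdb.NewMemoryDatabase()
+//   chain, err := NewBlockChain(db, nil, params.TestChainConfig, engine, vm.Config{}, nil, false)
+//   if err != nil {
+//       log.Crit("failed to create chain", "err", err)
+//   }
+//   defer chain.Stop()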
+
+func (bc *BlockChain) getProcInterrupt() bool {
+ return atomic.LoadInt32(&bc.procInterrupt) == 1
+}
+
+// GetVMConfig returns the block chain VM config.
+func (bc *BlockChain) GetVMConfig() *vm.Config {
+ return &bc.vmConfig
+}
+
+// empty returns an indicator whether the blockchain is empty.
+// Note, it's a special case that we connect a non-empty ancient
+// database with an empty node, so that we can plug the ancient
+// store into the node seamlessly.
+func (bc *BlockChain) empty() bool {
+ genesis := bc.genesisBlock.Hash()
+ for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
+ if hash != genesis {
+ return false
+ }
+ }
+ return true
+}
+
+// loadLastState loads the last known chain state from the database. This method
+// assumes that the chain manager mutex is held.
+func (bc *BlockChain) loadLastState() error {
+ // Restore the last known head block
+ head := rawdb.ReadHeadBlockHash(bc.db)
+ if head == (common.Hash{}) {
+ // Corrupt or empty database, init from scratch
+ log.Warn("Empty database, resetting chain")
+ return bc.Reset()
+ }
+ // Make sure the entire head block is available
+ currentBlock := bc.GetBlockByHash(head)
+ if currentBlock == nil {
+ // Corrupt or empty database, init from scratch
+ log.Warn("Head block missing, resetting chain", "hash", head)
+ return bc.Reset()
+ }
+ // Make sure the state associated with the block is available
+ if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
+ // Dangling block without a state associated, init from scratch
+ log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
+ if err := bc.repair(&currentBlock); err != nil {
+ return err
+ }
+ rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
+ }
+ // Everything seems to be fine, set as the head block
+ bc.currentBlock.Store(currentBlock)
+ headBlockGauge.Update(int64(currentBlock.NumberU64()))
+
+ // Restore the last known head header
+ currentHeader := currentBlock.Header()
+ if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
+ if header := bc.GetHeaderByHash(head); header != nil {
+ currentHeader = header
+ }
+ }
+ bc.hc.SetCurrentHeader(currentHeader)
+
+ // Restore the last known head fast block
+ bc.currentFastBlock.Store(currentBlock)
+ headFastBlockGauge.Update(int64(currentBlock.NumberU64()))
+
+ if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
+ if block := bc.GetBlockByHash(head); block != nil {
+ bc.currentFastBlock.Store(block)
+ headFastBlockGauge.Update(int64(block.NumberU64()))
+ }
+ }
+ // Issue a status log for the user
+ currentFastBlock := bc.CurrentFastBlock()
+
+ headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
+ blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
+ fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
+
+ log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
+ log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
+ log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
+
+ return nil
+}
+
+// SetHead rewinds the local chain to a new head. In the case of headers, everything
+// above the new head will be deleted and the new one set. In the case of blocks
+// though, the head may be further rewound if block bodies are missing (non-archive
+// nodes after a fast sync).
+func (bc *BlockChain) SetHead(head uint64) error {
+ log.Warn("Rewinding blockchain", "target", head)
+
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
+
+ updateFn := func(db ethdb.KeyValueWriter, header *types.Header) {
+ // Rewind the block chain, ensuring we don't end up with a stateless head block
+ if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() {
+ newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
+ if newHeadBlock == nil {
+ newHeadBlock = bc.genesisBlock
+ } else {
+ if _, err := state.New(newHeadBlock.Root(), bc.stateCache); err != nil {
+ // Rewound state missing, rolled back to before pivot, reset to genesis
+ newHeadBlock = bc.genesisBlock
+ }
+ }
+ rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
+ bc.currentBlock.Store(newHeadBlock)
+ headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
+ }
+
+ // Rewind the fast block in a simpleton way to the target head
+ if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
+ newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
+ // If the rewound block is nil, reset to the genesis state
+ if newHeadFastBlock == nil {
+ newHeadFastBlock = bc.genesisBlock
+ }
+ rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())
+ bc.currentFastBlock.Store(newHeadFastBlock)
+ headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
+ }
+ }
+
+ // Rewind the header chain, deleting all block bodies until then
+ delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
+ // Ignore the error here since light client won't hit this path
+ frozen, _ := bc.db.Ancients()
+ if num+1 <= frozen {
+ // Truncate all related data (header, total difficulty, body, receipt
+ // and canonical hash) from the ancient store.
+ if err := bc.db.TruncateAncients(num + 1); err != nil {
+ log.Crit("Failed to truncate ancient data", "number", num, "err", err)
+ }
+
+ // Remove the hash <-> number mapping from the active store.
+ rawdb.DeleteHeaderNumber(db, hash)
+ } else {
+ // Remove related body and receipts from the active store.
+ // The header, total difficulty and canonical hash will be
+ // removed in the hc.SetHead function.
+ rawdb.DeleteBody(db, hash, num)
+ rawdb.DeleteReceipts(db, hash, num)
+ }
+ // Todo(rjl493456442) txlookup, bloombits, etc
+ }
+ bc.hc.SetHead(head, updateFn, delFn)
+
+ // Clear out any stale content from the caches
+ bc.bodyCache.Purge()
+ bc.bodyRLPCache.Purge()
+ bc.receiptsCache.Purge()
+ bc.blockCache.Purge()
+ bc.txLookupCache.Purge()
+ bc.futureBlocks.Purge()
+
+ return bc.loadLastState()
+}
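+
+// Usage sketch (illustrative only, assuming the current head is more than ten
+// blocks above genesis):
+//
+//   target := bc.CurrentBlock().NumberU64() - 10
+//   if err := bc.SetHead(target); err != nil {
+//       log.Error("Chain rewind failed", "err", err)
+//   }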
+
+// FastSyncCommitHead sets the current head block to the one defined by the hash,
+// regardless of what the chain contents were prior.
+func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
+ // Make sure that both the block and its state trie exist
+ block := bc.GetBlockByHash(hash)
+ if block == nil {
+ return fmt.Errorf("non existent block [%x…]", hash[:4])
+ }
+ if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
+ return err
+ }
+ // If all checks out, manually set the head block
+ bc.chainmu.Lock()
+ bc.currentBlock.Store(block)
+ headBlockGauge.Update(int64(block.NumberU64()))
+ bc.chainmu.Unlock()
+
+ log.Info("Committed new head block", "number", block.Number(), "hash", hash)
+ return nil
+}
+
+// GasLimit returns the gas limit of the current HEAD block.
+func (bc *BlockChain) GasLimit() uint64 {
+ return bc.CurrentBlock().GasLimit()
+}
+
+// CurrentBlock retrieves the current head block of the canonical chain. The
+// block is retrieved from the blockchain's internal cache.
+func (bc *BlockChain) CurrentBlock() *types.Block {
+ return bc.currentBlock.Load().(*types.Block)
+}
+
+// CurrentFastBlock retrieves the current fast-sync head block of the canonical
+// chain. The block is retrieved from the blockchain's internal cache.
+func (bc *BlockChain) CurrentFastBlock() *types.Block {
+ return bc.currentFastBlock.Load().(*types.Block)
+}
+
+// Validator returns the current validator.
+func (bc *BlockChain) Validator() Validator {
+ return bc.validator
+}
+
+// Processor returns the current processor.
+func (bc *BlockChain) Processor() Processor {
+ return bc.processor
+}
+
+// State returns a new mutable state based on the current HEAD block.
+func (bc *BlockChain) State() (*state.StateDB, error) {
+ return bc.StateAt(bc.CurrentBlock().Root())
+}
+
+// StateAt returns a new mutable state based on a particular point in time.
+func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
+ return state.New(root, bc.stateCache)
+}
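+
+// Sketch of reading account state at the current head (hedged; `addr` is a
+// placeholder common.Address, not defined here):
+//
+//   statedb, err := bc.State()
+//   if err != nil {
+//       return err
+//   }
+//   balance := statedb.GetBalance(addr)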
+
+// StateCache returns the caching database underpinning the blockchain instance.
+func (bc *BlockChain) StateCache() state.Database {
+ return bc.stateCache
+}
+
+// Reset purges the entire blockchain, restoring it to its genesis state.
+func (bc *BlockChain) Reset() error {
+ return bc.ResetWithGenesisBlock(bc.genesisBlock)
+}
+
+// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
+// specified genesis state.
+func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
+ // Dump the entire block chain and purge the caches
+ if err := bc.SetHead(0); err != nil {
+ return err
+ }
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
+
+ // Prepare the genesis block and reinitialise the chain
+ if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
+ log.Crit("Failed to write genesis block TD", "err", err)
+ }
+ rawdb.WriteBlock(bc.db, genesis)
+
+ bc.genesisBlock = genesis
+ bc.insert(bc.genesisBlock)
+ bc.currentBlock.Store(bc.genesisBlock)
+ headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
+
+ bc.hc.SetGenesis(bc.genesisBlock.Header())
+ bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
+ bc.currentFastBlock.Store(bc.genesisBlock)
+ headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
+
+ return nil
+}
+
+// repair tries to repair the current blockchain by rolling back the current block
+// until one with associated state is found. This is needed to fix incomplete db
+// writes caused either by crashes/power outages, or simply non-committed tries.
+//
+// This method only rolls back the current block. The current header and current
+// fast block are left intact.
+func (bc *BlockChain) repair(head **types.Block) error {
+ for {
+ // Abort if we've rewound to a head block that does have associated state
+ if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
+ log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
+ return nil
+ }
+ // Otherwise rewind one block and recheck state availability there
+ block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
+ if block == nil {
+ return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
+ }
+ *head = block
+ }
+}
+
+// Export writes the active chain to the given writer.
+func (bc *BlockChain) Export(w io.Writer) error {
+ return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
+}
+
+// ExportN writes a subset of the active chain to the given writer.
+func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
+ bc.chainmu.RLock()
+ defer bc.chainmu.RUnlock()
+
+ if first > last {
+ return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
+ }
+ log.Info("Exporting batch of blocks", "count", last-first+1)
+
+ start, reported := time.Now(), time.Now()
+ for nr := first; nr <= last; nr++ {
+ block := bc.GetBlockByNumber(nr)
+ if block == nil {
+ return fmt.Errorf("export failed on #%d: not found", nr)
+ }
+ if err := block.EncodeRLP(w); err != nil {
+ return err
+ }
+ if time.Since(reported) >= statsReportLimit {
+ log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
+ reported = time.Now()
+ }
+ }
+ return nil
+}
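+
+// Usage sketch (illustrative; the `os` package is not imported by this file):
+//
+//   f, err := os.Create("chain.rlp")
+//   if err != nil {
+//       return err
+//   }
+//   defer f.Close()
+//   return bc.ExportN(f, 0, bc.CurrentBlock().NumberU64())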
+
+// insert injects a new head block into the current block chain. This method
+// assumes that the block is indeed a true head. It will also reset the head
+// header and the head fast sync block to this very same block if they are older
+// or if they are on a different side chain.
+//
+// Note, this function assumes that the `mu` mutex is held!
+func (bc *BlockChain) insert(block *types.Block) {
+ // If the block is on a side chain or an unknown one, force other heads onto it too
+ updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
+
+ // Add the block to the canonical chain number scheme and mark as the head
+ rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
+ rawdb.WriteHeadBlockHash(bc.db, block.Hash())
+
+ bc.currentBlock.Store(block)
+ headBlockGauge.Update(int64(block.NumberU64()))
+
+ // If the block is better than our head or is on a different chain, force update heads
+ if updateHeads {
+ bc.hc.SetCurrentHeader(block.Header())
+ rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())
+
+ bc.currentFastBlock.Store(block)
+ headFastBlockGauge.Update(int64(block.NumberU64()))
+ }
+}
+
+// Genesis retrieves the chain's genesis block.
+func (bc *BlockChain) Genesis() *types.Block {
+ return bc.genesisBlock
+}
+
+// GetBody retrieves a block body (transactions and uncles) from the database by
+// hash, caching it if found.
+func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
+ // Short circuit if the body's already in the cache, retrieve otherwise
+ if cached, ok := bc.bodyCache.Get(hash); ok {
+ body := cached.(*types.Body)
+ return body
+ }
+ number := bc.hc.GetBlockNumber(hash)
+ if number == nil {
+ return nil
+ }
+ body := rawdb.ReadBody(bc.db, hash, *number)
+ if body == nil {
+ return nil
+ }
+ // Cache the found body for next time and return
+ bc.bodyCache.Add(hash, body)
+ return body
+}
+
+// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
+// caching it if found.
+func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
+ // Short circuit if the body's already in the cache, retrieve otherwise
+ if cached, ok := bc.bodyRLPCache.Get(hash); ok {
+ return cached.(rlp.RawValue)
+ }
+ number := bc.hc.GetBlockNumber(hash)
+ if number == nil {
+ return nil
+ }
+ body := rawdb.ReadBodyRLP(bc.db, hash, *number)
+ if len(body) == 0 {
+ return nil
+ }
+ // Cache the found body for next time and return
+ bc.bodyRLPCache.Add(hash, body)
+ return body
+}
+
+// HasBlock checks if a block is fully present in the database or not.
+func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
+ if bc.blockCache.Contains(hash) {
+ return true
+ }
+ return rawdb.HasBody(bc.db, hash, number)
+}
+
+// HasFastBlock checks if a fast block is fully present in the database or not.
+func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
+ if !bc.HasBlock(hash, number) {
+ return false
+ }
+ if bc.receiptsCache.Contains(hash) {
+ return true
+ }
+ return rawdb.HasReceipts(bc.db, hash, number)
+}
+
+// HasState checks if state trie is fully present in the database or not.
+func (bc *BlockChain) HasState(hash common.Hash) bool {
+ _, err := bc.stateCache.OpenTrie(hash)
+ return err == nil
+}
+
+// HasBlockAndState checks if a block and its associated state trie are fully
+// present in the database or not, caching it if present.
+func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
+ // Check first that the block itself is known
+ block := bc.GetBlock(hash, number)
+ if block == nil {
+ return false
+ }
+ return bc.HasState(block.Root())
+}
+
+// GetBlock retrieves a block from the database by hash and number,
+// caching it if found.
+func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
+ // Short circuit if the block's already in the cache, retrieve otherwise
+ if block, ok := bc.blockCache.Get(hash); ok {
+ return block.(*types.Block)
+ }
+ block := rawdb.ReadBlock(bc.db, hash, number)
+ if block == nil {
+ return nil
+ }
+ // Cache the found block for next time and return
+ bc.blockCache.Add(block.Hash(), block)
+ return block
+}
+
+// GetBlockByHash retrieves a block from the database by hash, caching it if found.
+func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
+ number := bc.hc.GetBlockNumber(hash)
+ if number == nil {
+ return nil
+ }
+ return bc.GetBlock(hash, *number)
+}
+
+// GetBlockByNumber retrieves a block from the database by number, caching it
+// (associated with its hash) if found.
+func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
+ hash := rawdb.ReadCanonicalHash(bc.db, number)
+ if hash == (common.Hash{}) {
+ return nil
+ }
+ return bc.GetBlock(hash, number)
+}
+
+// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
+func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
+ if receipts, ok := bc.receiptsCache.Get(hash); ok {
+ return receipts.(types.Receipts)
+ }
+ number := rawdb.ReadHeaderNumber(bc.db, hash)
+ if number == nil {
+ return nil
+ }
+ receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
+ if receipts == nil {
+ return nil
+ }
+ bc.receiptsCache.Add(hash, receipts)
+ return receipts
+}
+
+// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
+// [deprecated by eth/62]
+func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
+ number := bc.hc.GetBlockNumber(hash)
+ if number == nil {
+ return nil
+ }
+ for i := 0; i < n; i++ {
+ block := bc.GetBlock(hash, *number)
+ if block == nil {
+ break
+ }
+ blocks = append(blocks, block)
+ hash = block.ParentHash()
+ *number--
+ }
+ return
+}
+
+// GetUnclesInChain retrieves all the uncles from a given block backwards until
+// a specific distance is reached.
+func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
+ uncles := []*types.Header{}
+ for i := 0; block != nil && i < length; i++ {
+ uncles = append(uncles, block.Uncles()...)
+ block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
+ }
+ return uncles
+}
+
+// TrieNode retrieves a blob of data associated with a trie node (or code hash)
+// either from ephemeral in-memory cache, or from persistent storage.
+func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
+ return bc.stateCache.TrieDB().Node(hash)
+}
+
+// Stop stops the blockchain service. If any imports are currently in progress
+// it will abort them using the procInterrupt.
+func (bc *BlockChain) Stop() {
+ if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
+ return
+ }
+ // Unsubscribe all subscriptions registered from blockchain
+ bc.scope.Close()
+ close(bc.quit)
+ atomic.StoreInt32(&bc.procInterrupt, 1)
+
+ bc.wg.Wait()
+
+ // Ensure the state of a recent block is also stored to disk before exiting.
+ // We're writing three different states to catch different restart scenarios:
+ // - HEAD: So we don't need to reprocess any blocks in the general case
+ // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle
+ // - HEAD-127: So we have a hard limit on the number of blocks reexecuted
+ if !bc.cacheConfig.TrieDirtyDisabled {
+ triedb := bc.stateCache.TrieDB()
+
+ for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
+ if number := bc.CurrentBlock().NumberU64(); number > offset {
+ log.Info(".....", "number", number, "offset", offset)
+ recent := bc.GetBlockByNumber(number - offset)
+
+ log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
+ if err := triedb.Commit(recent.Root(), true); err != nil {
+ log.Error("Failed to commit recent state trie", "err", err)
+ }
+ }
+ }
+ for !bc.triegc.Empty() {
+ triedb.Dereference(bc.triegc.PopItem().(common.Hash))
+ }
+ if size, _ := triedb.Size(); size != 0 {
+ log.Error("Dangling trie nodes after full cleanup")
+ }
+ }
+ log.Info("Blockchain manager stopped")
+}
+
+func (bc *BlockChain) procFutureBlocks() {
+ blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
+ for _, hash := range bc.futureBlocks.Keys() {
+ if block, exist := bc.futureBlocks.Peek(hash); exist {
+ blocks = append(blocks, block.(*types.Block))
+ }
+ }
+ if len(blocks) > 0 {
+ types.BlockBy(types.Number).Sort(blocks)
+
+ // Insert one by one as chain insertion needs contiguous ancestry between blocks
+ for i := range blocks {
+ bc.InsertChain(blocks[i : i+1])
+ }
+ }
+}
+
+// WriteStatus is the status of a block write operation.
+type WriteStatus byte
+
+const (
+ NonStatTy WriteStatus = iota // block was not written (e.g. an error occurred)
+ CanonStatTy // block was written as part of the canonical chain
+ SideStatTy // block was written to a side chain
+)
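+
+// Callers of WriteBlockWithState (below) typically switch on the returned
+// status, e.g. (sketch):
+//
+//   switch status {
+//   case CanonStatTy:
+//       // the block extended the canonical chain
+//   case SideStatTy:
+//       // the block was written to a side chain
+//   }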
+
+// Rollback is designed to remove a chain of links from the database that aren't
+// certain enough to be valid.
+func (bc *BlockChain) Rollback(chain []common.Hash) {
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
+
+ for i := len(chain) - 1; i >= 0; i-- {
+ hash := chain[i]
+
+ currentHeader := bc.hc.CurrentHeader()
+ if currentHeader.Hash() == hash {
+ bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
+ }
+ if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
+ newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
+ rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
+ bc.currentFastBlock.Store(newFastBlock)
+ headFastBlockGauge.Update(int64(newFastBlock.NumberU64()))
+ }
+ if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
+ newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
+ rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
+ bc.currentBlock.Store(newBlock)
+ headBlockGauge.Update(int64(newBlock.NumberU64()))
+ }
+ }
+ // Truncate ancient data which exceeds the current header.
+ //
+ // Notably, it can happen that the system crashes without truncating the ancient data
+ // but the head indicator has been updated in the active store. In this case, the
+ // system will recover by truncating the extra data during the setup phase.
+ if err := bc.truncateAncient(bc.hc.CurrentHeader().Number.Uint64()); err != nil {
+ log.Crit("Truncate ancient store failed", "err", err)
+ }
+}
+
+// truncateAncient rewinds the blockchain to the specified header and deletes all
+// data in the ancient store that exceeds the specified header.
+func (bc *BlockChain) truncateAncient(head uint64) error {
+ frozen, err := bc.db.Ancients()
+ if err != nil {
+ return err
+ }
+ // Short circuit if there is no data to truncate in ancient store.
+ if frozen <= head+1 {
+ return nil
+ }
+ // Truncate all the data in the freezer beyond the specified head
+ if err := bc.db.TruncateAncients(head + 1); err != nil {
+ return err
+ }
+ // Clear out any stale content from the caches
+ bc.hc.headerCache.Purge()
+ bc.hc.tdCache.Purge()
+ bc.hc.numberCache.Purge()
+
+ // Clear out any stale content from the caches
+ bc.bodyCache.Purge()
+ bc.bodyRLPCache.Purge()
+ bc.receiptsCache.Purge()
+ bc.blockCache.Purge()
+ bc.txLookupCache.Purge()
+ bc.futureBlocks.Purge()
+
+ log.Info("Rewind ancient data", "number", head)
+ return nil
+}
+
+// numberHash is just a container for a number and a hash, to represent a block
+type numberHash struct {
+ number uint64
+ hash common.Hash
+}
+
+// InsertReceiptChain attempts to complete an already existing header chain with
+// transaction and receipt data.
+func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
+ // We don't require the chainMu here since we want to maximize the
+ // concurrency of header insertion and receipt insertion.
+ bc.wg.Add(1)
+ defer bc.wg.Done()
+
+ var (
+ ancientBlocks, liveBlocks types.Blocks
+ ancientReceipts, liveReceipts []types.Receipts
+ )
+ // Do a sanity check that the provided chain is actually ordered and linked
+ for i := 0; i < len(blockChain); i++ {
+ if i != 0 {
+ if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
+ log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
+ "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
+ return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
+ blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
+ }
+ }
+ if blockChain[i].NumberU64() <= ancientLimit {
+ ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i])
+ } else {
+ liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i])
+ }
+ }
+
+ var (
+ stats = struct{ processed, ignored int32 }{}
+ start = time.Now()
+ size = 0
+ )
+ // updateHead updates the head fast sync block if the inserted blocks are better
+ // and returns an indicator whether the inserted blocks are canonical.
+ updateHead := func(head *types.Block) bool {
+ if bc.manualCanonical {
+ return false
+ }
+ bc.chainmu.Lock()
+
+ // Rewind may have occurred, skip in that case.
+ if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
+ currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64())
+ if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
+ rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
+ bc.currentFastBlock.Store(head)
+ headFastBlockGauge.Update(int64(head.NumberU64()))
+ bc.chainmu.Unlock()
+ return true
+ }
+ }
+ bc.chainmu.Unlock()
+ return false
+ }
+ // writeAncient writes blockchain and corresponding receipt chain into ancient store.
+ //
+ // This function only accepts canonical chain data. All side-chain data will be
+ // reverted eventually.
+ writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
+ var (
+ previous = bc.CurrentFastBlock()
+ batch = bc.db.NewBatch()
+ )
+ // If any error occurs before updating the head or we are inserting a side chain,
+ // all the data written this time will be rolled back.
+ defer func() {
+ if previous != nil {
+ if err := bc.truncateAncient(previous.NumberU64()); err != nil {
+ log.Crit("Truncate ancient store failed", "err", err)
+ }
+ }
+ }()
+ var deleted []*numberHash
+ for i, block := range blockChain {
+ // Short circuit insertion if shutting down or processing failed
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ return 0, errInsertionInterrupted
+ }
+ // Short circuit insertion if it is required (used in testing only)
+ if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) {
+ return i, errors.New("insertion is terminated for testing purpose")
+ }
+ // Short circuit if the owner header is unknown
+ if !bc.HasHeader(block.Hash(), block.NumberU64()) {
+ return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
+ }
+ var (
+ start = time.Now()
+ logged = time.Now()
+ count int
+ )
+ // Migrate all ancient blocks. This can happen if someone upgrades from Geth
+ // 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the
+ // long term.
+ for {
+ // We can ignore the error here since light client won't hit this code path.
+ frozen, _ := bc.db.Ancients()
+ if frozen >= block.NumberU64() {
+ break
+ }
+ h := rawdb.ReadCanonicalHash(bc.db, frozen)
+ b := rawdb.ReadBlock(bc.db, h, frozen)
+ size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen))
+ count++
+
+ // Always keep genesis block in active database.
+ if b.NumberU64() != 0 {
+ deleted = append(deleted, &numberHash{b.NumberU64(), b.Hash()})
+ }
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ // Don't collect too much in-memory, write it out every 100K blocks
+ if len(deleted) > 100000 {
+ // Sync the ancient store explicitly to ensure all data has been flushed to disk.
+ if err := bc.db.Sync(); err != nil {
+ return 0, err
+ }
+ // Wipe out canonical block data.
+ for _, nh := range deleted {
+ rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
+ rawdb.DeleteCanonicalHash(batch, nh.number)
+ }
+ if err := batch.Write(); err != nil {
+ return 0, err
+ }
+ batch.Reset()
+ // Wipe out side chain too.
+ for _, nh := range deleted {
+ for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
+ rawdb.DeleteBlock(batch, hash, nh.number)
+ }
+ }
+ if err := batch.Write(); err != nil {
+ return 0, err
+ }
+ batch.Reset()
+ deleted = deleted[:0] // reset the slice, keeping capacity; the entries above were flushed
+ }
+ }
+ if count > 0 {
+ log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+ }
+ // Flush data into ancient database.
+ size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
+ rawdb.WriteTxLookupEntries(batch, block)
+
+ stats.processed++
+ }
+ // Flush all tx-lookup index data.
+ size += batch.ValueSize()
+ if err := batch.Write(); err != nil {
+ return 0, err
+ }
+ batch.Reset()
+
+ // Sync the ancient store explicitly to ensure all data has been flushed to disk.
+ if err := bc.db.Sync(); err != nil {
+ return 0, err
+ }
+ if !updateHead(blockChain[len(blockChain)-1]) {
+ return 0, errors.New("side blocks can't be accepted as the ancient chain data")
+ }
+ previous = nil // disable rollback explicitly
+
+ // Wipe out canonical block data.
+ for _, nh := range deleted {
+ rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
+ rawdb.DeleteCanonicalHash(batch, nh.number)
+ }
+ for _, block := range blockChain {
+ // Always keep genesis block in active database.
+ if block.NumberU64() != 0 {
+ rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
+ rawdb.DeleteCanonicalHash(batch, block.NumberU64())
+ }
+ }
+ if err := batch.Write(); err != nil {
+ return 0, err
+ }
+ batch.Reset()
+
+ // Wipe out side chain too.
+ for _, nh := range deleted {
+ for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
+ rawdb.DeleteBlock(batch, hash, nh.number)
+ }
+ }
+ for _, block := range blockChain {
+ // Always keep genesis block in active database.
+ if block.NumberU64() != 0 {
+ for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
+ rawdb.DeleteBlock(batch, hash, block.NumberU64())
+ }
+ }
+ }
+ if err := batch.Write(); err != nil {
+ return 0, err
+ }
+ return 0, nil
+ }
+ // writeLive writes blockchain and corresponding receipt chain into active store.
+ writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
+ batch := bc.db.NewBatch()
+ for i, block := range blockChain {
+ // Short circuit insertion if shutting down or processing failed
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ return 0, errInsertionInterrupted
+ }
+ // Short circuit if the owner header is unknown
+ if !bc.HasHeader(block.Hash(), block.NumberU64()) {
+ return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
+ }
+ if bc.HasBlock(block.Hash(), block.NumberU64()) {
+ stats.ignored++
+ continue
+ }
+ // Write all the data out into the database
+ rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
+ rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
+ rawdb.WriteTxLookupEntries(batch, block)
+
+ stats.processed++
+ if batch.ValueSize() >= ethdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ return 0, err
+ }
+ size += batch.ValueSize()
+ batch.Reset()
+ }
+ }
+ if batch.ValueSize() > 0 {
+ size += batch.ValueSize()
+ if err := batch.Write(); err != nil {
+ return 0, err
+ }
+ }
+ updateHead(blockChain[len(blockChain)-1])
+ return 0, nil
+ }
+ // Write downloaded chain data and corresponding receipt chain data.
+ if len(ancientBlocks) > 0 {
+ if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
+ if err == errInsertionInterrupted {
+ return 0, nil
+ }
+ return n, err
+ }
+ }
+ if len(liveBlocks) > 0 {
+ if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
+ if err == errInsertionInterrupted {
+ return 0, nil
+ }
+ return n, err
+ }
+ }
+
+ head := blockChain[len(blockChain)-1]
+ context := []interface{}{
+ "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
+ "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
+ "size", common.StorageSize(size),
+ }
+ if stats.ignored > 0 {
+ context = append(context, []interface{}{"ignored", stats.ignored}...)
+ }
+ log.Info("Imported new block receipts", context...)
+
+ return 0, nil
+}
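+
+// Sketch: with ancientLimit = 100, blocks #1..#100 of the provided chain are
+// routed to the ancient store via writeAncient and any later blocks to the
+// active store via writeLive (an illustrative restatement of the split above).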
+
+var lastWrite uint64
+
+// writeBlockWithoutState writes only the block and its metadata to the database,
+// but does not write any state. This is used to construct competing side forks
+// up to the point where they exceed the canonical total difficulty.
+func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) {
+ bc.wg.Add(1)
+ defer bc.wg.Done()
+
+ if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
+ return err
+ }
+ rawdb.WriteBlock(bc.db, block)
+
+ return nil
+}
+
+// writeKnownBlock updates the head block flag with a known block
+// and introduces chain reorg if necessary.
+func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
+ bc.wg.Add(1)
+ defer bc.wg.Done()
+
+ current := bc.CurrentBlock()
+ if block.ParentHash() != current.Hash() {
+ if err := bc.reorg(current, block); err != nil {
+ return err
+ }
+ }
+ // Write the positional metadata for transaction/receipt lookups.
+ // Preimages here is empty, ignore it.
+ rawdb.WriteTxLookupEntries(bc.db, block)
+
+ bc.insert(block)
+ return nil
+}
+
+// WriteBlockWithState writes the block and all associated state to the database.
+func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
+
+ return bc.writeBlockWithState(block, receipts, state)
+}
+
+// writeBlockWithState writes the block and all associated state to the database,
+// but it expects the chain mutex to be held.
+func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
+ bc.wg.Add(1)
+ defer bc.wg.Done()
+
+ // Calculate the total difficulty of the block
+ ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
+ if ptd == nil {
+ return NonStatTy, consensus.ErrUnknownAncestor
+ }
+ // Make sure no inconsistent state is leaked during insertion
+ currentBlock := bc.CurrentBlock()
+ localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
+ externTd := new(big.Int).Add(block.Difficulty(), ptd)
+
+ // Regardless of the canonical status, write the block itself to the database
+ if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
+ return NonStatTy, err
+ }
+ rawdb.WriteBlock(bc.db, block)
+
+ root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
+ if err != nil {
+ return NonStatTy, err
+ }
+ triedb := bc.stateCache.TrieDB()
+
+ // If we're running an archive node, always flush
+ if bc.cacheConfig.TrieDirtyDisabled {
+ if err := triedb.Commit(root, false); err != nil {
+ return NonStatTy, err
+ }
+ } else {
+ // Full but not archive node, do proper garbage collection
+ triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
+ bc.triegc.Push(root, -int64(block.NumberU64()))
+
+ if current := block.NumberU64(); current > TriesInMemory {
+ // If we exceeded our memory allowance, flush matured singleton nodes to disk
+ var (
+ nodes, imgs = triedb.Size()
+ limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
+ )
+ if nodes > limit || imgs > 4*1024*1024 {
+ triedb.Cap(limit - ethdb.IdealBatchSize)
+ }
+ // Find the next state trie we need to commit
+ chosen := current - TriesInMemory
+
+ // If we exceeded our time allowance, flush an entire trie to disk
+ if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
+ // If the header is missing (canonical chain behind), we're reorging a low
+ // diff sidechain. Suspend committing until this operation is completed.
+ header := bc.GetHeaderByNumber(chosen)
+ if header == nil {
+ log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
+ } else {
+ // If we're exceeding limits but haven't reached a large enough memory gap,
+ // warn the user that the system is becoming unstable.
+ if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
+ log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory)
+ }
+ // Flush an entire trie and restart the counters
+ triedb.Commit(header.Root, true)
+ lastWrite = chosen
+ bc.gcproc = 0
+ }
+ }
+ // Garbage collect anything below our required write retention
+ for !bc.triegc.Empty() {
+ root, number := bc.triegc.Pop()
+ if uint64(-number) > chosen {
+ bc.triegc.Push(root, number)
+ break
+ }
+ triedb.Dereference(root.(common.Hash))
+ }
+ }
+ }
+
+ // Write other block data using a batch.
+ batch := bc.db.NewBatch()
+ rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
+
+ // If the total difficulty is higher than our known, add it to the canonical chain
+ // Second clause in the if statement reduces the vulnerability to selfish mining.
+ // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
+ reorg := externTd.Cmp(localTd) > 0
+ currentBlock = bc.CurrentBlock()
+ if !bc.manualCanonical && (!reorg && externTd.Cmp(localTd) == 0) {
+ // Split same-difficulty blocks by number, then preferentially select
+ // the block generated by the local miner as the canonical block.
+ if block.NumberU64() < currentBlock.NumberU64() {
+ reorg = true
+ } else if block.NumberU64() == currentBlock.NumberU64() {
+ var currentPreserve, blockPreserve bool
+ if bc.shouldPreserve != nil {
+ currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
+ }
+ reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
+ }
+ }
+ if !bc.manualCanonical && reorg {
+ // Reorganise the chain if the parent is not the head block
+ if block.ParentHash() != currentBlock.Hash() {
+ if err := bc.reorg(currentBlock, block); err != nil {
+ return NonStatTy, err
+ }
+ }
+ // Write the positional metadata for transaction/receipt lookups and preimages
+ rawdb.WriteTxLookupEntries(batch, block)
+ rawdb.WritePreimages(batch, state.Preimages())
+
+ status = CanonStatTy
+ } else {
+ status = SideStatTy
+ }
+ if err := batch.Write(); err != nil {
+ return NonStatTy, err
+ }
+
+ // Set new head.
+ if status == CanonStatTy {
+ bc.insert(block)
+ }
+ bc.futureBlocks.Remove(block.Hash())
+ return status, nil
+}
+
+// addFutureBlock checks if the block is within the max allowed window to get
+// accepted for future processing, and returns an error if the block is too far
+// ahead and was not added.
+func (bc *BlockChain) addFutureBlock(block *types.Block) error {
+ max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
+ if block.Time() > max {
+ return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
+ }
+ bc.futureBlocks.Add(block.Hash(), block)
+ return nil
+}
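+
+// Illustrative restatement of the rule above: with maxTimeFutureBlocks = 30,
+// a block whose timestamp is more than 30 seconds ahead of local time is
+// rejected (sketch only):
+//
+//   now := uint64(time.Now().Unix())
+//   acceptable := block.Time() <= now+maxTimeFutureBlocks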
+
+// InsertChain attempts to insert the given batch of blocks into the canonical
+// chain or, otherwise, create a fork. If an error is returned, it will return
+// the index number of the failing block as well as an error describing what went
+// wrong.
+//
+// After insertion is done, all accumulated events will be fired.
+func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
+ // Sanity check that we have something meaningful to import
+ if len(chain) == 0 {
+ return 0, nil
+ }
+
+ bc.blockProcFeed.Send(true)
+ defer bc.blockProcFeed.Send(false)
+
+ // Remove already known canon-blocks
+ var (
+ block, prev *types.Block
+ )
+ // Do a sanity check that the provided chain is actually ordered and linked
+ for i := 1; i < len(chain); i++ {
+ block = chain[i]
+ prev = chain[i-1]
+ if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
+ // Chain broke ancestry, log a message (programming error) and skip insertion
+ log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
+ "parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())
+
+ return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
+ prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
+ }
+ }
+ // Pre-checks passed, start the full block imports
+ bc.wg.Add(1)
+ bc.chainmu.Lock()
+ n, events, logs, err := bc.insertChain(chain, true)
+ bc.chainmu.Unlock()
+ bc.wg.Done()
+
+ bc.PostChainEvents(events, logs)
+ return n, err
+}
+
+// insertChain is the internal implementation of InsertChain, which assumes that
+// 1) chains are contiguous, and 2) The chain mutex is held.
+//
+// This method is split out so that import batches that require re-injecting
+// historical blocks can do so without releasing the lock, which could lead to
+// racey behaviour. If a sidechain import is in progress, and the historic state
+// is imported, but then a new canon-head is added before the actual sidechain
+// completes, then the historic state could be pruned again.
+func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
+ // If the chain is terminating, don't even bother starting up
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ return 0, nil, nil, nil
+ }
+ // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
+ senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
+
+ // A queued approach to delivering events. This is generally
+ // faster than direct delivery and requires much less mutex
+ // acquiring.
+ var (
+ stats = insertStats{startTime: mclock.Now()}
+ events = make([]interface{}, 0, len(chain))
+ lastCanon *types.Block
+ coalescedLogs []*types.Log
+ )
+ // Start the parallel header verifier
+ headers := make([]*types.Header, len(chain))
+ seals := make([]bool, len(chain))
+
+ for i, block := range chain {
+ headers[i] = block.Header()
+ seals[i] = verifySeals
+ }
+ abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
+ defer close(abort)
+
+ // Peek the error for the first block to decide the directing import logic
+ it := newInsertIterator(chain, results, bc.validator)
+
+ block, err := it.next()
+
+ // Left-trim all the known blocks
+ if err == ErrKnownBlock {
+ // First block (and state) is known
+ // 1. We did a roll-back, and should now do a re-import
+ // 2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
+ // from the canonical chain, which has not been verified.
+ // Skip all known blocks that are behind us
+ var (
+ current = bc.CurrentBlock()
+ localTd = bc.GetTd(current.Hash(), current.NumberU64())
+ externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
+ )
+ for block != nil && err == ErrKnownBlock {
+ externTd = new(big.Int).Add(externTd, block.Difficulty())
+ if !bc.manualCanonical && localTd.Cmp(externTd) < 0 {
+ break
+ }
+ log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
+ stats.ignored++
+
+ block, err = it.next()
+ }
+ // The remaining blocks are still known blocks, the only scenario here is:
+ // during the fast sync, the pivot point is already submitted but a rollback
+ // happens. The node then resets the head full block to a lower height via `rollback`
+ // and leaves a few known blocks in the database.
+ //
+ // When the node runs a fast sync again, it can re-import a batch of known blocks via
+ // `insertChain` while some of them have a higher total difficulty than the current
+ // head full block (the new pivot point).
+ for block != nil && err == ErrKnownBlock {
+ log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
+ if err := bc.writeKnownBlock(block); err != nil {
+ return it.index, nil, nil, err
+ }
+ lastCanon = block
+
+ block, err = it.next()
+ }
+ // Falls through to the block import
+ }
+ switch {
+ // First block is pruned, insert as sidechain and reorg only if TD grows enough
+ case err == consensus.ErrPrunedAncestor:
+ log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
+ return bc.insertSideChain(block, it)
+
+ // First block is future, shove it (and all children) to the future queue (unknown ancestor)
+ case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
+ for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
+ log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ block, err = it.next()
+ }
+ stats.queued += it.processed()
+ stats.ignored += it.remaining()
+
+ // If there are any still remaining, mark as ignored
+ return it.index, events, coalescedLogs, err
+
+ // Some other error occurred, abort
+ case err != nil:
+ stats.ignored += len(it.chain)
+ bc.reportBlock(block, nil, err)
+ return it.index, events, coalescedLogs, err
+ }
+ // No validation errors for the first block (or chain prefix skipped)
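+ // Note: && binds tighter than ||, so the loop runs while the block verified
+ // cleanly (err == nil) or is already known (err == ErrKnownBlock).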
+ for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
+ // If the chain is terminating, stop processing blocks
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ log.Debug("Premature abort during blocks processing")
+ break
+ }
+ // If the header is a banned one, straight out abort
+ if BadHashes[block.Hash()] {
+ bc.reportBlock(block, nil, ErrBlacklistedHash)
+ return it.index, events, coalescedLogs, ErrBlacklistedHash
+ }
+ // If the block is known (in the middle of the chain), it's a special case for
+ // Clique blocks where they can share state among each other, so importing an
+ // older block might complete the state of the subsequent one. In this case,
+ // just skip the block (we already validated it once fully (and crashed), since
+ // its header and body were already in the database).
+ if err == ErrKnownBlock {
+ logger := log.Debug
+ if bc.chainConfig.Clique == nil {
+ logger = log.Warn
+ }
+ logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
+ "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
+ "root", block.Root())
+
+ if err := bc.writeKnownBlock(block); err != nil {
+ return it.index, nil, nil, err
+ }
+ stats.processed++
+
+ // We can assume that logs are empty here, since the only way for consecutive
+ // Clique blocks to have the same state is if there are no transactions.
+ events = append(events, ChainEvent{block, block.Hash(), nil})
+ lastCanon = block
+
+ continue
+ }
+ // Retrieve the parent block and its state to execute on top
+ start := time.Now()
+
+ parent := it.previous()
+ if parent == nil {
+ parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
+ }
+ statedb, err := state.New(parent.Root, bc.stateCache)
+ if err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ // If we have a followup block, run that against the current state to pre-cache
+ // transactions and probabilistically some of the account/storage trie nodes.
+ var followupInterrupt uint32
+
+ if !bc.cacheConfig.TrieCleanNoPrefetch {
+ if followup, err := it.peek(); followup != nil && err == nil {
+ go func(start time.Time) {
+ throwaway, _ := state.New(parent.Root, bc.stateCache)
+ bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
+
+ blockPrefetchExecuteTimer.Update(time.Since(start))
+ if atomic.LoadUint32(&followupInterrupt) == 1 {
+ blockPrefetchInterruptMeter.Mark(1)
+ }
+ }(time.Now())
+ }
+ }
+ // Process block using the parent state as reference point
+ substart := time.Now()
+ receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
+ if err != nil {
+ bc.reportBlock(block, receipts, err)
+ atomic.StoreUint32(&followupInterrupt, 1)
+ return it.index, events, coalescedLogs, err
+ }
+ // Update the metrics touched during block processing
+ accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them
+ storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them
+ accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them
+ storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them
+
+ triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
+ trieproc := statedb.AccountReads + statedb.AccountUpdates
+ trieproc += statedb.StorageReads + statedb.StorageUpdates
+
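+ // The execution timer excludes trie read/update/hash time, which is already
+ // accounted for by the dedicated timers above.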
+ blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)
+
+ // Validate the state using the default validator
+ substart = time.Now()
+ if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
+ bc.reportBlock(block, receipts, err)
+ atomic.StoreUint32(&followupInterrupt, 1)
+ return it.index, events, coalescedLogs, err
+ }
+ proctime := time.Since(start)
+
+ // Update the metrics touched during block validation
+ accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
+ storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them
+
+ blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))
+
+ // Write the block to the chain and get the status.
+ substart = time.Now()
+ status, err := bc.writeBlockWithState(block, receipts, statedb)
+ if err != nil {
+ atomic.StoreUint32(&followupInterrupt, 1)
+ return it.index, events, coalescedLogs, err
+ }
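+ // This block is fully written; interrupt the speculative prefetch of the
+ // follow-up block, which is about to be processed for real.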
+ atomic.StoreUint32(&followupInterrupt, 1)
+
+ // Update the metrics touched during block commit
+ accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
+ storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
+
+ blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits)
+ blockInsertTimer.UpdateSince(start)
+
+ switch status {
+ case CanonStatTy:
+ log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
+ "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
+ "elapsed", common.PrettyDuration(time.Since(start)),
+ "root", block.Root())
+
+ coalescedLogs = append(coalescedLogs, logs...)
+ events = append(events, ChainEvent{block, block.Hash(), logs})
+ lastCanon = block
+
+ // Only count canonical blocks for GC processing time
+ bc.gcproc += proctime
+
+ case SideStatTy:
+ log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
+ "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
+ "root", block.Root())
+ events = append(events, ChainSideEvent{block})
+
+ default:
+ // This in theory is impossible, but let's be nice to our future selves and leave
+ // a log, instead of trying to track down block imports that don't emit logs.
+ log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
+ "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
+ "root", block.Root())
+ }
+ stats.processed++
+ stats.usedGas += usedGas
+
+ dirty, _ := bc.stateCache.TrieDB().Size()
+ stats.report(chain, it.index, dirty)
+ }
+ // Any blocks remaining here? The only ones we care about are the future ones
+ if block != nil && err == consensus.ErrFutureBlock {
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ block, err = it.next()
+
+ for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ stats.queued++
+ }
+ }
+ stats.ignored += it.remaining()
+
+ // Append a single chain head event if we've progressed the chain
+ if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
+ events = append(events, ChainHeadEvent{lastCanon})
+ }
+ return it.index, events, coalescedLogs, err
+}
+
+// insertSideChain is called when an import batch hits upon a pruned ancestor
+// error, which happens when a sidechain with a sufficiently old fork-block is
+// found.
+//
+// The method writes all (header-and-body-valid) blocks to disk, then tries to
+ // switch over to the new chain if the TD exceeds that of the current chain.
+func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
+ var (
+ externTd *big.Int
+ current = bc.CurrentBlock()
+ )
+ // The first sidechain block error is already verified to be ErrPrunedAncestor.
+ // Since we don't import them here, we expect ErrUnknownAncestor for the remaining
+ // ones. Any other error means that the block is invalid, and should not be written
+ // to disk.
+ err := consensus.ErrPrunedAncestor
+ for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
+ // Check the canonical state root for that number
+ if number := block.NumberU64(); current.NumberU64() >= number {
+ canonical := bc.GetBlockByNumber(number)
+ if canonical != nil && canonical.Hash() == block.Hash() {
+ // Not a sidechain block, this is a re-import of a canon block which has its state pruned
+
+ // Collect the TD of the block. Since we know it's a canon one,
+ // we can get it directly, and not (like further below) use
+ // the parent and then add the block on top
+ externTd = bc.GetTd(block.Hash(), block.NumberU64())
+ continue
+ }
+ if canonical != nil && canonical.Root() == block.Root() {
+ // This is most likely a shadow-state attack. When a fork is imported into the
+ // database, and it eventually reaches a block height which is not pruned, we
+ // just found that the state already exists! This means that the sidechain block
+ // refers to a state which already exists in our canon chain.
+ //
+ // If left unchecked, we would now proceed importing the blocks, without actually
+ // having verified the state of the previous blocks.
+ log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
+
+ // If someone legitimately side-mines blocks, they would still be imported as usual. However,
+ // we cannot risk writing unverified blocks to disk when they obviously target the pruning
+ // mechanism.
+ return it.index, nil, nil, errors.New("sidechain ghost-state attack")
+ }
+ }
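+ // Seed the running total difficulty from the parent on the first pass, then
+ // accumulate each sidechain block's difficulty on top.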
+ if externTd == nil {
+ externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
+ }
+ externTd = new(big.Int).Add(externTd, block.Difficulty())
+
+ if !bc.HasBlock(block.Hash(), block.NumberU64()) {
+ start := time.Now()
+ if err := bc.writeBlockWithoutState(block, externTd); err != nil {
+ return it.index, nil, nil, err
+ }
+ log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
+ "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
+ "root", block.Root())
+ }
+ }
+ // At this point, we've written all sidechain blocks to the database. The loop
+ // ended either on some other error or because all blocks were processed. If
+ // there was some other error, we can ignore the rest of those blocks.
+ //
+ // If the externTd was larger than our local TD, we now need to reimport the previous
+ // blocks to regenerate the required state
+ localTd := bc.GetTd(current.Hash(), current.NumberU64())
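+ // With manualCanonical set, the canonical chain is chosen externally, so a
+ // heavier sidechain is never adopted automatically.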
+ if bc.manualCanonical || localTd.Cmp(externTd) > 0 {
+ log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
+ return it.index, nil, nil, err
+ }
+ // Gather all the sidechain hashes (full blocks may be memory heavy)
+ var (
+ hashes []common.Hash
+ numbers []uint64
+ )
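+ // Walk backwards from the sidechain tip until we find a block whose state is
+ // still present locally; everything above it must be re-executed.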
+ parent := it.previous()
+ for parent != nil && !bc.HasState(parent.Root) {
+ hashes = append(hashes, parent.Hash())
+ numbers = append(numbers, parent.Number.Uint64())
+
+ parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
+ }
+ if parent == nil {
+ return it.index, nil, nil, errors.New("missing parent")
+ }
+ // Import all the pruned blocks to make the state available
+ var (
+ blocks []*types.Block
+ memory common.StorageSize
+ )
+ for i := len(hashes) - 1; i >= 0; i-- {
+ // Append the next block to our batch
+ block := bc.GetBlock(hashes[i], numbers[i])
+
+ blocks = append(blocks, block)
+ memory += block.Size()
+
+ // If memory use grew too large, import and continue. Sadly we need to discard
+ // all raised events and logs from notifications since we're too heavy on the
+ // memory here.
+ if len(blocks) >= 2048 || memory > 64*1024*1024 {
+ log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
+ if _, _, _, err := bc.insertChain(blocks, false); err != nil {
+ return 0, nil, nil, err
+ }
+ blocks, memory = blocks[:0], 0
+
+ // If the chain is terminating, stop processing blocks
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ log.Debug("Premature abort during blocks processing")
+ return 0, nil, nil, nil
+ }
+ }
+ }
+ if len(blocks) > 0 {
+ log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
+ return bc.insertChain(blocks, false)
+ }
+ return 0, nil, nil, nil
+}
+
+ // reorg takes two blocks, an old chain and a new chain, and reconstructs the
+ // blocks, inserting them to be part of the new canonical chain. It also
+ // accumulates potential missing transactions and posts events about them.
+func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
+ var (
+ newChain types.Blocks
+ oldChain types.Blocks
+ commonBlock *types.Block
+
+ deletedTxs types.Transactions
+ addedTxs types.Transactions
+
+ deletedLogs []*types.Log
+ rebirthLogs []*types.Log
+
+ // collectLogs collects the logs that were generated during the
+ // processing of the block that corresponds with the given hash.
+ // These logs are later announced as deleted or reborn
+ collectLogs = func(hash common.Hash, removed bool) {
+ number := bc.hc.GetBlockNumber(hash)
+ if number == nil {
+ return
+ }
+ receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
+ for _, receipt := range receipts {
+ for _, log := range receipt.Logs {
+ l := *log
+ if removed {
+ l.Removed = true
+ deletedLogs = append(deletedLogs, &l)
+ } else {
+ rebirthLogs = append(rebirthLogs, &l)
+ }
+ }
+ }
+ }
+ )
+ // Reduce the longer chain to the same number as the shorter one
+ if oldBlock.NumberU64() > newBlock.NumberU64() {
+ // Old chain is longer, gather all transactions and logs as deleted ones
+ for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
+ oldChain = append(oldChain, oldBlock)
+ deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
+ collectLogs(oldBlock.Hash(), true)
+ }
+ } else {
+ // New chain is longer, stash all blocks away for subsequent insertion
+ for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
+ newChain = append(newChain, newBlock)
+ }
+ }
+ if oldBlock == nil {
+ return fmt.Errorf("invalid old chain")
+ }
+ if newBlock == nil {
+ return fmt.Errorf("invalid new chain")
+ }
+ // Both sides of the reorg are at the same number, reduce both until the common
+ // ancestor is found
+ for {
+ // If the common ancestor was found, bail out
+ if oldBlock.Hash() == newBlock.Hash() {
+ commonBlock = oldBlock
+ break
+ }
+ // Remove an old block as well as stash away a new block
+ oldChain = append(oldChain, oldBlock)
+ deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
+ collectLogs(oldBlock.Hash(), true)
+
+ newChain = append(newChain, newBlock)
+
+ // Step back with both chains
+ oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
+ if oldBlock == nil {
+ return fmt.Errorf("invalid old chain")
+ }
+ newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
+ if newBlock == nil {
+ return fmt.Errorf("invalid new chain")
+ }
+ }
+ // Ensure the user sees large reorgs
+ if len(oldChain) > 0 && len(newChain) > 0 {
+ logFn := log.Info
+ msg := "Chain reorg detected"
+ if len(oldChain) > 63 {
+ msg = "Large chain reorg detected"
+ logFn = log.Warn
+ }
+ logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
+ "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
+ blockReorgAddMeter.Mark(int64(len(newChain)))
+ blockReorgDropMeter.Mark(int64(len(oldChain)))
+ } else {
+ log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
+ }
+ // Insert the new chain, except the head block, in reverse order,
+ // taking care of the proper incremental order.
+ for i := len(newChain) - 1; i >= 1; i-- {
+ // Insert the block in the canonical way, re-writing history
+ bc.insert(newChain[i])
+
+ // Collect reborn logs due to chain reorg
+ collectLogs(newChain[i].Hash(), false)
+
+ // Write lookup entries for hash based transaction/receipt searches
+ rawdb.WriteTxLookupEntries(bc.db, newChain[i])
+ addedTxs = append(addedTxs, newChain[i].Transactions()...)
+ }
+ // When transactions get deleted from the database, the receipts that were
+ // created in the fork must also be deleted
+ batch := bc.db.NewBatch()
+ for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
+ rawdb.DeleteTxLookupEntry(batch, tx.Hash())
+ }
+ // Delete any canonical number assignments above the new head
+ number := bc.CurrentBlock().NumberU64()
+ for i := number + 1; ; i++ {
+ hash := rawdb.ReadCanonicalHash(bc.db, i)
+ if hash == (common.Hash{}) {
+ break
+ }
+ rawdb.DeleteCanonicalHash(batch, i)
+ }
+ batch.Write()
+ // If any logs need to be fired, do it now. In theory we could avoid creating
+ // this goroutine if there are no events to fire, but realistically that only
+ // ever happens if we're reorging empty blocks, which will only happen on idle
+ // networks where performance is not an issue either way.
+ //
+ // TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
+ // event ordering?
+ go func() {
+ if len(deletedLogs) > 0 {
+ bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
+ }
+ if len(rebirthLogs) > 0 {
+ bc.logsFeed.Send(rebirthLogs)
+ }
+ if len(oldChain) > 0 {
+ for _, block := range oldChain {
+ bc.chainSideFeed.Send(ChainSideEvent{Block: block})
+ }
+ }
+ }()
+ return nil
+}
+
+// PostChainEvents iterates over the events generated by a chain insertion and
+// posts them into the event feed.
+// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
+func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
+ // post event logs for further processing
+ if logs != nil {
+ bc.logsFeed.Send(logs)
+ }
+ for _, event := range events {
+ switch ev := event.(type) {
+ case ChainEvent:
+ bc.chainFeed.Send(ev)
+
+ case ChainHeadEvent:
+ bc.chainHeadFeed.Send(ev)
+
+ case ChainSideEvent:
+ bc.chainSideFeed.Send(ev)
+ }
+ }
+}
+
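+ // update periodically drains the future-block queue, retrying the import of
+ // blocks that arrived too early, until the chain is shut down.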
+func (bc *BlockChain) update() {
+ futureTimer := time.NewTicker(5 * time.Second)
+ defer futureTimer.Stop()
+ for {
+ select {
+ case <-futureTimer.C:
+ bc.procFutureBlocks()
+ case <-bc.quit:
+ return
+ }
+ }
+}
+
+// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
+func (bc *BlockChain) BadBlocks() []*types.Block {
+ blocks := make([]*types.Block, 0, bc.badBlocks.Len())
+ for _, hash := range bc.badBlocks.Keys() {
+ if blk, exist := bc.badBlocks.Peek(hash); exist {
+ block := blk.(*types.Block)
+ blocks = append(blocks, block)
+ }
+ }
+ return blocks
+}
+
+// addBadBlock adds a bad block to the bad-block LRU cache
+func (bc *BlockChain) addBadBlock(block *types.Block) {
+ bc.badBlocks.Add(block.Hash(), block)
+}
+
+// reportBlock logs a bad block error.
+func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
+ bc.addBadBlock(block)
+
+ var receiptString string
+ for i, receipt := range receipts {
+ receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
+ i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
+ receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
+ }
+ log.Error(fmt.Sprintf(`
+########## BAD BLOCK #########
+Chain config: %v
+
+Number: %v
+Hash: 0x%x
+%v
+
+Error: %v
+##############################
+`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
+}
+
+ // InsertHeaderChain attempts to insert the given header chain into the local
+ // chain, possibly creating a reorg. If an error is returned, it will return the
+ // index number of the failing header as well as an error describing what went wrong.
+//
+ // The checkFreq parameter can be used to fine-tune whether nonce verification
+ // should be done or not. The reason behind the optional check is that some
+// of the header retrieval mechanisms already need to verify nonces, as well as
+// because nonces can be verified sparsely, not needing to check each.
+func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+ start := time.Now()
+ if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
+ return i, err
+ }
+
+ // Make sure only one thread manipulates the chain at once
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
+
+ bc.wg.Add(1)
+ defer bc.wg.Done()
+
+ whFunc := func(header *types.Header) error {
+ _, err := bc.hc.WriteHeader(header)
+ return err
+ }
+ return bc.hc.InsertHeaderChain(chain, whFunc, start)
+}
+
+// CurrentHeader retrieves the current head header of the canonical chain. The
+// header is retrieved from the HeaderChain's internal cache.
+func (bc *BlockChain) CurrentHeader() *types.Header {
+ return bc.hc.CurrentHeader()
+}
+
+// GetTd retrieves a block's total difficulty in the canonical chain from the
+// database by hash and number, caching it if found.
+func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
+ return bc.hc.GetTd(hash, number)
+}
+
+// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
+// database by hash, caching it if found.
+func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
+ return bc.hc.GetTdByHash(hash)
+}
+
+// GetHeader retrieves a block header from the database by hash and number,
+// caching it if found.
+func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
+ return bc.hc.GetHeader(hash, number)
+}
+
+// GetHeaderByHash retrieves a block header from the database by hash, caching it if
+// found.
+func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
+ return bc.hc.GetHeaderByHash(hash)
+}
+
+// HasHeader checks if a block header is present in the database or not, caching
+// it if present.
+func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
+ return bc.hc.HasHeader(hash, number)
+}
+
+// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
+// hash, fetching towards the genesis block.
+func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
+ return bc.hc.GetBlockHashesFromHash(hash, max)
+}
+
+// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
+// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
+// number of blocks to be individually checked before we reach the canonical chain.
+//
+// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
+func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
+ bc.chainmu.RLock()
+ defer bc.chainmu.RUnlock()
+
+ return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
+}
+
+// GetHeaderByNumber retrieves a block header from the database by number,
+// caching it (associated with its hash) if found.
+func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
+ return bc.hc.GetHeaderByNumber(number)
+}
+
+ // GetTransactionLookup retrieves the lookup associated with the given transaction
+// hash from the cache or database.
+func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
+ // Short circuit if the txlookup is already in the cache, retrieve otherwise
+ if lookup, exist := bc.txLookupCache.Get(hash); exist {
+ return lookup.(*rawdb.LegacyTxLookupEntry)
+ }
+ tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
+ if tx == nil {
+ return nil
+ }
+ lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
+ bc.txLookupCache.Add(hash, lookup)
+ return lookup
+}
+
+// Config retrieves the chain's fork configuration.
+func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
+
+// Engine retrieves the blockchain's consensus engine.
+func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
+
+// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
+func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
+ return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
+}
+
+// SubscribeChainEvent registers a subscription of ChainEvent.
+func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
+ return bc.scope.Track(bc.chainFeed.Subscribe(ch))
+}
+
+// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
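+ // A minimal usage sketch (illustrative; handleHead is a placeholder):
+ //
+ //	ch := make(chan ChainHeadEvent, 10)
+ //	sub := bc.SubscribeChainHeadEvent(ch)
+ //	defer sub.Unsubscribe()
+ //	for ev := range ch {
+ //		handleHead(ev.Block)
+ //	}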
+func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
+ return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
+}
+
+// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
+func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
+ return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
+}
+
+// SubscribeLogsEvent registers a subscription of []*types.Log.
+func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
+ return bc.scope.Track(bc.logsFeed.Subscribe(ch))
+}
+
+// SubscribeBlockProcessingEvent registers a subscription of bool where true means
+// block processing has started while false means it has stopped.
+func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
+ return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
+}
+
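+ // ManualHead forcibly promotes the block with the given hash to canonical
+ // head, bypassing the usual total-difficulty comparison. The block must
+ // already be present in the database.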
+func (bc *BlockChain) ManualHead(hash common.Hash) error {
+ block := bc.GetBlockByHash(hash)
+ if block == nil {
+ return errors.New("block not found")
+ }
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
+ bc.insert(block)
+ return nil
+}