author     Determinant <ted.sybil@gmail.com>  2019-10-21 14:47:51 -0400
committer  Determinant <ted.sybil@gmail.com>  2019-10-21 14:47:51 -0400
commit     79b1169a9ff0b54ddf3b520a70a79c78ba5c988d (patch)
tree       13fc62be3ebf344544547eeb9979450a3c6ecd40
parent     913e9439a7c7883881895ee597a0cc464fb92353 (diff)
customize Blockchain code
-rw-r--r--  core/block_validator.go         139
-rw-r--r--  core/blockchain.go             2235
-rw-r--r--  core/blockchain_insert.go       166
-rw-r--r--  core/blocks.go                   25
-rw-r--r--  core/chain_indexer.go           512
-rw-r--r--  core/error.go                    38
-rw-r--r--  core/events.go                   48
-rw-r--r--  core/evm.go                      97
-rw-r--r--  core/gaspool.go                  54
-rw-r--r--  core/headerchain.go             538
-rw-r--r--  core/state_prefetcher.go         85
-rw-r--r--  core/state_processor.go         129
-rw-r--r--  core/state_transition.go        255
-rw-r--r--  core/tx_cacher.go               105
-rw-r--r--  core/tx_journal.go              180
-rw-r--r--  core/tx_list.go                 520
-rw-r--r--  core/tx_noncer.go                79
-rw-r--r--  core/tx_pool.go                1523
-rw-r--r--  core/types.go                    51
-rw-r--r--  coreth.go                         2
-rw-r--r--  eth/api.go                        2
-rw-r--r--  eth/api_backend.go                5
-rw-r--r--  eth/backend.go                   67
-rw-r--r--  eth/bloombits.go                  2
-rw-r--r--  eth/config.go                     4
-rw-r--r--  eth/enr_entry.go                 61
-rw-r--r--  eth/filters/filter.go             2
-rw-r--r--  eth/filters/filter_system.go      2
-rw-r--r--  eth/gen_config.go                 2
-rw-r--r--  eth/handler.go                    2
-rw-r--r--  eth/protocol.go                   2
-rw-r--r--  examples/chain/main.go            1
-rw-r--r--  examples/counter/main.go          1
-rw-r--r--  examples/payments/main.go        25
-rw-r--r--  internal/ethapi/backend.go        5
-rw-r--r--  miner/miner.go                    2
-rw-r--r--  miner/worker.go                   2
37 files changed, 6843 insertions, 125 deletions
diff --git a/core/block_validator.go b/core/block_validator.go
new file mode 100644
index 0000000..ae6cd4d
--- /dev/null
+++ b/core/block_validator.go
@@ -0,0 +1,139 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "fmt"
+
+ "github.com/ava-labs/go-ethereum/consensus"
+ "github.com/ava-labs/go-ethereum/core/state"
+ "github.com/ava-labs/go-ethereum/core/types"
+ "github.com/ava-labs/go-ethereum/params"
+)
+
+// BlockValidator is responsible for validating block headers, uncles and
+// processed state.
+//
+// BlockValidator implements Validator.
+type BlockValidator struct {
+ config *params.ChainConfig // Chain configuration options
+ bc *BlockChain // Canonical block chain
+ engine consensus.Engine // Consensus engine used for validating
+}
+
+// NewBlockValidator returns a new block validator which is safe for re-use
+func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engine consensus.Engine) *BlockValidator {
+ validator := &BlockValidator{
+ config: config,
+ engine: engine,
+ bc: blockchain,
+ }
+ return validator
+}
+
+// ValidateBody validates the given block's uncles and verifies the block
+// header's transaction and uncle roots. The headers are assumed to be already
+// validated at this point.
+func (v *BlockValidator) ValidateBody(block *types.Block) error {
+ // Check whether the block's known, and if not, that it's linkable
+ if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
+ return ErrKnownBlock
+ }
+ // Header validity is known at this point, check the uncles and transactions
+ header := block.Header()
+ if err := v.engine.VerifyUncles(v.bc, block); err != nil {
+ return err
+ }
+ if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
+ return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash)
+ }
+ if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {
+ return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
+ }
+ if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
+ if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
+ return consensus.ErrUnknownAncestor
+ }
+ return consensus.ErrPrunedAncestor
+ }
+ return nil
+}
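
As a minimal sketch (not part of this diff) of how the stateless half of the two-stage validation is driven, the fragment below calls ValidateBody through the chain's Validator() accessor defined later in this commit; the example package name and the coreth module import path are assumptions.

package example // illustrative sketch only

import (
    "github.com/ava-labs/go-ethereum/consensus"
    "github.com/ava-labs/go-ethereum/core/types"

    "github.com/ava-labs/coreth/core" // assumed module path for this fork's core package
)

// checkBody runs the stateless body check that precedes transaction execution.
func checkBody(bc *core.BlockChain, block *types.Block) error {
    err := bc.Validator().ValidateBody(block)
    switch err {
    case nil, core.ErrKnownBlock:
        return nil // valid, or already imported with state present
    case consensus.ErrUnknownAncestor, consensus.ErrPrunedAncestor:
        return err // parent missing or pruned; ancestors must be fetched first
    default:
        return err // invalid uncles, or uncle/tx root mismatch
    }
}
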
+
+// ValidateState validates the various changes that happen after a state
+// transition, such as the amount of used gas, the receipt roots and the state
+// root itself. It returns nil if the validation succeeds, otherwise it returns
+// the error that caused the failure.
+func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64) error {
+ header := block.Header()
+ if block.GasUsed() != usedGas {
+ return fmt.Errorf("invalid gas used (remote: %d local: %d)", block.GasUsed(), usedGas)
+ }
+ // Validate the received block's bloom with the one derived from the generated receipts.
+ // For valid blocks this should always validate to true.
+ rbloom := types.CreateBloom(receipts)
+ if rbloom != header.Bloom {
+ return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom)
+ }
+ // The receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, Rn]]))
+ receiptSha := types.DeriveSha(receipts)
+ if receiptSha != header.ReceiptHash {
+ return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
+ }
+ // Validate the state root against the received state root and throw
+ // an error if they don't match.
+ if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
+ return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root)
+ }
+ return nil
+}
+
+// CalcGasLimit computes the gas limit of the next block after parent. It aims
+// to keep the baseline gas above the provided floor, and increase it towards the
+// ceil if the blocks are full. If the ceil is exceeded, it will always decrease
+// the gas allowance.
+func CalcGasLimit(parent *types.Block, gasFloor, gasCeil uint64) uint64 {
+ // contrib = (parentGasUsed * 3 / 2) / 1024
+ contrib := (parent.GasUsed() + parent.GasUsed()/2) / params.GasLimitBoundDivisor
+
+ // decay = parentGasLimit / 1024 - 1
+ decay := parent.GasLimit()/params.GasLimitBoundDivisor - 1
+
+ /*
+ strategy: gasLimit of block-to-mine is set based on parent's
+ gasUsed value. if parentGasUsed > parentGasLimit * (2/3) then we
+ increase it, otherwise lower it (or leave it unchanged if it's right
+ at that usage). The amount increased/decreased depends on how far away
+ from parentGasLimit * (2/3) parentGasUsed is.
+ */
+ limit := parent.GasLimit() - decay + contrib
+ if limit < params.MinGasLimit {
+ limit = params.MinGasLimit
+ }
+ // If we're outside our allowed gas range, we try to hone towards them
+ if limit < gasFloor {
+ limit = parent.GasLimit() + decay
+ if limit > gasFloor {
+ limit = gasFloor
+ }
+ } else if limit > gasCeil {
+ limit = parent.GasLimit() - decay
+ if limit < gasCeil {
+ limit = gasCeil
+ }
+ }
+ return limit
+}
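
To make the adjustment arithmetic concrete, here is a small self-contained sketch that applies the same contrib/decay formula to plain integers; the constants mirror params.GasLimitBoundDivisor (1024) and params.MinGasLimit (5000) from upstream go-ethereum, and the sample numbers are only illustrative.

package main

import "fmt"

const (
    gasLimitBoundDivisor = 1024 // mirrors params.GasLimitBoundDivisor
    minGasLimit          = 5000 // mirrors params.MinGasLimit
)

// nextGasLimit reimplements the core of CalcGasLimit on plain integers so the
// direction of the adjustment is easy to see.
func nextGasLimit(parentGasLimit, parentGasUsed, gasFloor, gasCeil uint64) uint64 {
    contrib := (parentGasUsed + parentGasUsed/2) / gasLimitBoundDivisor // (used * 3/2) / 1024
    decay := parentGasLimit/gasLimitBoundDivisor - 1                    // limit/1024 - 1

    limit := parentGasLimit - decay + contrib
    if limit < minGasLimit {
        limit = minGasLimit
    }
    if limit < gasFloor {
        limit = parentGasLimit + decay
        if limit > gasFloor {
            limit = gasFloor
        }
    } else if limit > gasCeil {
        limit = parentGasLimit - decay
        if limit < gasCeil {
            limit = gasCeil
        }
    }
    return limit
}

func main() {
    // A full parent block (used == limit) pushes the limit up ...
    fmt.Println(nextGasLimit(10_000_000, 10_000_000, 8_000_000, 12_000_000)) // > 10_000_000
    // ... while an empty parent lets it decay toward the floor.
    fmt.Println(nextGasLimit(10_000_000, 0, 8_000_000, 12_000_000)) // < 10_000_000
}
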
diff --git a/core/blockchain.go b/core/blockchain.go
new file mode 100644
index 0000000..174d403
--- /dev/null
+++ b/core/blockchain.go
@@ -0,0 +1,2235 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package core implements the Ethereum consensus protocol.
+package core
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ mrand "math/rand"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ava-labs/go-ethereum/common"
+ "github.com/ava-labs/go-ethereum/common/mclock"
+ "github.com/ava-labs/go-ethereum/common/prque"
+ "github.com/ava-labs/go-ethereum/consensus"
+ "github.com/ava-labs/go-ethereum/core/rawdb"
+ "github.com/ava-labs/go-ethereum/core/state"
+ "github.com/ava-labs/go-ethereum/core/types"
+ "github.com/ava-labs/go-ethereum/core/vm"
+ "github.com/ava-labs/go-ethereum/ethdb"
+ "github.com/ava-labs/go-ethereum/event"
+ "github.com/ava-labs/go-ethereum/log"
+ "github.com/ava-labs/go-ethereum/metrics"
+ "github.com/ava-labs/go-ethereum/params"
+ "github.com/ava-labs/go-ethereum/rlp"
+ "github.com/ava-labs/go-ethereum/trie"
+ "github.com/hashicorp/golang-lru"
+)
+
+var (
+ headBlockGauge = metrics.NewRegisteredGauge("chain/head/block", nil)
+ headHeaderGauge = metrics.NewRegisteredGauge("chain/head/header", nil)
+ headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)
+
+ accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil)
+ accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil)
+ accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
+ accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)
+
+ storageReadTimer = metrics.NewRegisteredTimer("chain/storage/reads", nil)
+ storageHashTimer = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
+ storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
+ storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
+
+ blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+ blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
+ blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
+ blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
+ blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
+ blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
+
+ blockPrefetchExecuteTimer = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
+ blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
+
+ errInsertionInterrupted = errors.New("insertion is interrupted")
+)
+
+const (
+ bodyCacheLimit = 256
+ blockCacheLimit = 256
+ receiptsCacheLimit = 32
+ txLookupCacheLimit = 1024
+ maxFutureBlocks = 256
+ maxTimeFutureBlocks = 30
+ badBlockLimit = 10
+ TriesInMemory = 128
+
+ // BlockChainVersion ensures that an incompatible database forces a resync from scratch.
+ //
+ // Changelog:
+ //
+ // - Version 4
+ // The following incompatible database changes were added:
+ // * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
+ // * the `Bloom` field of receipt is deleted
+ // * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
+ // - Version 5
+ // The following incompatible database changes were added:
+ // * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
+ // * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
+ // receipts' corresponding block
+ // - Version 6
+ // The following incompatible database changes were added:
+ // * Transaction lookup information stores the corresponding block number instead of block hash
+ // - Version 7
+ // The following incompatible database changes were added:
+ // * Use freezer as the ancient database to maintain all ancient data
+ BlockChainVersion uint64 = 7
+)
+
+// CacheConfig contains the configuration values for the trie caching/pruning
+// that's resident in a blockchain.
+type CacheConfig struct {
+ TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
+ TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks
+ TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
+ TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node)
+ TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
+}
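
For illustration, the values below show two CacheConfig instances one might hand to NewBlockChain further down: the first mirrors the defaults applied when a nil config is passed, the second disables trie write caching and GC entirely for archive-style behaviour. The example package name and the coreth module import path are assumptions, not part of this commit.

package example // illustrative sketch only

import (
    "time"

    "github.com/ava-labs/coreth/core" // assumed module path for this fork's core package
)

var (
    // Mirrors the defaults that NewBlockChain applies when cacheConfig is nil.
    defaultCacheConfig = &core.CacheConfig{
        TrieCleanLimit: 256,             // MB of clean trie-node cache
        TrieDirtyLimit: 256,             // MB of dirty trie nodes buffered before flushing
        TrieTimeLimit:  5 * time.Minute, // maximum age of the in-memory trie before a flush
    }

    // Archive-style configuration: disable trie write caching/GC so every
    // state root remains available on disk.
    archiveCacheConfig = &core.CacheConfig{
        TrieCleanLimit:    256,
        TrieDirtyDisabled: true,
    }
)
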
+
+// BlockChain represents the canonical chain given a database with a genesis
+// block. The BlockChain manages chain imports, reverts and chain reorganisations.
+//
+// Importing blocks into the block chain happens according to the set of rules
+// defined by the two-stage Validator. Processing of blocks is done using the
+// Processor, which processes the included transactions. The validation of the
+// state is done in the second part of the Validator. Failing either stage
+// aborts the import.
+//
+// The BlockChain also returns blocks from **any** chain included in the
+// database as well as blocks that represent the canonical chain. It's
+// important to note that GetBlock can return any block, which need not be part
+// of the canonical chain, whereas GetBlockByNumber always refers to the
+// canonical chain.
+type BlockChain struct {
+ chainConfig *params.ChainConfig // Chain & network configuration
+ cacheConfig *CacheConfig // Cache configuration for pruning
+
+ db ethdb.Database // Low level persistent database to store final content in
+ triegc *prque.Prque // Priority queue mapping block numbers to tries to gc
+ gcproc time.Duration // Accumulates canonical block processing for trie dumping
+
+ hc *HeaderChain
+ rmLogsFeed event.Feed
+ chainFeed event.Feed
+ chainSideFeed event.Feed
+ chainHeadFeed event.Feed
+ logsFeed event.Feed
+ blockProcFeed event.Feed
+ scope event.SubscriptionScope
+ genesisBlock *types.Block
+
+ chainmu sync.RWMutex // blockchain insertion lock
+
+ currentBlock atomic.Value // Current head of the block chain
+ currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
+
+ stateCache state.Database // State database to reuse between imports (contains state cache)
+ bodyCache *lru.Cache // Cache for the most recent block bodies
+ bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
+ receiptsCache *lru.Cache // Cache for the most recent receipts per block
+ blockCache *lru.Cache // Cache for the most recent entire blocks
+ txLookupCache *lru.Cache // Cache for the most recent transaction lookup data.
+ futureBlocks *lru.Cache // future blocks are blocks added for later processing
+
+ quit chan struct{} // blockchain quit channel
+ running int32 // running must be called atomically
+ // procInterrupt must be atomically called
+ procInterrupt int32 // interrupt signaler for block processing
+ wg sync.WaitGroup // chain processing wait group for shutting down
+
+ engine consensus.Engine
+ validator Validator // Block and state validator interface
+ prefetcher Prefetcher // Block state prefetcher interface
+ processor Processor // Block transaction processor interface
+ vmConfig vm.Config
+
+ badBlocks *lru.Cache // Bad block cache
+ shouldPreserve func(*types.Block) bool // Function used to determine whether to preserve the given block.
+ terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
+ manualCanonical bool
+}
+
+// NewBlockChain returns a fully initialised block chain using information
+// available in the database. It initialises the default Ethereum Validator and
+// Processor.
+func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, manualCanonical bool) (*BlockChain, error) {
+ if cacheConfig == nil {
+ cacheConfig = &CacheConfig{
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
+ }
+ }
+ bodyCache, _ := lru.New(bodyCacheLimit)
+ bodyRLPCache, _ := lru.New(bodyCacheLimit)
+ receiptsCache, _ := lru.New(receiptsCacheLimit)
+ blockCache, _ := lru.New(blockCacheLimit)
+ txLookupCache, _ := lru.New(txLookupCacheLimit)
+ futureBlocks, _ := lru.New(maxFutureBlocks)
+ badBlocks, _ := lru.New(badBlockLimit)
+
+ bc := &BlockChain{
+ chainConfig: chainConfig,
+ cacheConfig: cacheConfig,
+ db: db,
+ triegc: prque.New(nil),
+ stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
+ quit: make(chan struct{}),
+ shouldPreserve: shouldPreserve,
+ bodyCache: bodyCache,
+ bodyRLPCache: bodyRLPCache,
+ receiptsCache: receiptsCache,
+ blockCache: blockCache,
+ txLookupCache: txLookupCache,
+ futureBlocks: futureBlocks,
+ engine: engine,
+ vmConfig: vmConfig,
+ badBlocks: badBlocks,
+ manualCanonical: manualCanonical,
+ }
+ bc.validator = NewBlockValidator(chainConfig, bc, engine)
+ bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
+ bc.processor = NewStateProcessor(chainConfig, bc, engine)
+
+ var err error
+ bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
+ if err != nil {
+ return nil, err
+ }
+ bc.genesisBlock = bc.GetBlockByNumber(0)
+ if bc.genesisBlock == nil {
+ return nil, ErrNoGenesis
+ }
+ // Initialize the chain with ancient data if it isn't empty.
+ if bc.empty() {
+ rawdb.InitDatabaseFromFreezer(bc.db)
+ }
+ if err := bc.loadLastState(); err != nil {
+ return nil, err
+ }
+ // The first thing the node will do is reconstruct the verification data for
+ // the head block (ethash cache or clique voting snapshot). Might as well do
+ // it in advance.
+ bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)
+
+ if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
+ var (
+ needRewind bool
+ low uint64
+ )
+ // The head full block may be rolled back to a very low height due to
+ // blockchain repair. If the head full block is even lower than the ancient
+ // chain, truncate the ancient store.
+ fullBlock := bc.CurrentBlock()
+ if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 {
+ needRewind = true
+ low = fullBlock.NumberU64()
+ }
+ // In fast sync, it may happen that ancient data has been written to the
+ // ancient store, but the LastFastBlock has not been updated, truncate the
+ // extra data here.
+ fastBlock := bc.CurrentFastBlock()
+ if fastBlock != nil && fastBlock.NumberU64() < frozen-1 {
+ needRewind = true
+ if fastBlock.NumberU64() < low || low == 0 {
+ low = fastBlock.NumberU64()
+ }
+ }
+ if needRewind {
+ var hashes []common.Hash
+ previous := bc.CurrentHeader().Number.Uint64()
+ for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ {
+ hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i))
+ }
+ bc.Rollback(hashes)
+ log.Warn("Truncate ancient chain", "from", previous, "to", low)
+ }
+ }
+ // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
+ for hash := range BadHashes {
+ if header := bc.GetHeaderByHash(hash); header != nil {
+ // get the canonical block corresponding to the offending header's number
+ headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
+ // make sure the headerByNumber (if present) is in our current canonical chain
+ if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
+ log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
+ bc.SetHead(header.Number.Uint64() - 1)
+ log.Error("Chain rewind was successful, resuming normal operation")
+ }
+ }
+ }
+ // Take ownership of this particular state
+ go bc.update()
+ return bc, nil
+}
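
A hedged usage sketch of the constructor above: it wires an in-memory database, a fake ethash engine and the test chain config into NewBlockChain. The coreth module path is assumed, and the database must already contain a genesis block (committed via the package's genesis helpers) or the call fails with ErrNoGenesis.

package main // illustrative usage sketch only

import (
    "fmt"

    "github.com/ava-labs/go-ethereum/consensus/ethash"
    "github.com/ava-labs/go-ethereum/core/rawdb"
    "github.com/ava-labs/go-ethereum/core/vm"
    "github.com/ava-labs/go-ethereum/params"

    "github.com/ava-labs/coreth/core" // assumed module path for this fork's core package
)

func main() {
    db := rawdb.NewMemoryDatabase()
    // NewBlockChain requires a genesis block in the database; commit one first
    // or the constructor returns ErrNoGenesis.
    chain, err := core.NewBlockChain(
        db,
        nil,                    // nil cacheConfig -> 256MB caches, 5 minute flush interval
        params.TestChainConfig, // chain rules
        ethash.NewFaker(),      // any consensus.Engine will do; a fake PoW engine here
        vm.Config{},            // default EVM configuration
        nil,                    // optional shouldPreserve callback
        true,                   // manualCanonical: the flag added by this customization
    )
    if err != nil {
        panic(err)
    }
    fmt.Println("head:", chain.CurrentBlock().Number())
}
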
+
+func (bc *BlockChain) getProcInterrupt() bool {
+ return atomic.LoadInt32(&bc.procInterrupt) == 1
+}
+
+// GetVMConfig returns the block chain VM config.
+func (bc *BlockChain) GetVMConfig() *vm.Config {
+ return &bc.vmConfig
+}
+
+// empty returns whether the blockchain is empty.
+// Note, it's a special case that we connect a non-empty ancient
+// database with an empty node, so that we can plug the ancient data
+// into the node seamlessly.
+func (bc *BlockChain) empty() bool {
+ genesis := bc.genesisBlock.Hash()
+ for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
+ if hash != genesis {
+ return false
+ }
+ }
+ return true
+}
+
+// loadLastState loads the last known chain state from the database. This method
+// assumes that the chain manager mutex is held.
+func (bc *BlockChain) loadLastState() error {
+ // Restore the last known head block
+ head := rawdb.ReadHeadBlockHash(bc.db)
+ if head == (common.Hash{}) {
+ // Corrupt or empty database, init from scratch
+ log.Warn("Empty database, resetting chain")
+ return bc.Reset()
+ }
+ // Make sure the entire head block is available
+ currentBlock := bc.GetBlockByHash(head)
+ if currentBlock == nil {
+ // Corrupt or empty database, init from scratch
+ log.Warn("Head block missing, resetting chain", "hash", head)
+ return bc.Reset()
+ }
+ // Make sure the state associated with the block is available
+ if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
+ // Dangling block without a state associated, init from scratch
+ log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
+ if err := bc.repair(&currentBlock); err != nil {
+ return err
+ }
+ rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
+ }
+ // Everything seems to be fine, set as the head block
+ bc.currentBlock.Store(currentBlock)
+ headBlockGauge.Update(int64(currentBlock.NumberU64()))
+
+ // Restore the last known head header
+ currentHeader := currentBlock.Header()
+ if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
+ if header := bc.GetHeaderByHash(head); header != nil {
+ currentHeader = header
+ }
+ }
+ bc.hc.SetCurrentHeader(currentHeader)
+
+ // Restore the last known head fast block
+ bc.currentFastBlock.Store(currentBlock)
+ headFastBlockGauge.Update(int64(currentBlock.NumberU64()))
+
+ if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
+ if block := bc.GetBlockByHash(head); block != nil {
+ bc.currentFastBlock.Store(block)
+ headFastBlockGauge.Update(int64(block.NumberU64()))
+ }
+ }
+ // Issue a status log for the user
+ currentFastBlock := bc.CurrentFastBlock()
+
+ headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
+ blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
+ fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
+
+ log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
+ log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
+ log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
+
+ return nil
+}
+
+// SetHead rewinds the local chain to a new head. In the case of headers, everything
+// above the new head will be deleted and the new one set. In the case of blocks
+// though, the head may be further rewound if block bodies are missing (non-archive
+// nodes after a fast sync).
+func (bc *BlockChain) SetHead(head uint64) error {
+ log.Warn("Rewinding blockchain", "target", head)
+
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
+
+ updateFn := func(db ethdb.KeyValueWriter, header *types.Header) {
+ // Rewind the block chain, ensuring we don't end up with a stateless head block
+ if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() {
+ newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
+ if newHeadBlock == nil {
+ newHeadBlock = bc.genesisBlock
+ } else {
+ if _, err := state.New(newHeadBlock.Root(), bc.stateCache); err != nil {
+ // Rewound state missing, rolled back to before pivot, reset to genesis
+ newHeadBlock = bc.genesisBlock
+ }
+ }
+ rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
+ bc.currentBlock.Store(newHeadBlock)
+ headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
+ }
+
+ // Rewind the fast block in a simpleton way to the target head
+ if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
+ newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
+ // If the rewound block reached nil, reset to the genesis state
+ if newHeadFastBlock == nil {
+ newHeadFastBlock = bc.genesisBlock
+ }
+ rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())
+ bc.currentFastBlock.Store(newHeadFastBlock)
+ headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
+ }
+ }
+
+ // Rewind the header chain, deleting all block bodies until then
+ delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
+ // Ignore the error here since light client won't hit this path
+ frozen, _ := bc.db.Ancients()
+ if num+1 <= frozen {
+ // Truncate all related data (header, total difficulty, body, receipt
+ // and canonical hash) from ancient store.
+ if err := bc.db.TruncateAncients(num + 1); err != nil {
+ log.Crit("Failed to truncate ancient data", "number", num, "err", err)
+ }
+
+ // Remove the hash <-> number mapping from the active store.
+ rawdb.DeleteHeaderNumber(db, hash)
+ } else {
+ // Remove related body and receipts from the active store.
+ // The header, total difficulty and canonical hash will be
+ // removed in the hc.SetHead function.
+ rawdb.DeleteBody(db, hash, num)
+ rawdb.DeleteReceipts(db, hash, num)
+ }
+ // Todo(rjl493456442) txlookup, bloombits, etc
+ }
+ bc.hc.SetHead(head, updateFn, delFn)
+
+ // Clear out any stale content from the caches
+ bc.bodyCache.Purge()
+ bc.bodyRLPCache.Purge()
+ bc.receiptsCache.Purge()
+ bc.blockCache.Purge()
+ bc.txLookupCache.Purge()
+ bc.futureBlocks.Purge()
+
+ return bc.loadLastState()
+}
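
For completeness, a tiny sketch of invoking the rewind from caller code (package name and module path assumed); as noted above, the resulting head may settle below the requested height when bodies or state are missing there.

package example // illustrative sketch only

import "github.com/ava-labs/coreth/core" // assumed module path for this fork's core package

// rewindTo drops all headers above height and reloads the last valid state.
// On fast-synced or non-archive nodes the head block can end up lower than
// requested if bodies or state are missing at that height.
func rewindTo(chain *core.BlockChain, height uint64) error {
    return chain.SetHead(height)
}
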
+
+// FastSyncCommitHead sets the current head block to the one defined by the hash,
+// irrespective of what the chain contents were prior.
+func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
+ // Make sure that both the block as well as its state trie exist
+ block := bc.GetBlockByHash(hash)
+ if block == nil {
+ return fmt.Errorf("non existent block [%x…]", hash[:4])
+ }
+ if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
+ return err
+ }
+ // If all checks out, manually set the head block
+ bc.chainmu.Lock()
+ bc.currentBlock.Store(block)
+ headBlockGauge.Update(int64(block.NumberU64()))
+ bc.chainmu.Unlock()
+
+ log.Info("Committed new head block", "number", block.Number(), "hash", hash)
+ return nil
+}
+
+// GasLimit returns the gas limit of the current HEAD block.
+func (bc *BlockChain) GasLimit() uint64 {
+ return bc.CurrentBlock().GasLimit()
+}
+
+// CurrentBlock retrieves the current head block of the canonical chain. The
+// block is retrieved from the blockchain's internal cache.
+func (bc *BlockChain) CurrentBlock() *types.Block {
+ return bc.currentBlock.Load().(*types.Block)
+}
+
+// CurrentFastBlock retrieves the current fast-sync head block of the canonical
+// chain. The block is retrieved from the blockchain's internal cache.
+func (bc *BlockChain) CurrentFastBlock() *types.Block {
+ return bc.currentFastBlock.Load().(*types.Block)
+}
+
+// Validator returns the current validator.
+func (bc *BlockChain) Validator() Validator {
+ return bc.validator
+}
+
+// Processor returns the current processor.
+func (bc *BlockChain) Processor() Processor {
+ return bc.processor
+}
+
+// State returns a new mutable state based on the current HEAD block.
+func (bc *BlockChain) State() (*state.StateDB, error) {
+ return bc.StateAt(bc.CurrentBlock().Root())
+}
+
+// StateAt returns a new mutable state based on a particular point in time.
+func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
+ return state.New(root, bc.stateCache)
+}
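
A short sketch of reading account state through the accessors above; GetBalance is the standard go-ethereum state.StateDB method, while the package name, helper function and coreth module path are illustrative assumptions.

package example // illustrative sketch only

import (
    "fmt"

    "github.com/ava-labs/go-ethereum/common"

    "github.com/ava-labs/coreth/core" // assumed module path for this fork's core package
)

// printBalances reads a balance at the current head and, if still available,
// at an older state root.
func printBalances(chain *core.BlockChain, addr common.Address, oldRoot common.Hash) error {
    headState, err := chain.State()
    if err != nil {
        return err
    }
    fmt.Println("head balance:", headState.GetBalance(addr))

    // Historical roots are only readable while their tries have not been
    // pruned (see TriesInMemory and the CacheConfig limits above).
    if oldState, err := chain.StateAt(oldRoot); err == nil {
        fmt.Println("old balance:", oldState.GetBalance(addr))
    }
    return nil
}
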
+
+// StateCache returns the caching database underpinning the blockchain instance.
+f