author     Determinant <tederminant@gmail.com>  2020-09-16 19:00:25 -0400
committer  Determinant <tederminant@gmail.com>  2020-09-16 19:00:25 -0400
commit     b1ac5e6ce73c37378e575d6291e3c5f1ee170430 (patch)
tree       842889ed69fcf55a3dd3a0226e44ad54025d6a79
parent     8478802ddacc027a8d8c866da9365f6739d9d9d4 (diff)
build success
-rw-r--r--  consensus/clique/clique.go                2
-rw-r--r--  consensus/dummy/consensus.go             31
-rw-r--r--  consensus/ethash/consensus.go             2
-rw-r--r--  core/blockchain.go                        2
-rw-r--r--  core/forkid/forkid.go                   258
-rw-r--r--  core/rawdb/accessors_state.go            96
-rw-r--r--  core/rawdb/chain_iterator.go            304
-rw-r--r--  core/state/snapshot/difflayer_test.go   400
-rw-r--r--  core/state/snapshot/disklayer_test.go   511
-rw-r--r--  core/state/snapshot/iterator_test.go   1046
-rw-r--r--  core/state/snapshot/snapshot_test.go    371
-rw-r--r--  core/state/snapshot/wipe_test.go        124
-rw-r--r--  core/vm/instructions.go                  14
-rw-r--r--  coreth.go                                19
-rw-r--r--  eth/api.go                                2
-rw-r--r--  eth/backend.go                           10
-rw-r--r--  eth/protocol.go                           1
-rw-r--r--  internal/ethapi/api.go                    1
-rw-r--r--  internal/ethapi/backend.go                1
-rw-r--r--  miner/miner.go                            3
-rw-r--r--  node/api.go                               1
-rw-r--r--  node/node.go                             34
-rw-r--r--  rpc/metrics.go                           39
23 files changed, 777 insertions, 2495 deletions
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index d239aca..b27525b 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -566,7 +566,7 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
header.UncleHash = types.CalcUncleHash(nil)
// Assemble and return the final block for sealing
- return types.NewBlock(header, txs, nil, receipts, new(trie.Trie)), nil
+ return types.NewBlock(header, txs, nil, receipts, new(trie.Trie), nil), nil
}
// Authorize injects a private key into the consensus engine to mint new blocks
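
This one-argument change recurs across every consensus engine in this commit: coreth's types.NewBlock takes a trailing extra-data payload, for which the upstream engines pass nil. A hedged sketch of the extended call, with a hypothetical payload (the signature is inferred from the call sites in this diff, not from the constructor's definition):

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ava-labs/coreth/core/types"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1)}
        extdata := []byte("chain-specific payload") // hypothetical extra-data blob

        // Upstream engines (clique, ethash) pass nil extdata; the dummy engine
        // forwards whatever its OnFinalizeAndAssemble callback produced.
        block := types.NewBlock(header, nil, nil, nil, new(trie.Trie), extdata)
        fmt.Println("assembled block:", block.Hash().Hex())
    }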
diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go
index 2108684..da63673 100644
--- a/consensus/dummy/consensus.go
+++ b/consensus/dummy/consensus.go
@@ -13,14 +13,15 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/rpc"
+ mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
- mapset "github.com/deckarep/golang-set"
+ "github.com/ethereum/go-ethereum/trie"
)
-type OnFinalizeCallbackType = func(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header)
+type OnFinalizeCallbackType = func(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header)
type OnFinalizeAndAssembleCallbackType = func(state *state.StateDB, txs []*types.Transaction) ([]byte, error)
-type OnAPIsCallbackType = func(consensus.ChainReader) []rpc.API
+type OnAPIsCallbackType = func(consensus.ChainHeaderReader) []rpc.API
type OnExtraStateChangeType = func(block *types.Block, statedb *state.StateDB) error
type ConsensusCallbacks struct {
@@ -55,7 +56,7 @@ var (
)
// modified from consensus.go
-func (self *DummyEngine) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
+func (self *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool) error {
// Ensure that the header's extra-data section is of a reasonable size
if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
@@ -103,7 +104,7 @@ func (self *DummyEngine) verifyHeader(chain consensus.ChainReader, header, paren
return nil
}
-func (self *DummyEngine) verifyHeaderWorker(chain consensus.ChainReader, headers []*types.Header, seals []bool, index int) error {
+func (self *DummyEngine) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int) error {
var parent *types.Header
if index == 0 {
parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
@@ -123,7 +124,7 @@ func (self *DummyEngine) Author(header *types.Header) (common.Address, error) {
return header.Coinbase, nil
}
-func (self *DummyEngine) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
+func (self *DummyEngine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
// Short circuit if the header is known, or it's parent not
number := header.Number.Uint64()
if chain.GetHeader(header.Hash(), number) != nil {
@@ -137,7 +138,7 @@ func (self *DummyEngine) VerifyHeader(chain consensus.ChainReader, header *types
return self.verifyHeader(chain, header, parent, false, seal)
}
-func (self *DummyEngine) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+func (self *DummyEngine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
// Spawn as many workers as allowed threads
workers := runtime.GOMAXPROCS(0)
if len(headers) < workers {
@@ -239,17 +240,17 @@ func (self *DummyEngine) VerifyUncles(chain consensus.ChainReader, block *types.
return nil
}
-func (self *DummyEngine) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
+func (self *DummyEngine) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
return nil
}
-func (self *DummyEngine) Prepare(chain consensus.ChainReader, header *types.Header) error {
+func (self *DummyEngine) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
header.Difficulty = big.NewInt(1)
return nil
}
func (self *DummyEngine) Finalize(
- chain consensus.ChainReader, header *types.Header,
+ chain consensus.ChainHeaderReader, header *types.Header,
state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header) {
if self.cb.OnFinalize != nil {
@@ -259,7 +260,7 @@ func (self *DummyEngine) Finalize(
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
}
-func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
+func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
var extdata []byte
if self.cb.OnFinalizeAndAssemble != nil {
@@ -273,10 +274,10 @@ func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainReader, header
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
// Header seems complete, assemble into a block and return
- return types.NewBlock(header, txs, uncles, receipts, extdata), nil
+ return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie), extdata), nil
}
-func (self *DummyEngine) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) (err error) {
+func (self *DummyEngine) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) (err error) {
if self.cb.OnSeal != nil {
err = self.cb.OnSeal(block)
} else {
@@ -314,11 +315,11 @@ func (self *DummyEngine) SealHash(header *types.Header) (hash common.Hash) {
return hash
}
-func (self *DummyEngine) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
+func (self *DummyEngine) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
return big.NewInt(1)
}
-func (self *DummyEngine) APIs(chain consensus.ChainReader) (res []rpc.API) {
+func (self *DummyEngine) APIs(chain consensus.ChainHeaderReader) (res []rpc.API) {
res = nil
if self.cb.OnAPIs != nil {
res = self.cb.OnAPIs(chain)
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index 151761c..dc56b6f 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -584,7 +584,7 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
// Header seems complete, assemble into a block and return
- return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil
+ return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie), nil), nil
}
// SealHash returns the hash of a block prior to it being sealed.
diff --git a/core/blockchain.go b/core/blockchain.go
index b861220..82e3b6c 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -2498,6 +2498,6 @@ func (bc *BlockChain) ManualHead(hash common.Hash) error {
}
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
- bc.insert(block)
+ bc.writeHeadBlock(block)
return nil
}
diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go
new file mode 100644
index 0000000..1d6563d
--- /dev/null
+++ b/core/forkid/forkid.go
@@ -0,0 +1,258 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package forkid implements EIP-2124 (https://eips.ethereum.org/EIPS/eip-2124).
+package forkid
+
+import (
+ "encoding/binary"
+ "errors"
+ "hash/crc32"
+ "math"
+ "math/big"
+ "reflect"
+ "strings"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/params"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+var (
+ // ErrRemoteStale is returned by the validator if a remote fork checksum is a
+ // subset of our already applied forks, but the announced next fork block is
+ // not on our already passed chain.
+ ErrRemoteStale = errors.New("remote needs update")
+
+ // ErrLocalIncompatibleOrStale is returned by the validator if a remote fork
+ // checksum does not match any local checksum variation, signalling that the
+ // two chains have diverged in the past at some point (possibly at genesis).
+ ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update")
+)
+
+// Blockchain defines all the methods necessary to build a forkID.
+type Blockchain interface {
+ // Config retrieves the chain's fork configuration.
+ Config() *params.ChainConfig
+
+ // Genesis retrieves the chain's genesis block.
+ Genesis() *types.Block
+
+ // CurrentHeader retrieves the current head header of the canonical chain.
+ CurrentHeader() *types.Header
+}
+
+// ID is a fork identifier as defined by EIP-2124.
+type ID struct {
+ Hash [4]byte // CRC32 checksum of the genesis block and passed fork block numbers
+ Next uint64 // Block number of the next upcoming fork, or 0 if no forks are known
+}
+
+// Filter is a fork id filter to validate a remotely advertised ID.
+type Filter func(id ID) error
+
+// NewID calculates the Ethereum fork ID from the chain config and head.
+func NewID(chain Blockchain) ID {
+ return newID(
+ chain.Config(),
+ chain.Genesis().Hash(),
+ chain.CurrentHeader().Number.Uint64(),
+ )
+}
+
+// newID is the internal version of NewID, which takes extracted values as its
+// arguments instead of a chain. The reason is to allow testing the IDs without
+// having to simulate an entire blockchain.
+func newID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
+ // Calculate the starting checksum from the genesis hash
+ hash := crc32.ChecksumIEEE(genesis[:])
+
+ // Calculate the current fork checksum and the next fork block
+ var next uint64
+ for _, fork := range gatherForks(config) {
+ if fork <= head {
+ // Fork already passed, checksum the previous hash and the fork number
+ hash = checksumUpdate(hash, fork)
+ continue
+ }
+ next = fork
+ break
+ }
+ return ID{Hash: checksumToBytes(hash), Next: next}
+}
+
+// NewFilter creates a filter that reports whether a remotely advertised fork ID
+// should be rejected, based on the local chain's status.
+func NewFilter(chain Blockchain) Filter {
+ return newFilter(
+ chain.Config(),
+ chain.Genesis().Hash(),
+ func() uint64 {
+ return chain.CurrentHeader().Number.Uint64()
+ },
+ )
+}
+
+// NewStaticFilter creates a filter at block zero.
+func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter {
+ head := func() uint64 { return 0 }
+ return newFilter(config, genesis, head)
+}
+
+// newFilter is the internal version of NewFilter, taking closures as its arguments
+// instead of a chain. The reason is to allow testing it without having to simulate
+// an entire blockchain.
+func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter {
+ // Calculate all the valid fork hash and fork next combos
+ var (
+ forks = gatherForks(config)
+ sums = make([][4]byte, len(forks)+1) // 0th is the genesis
+ )
+ hash := crc32.ChecksumIEEE(genesis[:])
+ sums[0] = checksumToBytes(hash)
+ for i, fork := range forks {
+ hash = checksumUpdate(hash, fork)
+ sums[i+1] = checksumToBytes(hash)
+ }
+ // Add a sentry to simplify the fork checks and avoid special-casing
+ // the last one.
+ forks = append(forks, math.MaxUint64) // Last fork will never be passed
+
+ // Create a validator that will filter out incompatible chains
+ return func(id ID) error {
+ // Run the fork checksum validation ruleset:
+ // 1. If local and remote FORK_CSUM matches, compare local head to FORK_NEXT.
+ // The two nodes are in the same fork state currently. They might know
+ // of differing future forks, but that's not relevant until the fork
+ // triggers (might be postponed, nodes might be updated to match).
+ // 1a. A remotely announced but remotely not passed block is already passed
+ // locally, disconnect, since the chains are incompatible.
+ // 1b. No remotely announced fork; or not yet passed locally, connect.
+ // 2. If the remote FORK_CSUM is a subset of the local past forks and the
+ // remote FORK_NEXT matches with the locally following fork block number,
+ // connect.
+ // Remote node is currently syncing. It might eventually diverge from
+ // us, but at this current point in time we don't have enough information.
+ // 3. If the remote FORK_CSUM is a superset of the local past forks and can
+ // be completed with locally known future forks, connect.
+ // Local node is currently syncing. It might eventually diverge from
+ // the remote, but at this current point in time we don't have enough
+ // information.
+ // 4. Reject in all other cases.
+ head := headfn()
+ for i, fork := range forks {
+ // If our head is beyond this fork, continue to the next (we have a dummy
+ // fork of maxuint64 as the last item to always fail this check eventually).
+ if head > fork {
+ continue
+ }
+ // Found the first unpassed fork block, check if our current state matches
+ // the remote checksum (rule #1).
+ if sums[i] == id.Hash {
+ // Fork checksum matched, check if a remote future fork block already passed
+ // locally without the local node being aware of it (rule #1a).
+ if id.Next > 0 && head >= id.Next {
+ return ErrLocalIncompatibleOrStale
+ }
+ // Haven't passed locally a remote-only fork, accept the connection (rule #1b).
+ return nil
+ }
+ // The local and remote nodes are in different forks currently, check if the
+ // remote checksum is a subset of our local forks (rule #2).
+ for j := 0; j < i; j++ {
+ if sums[j] == id.Hash {
+ // Remote checksum is a subset, validate based on the announced next fork
+ if forks[j] != id.Next {
+ return ErrRemoteStale
+ }
+ return nil
+ }
+ }
+ // Remote chain is not a subset of our local one, check if it's a superset by
+ // any chance, signalling that we're simply out of sync (rule #3).
+ for j := i + 1; j < len(sums); j++ {
+ if sums[j] == id.Hash {
+ // Yay, remote checksum is a superset, ignore upcoming forks
+ return nil
+ }
+ }
+ // No exact, subset or superset match. We are on differing chains, reject.
+ return ErrLocalIncompatibleOrStale
+ }
+ log.Error("Impossible fork ID validation", "id", id)
+ return nil // Something's very wrong, accept rather than reject
+ }
+}
+
+// checksumUpdate calculates the next IEEE CRC32 checksum based on the previous
+// one and a fork block number (equivalent to CRC32(original-blob || fork)).
+func checksumUpdate(hash uint32, fork uint64) uint32 {
+ var blob [8]byte
+ binary.BigEndian.PutUint64(blob[:], fork)
+ return crc32.Update(hash, crc32.IEEETable, blob[:])
+}
+
+// checksumToBytes converts a uint32 checksum into a [4]byte array.
+func checksumToBytes(hash uint32) [4]byte {
+ var blob [4]byte
+ binary.BigEndian.PutUint32(blob[:], hash)
+ return blob
+}
+
+// gatherForks gathers all the known forks and creates a sorted list out of them.
+func gatherForks(config *params.ChainConfig) []uint64 {
+ // Gather all the fork block numbers via reflection
+ kind := reflect.TypeOf(params.ChainConfig{})
+ conf := reflect.ValueOf(config).Elem()
+
+ var forks []uint64
+ for i := 0; i < kind.NumField(); i++ {
+ // Fetch the next field and skip non-fork rules
+ field := kind.Field(i)
+ if !strings.HasSuffix(field.Name, "Block") {
+ continue
+ }
+ if field.Type != reflect.TypeOf(new(big.Int)) {
+ continue
+ }
+ // Extract the fork rule block number and aggregate it
+ rule := conf.Field(i).Interface().(*big.Int)
+ if rule != nil {
+ forks = append(forks, rule.Uint64())
+ }
+ }
+ // Sort the fork block numbers to permit chronological XOR
+ for i := 0; i < len(forks); i++ {
+ for j := i + 1; j < len(forks); j++ {
+ if forks[i] > forks[j] {
+ forks[i], forks[j] = forks[j], forks[i]
+ }
+ }
+ }
+ // Deduplicate block numbers applying multiple forks
+ for i := 1; i < len(forks); i++ {
+ if forks[i] == forks[i-1] {
+ forks = append(forks[:i], forks[i+1:]...)
+ i--
+ }
+ }
+ // Skip any forks in block 0, that's the genesis ruleset
+ if len(forks) > 0 && forks[0] == 0 {
+ forks = forks[1:]
+ }
+ return forks
+}
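
Taken together, newID, checksumUpdate and checksumToBytes implement the EIP-2124 rolling checksum: start from the CRC32 of the raw genesis hash, then fold in each already-passed fork block number as an 8-byte big-endian blob. The standalone sketch below reproduces that arithmetic against the mainnet vectors published in EIP-2124 (the genesis hash and expected values come from the EIP, not from this commit):

    package main

    import (
        "encoding/binary"
        "fmt"
        "hash/crc32"

        "github.com/ethereum/go-ethereum/common"
    )

    func main() {
        // Mainnet genesis hash, as used in the EIP-2124 examples.
        genesis := common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")

        // Starting checksum, exactly as newID computes it.
        hash := crc32.ChecksumIEEE(genesis[:])
        fmt.Printf("FORK_HASH at genesis: %#x\n", hash) // 0xfc64ec04 per EIP-2124

        // Fold in a passed fork block, exactly as checksumUpdate does.
        var blob [8]byte
        binary.BigEndian.PutUint64(blob[:], 1150000) // Homestead
        hash = crc32.Update(hash, crc32.IEEETable, blob[:])
        fmt.Printf("FORK_HASH after Homestead: %#x\n", hash) // 0x97c2c34c per EIP-2124
    }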
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
new file mode 100644
index 0000000..6112de0
--- /dev/null
+++ b/core/rawdb/accessors_state.go
@@ -0,0 +1,96 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// ReadPreimage retrieves a single preimage of the provided hash.
+func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(preimageKey(hash))
+ return data
+}
+
+// WritePreimages writes the provided set of preimages to the database.
+func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
+ for hash, preimage := range preimages {
+ if err := db.Put(preimageKey(hash), preimage); err != nil {
+ log.Crit("Failed to store trie preimage", "err", err)
+ }
+ }
+ preimageCounter.Inc(int64(len(preimages)))
+ preimageHitCounter.Inc(int64(len(preimages)))
+}
+
+// ReadCode retrieves the contract code of the provided code hash.
+func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ // Try the legacy code scheme first; if the code is not found there,
+ // fall back to the current scheme, since most code is still stored
+ // under the legacy scheme.
+ //
+ // todo(rjl493456442) change the order when we forcibly upgrade the code
+ // scheme with snapshot.
+ data, _ := db.Get(hash[:])
+ if len(data) != 0 {
+ return data
+ }
+ return ReadCodeWithPrefix(db, hash)
+}
+
+// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
+// The main difference between this function and ReadCode is that this
+// function only checks for existence under the latest scheme (with prefix).
+func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(codeKey(hash))
+ return data
+}
+
+// WriteCode writes the provided contract code to the database.
+func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
+ if err := db.Put(codeKey(hash), code); err != nil {
+ log.Crit("Failed to store contract code", "err", err)
+ }
+}
+
+// DeleteCode deletes the specified contract code from the database.
+func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(codeKey(hash)); err != nil {
+ log.Crit("Failed to delete contract code", "err", err)
+ }
+}
+
+// ReadTrieNode retrieves the trie node of the provided hash.
+func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(hash.Bytes())
+ return data
+}
+
+// WriteTrieNode writes the provided trie node to the database.
+func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
+ if err := db.Put(hash.Bytes(), node); err != nil {
+ log.Crit("Failed to store trie node", "err", err)
+ }
+}
+
+// DeleteTrieNode deletes the specified trie node from the database.
+func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(hash.Bytes()); err != nil {
+ log.Crit("Failed to delete trie node", "err", err)
+ }
+}
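
A short usage sketch of the accessors above. It assumes the upstream go-ethereum helpers rawdb.NewMemoryDatabase and crypto.Keccak256Hash are available alongside this file; neither appears in this commit:

    package main

    import (
        "fmt"

        "github.com/ava-labs/coreth/core/rawdb" // assumed import path, per this diff
        "github.com/ethereum/go-ethereum/crypto"
    )

    func main() {
        // NewMemoryDatabase is assumed to exist in this package, as upstream.
        db := rawdb.NewMemoryDatabase()

        code := []byte{0x60, 0x80, 0x60, 0x40} // arbitrary bytecode
        hash := crypto.Keccak256Hash(code)

        // Round-trip through the prefixed code scheme; ReadCode falls back to
        // the prefixed lookup after missing the legacy (unprefixed) key.
        rawdb.WriteCode(db, hash, code)
        fmt.Printf("stored:       %x\n", rawdb.ReadCode(db, hash))

        rawdb.DeleteCode(db, hash)
        fmt.Printf("after delete: %x\n", rawdb.ReadCode(db, hash))
    }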
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
new file mode 100644
index 0000000..3130e92
--- /dev/null
+++ b/core/rawdb/chain_iterator.go
@@ -0,0 +1,304 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "runtime"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/prque"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
+)
+
+// InitDatabaseFromFreezer reinitializes an empty database from a previous batch
+// of frozen ancient blocks. The method iterates over all the frozen blocks and
+// injects into the database the block hash->number mappings.
+func InitDatabaseFromFreezer(db ethdb.Database) {
+ // If we can't access the freezer or it's empty, abort
+ frozen, err := db.Ancients()
+ if err != nil || frozen == 0 {
+ return
+ }
+ var (
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
+ hash common.Hash
+ )
+ for i := uint64(0); i < frozen; i++ {
+ // Since the freezer has all data in sequential order on a file,
+ // it would be 'neat' to read more data in one go, and let the
+ // freezerdb return N items (e.g. up to 1000 items per read).
+ // That would require an API change in Ancients, though.
+ if h, err := db.Ancient(freezerHashTable, i); err != nil {
+ log.Crit("Failed to init database from freezer", "err", err)
+ } else {
+ hash = common.BytesToHash(h)
+ }
+ WriteHeaderNumber(batch, hash, i)
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write data to db", "err", err)
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write data to db", "err", err)
+ }
+ batch.Reset()
+
+ WriteHeadHeaderHash(db, hash)
+ WriteHeadFastBlockHash(db, hash)
+ log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start)))
+}
+
+type blockTxHashes struct {
+ number uint64
+ hashes []common.Hash
+}
+
+// iterateTransactions iterates over all transactions in the canonical blocks in
+// the given range and yields their hashes on a channel.
+func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool) (chan *blockTxHashes, chan struct{}) {
+ // One thread sequentially reads data from db
+ type numberRlp struct {
+ number uint64
+ rlp rlp.RawValue
+ }
+ if to == from {
+ return nil, nil
+ }
+ threads := to - from
+ if cpus := runtime.NumCPU(); threads > uint64(cpus) {
+ threads = uint64(cpus)
+ }
+ var (
+ rlpCh = make(chan *numberRlp, threads*2) // we send raw rlp over this channel
+ hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh
+ abortCh = make(chan struct{})
+ )
+ // lookup runs in one instance
+ lookup := func() {
+ n, end := from, to
+ if reverse {
+ n, end = to-1, from-1
+ }
+ defer close(rlpCh)
+ for n != end {
+ data := ReadCanonicalBodyRLP(db, n)
+ // Feed the block to the aggregator, or abort on interrupt
+ select {
+ case rlpCh <- &numberRlp{n, data}:
+ case <-abortCh:
+ return
+ }
+ if reverse {
+ n--
+ } else {
+ n++
+ }
+ }
+ }
+ // process runs in parallel
+ nThreadsAlive := int32(threads)
+ process := func() {
+ defer func() {
+ // Last processor closes the result channel
+ if atomic.AddInt32(&nThreadsAlive, -1) == 0 {
+ close(hashesCh)
+ }
+ }()
+
+ var hasher = sha3.NewLegacyKeccak256()
+ for data := range rlpCh {
+ it, err := rlp.NewListIterator(data.rlp)
+ if err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ it.Next()
+ txs := it.Value()
+ txIt, err := rlp.NewListIterator(txs)
+ if err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ var hashes []common.Hash
+ for txIt.Next() {
+ if err := txIt.Err(); err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ var txHash common.Hash
+ hasher.Reset()
+ hasher.Write(txIt.Value())
+ hasher.Sum(txHash[:0])
+ hashes = append(hashes, txHash)
+ }
+ result := &blockTxHashes{
+ hashes: hashes,
+ number: data.number,
+ }
+ // Feed the block to the aggregator, or abort on interrupt
+ select {
+ case hashesCh <- result:
+ case <-abortCh:
+ return
+ }
+ }
+ }
+ go lookup() // start the sequential db accessor
+ for i := 0; i < int(threads); i++ {
+ go process()
+ }
+ return hashesCh, abortCh
+}
+
+// IndexTransactions creates txlookup indices for the specified block range.
+//
+// This function iterates the canonical chain in reverse order, which has one
+// main advantage: the tx index tail flag can be written periodically even
+// before the whole indexing procedure is finished, so indexing can be resumed
+// quickly the next time.
+func IndexTransactions(db ethdb.Database, from uint64, to uint64) {
+ // short circuit for invalid range
+ if from >= to {
+ return
+ }
+ var (
+ hashesCh, abortCh = iterateTransactions(db, from, to, true)
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second)
+ // Since we iterate in reverse, we expect the first number to come
+ // in to be [to-1]. Therefore, setting lastNum to `to` means that the
+ // prque gap-evaluation will work correctly.
+ lastNum = to
+ queue = prque.New(nil)
+ // for stats reporting
+ blocks, txs = 0, 0
+ )
+ defer close(abortCh)
+
+ for chanDelivery := range hashesCh {
+ // Push the delivery into the queue and process contiguous ranges.
+ // Since we iterate in reverse, lower numbers have lower priority, and
+ // we can use the number directly as the priority marker.
+ queue.Push(chanDelivery, int64(chanDelivery.number))
+ for !queue.Empty() {
+ // If the next available item is gapped, stop and wait for the missing block
+ if _, priority := queue.Peek(); priority != int64(lastNum-1) {
+ break
+ }
+ // Next block available, pop it off and index it
+ delivery := queue.PopItem().(*blockTxHashes)
+ lastNum = delivery.number
+ WriteTxLookupEntries(batch, delivery.number, delivery.hashes)
+ blocks++
+ txs += len(delivery.hashes)
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ // Also write the tail there
+ WriteTxIndexTail(batch, lastNum)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Indexing transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ }
+ if lastNum < to {
+ WriteTxIndexTail(batch, lastNum)
+ // No need to write the batch if we never entered the loop above...
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ }
+ log.Info("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+}
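
The queue handling above is the subtle part: hash deliveries arrive out of order from the parallel workers, and the priority queue only releases contiguous descending runs, so the tail flag never skips a block. A standalone sketch of that gap-evaluation with hypothetical block numbers:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common/prque" // the queue IndexTransactions uses
    )

    func main() {
        deliveries := []uint64{7, 9, 8} // out-of-order arrivals for range [7, 10)
        queue := prque.New(nil)
        lastNum := uint64(10) // to == 10, so block 9 must be indexed first

        for _, n := range deliveries {
            // Higher block number == higher priority, so reverse order pops first.
            queue.Push(n, int64(n))
            for !queue.Empty() {
                if _, priority := queue.Peek(); priority != int64(lastNum-1) {
                    break // gapped: wait until the missing block arrives
                }
                num := queue.PopItem().(uint64)
                lastNum = num
                fmt.Println("index block", num) // prints 9, 8, 7 contiguously
            }
        }
    }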
+
+// UnindexTransactions re