author    Determinant <[email protected]>  2020-09-16 19:00:25 -0400
committer Determinant <[email protected]>  2020-09-16 19:00:25 -0400
commit    b1ac5e6ce73c37378e575d6291e3c5f1ee170430 (patch)
tree      842889ed69fcf55a3dd3a0226e44ad54025d6a79
parent    8478802ddacc027a8d8c866da9365f6739d9d9d4 (diff)
build success
-rw-r--r--  consensus/clique/clique.go               |    2
-rw-r--r--  consensus/dummy/consensus.go             |   31
-rw-r--r--  consensus/ethash/consensus.go            |    2
-rw-r--r--  core/blockchain.go                       |    2
-rw-r--r--  core/forkid/forkid.go                    |  258
-rw-r--r--  core/rawdb/accessors_state.go            |   96
-rw-r--r--  core/rawdb/chain_iterator.go             |  304
-rw-r--r--  core/state/snapshot/difflayer_test.go    |  400
-rw-r--r--  core/state/snapshot/disklayer_test.go    |  511
-rw-r--r--  core/state/snapshot/iterator_test.go     | 1046
-rw-r--r--  core/state/snapshot/snapshot_test.go     |  371
-rw-r--r--  core/state/snapshot/wipe_test.go         |  124
-rw-r--r--  core/vm/instructions.go                  |   14
-rw-r--r--  coreth.go                                |   19
-rw-r--r--  eth/api.go                               |    2
-rw-r--r--  eth/backend.go                           |   10
-rw-r--r--  eth/protocol.go                          |    1
-rw-r--r--  internal/ethapi/api.go                   |    1
-rw-r--r--  internal/ethapi/backend.go               |    1
-rw-r--r--  miner/miner.go                           |    3
-rw-r--r--  node/api.go                              |    1
-rw-r--r--  node/node.go                             |   34
-rw-r--r--  rpc/metrics.go                           |   39
23 files changed, 777 insertions(+), 2495 deletions(-)
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index d239aca..b27525b 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -566,7 +566,7 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
header.UncleHash = types.CalcUncleHash(nil)
// Assemble and return the final block for sealing
- return types.NewBlock(header, txs, nil, receipts, new(trie.Trie)), nil
+ return types.NewBlock(header, txs, nil, receipts, new(trie.Trie), nil), nil
}
// Authorize injects a private key into the consensus engine to mint new blocks
diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go
index 2108684..da63673 100644
--- a/consensus/dummy/consensus.go
+++ b/consensus/dummy/consensus.go
@@ -13,14 +13,15 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/rpc"
+ mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
- mapset "github.com/deckarep/golang-set"
+ "github.com/ethereum/go-ethereum/trie"
)
-type OnFinalizeCallbackType = func(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header)
+type OnFinalizeCallbackType = func(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header)
type OnFinalizeAndAssembleCallbackType = func(state *state.StateDB, txs []*types.Transaction) ([]byte, error)
-type OnAPIsCallbackType = func(consensus.ChainReader) []rpc.API
+type OnAPIsCallbackType = func(consensus.ChainHeaderReader) []rpc.API
type OnExtraStateChangeType = func(block *types.Block, statedb *state.StateDB) error
type ConsensusCallbacks struct {
@@ -55,7 +56,7 @@ var (
)
// modified from consensus.go
-func (self *DummyEngine) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
+func (self *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool) error {
// Ensure that the header's extra-data section is of a reasonable size
if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
@@ -103,7 +104,7 @@ func (self *DummyEngine) verifyHeader(chain consensus.ChainReader, header, paren
return nil
}
-func (self *DummyEngine) verifyHeaderWorker(chain consensus.ChainReader, headers []*types.Header, seals []bool, index int) error {
+func (self *DummyEngine) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int) error {
var parent *types.Header
if index == 0 {
parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
@@ -123,7 +124,7 @@ func (self *DummyEngine) Author(header *types.Header) (common.Address, error) {
return header.Coinbase, nil
}
-func (self *DummyEngine) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
+func (self *DummyEngine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
// Short circuit if the header is known, or it's parent not
number := header.Number.Uint64()
if chain.GetHeader(header.Hash(), number) != nil {
@@ -137,7 +138,7 @@ func (self *DummyEngine) VerifyHeader(chain consensus.ChainReader, header *types
return self.verifyHeader(chain, header, parent, false, seal)
}
-func (self *DummyEngine) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+func (self *DummyEngine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
// Spawn as many workers as allowed threads
workers := runtime.GOMAXPROCS(0)
if len(headers) < workers {
@@ -239,17 +240,17 @@ func (self *DummyEngine) VerifyUncles(chain consensus.ChainReader, block *types.
return nil
}
-func (self *DummyEngine) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
+func (self *DummyEngine) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
return nil
}
-func (self *DummyEngine) Prepare(chain consensus.ChainReader, header *types.Header) error {
+func (self *DummyEngine) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
header.Difficulty = big.NewInt(1)
return nil
}
func (self *DummyEngine) Finalize(
- chain consensus.ChainReader, header *types.Header,
+ chain consensus.ChainHeaderReader, header *types.Header,
state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header) {
if self.cb.OnFinalize != nil {
@@ -259,7 +260,7 @@ func (self *DummyEngine) Finalize(
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
}
-func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
+func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
var extdata []byte
if self.cb.OnFinalizeAndAssemble != nil {
@@ -273,10 +274,10 @@ func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainReader, header
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
// Header seems complete, assemble into a block and return
- return types.NewBlock(header, txs, uncles, receipts, extdata), nil
+ return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie), extdata), nil
}
-func (self *DummyEngine) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) (err error) {
+func (self *DummyEngine) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) (err error) {
if self.cb.OnSeal != nil {
err = self.cb.OnSeal(block)
} else {
@@ -314,11 +315,11 @@ func (self *DummyEngine) SealHash(header *types.Header) (hash common.Hash) {
return hash
}
-func (self *DummyEngine) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
+func (self *DummyEngine) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
return big.NewInt(1)
}
-func (self *DummyEngine) APIs(chain consensus.ChainReader) (res []rpc.API) {
+func (self *DummyEngine) APIs(chain consensus.ChainHeaderReader) (res []rpc.API) {
res = nil
if self.cb.OnAPIs != nil {
res = self.cb.OnAPIs(chain)
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index 151761c..dc56b6f 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -584,7 +584,7 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
// Header seems complete, assemble into a block and return
- return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil
+ return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie), nil), nil
}
// SealHash returns the hash of a block prior to it being sealed.
diff --git a/core/blockchain.go b/core/blockchain.go
index b861220..82e3b6c 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -2498,6 +2498,6 @@ func (bc *BlockChain) ManualHead(hash common.Hash) error {
}
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
- bc.insert(block)
+ bc.writeHeadBlock(block)
return nil
}
diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go
new file mode 100644
index 0000000..1d6563d
--- /dev/null
+++ b/core/forkid/forkid.go
@@ -0,0 +1,258 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package forkid implements EIP-2124 (https://eips.ethereum.org/EIPS/eip-2124).
+package forkid
+
+import (
+ "encoding/binary"
+ "errors"
+ "hash/crc32"
+ "math"
+ "math/big"
+ "reflect"
+ "strings"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/params"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+var (
+ // ErrRemoteStale is returned by the validator if a remote fork checksum is a
+ // subset of our already applied forks, but the announced next fork block is
+ // not on our already passed chain.
+ ErrRemoteStale = errors.New("remote needs update")
+
+ // ErrLocalIncompatibleOrStale is returned by the validator if a remote fork
+ // checksum does not match any local checksum variation, signalling that the
+ // two chains have diverged in the past at some point (possibly at genesis).
+ ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update")
+)
+
+// Blockchain defines all necessary method to build a forkID.
+type Blockchain interface {
+ // Config retrieves the chain's fork configuration.
+ Config() *params.ChainConfig
+
+ // Genesis retrieves the chain's genesis block.
+ Genesis() *types.Block
+
+ // CurrentHeader retrieves the current head header of the canonical chain.
+ CurrentHeader() *types.Header
+}
+
+// ID is a fork identifier as defined by EIP-2124.
+type ID struct {
+ Hash [4]byte // CRC32 checksum of the genesis block and passed fork block numbers
+ Next uint64 // Block number of the next upcoming fork, or 0 if no forks are known
+}
+
+// Filter is a fork id filter to validate a remotely advertised ID.
+type Filter func(id ID) error
+
+// NewID calculates the Ethereum fork ID from the chain config and head.
+func NewID(chain Blockchain) ID {
+ return newID(
+ chain.Config(),
+ chain.Genesis().Hash(),
+ chain.CurrentHeader().Number.Uint64(),
+ )
+}
+
+// newID is the internal version of NewID, which takes extracted values as its
+// arguments instead of a chain. The reason is to allow testing the IDs without
+// having to simulate an entire blockchain.
+func newID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
+ // Calculate the starting checksum from the genesis hash
+ hash := crc32.ChecksumIEEE(genesis[:])
+
+ // Calculate the current fork checksum and the next fork block
+ var next uint64
+ for _, fork := range gatherForks(config) {
+ if fork <= head {
+ // Fork already passed, checksum the previous hash and the fork number
+ hash = checksumUpdate(hash, fork)
+ continue
+ }
+ next = fork
+ break
+ }
+ return ID{Hash: checksumToBytes(hash), Next: next}
+}
+
+// NewFilter creates a filter that returns if a fork ID should be rejected or not
+// based on the local chain's status.
+func NewFilter(chain Blockchain) Filter {
+ return newFilter(
+ chain.Config(),
+ chain.Genesis().Hash(),
+ func() uint64 {
+ return chain.CurrentHeader().Number.Uint64()
+ },
+ )
+}
+
+// NewStaticFilter creates a filter at block zero.
+func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter {
+ head := func() uint64 { return 0 }
+ return newFilter(config, genesis, head)
+}
+
+// newFilter is the internal version of NewFilter, taking closures as its arguments
+// instead of a chain. The reason is to allow testing it without having to simulate
+// an entire blockchain.
+func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter {
+ // Calculate the all the valid fork hash and fork next combos
+ var (
+ forks = gatherForks(config)
+ sums = make([][4]byte, len(forks)+1) // 0th is the genesis
+ )
+ hash := crc32.ChecksumIEEE(genesis[:])
+ sums[0] = checksumToBytes(hash)
+ for i, fork := range forks {
+ hash = checksumUpdate(hash, fork)
+ sums[i+1] = checksumToBytes(hash)
+ }
+ // Add two sentries to simplify the fork checks and don't require special
+ // casing the last one.
+ forks = append(forks, math.MaxUint64) // Last fork will never be passed
+
+ // Create a validator that will filter out incompatible chains
+ return func(id ID) error {
+ // Run the fork checksum validation ruleset:
+ // 1. If local and remote FORK_CSUM matches, compare local head to FORK_NEXT.
+ // The two nodes are in the same fork state currently. They might know
+ // of differing future forks, but that's not relevant until the fork
+ // triggers (might be postponed, nodes might be updated to match).
+ // 1a. A remotely announced but remotely not passed block is already passed
+ // locally, disconnect, since the chains are incompatible.
+ // 1b. No remotely announced fork; or not yet passed locally, connect.
+ // 2. If the remote FORK_CSUM is a subset of the local past forks and the
+ // remote FORK_NEXT matches with the locally following fork block number,
+ // connect.
+ // Remote node is currently syncing. It might eventually diverge from
+ // us, but at this current point in time we don't have enough information.
+ // 3. If the remote FORK_CSUM is a superset of the local past forks and can
+ // be completed with locally known future forks, connect.
+ // Local node is currently syncing. It might eventually diverge from
+ // the remote, but at this current point in time we don't have enough
+ // information.
+ // 4. Reject in all other cases.
+ head := headfn()
+ for i, fork := range forks {
+ // If our head is beyond this fork, continue to the next (we have a dummy
+ // fork of maxuint64 as the last item to always fail this check eventually).
+ if head > fork {
+ continue
+ }
+ // Found the first unpassed fork block, check if our current state matches
+ // the remote checksum (rule #1).
+ if sums[i] == id.Hash {
+ // Fork checksum matched, check if a remote future fork block already passed
+ // locally without the local node being aware of it (rule #1a).
+ if id.Next > 0 && head >= id.Next {
+ return ErrLocalIncompatibleOrStale
+ }
+ // Haven't passed locally a remote-only fork, accept the connection (rule #1b).
+ return nil
+ }
+ // The local and remote nodes are in different forks currently, check if the
+ // remote checksum is a subset of our local forks (rule #2).
+ for j := 0; j < i; j++ {
+ if sums[j] == id.Hash {
+ // Remote checksum is a subset, validate based on the announced next fork
+ if forks[j] != id.Next {
+ return ErrRemoteStale
+ }
+ return nil
+ }
+ }
+ // Remote chain is not a subset of our local one, check if it's a superset by
+ // any chance, signalling that we're simply out of sync (rule #3).
+ for j := i + 1; j < len(sums); j++ {
+ if sums[j] == id.Hash {
+ // Yay, remote checksum is a superset, ignore upcoming forks
+ return nil
+ }
+ }
+ // No exact, subset or superset match. We are on differing chains, reject.
+ return ErrLocalIncompatibleOrStale
+ }
+ log.Error("Impossible fork ID validation", "id", id)
+ return nil // Something's very wrong, accept rather than reject
+ }
+}
+
+// checksumUpdate calculates the next IEEE CRC32 checksum based on the previous
+// one and a fork block number (equivalent to CRC32(original-blob || fork)).
+func checksumUpdate(hash uint32, fork uint64) uint32 {
+ var blob [8]byte
+ binary.BigEndian.PutUint64(blob[:], fork)
+ return crc32.Update(hash, crc32.IEEETable, blob[:])
+}
+
+// checksumToBytes converts a uint32 checksum into a [4]byte array.
+func checksumToBytes(hash uint32) [4]byte {
+ var blob [4]byte
+ binary.BigEndian.PutUint32(blob[:], hash)
+ return blob
+}
+
+// gatherForks gathers all the known forks and creates a sorted list out of them.
+func gatherForks(config *params.ChainConfig) []uint64 {
+ // Gather all the fork block numbers via reflection
+ kind := reflect.TypeOf(params.ChainConfig{})
+ conf := reflect.ValueOf(config).Elem()
+
+ var forks []uint64
+ for i := 0; i < kind.NumField(); i++ {
+ // Fetch the next field and skip non-fork rules
+ field := kind.Field(i)
+ if !strings.HasSuffix(field.Name, "Block") {
+ continue
+ }
+ if field.Type != reflect.TypeOf(new(big.Int)) {
+ continue
+ }
+ // Extract the fork rule block number and aggregate it
+ rule := conf.Field(i).Interface().(*big.Int)
+ if rule != nil {
+ forks = append(forks, rule.Uint64())
+ }
+ }
+ // Sort the fork block numbers to permit chronological XOR
+ for i := 0; i < len(forks); i++ {
+ for j := i + 1; j < len(forks); j++ {
+ if forks[i] > forks[j] {
+ forks[i], forks[j] = forks[j], forks[i]
+ }
+ }
+ }
+ // Deduplicate block numbers applying multiple forks
+ for i := 1; i < len(forks); i++ {
+ if forks[i] == forks[i-1] {
+ forks = append(forks[:i], forks[i+1:]...)
+ i--
+ }
+ }
+ // Skip any forks in block 0, that's the genesis ruleset
+ if len(forks) > 0 && forks[0] == 0 {
+ forks = forks[1:]
+ }
+ return forks
+}
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
new file mode 100644
index 0000000..6112de0
--- /dev/null
+++ b/core/rawdb/accessors_state.go
@@ -0,0 +1,96 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// ReadPreimage retrieves a single preimage of the provided hash.
+func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(preimageKey(hash))
+ return data
+}
+
+// WritePreimages writes the provided set of preimages to the database.
+func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
+ for hash, preimage := range preimages {
+ if err := db.Put(preimageKey(hash), preimage); err != nil {
+ log.Crit("Failed to store trie preimage", "err", err)
+ }
+ }
+ preimageCounter.Inc(int64(len(preimages)))
+ preimageHitCounter.Inc(int64(len(preimages)))
+}
+
+// ReadCode retrieves the contract code of the provided code hash.
+func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ // Try with the legacy code scheme first, if not then try with current
+ // scheme. Since most of the code will be found with legacy scheme.
+ //
+ // todo(rjl493456442) change the order when we forcibly upgrade the code
+ // scheme with snapshot.
+ data, _ := db.Get(hash[:])
+ if len(data) != 0 {
+ return data
+ }
+ return ReadCodeWithPrefix(db, hash)
+}
+
+// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
+// The main difference between this function and ReadCode is this function
+// will only check the existence with latest scheme(with prefix).
+func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(codeKey(hash))
+ return data
+}
+
+// WriteCode writes the provided contract code database.
+func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
+ if err := db.Put(codeKey(hash), code); err != nil {
+ log.Crit("Failed to store contract code", "err", err)
+ }
+}
+
+// DeleteCode deletes the specified contract code from the database.
+func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(codeKey(hash)); err != nil {
+ log.Crit("Failed to delete contract code", "err", err)
+ }
+}
+
+// ReadTrieNode retrieves the trie node of the provided hash.
+func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(hash.Bytes())
+ return data
+}
+
+// WriteTrieNode writes the provided trie node database.
+func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
+ if err := db.Put(hash.Bytes(), node); err != nil {
+ log.Crit("Failed to store trie node", "err", err)
+ }
+}
+
+// DeleteTrieNode deletes the specified trie node from the database.
+func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(hash.Bytes()); err != nil {
+ log.Crit("Failed to delete trie node", "err", err)
+ }
+}
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
new file mode 100644
index 0000000..3130e92
--- /dev/null
+++ b/core/rawdb/chain_iterator.go
@@ -0,0 +1,304 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "runtime"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/prque"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
+)
+
+// InitDatabaseFromFreezer reinitializes an empty database from a previous batch
+// of frozen ancient blocks. The method iterates over all the frozen blocks and
+// injects into the database the block hash->number mappings.
+func InitDatabaseFromFreezer(db ethdb.Database) {
+ // If we can't access the freezer or it's empty, abort
+ frozen, err := db.Ancients()
+ if err != nil || frozen == 0 {
+ return
+ }
+ var (
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
+ hash common.Hash
+ )
+ for i := uint64(0); i < frozen; i++ {
+ // Since the freezer has all data in sequential order on a file,
+ // it would be 'neat' to read more data in one go, and let the
+ // freezerdb return N items (e.g up to 1000 items per go)
+ // That would require an API change in Ancients though
+ if h, err := db.Ancient(freezerHashTable, i); err != nil {
+ log.Crit("Failed to init database from freezer", "err", err)
+ } else {
+ hash = common.BytesToHash(h)
+ }
+ WriteHeaderNumber(batch, hash, i)
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write data to db", "err", err)
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write data to db", "err", err)
+ }
+ batch.Reset()
+
+ WriteHeadHeaderHash(db, hash)
+ WriteHeadFastBlockHash(db, hash)
+ log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start)))
+}
+
+type blockTxHashes struct {
+ number uint64
+ hashes []common.Hash
+}
+
+// iterateTransactions iterates over all transactions in the (canon) block
+// number(s) given, and yields the hashes on a channel
+func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool) (chan *blockTxHashes, chan struct{}) {
+ // One thread sequentially reads data from db
+ type numberRlp struct {
+ number uint64
+ rlp rlp.RawValue
+ }
+ if to == from {
+ return nil, nil
+ }
+ threads := to - from
+ if cpus := runtime.NumCPU(); threads > uint64(cpus) {
+ threads = uint64(cpus)
+ }
+ var (
+ rlpCh = make(chan *numberRlp, threads*2) // we send raw rlp over this channel
+ hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh
+ abortCh = make(chan struct{})
+ )
+ // lookup runs in one instance
+ lookup := func() {
+ n, end := from, to
+ if reverse {
+ n, end = to-1, from-1
+ }
+ defer close(rlpCh)
+ for n != end {
+ data := ReadCanonicalBodyRLP(db, n)
+ // Feed the block to the aggregator, or abort on interrupt
+ select {
+ case rlpCh <- &numberRlp{n, data}:
+ case <-abortCh:
+ return
+ }
+ if reverse {
+ n--
+ } else {
+ n++
+ }
+ }
+ }
+ // process runs in parallel
+ nThreadsAlive := int32(threads)
+ process := func() {
+ defer func() {
+ // Last processor closes the result channel
+ if atomic.AddInt32(&nThreadsAlive, -1) == 0 {
+ close(hashesCh)
+ }
+ }()
+
+ var hasher = sha3.NewLegacyKeccak256()
+ for data := range rlpCh {
+ it, err := rlp.NewListIterator(data.rlp)
+ if err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ it.Next()
+ txs := it.Value()
+ txIt, err := rlp.NewListIterator(txs)
+ if err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ var hashes []common.Hash
+ for txIt.Next() {
+ if err := txIt.Err(); err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ var txHash common.Hash
+ hasher.Reset()
+ hasher.Write(txIt.Value())
+ hasher.Sum(txHash[:0])
+ hashes = append(hashes, txHash)
+ }
+ result := &blockTxHashes{
+ hashes: hashes,
+ number: data.number,
+ }
+ // Feed the block to the aggregator, or abort on interrupt
+ select {
+ case hashesCh <- result:
+ case <-abortCh:
+ return
+ }
+ }
+ }
+ go lookup() // start the sequential db accessor
+ for i := 0; i < int(threads); i++ {
+ go process()
+ }
+ return hashesCh, abortCh
+}
+
+// IndexTransactions creates txlookup indices of the specified block range.
+//
+// This function iterates canonical chain in reverse order, it has one main advantage:
+// We can write tx index tail flag periodically even without the whole indexing
+// procedure is finished. So that we can resume indexing procedure next time quickly.
+func IndexTransactions(db ethdb.Database, from uint64, to uint64) {
+ // short circuit for invalid range
+ if from >= to {
+ return
+ }
+ var (
+ hashesCh, abortCh = iterateTransactions(db, from, to, true)
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second)
+ // Since we iterate in reverse, we expect the first number to come
+ // in to be [to-1]. Therefore, setting lastNum to means that the
+ // prqueue gap-evaluation will work correctly
+ lastNum = to
+ queue = prque.New(nil)
+ // for stats reporting
+ blocks, txs = 0, 0
+ )
+ defer close(abortCh)
+
+ for chanDelivery := range hashesCh {
+ // Push the delivery into the queue and process contiguous ranges.
+ // Since we iterate in reverse, so lower numbers have lower prio, and
+ // we can use the number directly as prio marker
+ queue.Push(chanDelivery, int64(chanDelivery.number))
+ for !queue.Empty() {
+ // If the next available item is gapped, return
+ if _, priority := queue.Peek(); priority != int64(lastNum-1) {
+ break
+ }
+ // Next block available, pop it off and index it
+ delivery := queue.PopItem().(*blockTxHashes)
+ lastNum = delivery.number
+ WriteTxLookupEntries(batch, delivery.number, delivery.hashes)
+ blocks++
+ txs += len(delivery.hashes)
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ // Also write the tail there
+ WriteTxIndexTail(batch, lastNum)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Indexing transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ }
+ if lastNum < to {
+ WriteTxIndexTail(batch, lastNum)
+ // No need to write the batch if we never entered the loop above...
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ }
+ log.Info("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+}
+
+// UnindexTransactions removes txlookup indices of the specified block range.
+func UnindexTransactions(db ethdb.Database, from uint64, to uint64) {
+ // short circuit for invalid range
+ if from >= to {
+ return
+ }
+ // Write flag first and then unindex the transaction indices. Some indices
+ // will be left in the database if crash happens but it's fine.
+ WriteTxIndexTail(db, to)
+ // If only one block is unindexed, do it directly
+ //if from+1 == to {
+ // data := ReadCanonicalBodyRLP(db, uint64(from))
+ // DeleteTxLookupEntries(db, ReadBlock(db, ReadCanonicalHash(db, from), from))
+ // log.Info("Unindexed transactions", "blocks", 1, "tail", to)
+ // return
+ //}
+ // TODO @holiman, add this back (if we want it)
+ var (
+ hashesCh, abortCh = iterateTransactions(db, from, to, false)
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second)
+ )
+ defer close(abortCh)
+ // Otherwise spin up the concurrent iterator and unindexer
+ blocks, txs := 0, 0
+ for delivery := range hashesCh {
+ DeleteTxLookupEntries(batch, delivery.hashes)
+ txs += len(delivery.hashes)
+ blocks++
+
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ // A batch counts the size of deletion as '1', so we need to flush more
+ // often than that.
+ if blocks%1000 == 0 {
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Unindexing transactions", "blocks", blocks, "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ log.Info("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
+}
diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go
deleted file mode 100644
index 31636ee..0000000
--- a/core/state/snapshot/difflayer_test.go
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package snapshot
-
-import (
- "bytes"
- "math/rand"
- "testing"
-
- "github.com/VictoriaMetrics/fastcache"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
-)
-
-func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} {
- copy := make(map[common.Hash]struct{})
- for hash := range destructs {
- copy[hash] = struct{}{}
- }
- return copy
-}
-
-func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte {
- copy := make(map[common.Hash][]byte)
- for hash, blob := range accounts {
- copy[hash] = blob
- }
- return copy
-}
-
-func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
- copy := make(map[common.Hash]map[common.Hash][]byte)
- for accHash, slots := range storage {
- copy[accHash] = make(map[common.Hash][]byte)
- for slotHash, blob := range slots {
- copy[accHash][slotHash] = blob
- }
- }
- return copy
-}
-
-// TestMergeBasics tests some simple merges
-func TestMergeBasics(t *testing.T) {
- var (
- destructs = make(map[common.Hash]struct{})
- accounts = make(map[common.Hash][]byte)
- storage = make(map[common.Hash]map[common.Hash][]byte)
- )
- // Fill up a parent
- for i := 0; i < 100; i++ {
- h := randomHash()
- data := randomAccount()
-
- accounts[h] = data
- if rand.Intn(4) == 0 {
- destructs[h] = struct{}{}
- }
- if rand.Intn(2) == 0 {
- accStorage := make(map[common.Hash][]byte)
- value := make([]byte, 32)
- rand.Read(value)
- accStorage[randomHash()] = value
- storage[h] = accStorage
- }
- }
- // Add some (identical) layers on top
- parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
- child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
- child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
- child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
- child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
- // And flatten
- merged := (child.flatten()).(*diffLayer)
-
- { // Check account lists
- if have, want := len(merged.accountList), 0; have != want {
- t.Errorf("accountList wrong: have %v, want %v", have, want)
- }
- if have, want := len(merged.AccountList()), len(accounts); have != want {
- t.Errorf("AccountList() wrong: have %v, want %v", have, want)
- }
- if have, want := len(merged.accountList), len(accounts); have != want {
- t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
- }
- }
- { // Check account drops
- if have, want := len(merged.destructSet), len(destructs); have != want {
- t.Errorf("accountDrop wrong: have %v, want %v", have, want)
- }
- }
- { // Check storage lists
- i := 0
- for aHash, sMap := range storage {
- if have, want := len(merged.storageList), i; have != want {
- t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
- }
- list, _ := merged.StorageList(aHash)
- if have, want := len(list), len(sMap); have != want {
- t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
- }
- if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
- t.Errorf("storageList wrong: have %v, want %v", have, want)
- }
- i++
- }
- }
-}
-
-// TestMergeDelete tests some deletion
-func TestMergeDelete(t *testing.T) {
- var (
- storage = make(map[common.Hash]map[common.Hash][]byte)
- )
- // Fill up a parent
- h1 := common.HexToHash("0x01")
- h2 := common.HexToHash("0x02")
-
- flipDrops := func() map[common.Hash]struct{} {
- return map[common.Hash]struct{}{
- h2: {},
- }
- }
- flipAccs := func() map[common.Hash][]byte {
- return map[common.Hash][]byte{
- h1: randomAccount(),
- }
- }
- flopDrops := func() map[common.Hash]struct{} {
- return map[common.Hash]struct{}{
- h1: {},
- }
- }
- flopAccs := func() map[common.Hash][]byte {
- return map[common.Hash][]byte{
- h2: randomAccount(),
- }
- }
- // Add some flipAccs-flopping layers on top
- parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
- child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
- child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
- child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
- child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
- child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
- child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
-
- if data, _ := child.Account(h1); data == nil {
- t.Errorf("last diff layer: expected %x account to be non-nil", h1)
- }
- if data, _ := child.Account(h2); data != nil {
- t.Errorf("last diff layer: expected %x account to be nil", h2)
- }
- if _, ok := child.destructSet[h1]; ok {
- t.Errorf("last diff layer: expected %x drop to be missing", h1)
- }
- if _, ok := child.destructSet[h2]; !ok {
- t.Errorf("last diff layer: expected %x drop to be present", h1)
- }
- // And flatten
- merged := (child.flatten()).(*diffLayer)
-
- if data, _ := merged.Account(h1); data == nil {
- t.Errorf("merged layer: expected %x account to be non-nil", h1)
- }
- if data, _ := merged.Account(h2); data != nil {
- t.Errorf("merged layer: expected %x account to be nil", h2)
- }
- if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
- t.Errorf("merged diff layer: expected %x drop to be present", h1)
- }
- if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
- t.Errorf("merged diff layer: expected %x drop to be present", h1)
- }
- // If we add more granular metering of memory, we can enable this again,
- // but it's not implemented for now
- //if have, want := merged.memory, child.memory; have != want {
- // t.Errorf("mem wrong: have %d, want %d", have, want)
- //}
-}
-
-// This tests that if we create a new account, and set a slot, and then merge
-// it, the lists will be correct.
-func TestInsertAndMerge(t *testing.T) {
- // Fill up a parent
- var (
- acc = common.HexToHash("0x01")
- slot = common.HexToHash("0x02")
- parent *diffLayer
- child *diffLayer
- )
- {
- var (
- destructs = make(map[common.Hash]struct{})
- accounts = make(map[common.Hash][]byte)
- storage = make(map[common.Hash]map[common.Hash][]byte)
- )
- parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
- }
- {
- var (
- destructs = make(map[common.Hash]struct{})
- accounts = make(map[common.Hash][]byte)
- storage = make(map[common.Hash]map[common.Hash][]byte)
- )
- accounts[acc] = randomAccount()
- storage[acc] = make(map[common.Hash][]byte)
- storage[acc][slot] = []byte{0x01}
- child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
- }
- // And flatten
- merged := (child.flatten()).(*diffLayer)
- { // Check that slot value is present
- have, _ := merged.Storage(acc, slot)
- if want := []byte{0x01}; !bytes.Equal(have, want) {
- t.Errorf("merged slot value wrong: have %x, want %x", have, want)
- }
- }
-}
-
-func emptyLayer() *diskLayer {
- return &diskLayer{
- diskdb: memorydb.New(),
- cache: fastcache.New(500 * 1024),
- }
-}
-
-// BenchmarkSearch checks how long it takes to find a non-existing key
-// BenchmarkSearch-6 200000 10481 ns/op (1K per layer)
-// BenchmarkSearch-6 200000 10760 ns/op (10K per layer)
-// BenchmarkSearch-6 100000 17866 ns/op
-//
-// BenchmarkSearch-6 500000 3723 ns/op (10k per layer, only top-level RLock()
-func BenchmarkSearch(b *testing.B) {
- // First, we set up 128 diff layers, with 1K items each
- fill := func(parent snapshot) *diffLayer {
- var (
- destructs = make(map[common.Hash]struct{})
- accounts = make(map[common.Hash][]byte)
- storage = make(map[common.Hash]map[common.Hash][]byte)
- )
- for i := 0; i < 10000; i++ {
- accounts[randomHash()] = randomAccount()
- }
- return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
- }
- var layer snapshot
- layer = emptyLayer()
- for i := 0; i < 128; i++ {
- layer = fill(layer)
- }
- key := crypto.Keccak256Hash([]byte{0x13, 0x38})
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- layer.AccountRLP(key)
- }
-}
-
-// BenchmarkSearchSlot checks how long it takes to find a non-existing key
-// - Number of layers: 128
-// - Each layers contains the account, with a couple of storage slots
-// BenchmarkSearchSlot-6 100000 14554 ns/op
-// BenchmarkSearchSlot-6 100000 22254 ns/op (when checking parent root using mutex)
-// BenchmarkSearchSlot-6 100000 14551 ns/op (when checking parent number using atomic)
-// With bloom filter:
-// BenchmarkSearchSlot-6 3467835 351 ns/op
-func BenchmarkSearchSlot(b *testing.B) {
- // First, we set up 128 diff layers, with 1K items each
- accountKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
- storageKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
- accountRLP := randomAccount()
- fill := func(parent snapshot) *diffLayer {
- var (
- destructs = make(map[common.Hash]struct{})
- accounts = make(map[common.Hash][]byte)
- storage = make(map[common.Hash]map[common.Hash][]byte)
- )
- accounts[accountKey] = accountRLP
-
- accStorage := make(map[common.Hash][]byte)
- for i := 0; i < 5; i++ {
- value := make([]byte, 32)
- rand.Read(value)
- accStorage[randomHash()] = value
- storage[accountKey] = accStorage
- }
- return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
- }
- var layer snapshot
- layer = emptyLayer()
- for i := 0; i < 128; i++ {
- layer = fill(layer)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- layer.Storage(accountKey, storageKey)
- }
-}
-
-// With accountList and sorting
-// BenchmarkFlatten-6 50 29890856 ns/op
-//
-// Without sorting and tracking accountlist
-// BenchmarkFlatten-6 300 5511511 ns/op
-func BenchmarkFlatten(b *testing.B) {
- fill := func(parent snapshot) *diffLayer {
- var (
- destructs = make(map[common.Hash]struct{})
- accounts = make(map[common.Hash][]byte)
- storage = make(map[common.Hash]map[common.Hash][]byte)
- )
- for i := 0; i < 100; i++ {
- accountKey := randomHash()
- accounts[accountKey] = randomAccount()
-
- accStorage := make(map[common.Hash][]byte)
- for i := 0; i < 20; i++ {
- value := make([]byte, 32)
- rand.Read(value)
- accStorage[randomHash()] = value
-
- }
- storage[accountKey] = accStorage
- }
- return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- var layer snapshot
- layer = emptyLayer()
- for i := 1; i < 128; i++ {
- layer = fill(layer)
- }
- b.StartTimer()
-
- for i := 1; i < 128; i++ {
- dl, ok := layer.(*diffLayer)
- if !ok {
- break
- }
- layer = dl.flatten()
- }
- b.StopTimer()
- }
-}
-
-// This test writes ~324M of diff layers to disk, spread over
-// - 128 individual layers,
-// - each with 200 accounts
-// - containing 200 slots
-//
-// BenchmarkJournal-6 1 1471373923 ns/ops
-// BenchmarkJournal-6 1 1208083335 ns/op // bufio writer
-func BenchmarkJournal(b *testing.B) {
- fill := func(parent snapshot) *diffLayer {
- var (
- destructs = make(map[common.Hash]struct{})
- accounts = make(map[common.Hash][]byte)
- storage = make(map[common.Hash]map[common.Hash][]byte)
- )
- for i := 0; i < 200; i++ {
- accountKey := randomHash()
- accounts[accountKey] = randomAccount()
-
- accStorage := make(map[common.Hash][]byte)
- for i := 0; i < 200; i++ {
- value := make([]byte, 32)
- rand.Read(value)
- accStorage[randomHash()] = value
-
- }
- storage[accountKey] = accStorage
- }
- return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
- }
- layer := snapshot(new(diskLayer))
- for i := 1; i < 128; i++ {
- layer = fill(layer)
- }
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- layer.Journal(new(bytes.Buffer))
- }
-}
diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
deleted file mode 100644
index 5df5efc..0000000
--- a/core/state/snapshot/disklayer_test.go
+++ /dev/null
@@ -1,511 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package snapshot
-
-import (
- "bytes"
- "io/ioutil"
- "os"
- "testing"
-
- "github.com/VictoriaMetrics/fastcache"
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/ethdb/leveldb"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
-)
-
-// reverse reverses the contents of a byte slice. It's used to update random accs
-// with deterministic changes.
-func reverse(blob []byte) []byte {
- res := make([]byte, len(blob))
- for i, b := range blob {
- res[len(blob)-1-i] = b
- }
- return res
-}
-
-// Tests that merging something into a disk layer persists it into the database
-// and invalidates any previously written and cached values.
-func TestDiskMerge(t *testing.T) {
- // Create some accounts in the disk layer
- db := memorydb.New()
-
- var (
- accNoModNoCache = common.Hash{0x1}
- accNoModCache = common.Hash{0x2}
- accModNoCache = common.Hash{0x3}
- accModCache = common.Hash{0x4}
- accDelNoCache = common.Hash{0x5}
- accDelCache = common.Hash{0x6}
- conNoModNoCache = common.Hash{0x7}
- conNoModNoCacheSlot = common.Hash{0x70}
- conNoModCache = common.Hash{0x8}
- conNoModCacheSlot = common.Hash{0x80}
- conModNoCache = common.Hash{0x9}
- conModNoCacheSlot = common.Hash{0x90}
- conModCache = common.Hash{0xa}
- conModCacheSlot = common.Hash{0xa0}
- conDelNoCache = common.Hash{0xb}
- conDelNoCacheSlot = common.Hash{0xb0}
- conDelCache = common.Hash{0xc}
- conDelCacheSlot = common.Hash{0xc0}
- conNukeNoCache = common.Hash{0xd}
- conNukeNoCacheSlot = common.Hash{0xd0}
- conNukeCache = common.Hash{0xe}
- conNukeCacheSlot = common.Hash{0xe0}
- baseRoot = randomHash()
- diffRoot = randomHash()
- )
-
- rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:])
- rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:])
- rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:])
- rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:])
- rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:])
- rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:])
-
- rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:])
- rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
- rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:])
- rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
- rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:])
- rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
- rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:])
- rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:])
- rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:])
- rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
- rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:])
- rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:])
-
- rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:])
- rawdb.WriteStorageSnapshot(db, conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
- rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:])
- rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
-
- rawdb.WriteSnapshotRoot(db, baseRoot)
-
- // Create a disk layer based on the above and cache in some data
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- baseRoot: &diskLayer{
- diskdb: db,
- cache: fastcache.New(500 * 1024),
- root: baseRoot,
- },
- },
- }
- base := snaps.Snapshot(baseRoot)
- base.AccountRLP(accNoModCache)
- base.AccountRLP(accModCache)
- base.AccountRLP(accDelCache)
- base.Storage(conNoModCache, conNoModCacheSlot)
- base.Storage(conModCache, conModCacheSlot)
- base.Storage(conDelCache, conDelCacheSlot)
- base.Storage(conNukeCache, conNukeCacheSlot)
-
- // Modify or delete some accounts, flatten everything onto disk
- if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
- accDelNoCache: {},
- accDelCache: {},
- conNukeNoCache: {},
- conNukeCache: {},
- }, map[common.Hash][]byte{
- accModNoCache: reverse(accModNoCache[:]),
- accModCache: reverse(accModCache[:]),
- }, map[common.Hash]map[common.Hash][]byte{
- conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
- conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])},
- conDelNoCache: {conDelNoCacheSlot: nil},
- conDelCache: {conDelCacheSlot: nil},
- }); err != nil {
- t.Fatalf("failed to update snapshot tree: %v", err)
- }
- if err := snaps.Cap(diffRoot, 0); err != nil {
- t.Fatalf("failed to flatten snapshot tree: %v", err)
- }
- // Retrieve all the data through the disk layer and validate it
- base = snaps.Snapshot(diffRoot)
- if _, ok := base.(*diskLayer); !ok {
- t.Fatalf("update not flattend into the disk layer")
- }
-
- // assertAccount ensures that an account matches the given blob.
- assertAccount := func(account common.Hash, data []byte) {
- t.Helper()
- blob, err := base.AccountRLP(account)
- if err != nil {
- t.Errorf("account access (%x) failed: %v", account, err)
- } else if !bytes.Equal(blob, data) {
- t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data)
- }
- }
- assertAccount(accNoModNoCache, accNoModNoCache[:])
- assertAccount(accNoModCache, accNoModCache[:])
- assertAccount(accModNoCache, reverse(accModNoCache[:]))
- assertAccount(accModCache, reverse(accModCache[:]))
- assertAccount(accDelNoCache, nil)
- assertAccount(accDelCache, nil)
-
- // assertStorage ensures that a storage slot matches the given blob.
- assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
- t.Helper()
- blob, err := base.Storage(account, slot)
- if err != nil {
- t.Errorf("storage access (%x:%x) failed: %v", account, slot, err)
- } else if !bytes.Equal(blob, data) {
- t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
- }
- }
- assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
- assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
- assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
- assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
- assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
- assertStorage(conDelCache, conDelCacheSlot, nil)
- assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
- assertStorage(conNukeCache, conNukeCacheSlot, nil)
-
- // Retrieve all the data directly from the database and validate it
-
- // assertDatabaseAccount ensures that an account from the database matches the given blob.
- assertDatabaseAccount := func(account common.Hash, data []byte) {
- t.Helper()
- if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) {
- t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data)
- }
- }
- assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
- assertDatabaseAccount(accNoModCache, accNoModCache[:])
- assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
- assertDatabaseAccount(accModCache, reverse(accModCache[:]))
- assertDatabaseAccount(accDelNoCache, nil)
- assertDatabaseAccount(accDelCache, nil)
-
- // assertDatabaseStorage ensures that a storage slot from the database matches the given blob.
- assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
- t.Helper()
- if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) {
- t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
- }
- }
- assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
- assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
- assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
- assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
- assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
- assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
- assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
- assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
-}
-
-// Tests that merging something into a disk layer persists it into the database
-// and invalidates any previously written and cached values, discarding anything
-// after the in-progress generation marker.
-func TestDiskPartialMerge(t *testing.T) {
- // Iterate the test a few times to ensure we pick various internal orderings
- // for the data slots as well as the progress marker.
- for i := 0; i < 1024; i++ {
- // Create some accounts in the disk layer
- db := memorydb.New()
-
- var (
- accNoModNoCache = randomHash()
- accNoModCache = randomHash()
- accModNoCache = randomHash()
- accModCache = randomHash()
- accDelNoCache = randomHash()
- accDelCache = randomHash()
- conNoModNoCache = randomHash()
- conNoModNoCacheSlot = randomHash()
- conNoModCache = randomHash()
- conNoModCacheSlot = randomHash()
- conModNoCache = randomHash()
- conModNoCacheSlot = randomHash()
- conModCache = randomHash()
- conModCacheSlot = randomHash()
- conDelNoCache = randomHash()
- conDelNoCacheSlot = randomHash()
- conDelCache = randomHash()
- conDelCacheSlot = randomHash()
- conNukeNoCache = randomHash()
- conNukeNoCacheSlot = randomHash()
- conNukeCache = randomHash()
- conNukeCacheSlot = randomHash()
- baseRoot = randomHash()
- diffRoot = randomHash()
- genMarker = append(randomHash().Bytes(), randomHash().Bytes()...)
- )
-
- // insertAccount injects an account into the database if it's after the
- // generator marker, drops the op otherwise. This is needed to seed the
- // database with a valid starting snapshot.
- insertAccount := func(account common.Hash, data []byte) {
- if bytes.Compare(account[:], genMarker) <= 0 {
- rawdb.WriteAccountSnapshot(db, account, data[:])
- }
- }
- insertAccount(accNoModNoCache, accNoModNoCache[:])
- insertAccount(accNoModCache, accNoModCache[:])
- insertAccount(accModNoCache, accModNoCache[:])
- insertAccount(accModCache, accModCache[:])
- insertAccount(accDelNoCache, accDelNoCache[:])
- insertAccount(accDelCache, accDelCache[:])
-
- // insertStorage injects a storage slot into the database if it's after
- // the generator marker, drops the op otherwise. This is needed to seed
- // the database with a valid starting snapshot.
- insertStorage := func(account common.Hash, slot common.Hash, data []byte) {
- if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 {
- rawdb.WriteStorageSnapshot(db, account, slot, data[:])
- }
- }
- insertAccount(conNoModNoCache, conNoModNoCache[:])
- insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
- insertAccount(conNoModCache, conNoModCache[:])
- insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
- insertAccount(conModNoCache, conModNoCache[:])
- insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
- insertAccount(conModCache, conModCache[:])
- insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
- insertAccount(conDelNoCache, conDelNoCache[:])
- insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
- insertAccount(conDelCache, conDelCache[:])
- insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
-
- insertAccount(conNukeNoCache, conNukeNoCache[:])
- insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
- insertAccount(conNukeCache, conNukeCache[:])
- insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
-
- rawdb.WriteSnapshotRoot(db, baseRoot)
-
- // Create a disk layer based on the above using a random progress marker
- // and cache in some data.
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- baseRoot: &diskLayer{
- diskdb: db,
- cache: fastcache.New(500 * 1024),
- root: baseRoot,
- },
- },
- }
- snaps.layers[baseRoot].(*diskLayer).genMarker = genMarker
- base := snaps.Snapshot(baseRoot)
-
- // assertAccount ensures that an account matches the given blob if it's
- // already covered by the disk snapshot, and errors out otherwise.
- assertAccount := func(account common.Hash, data []byte) {
- t.Helper()
- blob, err := base.AccountRLP(account)
- if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet {
- t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob)
- }
- if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
- t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
- }
- }
- assertAccount(accNoModCache, accNoModCache[:])
- assertAccount(accModCache, accModCache[:])
- assertAccount(accDelCache, accDelCache[:])
-
- // assertStorage ensures that a storage slot matches the given blob if
- // it's already covered by the disk snapshot, and errors out otherwise.
- assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
- t.Helper()
- blob, err := base.Storage(account, slot)
- if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet {
- t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
- }
- if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
- t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
- }
- }
- assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
- assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
- assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
- assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
-
- // Modify or delete some accounts, flatten everything onto disk
- if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
- accDelNoCache: {},
- accDelCache: {},
- conNukeNoCache: {},
- conNukeCache: {},
- }, map[common.Hash][]byte{
- accModNoCache: reverse(accModNoCache[:]),
- accModCache: reverse(accModCache[:]),
- }, map[common.Hash]map[common.Hash][]byte{
- conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
- conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])},
- conDelNoCache: {conDelNoCacheSlot: nil},
- conDelCache: {conDelCacheSlot: nil},
- }); err != nil {
- t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
- }
- if err := snaps.Cap(diffRoot, 0); err != nil {
- t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err)
- }
- // Retrieve all the data through the disk layer and validate it
- base = snaps.Snapshot(diffRoot)
- if _, ok := base.(*diskLayer); !ok {
-			t.Fatalf("test %d: update not flattened into the disk layer", i)
- }
- assertAccount(accNoModNoCache, accNoModNoCache[:])
- assertAccount(accNoModCache, accNoModCache[:])
- assertAccount(accModNoCache, reverse(accModNoCache[:]))
- assertAccount(accModCache, reverse(accModCache[:]))
- assertAccount(accDelNoCache, nil)
- assertAccount(accDelCache, nil)
-
- assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
- assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
- assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
- assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
- assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
- assertStorage(conDelCache, conDelCacheSlot, nil)
- assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
- assertStorage(conNukeCache, conNukeCacheSlot, nil)
-
- // Retrieve all the data directly from the database and validate it
-
- // assertDatabaseAccount ensures that an account inside the database matches
- // the given blob if it's already covered by the disk snapshot, and does not
- // exist otherwise.
- assertDatabaseAccount := func(account common.Hash, data []byte) {
- t.Helper()
- blob := rawdb.ReadAccountSnapshot(db, account)
- if bytes.Compare(account[:], genMarker) > 0 && blob != nil {
- t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob)
- }
- if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
- t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
- }
- }
- assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
- assertDatabaseAccount(accNoModCache, accNoModCache[:])
- assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
- assertDatabaseAccount(accModCache, reverse(accModCache[:]))
- assertDatabaseAccount(accDelNoCache, nil)
- assertDatabaseAccount(accDelCache, nil)
-
- // assertDatabaseStorage ensures that a storage slot inside the database
- // matches the given blob if it's already covered by the disk snapshot,
- // and does not exist otherwise.
- assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
- t.Helper()
- blob := rawdb.ReadStorageSnapshot(db, account, slot)
- if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil {
- t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
- }
- if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
- t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
- }
- }
- assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
- assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
- assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
- assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
- assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
- assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
- assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
- assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
- }
-}
-
-// Tests that merging something into a disk layer persists it into the database
-// and invalidates any previously written and cached values, discarding anything
-// after the in-progress generation marker.
-//
-// This test case is a tiny specialized case of TestDiskPartialMerge, which tests
-// some very specific cornercases that random tests won't ever trigger.
-func TestDiskMidAccountPartialMerge(t *testing.T) {
- // TODO(@karalabe) ?
-}
-
-// TestDiskSeek tests that seek-operations work on the disk layer
-func TestDiskSeek(t *testing.T) {
- // Create some accounts in the disk layer
- var db ethdb.Database
-
- if dir, err := ioutil.TempDir("", "disklayer-test"); err != nil {
- t.Fatal(err)
- } else {
- defer os.RemoveAll(dir)
- diskdb, err := leveldb.New(dir, 256, 0, "")
- if err != nil {
- t.Fatal(err)
- }
- db = rawdb.NewDatabase(diskdb)
- }
- // Fill even keys [0,2,4...]
- for i := 0; i < 0xff; i += 2 {
- acc := common.Hash{byte(i)}
- rawdb.WriteAccountSnapshot(db, acc, acc[:])
- }
-	// Add a 'higher' key, with an incorrect (higher) prefix
- highKey := []byte{rawdb.SnapshotAccountPrefix[0] + 1}
- db.Put(highKey, []byte{0xff, 0xff})
-
- baseRoot := randomHash()
- rawdb.WriteSnapshotRoot(db, baseRoot)
-
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- baseRoot: &diskLayer{
- diskdb: db,
- cache: fastcache.New(500 * 1024),
- root: baseRoot,
- },
- },
- }
- // Test some different seek positions
- type testcase struct {
- pos byte
- expkey byte
- }
- var cases = []testcase{
- {0xff, 0x55}, // this should exit immediately without checking key
- {0x01, 0x02},
- {0xfe, 0xfe},
- {0xfd, 0xfe},
- {0x00, 0x00},
- }
- for i, tc := range cases {
- it, err := snaps.AccountIterator(baseRoot, common.Hash{tc.pos})
- if err != nil {
- t.Fatalf("case %d, error: %v", i, err)
- }
- count := 0
- for it.Next() {
- k, v, err := it.Hash()[0], it.Account()[0], it.Error()
- if err != nil {
- t.Fatalf("test %d, item %d, error: %v", i, count, err)
- }
- // First item in iterator should have the expected key
- if count == 0 && k != tc.expkey {
- t.Fatalf("test %d, item %d, got %v exp %v", i, count, k, tc.expkey)
- }
- count++
- if v != k {
- t.Fatalf("test %d, item %d, value wrong, got %v exp %v", i, count, v, k)
- }
- }
- }
-}
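
The partial-merge test above relies on a single invariant: the disk layer only seeds and serves keys that sort at or before the in-progress generation marker, which is why every insertAccount/insertStorage call and every assertion is gated on a lexicographic comparison against genMarker. The snippet below is a minimal, self-contained sketch of that gating, not code from this repository; the covered helper and the map standing in for the snapshot database are illustrative names only.

package main

import (
	"bytes"
	"fmt"
)

// covered reports whether a key sorts at or before the in-progress
// generation marker, i.e. whether the snapshot generator has already
// passed it and the key may be read from (or written to) the disk layer.
func covered(key, genMarker []byte) bool {
	return bytes.Compare(key, genMarker) <= 0
}

func main() {
	genMarker := []byte{0x80} // generation has progressed up to keys starting with 0x80

	db := map[string][]byte{} // stand-in for the snapshot key-value store
	for _, key := range [][]byte{{0x10}, {0x80}, {0xf0}} {
		if covered(key, genMarker) { // mirrors the insertAccount gating above
			db[fmt.Sprintf("%x", key)] = key
		}
	}
	fmt.Println(len(db)) // 2: 0x10 and 0x80 are covered, 0xf0 is not
}
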
diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go
deleted file mode 100644
index ef4859c..0000000
--- a/core/state/snapshot/iterator_test.go
+++ /dev/null
@@ -1,1046 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package snapshot
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "math/rand"
- "testing"
-
- "github.com/VictoriaMetrics/fastcache"
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ethereum/go-ethereum/common"
-)
-
-// TestAccountIteratorBasics tests some simple single-layer (diff and disk) iteration
-func TestAccountIteratorBasics(t *testing.T) {
- var (
- destructs = make(map[common.Hash]struct{})
- accounts = make(map[common.Hash][]byte)
- storage = make(map[common.Hash]map[common.Hash][]byte)
- )
- // Fill up a parent
- for i := 0; i < 100; i++ {
- h := randomHash()
- data := randomAccount()
-
- accounts[h] = data
- if rand.Intn(4) == 0 {
- destructs[h] = struct{}{}
- }
- if rand.Intn(2) == 0 {
- accStorage := make(map[common.Hash][]byte)
- value := make([]byte, 32)
- rand.Read(value)
- accStorage[randomHash()] = value
- storage[h] = accStorage
- }
- }
- // Add some (identical) layers on top
- diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
- it := diffLayer.AccountIterator(common.Hash{})
- verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
-
- diskLayer := diffToDisk(diffLayer)
- it = diskLayer.AccountIterator(common.Hash{})
- verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
-}
-
-// TestStorageIteratorBasics tests some simple single-layer (diff and disk) iteration for storage
-func TestStorageIteratorBasics(t *testing.T) {
- var (
- nilStorage = make(map[common.Hash]int)
- accounts = make(map[common.Hash][]byte)
- storage = make(map[common.Hash]map[common.Hash][]byte)
- )
- // Fill some random data
- for i := 0; i < 10; i++ {
- h := randomHash()
- accounts[h] = randomAccount()
-
- accStorage := make(map[common.Hash][]byte)
- value := make([]byte, 32)
-
- var nilstorage int
- for i := 0; i < 100; i++ {
- rand.Read(value)
- if rand.Intn(2) == 0 {
- accStorage[randomHash()] = common.CopyBytes(value)
- } else {
- accStorage[randomHash()] = nil // delete slot
- nilstorage += 1
- }
- }
- storage[h] = accStorage
- nilStorage[h] = nilstorage
- }
- // Add some (identical) layers on top
- diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage))
- for account := range accounts {
- it, _ := diffLayer.StorageIterator(account, common.Hash{})
- verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
- }
-
- diskLayer := diffToDisk(diffLayer)
- for account := range accounts {
- it, _ := diskLayer.StorageIterator(account, common.Hash{})
- verifyIterator(t, 100-nilStorage[account], it, verifyNothing) // Nil is allowed for single layer iterator
- }
-}
-
-type testIterator struct {
- values []byte
-}
-
-func newTestIterator(values ...byte) *testIterator {
- return &testIterator{values}
-}
-
-func (ti *testIterator) Seek(common.Hash) {
- panic("implement me")
-}
-
-func (ti *testIterator) Next() bool {
- ti.values = ti.values[1:]
- return len(ti.values) > 0
-}
-
-func (ti *testIterator) Error() error {
- return nil
-}
-
-func (ti *testIterator) Hash() common.Hash {
- return common.BytesToHash([]byte{ti.values[0]})
-}
-
-func (ti *testIterator) Account() []byte {
- return nil
-}
-
-func (ti *testIterator) Slot() []byte {
- return nil
-}
-
-func (ti *testIterator) Release() {}
-
-func TestFastIteratorBasics(t *testing.T) {
- type testCase struct {
- lists [][]byte
- expKeys []byte
- }
- for i, tc := range []testCase{
- {lists: [][]byte{{0, 1, 8}, {1, 2, 8}, {2, 9}, {4},
- {7, 14, 15}, {9, 13, 15, 16}},
- expKeys: []byte{0, 1, 2, 4, 7, 8, 9, 13, 14, 15, 16}},
- {lists: [][]byte{{0, 8}, {1, 2, 8}, {7, 14, 15}, {8, 9},
- {9, 10}, {10, 13, 15, 16}},
- expKeys: []byte{0, 1, 2, 7, 8, 9, 10, 13, 14, 15, 16}},
- } {
- var iterators []*weightedIterator
- for i, data := range tc.lists {
- it := newTestIterator(data...)
- iterators = append(iterators, &weightedIterator{it, i})
- }
- fi := &fastIterator{
- iterators: iterators,
- initiated: false,
- }
- count := 0
- for fi.Next() {
- if got, exp := fi.Hash()[31], tc.expKeys[count]; exp != got {
- t.Errorf("tc %d, [%d]: got %d exp %d", i, count, got, exp)
- }
- count++
- }
- }
-}
-
-type verifyContent int
-
-const (
- verifyNothing verifyContent = iota
- verifyAccount
- verifyStorage
-)
-
-func verifyIterator(t *testing.T, expCount int, it Iterator, verify verifyContent) {
- t.Helper()
-
- var (
- count = 0
- last = common.Hash{}
- )
- for it.Next() {
- hash := it.Hash()
- if bytes.Compare(last[:], hash[:]) >= 0 {
- t.Errorf("wrong order: %x >= %x", last, hash)
- }
- count++
- if verify == verifyAccount && len(it.(AccountIterator).Account()) == 0 {
- t.Errorf("iterator returned nil-value for hash %x", hash)
- } else if verify == verifyStorage && len(it.(StorageIterator).Slot()) == 0 {
- t.Errorf("iterator returned nil-value for hash %x", hash)
- }
- last = hash
- }
- if count != expCount {
- t.Errorf("iterator count mismatch: have %d, want %d", count, expCount)
- }
- if err := it.Error(); err != nil {
- t.Errorf("iterator failed: %v", err)
- }
-}
-
-// TestAccountIteratorTraversal tests some simple multi-layer iteration.
-func TestAccountIteratorTraversal(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
- randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
-
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
- randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
-
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
- randomAccountSet("0xcc", "0xf0", "0xff"), nil)
-
- // Verify the single and multi-layer iterators
- head := snaps.Snapshot(common.HexToHash("0x04"))
-
- verifyIterator(t, 3, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing)
- verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
-
- it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
- verifyIterator(t, 7, it, verifyAccount)
- it.Release()
-
-	// Test that the functionality still works after persisting some of the
-	// bottom-most layers into the disk.
- limit := aggregatorMemoryLimit
- defer func() {
- aggregatorMemoryLimit = limit
- }()
- aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
- snaps.Cap(common.HexToHash("0x04"), 2)
- verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
-
- it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
- verifyIterator(t, 7, it, verifyAccount)
- it.Release()
-}
-
-func TestStorageIteratorTraversal(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
- randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
-
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
- randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))
-
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
- randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
-
- // Verify the single and multi-layer iterators
- head := snaps.Snapshot(common.HexToHash("0x04"))
-
- diffIter, _ := head.(snapshot).StorageIterator(common.HexToHash("0xaa"), common.Hash{})
- verifyIterator(t, 3, diffIter, verifyNothing)
- verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
-
- it, _ := snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
- verifyIterator(t, 6, it, verifyStorage)
- it.Release()
-
-	// Test that the functionality still works after persisting some of the
-	// bottom-most layers into the disk.
- limit := aggregatorMemoryLimit
- defer func() {
- aggregatorMemoryLimit = limit
- }()
- aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
- snaps.Cap(common.HexToHash("0x04"), 2)
- verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
-
- it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
- verifyIterator(t, 6, it, verifyStorage)
- it.Release()
-}
-
-// TestAccountIteratorTraversalValues tests some multi-layer iteration, where we
-// also expect the correct values to show up.
-func TestAccountIteratorTraversalValues(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Create a batch of account sets to seed subsequent layers with
- var (
- a = make(map[common.Hash][]byte)
- b = make(map[common.Hash][]byte)
- c = make(map[common.Hash][]byte)
- d = make(map[common.Hash][]byte)
- e = make(map[common.Hash][]byte)
- f = make(map[common.Hash][]byte)
- g = make(map[common.Hash][]byte)
- h = make(map[common.Hash][]byte)
- )
- for i := byte(2); i < 0xff; i++ {
- a[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 0, i))
- if i > 20 && i%2 == 0 {
- b[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 1, i))
- }
- if i%4 == 0 {
- c[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 2, i))
- }
- if i%7 == 0 {
- d[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 3, i))
- }
- if i%8 == 0 {
- e[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 4, i))
- }
- if i > 50 || i < 85 {
- f[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 5, i))
- }
- if i%64 == 0 {
- g[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 6, i))
- }
- if i%128 == 0 {
- h[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 7, i))
- }
- }
- // Assemble a stack of snapshots from the account layers
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil)
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil)
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil)
- snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil)
- snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil)
- snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil)
- snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil)
- snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil)
-
- it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
- head := snaps.Snapshot(common.HexToHash("0x09"))
- for it.Next() {
- hash := it.Hash()
- want, err := head.AccountRLP(hash)
- if err != nil {
- t.Fatalf("failed to retrieve expected account: %v", err)
- }
- if have := it.Account(); !bytes.Equal(want, have) {
- t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want)
- }
- }
- it.Release()
-
-	// Test that the functionality still works after persisting some of the
-	// bottom-most layers into the disk.
- limit := aggregatorMemoryLimit
- defer func() {
- aggregatorMemoryLimit = limit
- }()
- aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
- snaps.Cap(common.HexToHash("0x09"), 2)
-
- it, _ = snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
- for it.Next() {
- hash := it.Hash()
- want, err := head.AccountRLP(hash)
- if err != nil {
- t.Fatalf("failed to retrieve expected account: %v", err)
- }
- if have := it.Account(); !bytes.Equal(want, have) {
- t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want)
- }
- }
- it.Release()
-}
-
-func TestStorageIteratorTraversalValues(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- wrapStorage := func(storage map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
- return map[common.Hash]map[common.Hash][]byte{
- common.HexToHash("0xaa"): storage,
- }
- }
- // Create a batch of storage sets to seed subsequent layers with
- var (
- a = make(map[common.Hash][]byte)
- b = make(map[common.Hash][]byte)
- c = make(map[common.Hash][]byte)
- d = make(map[common.Hash][]byte)
- e = make(map[common.Hash][]byte)
- f = make(map[common.Hash][]byte)
- g = make(map[common.Hash][]byte)
- h = make(map[common.Hash][]byte)
- )
- for i := byte(2); i < 0xff; i++ {
- a[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 0, i))
- if i > 20 && i%2 == 0 {
- b[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 1, i))
- }
- if i%4 == 0 {
- c[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 2, i))
- }
- if i%7 == 0 {
- d[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 3, i))
- }
- if i%8 == 0 {
- e[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 4, i))
- }
- if i > 50 || i < 85 {
- f[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 5, i))
- }
- if i%64 == 0 {
- g[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 6, i))
- }
- if i%128 == 0 {
- h[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 7, i))
- }
- }
-	// Assemble a stack of snapshots from the storage layers
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
- snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
- snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
-	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(f))
- snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
- snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))
-
- it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
- head := snaps.Snapshot(common.HexToHash("0x09"))
- for it.Next() {
- hash := it.Hash()
- want, err := head.Storage(common.HexToHash("0xaa"), hash)
- if err != nil {
- t.Fatalf("failed to retrieve expected storage slot: %v", err)
- }
- if have := it.Slot(); !bytes.Equal(want, have) {
- t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want)
- }
- }
- it.Release()
-
-	// Test that the functionality still works after persisting some of the
-	// bottom-most layers into the disk.
- limit := aggregatorMemoryLimit
- defer func() {
- aggregatorMemoryLimit = limit
- }()
- aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
- snaps.Cap(common.HexToHash("0x09"), 2)
-
- it, _ = snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
- for it.Next() {
- hash := it.Hash()
- want, err := head.Storage(common.HexToHash("0xaa"), hash)
- if err != nil {
- t.Fatalf("failed to retrieve expected slot: %v", err)
- }
- if have := it.Slot(); !bytes.Equal(want, have) {
- t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want)
- }
- }
- it.Release()
-}
-
-// This testcase is notorious: all layers contain the exact same 200 accounts.
-func TestAccountIteratorLargeTraversal(t *testing.T) {
- // Create a custom account factory to recreate the same addresses
- makeAccounts := func(num int) map[common.Hash][]byte {
- accounts := make(map[common.Hash][]byte)
- for i := 0; i < num; i++ {
- h := common.Hash{}
- binary.BigEndian.PutUint64(h[:], uint64(i+1))
- accounts[h] = randomAccount()
- }
- return accounts
- }
- // Build up a large stack of snapshots
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- for i := 1; i < 128; i++ {
- snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
- }
- // Iterate the entire stack and ensure everything is hit only once
- head := snaps.Snapshot(common.HexToHash("0x80"))
- verifyIterator(t, 200, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing)
- verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
-
- it, _ := snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{})
- verifyIterator(t, 200, it, verifyAccount)
- it.Release()
-
-	// Test that the functionality still works after persisting some of the
-	// bottom-most layers into the disk.
- limit := aggregatorMemoryLimit
- defer func() {
- aggregatorMemoryLimit = limit
- }()
- aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
- snaps.Cap(common.HexToHash("0x80"), 2)
-
- verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
-
- it, _ = snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{})
- verifyIterator(t, 200, it, verifyAccount)
- it.Release()
-}
-
-// TestAccountIteratorFlattening tests what happens when we
-// - have a live iterator on child C (parent C1 -> C2 .. CN)
-// - flatten C2 all the way into CN
-// - continue iterating
-func TestAccountIteratorFlattening(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Create a stack of diffs on top
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
- randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
-
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
- randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
-
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
- randomAccountSet("0xcc", "0xf0", "0xff"), nil)
-
- // Create an iterator and flatten the data from underneath it
- it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
- defer it.Release()
-
- if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil {
- t.Fatalf("failed to flatten snapshot stack: %v", err)
- }
- //verifyIterator(t, 7, it)
-}
-
-func TestAccountIteratorSeek(t *testing.T) {
- // Create a snapshot stack with some initial data
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
- randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
-
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
- randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
-
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
- randomAccountSet("0xcc", "0xf0", "0xff"), nil)
-
- // Account set is now
- // 02: aa, ee, f0, ff
- // 03: aa, bb, dd, ee, f0 (, f0), ff
- // 04: aa, bb, cc, dd, ee, f0 (, f0), ff (, ff)
- // Construct various iterators and ensure their traversal is correct
- it, _ := snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xdd"))
- defer it.Release()
- verifyIterator(t, 3, it, verifyAccount) // expected: ee, f0, ff
-
- it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"))
- defer it.Release()
- verifyIterator(t, 4, it, verifyAccount) // expected: aa, ee, f0, ff
-
- it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff"))
- defer it.Release()
- verifyIterator(t, 1, it, verifyAccount) // expected: ff
-
- it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff1"))
- defer it.Release()
- verifyIterator(t, 0, it, verifyAccount) // expected: nothing
-
- it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xbb"))
- defer it.Release()
- verifyIterator(t, 6, it, verifyAccount) // expected: bb, cc, dd, ee, f0, ff
-
- it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xef"))
- defer it.Release()
- verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
-
- it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xf0"))
- defer it.Release()
- verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
-
- it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff"))
- defer it.Release()
- verifyIterator(t, 1, it, verifyAccount) // expected: ff
-
- it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff1"))
- defer it.Release()
- verifyIterator(t, 0, it, verifyAccount) // expected: nothing
-}
-
-func TestStorageIteratorSeek(t *testing.T) {
- // Create a snapshot stack with some initial data
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
- randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
-
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
- randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil))
-
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
- randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil))
-
-	// Storage slot set is now
- // 02: 01, 03, 05
- // 03: 01, 02, 03, 05 (, 05), 06
- // 04: 01(, 01), 02, 03, 05(, 05, 05), 06, 08
- // Construct various iterators and ensure their traversal is correct
- it, _ := snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
- defer it.Release()
- verifyIterator(t, 3, it, verifyStorage) // expected: 01, 03, 05
-
- it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x02"))
- defer it.Release()
- verifyIterator(t, 2, it, verifyStorage) // expected: 03, 05
-
- it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x5"))
- defer it.Release()
- verifyIterator(t, 1, it, verifyStorage) // expected: 05
-
- it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x6"))
- defer it.Release()
- verifyIterator(t, 0, it, verifyStorage) // expected: nothing
-
- it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
- defer it.Release()
- verifyIterator(t, 6, it, verifyStorage) // expected: 01, 02, 03, 05, 06, 08
-
- it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x05"))
- defer it.Release()
- verifyIterator(t, 3, it, verifyStorage) // expected: 05, 06, 08
-
- it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x08"))
- defer it.Release()
- verifyIterator(t, 1, it, verifyStorage) // expected: 08
-
- it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x09"))
- defer it.Release()
- verifyIterator(t, 0, it, verifyStorage) // expected: nothing
-}
-
-// TestAccountIteratorDeletions tests that the iterator behaves correctly when there are
-// deleted accounts (where the Account() value is nil). The iterator
-// should not output any accounts or nil-values for those cases.
-func TestAccountIteratorDeletions(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
- nil, randomAccountSet("0x11", "0x22", "0x33"), nil)
-
- deleted := common.HexToHash("0x22")
- destructed := map[common.Hash]struct{}{
- deleted: {},
- }
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
- destructed, randomAccountSet("0x11", "0x33"), nil)
-
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
- nil, randomAccountSet("0x33", "0x44", "0x55"), nil)
-
- // The output should be 11,33,44,55
- it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
- // Do a quick check
- verifyIterator(t, 4, it, verifyAccount)
- it.Release()
-
- // And a more detailed verification that we indeed do not see '0x22'
- it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
- defer it.Release()
- for it.Next() {
- hash := it.Hash()
- if it.Account() == nil {
- t.Errorf("iterator returned nil-value for hash %x", hash)
- }
- if hash == deleted {
- t.Errorf("expected deleted elem %x to not be returned by iterator", deleted)
- }
- }
-}
-
-func TestStorageIteratorDeletions(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
- randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
-
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
- randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}))
-
- // The output should be 02,04,05,06
- it, _ := snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
- verifyIterator(t, 4, it, verifyStorage)
- it.Release()
-
- // The output should be 04,05,06
- it, _ = snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.HexToHash("0x03"))
- verifyIterator(t, 3, it, verifyStorage)
- it.Release()
-
- // Destruct the whole storage
- destructed := map[common.Hash]struct{}{
- common.HexToHash("0xaa"): {},
- }
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil)
-
- it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
- verifyIterator(t, 0, it, verifyStorage)
- it.Release()
-
- // Re-insert the slots of the same account
- snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil,
- randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil))
-
- // The output should be 07,08,09
- it, _ = snaps.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
- verifyIterator(t, 3, it, verifyStorage)
- it.Release()
-
- // Destruct the whole storage but re-create the account in the same layer
- snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
- it, _ = snaps.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
- verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
- it.Release()
-
- verifyIterator(t, 2, snaps.Snapshot(common.HexToHash("0x06")).(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
-}
-
-// BenchmarkAccountIteratorTraversal is a bit notorious -- all layers contain the
-// exact same 200 accounts. That means that we need to process 2000 items, but
-// only spit out 200 values eventually.
-//
-// The value-fetching benchmark is easy on the binary iterator, since it never has to reach
-// down at any depth for retrieving the values -- all are on the topmost layer
-//
-// BenchmarkAccountIteratorTraversal/binary_iterator_keys-6 2239 483674 ns/op
-// BenchmarkAccountIteratorTraversal/binary_iterator_values-6 2403 501810 ns/op
-// BenchmarkAccountIteratorTraversal/fast_iterator_keys-6 1923 677966 ns/op
-// BenchmarkAccountIteratorTraversal/fast_iterator_values-6 1741 649967 ns/op
-func BenchmarkAccountIteratorTraversal(b *testing.B) {
- // Create a custom account factory to recreate the same addresses
- makeAccounts := func(num int) map[common.Hash][]byte {
- accounts := make(map[common.Hash][]byte)
- for i := 0; i < num; i++ {
- h := common.Hash{}
- binary.BigEndian.PutUint64(h[:], uint64(i+1))
- accounts[h] = randomAccount()
- }
- return accounts
- }
- // Build up a large stack of snapshots
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- for i := 1; i <= 100; i++ {
- snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
- }
-	// We call this once before the benchmark, so the creation of the
-	// sorted account lists is not included in the results.
- head := snaps.Snapshot(common.HexToHash("0x65"))
- head.(*diffLayer).newBinaryAccountIterator()
-
- b.Run("binary iterator keys", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- got := 0
- it := head.(*diffLayer).newBinaryAccountIterator()
- for it.Next() {
- got++
- }
- if exp := 200; got != exp {
- b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
- }
- }
- })
- b.Run("binary iterator values", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- got := 0
- it := head.(*diffLayer).newBinaryAccountIterator()
- for it.Next() {
- got++
- head.(*diffLayer).accountRLP(it.Hash(), 0)
- }
- if exp := 200; got != exp {
- b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
- }
- }
- })
- b.Run("fast iterator keys", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
- defer it.Release()
-
- got := 0
- for it.Next() {
- got++
- }
- if exp := 200; got != exp {
- b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
- }
- }
- })
- b.Run("fast iterator values", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
- defer it.Release()
-
- got := 0
- for it.Next() {
- got++
- it.Account()
- }
- if exp := 200; got != exp {
- b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
- }
- }
- })
-}
-
-// BenchmarkAccountIteratorLargeBaselayer is a pretty realistic benchmark, where
-// the baselayer is a lot larger than the upper layer.
-//
-// This is heavy on the binary iterator, which in most cases will have to
-// call recursively 100 times for the majority of the values
-//
-// BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(keys)-6 514 1971999 ns/op
-// BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(values)-6 61 18997492 ns/op
-// BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(keys)-6 10000 114385 ns/op
-// BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(values)-6 4047 296823 ns/op
-func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
- // Create a custom account factory to recreate the same addresses
- makeAccounts := func(num int) map[common.Hash][]byte {
- accounts := make(map[common.Hash][]byte)
- for i := 0; i < num; i++ {
- h := common.Hash{}
- binary.BigEndian.PutUint64(h[:], uint64(i+1))
- accounts[h] = randomAccount()
- }
- return accounts
- }
- // Build up a large stack of snapshots
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil)
- for i := 2; i <= 100; i++ {
- snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil)
- }
-	// We call this once before the benchmark, so the creation of the
-	// sorted account lists is not included in the results.
- head := snaps.Snapshot(common.HexToHash("0x65"))
- head.(*diffLayer).newBinaryAccountIterator()
-
- b.Run("binary iterator (keys)", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- got := 0
- it := head.(*diffLayer).newBinaryAccountIterator()
- for it.Next() {
- got++
- }
- if exp := 2000; got != exp {
- b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
- }
- }
- })
- b.Run("binary iterator (values)", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- got := 0
- it := head.(*diffLayer).newBinaryAccountIterator()
- for it.Next() {
- got++
- v := it.Hash()
- head.(*diffLayer).accountRLP(v, 0)
- }
- if exp := 2000; got != exp {
- b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
- }
- }
- })
- b.Run("fast iterator (keys)", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
- defer it.Release()
-
- got := 0
- for it.Next() {
- got++
- }
- if exp := 2000; got != exp {
- b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
- }
- }
- })
- b.Run("fast iterator (values)", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
- defer it.Release()
-
- got := 0
- for it.Next() {
- it.Account()
- got++
- }
- if exp := 2000; got != exp {
- b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
- }
- }
- })
-}
-
-/*
-func BenchmarkBinaryAccountIteration(b *testing.B) {
- benchmarkAccountIteration(b, func(snap snapshot) AccountIterator {
- return snap.(*diffLayer).newBinaryAccountIterator()
- })
-}
-
-func BenchmarkFastAccountIteration(b *testing.B) {
- benchmarkAccountIteration(b, newFastAccountIterator)
-}
-
-func benchmarkAccountIteration(b *testing.B, iterator func(snap snapshot) AccountIterator) {
- // Create a diff stack and randomize the accounts across them
- layers := make([]map[common.Hash][]byte, 128)
- for i := 0; i < len(layers); i++ {
- layers[i] = make(map[common.Hash][]byte)
- }
- for i := 0; i < b.N; i++ {
- depth := rand.Intn(len(layers))
- layers[depth][randomHash()] = randomAccount()
- }
- stack := snapshot(emptyLayer())
- for _, layer := range layers {
- stack = stack.Update(common.Hash{}, layer, nil, nil)
- }
- // Reset the timers and report all the stats
- it := iterator(stack)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for it.Next() {
- }
-}
-*/
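
TestFastIteratorBasics above expects the fast iterator to merge several already-sorted per-layer key lists into one ordered, de-duplicated stream. The snippet below reproduces the expected output of the first test case with a deliberately naive merge; the real fastIterator keeps a priority-ordered slice of weighted per-layer iterators instead of materialising everything, so treat this only as a sketch of the expected behaviour.

package main

import (
	"fmt"
	"sort"
)

// mergeSorted unions several sorted key lists, removes duplicates and
// returns a single ascending stream -- the observable behaviour the
// fast iterator tests check for.
func mergeSorted(lists [][]byte) []byte {
	seen := make(map[byte]bool)
	var out []byte
	for _, list := range lists {
		for _, k := range list {
			if !seen[k] {
				seen[k] = true
				out = append(out, k)
			}
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	return out
}

func main() {
	// First case from TestFastIteratorBasics above.
	lists := [][]byte{{0, 1, 8}, {1, 2, 8}, {2, 9}, {4}, {7, 14, 15}, {9, 13, 15, 16}}
	fmt.Println(mergeSorted(lists)) // [0 1 2 4 7 8 9 13 14 15 16]
}
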
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
deleted file mode 100644
index 94e3610..0000000
--- a/core/state/snapshot/snapshot_test.go
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package snapshot
-
-import (
- "fmt"
- "math/big"
- "math/rand"
- "testing"
-
- "github.com/VictoriaMetrics/fastcache"
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/rlp"
-)
-
-// randomHash generates a random blob of data and returns it as a hash.
-func randomHash() common.Hash {
- var hash common.Hash
- if n, err := rand.Read(hash[:]); n != common.HashLength || err != nil {
- panic(err)
- }
- return hash
-}
-
-// randomAccount generates a random account and returns it RLP encoded.
-func randomAccount() []byte {
- root := randomHash()
- a := Account{
- Balance: big.NewInt(rand.Int63()),
- Nonce: rand.Uint64(),
- Root: root[:],
- CodeHash: emptyCode[:],
- }
- data, _ := rlp.EncodeToBytes(a)
- return data
-}
-
-// randomAccountSet generates a set of random accounts with the given strings as
-// the account address hashes.
-func randomAccountSet(hashes ...string) map[common.Hash][]byte {
- accounts := make(map[common.Hash][]byte)
- for _, hash := range hashes {
- accounts[common.HexToHash(hash)] = randomAccount()
- }
- return accounts
-}
-
-// randomStorageSet generates a set of random slots with the given strings as
-// the slot addresses.
-func randomStorageSet(accounts []string, hashes [][]string, nilStorage [][]string) map[common.Hash]map[common.Hash][]byte {
- storages := make(map[common.Hash]map[common.Hash][]byte)
- for index, account := range accounts {
- storages[common.HexToHash(account)] = make(map[common.Hash][]byte)
-
- if index < len(hashes) {
- hashes := hashes[index]
- for _, hash := range hashes {
- storages[common.HexToHash(account)][common.HexToHash(hash)] = randomHash().Bytes()
- }
- }
- if index < len(nilStorage) {
- nils := nilStorage[index]
- for _, hash := range nils {
- storages[common.HexToHash(account)][common.HexToHash(hash)] = nil
- }
- }
- }
- return storages
-}
-
-// Tests that if a disk layer becomes stale, no active external references will
-// be returned with junk data. This version of the test flattens every diff layer
-// to check internal corner case around the bottom-most memory accumulator.
-func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Retrieve a reference to the base and commit a diff on top
- ref := snaps.Snapshot(base.root)
-
- accounts := map[common.Hash][]byte{
- common.HexToHash("0xa1"): randomAccount(),
- }
- if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
- t.Fatalf("failed to create a diff layer: %v", err)
- }
- if n := len(snaps.layers); n != 2 {
- t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 2)
- }
- // Commit the diff layer onto the disk and ensure it's persisted
- if err := snaps.Cap(common.HexToHash("0x02"), 0); err != nil {
- t.Fatalf("failed to merge diff layer onto disk: %v", err)
- }
-	// Since the base layer was modified, ensure that data retrieval on the external reference fails
- if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
- t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
- }
- if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
- t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
- }
- if n := len(snaps.layers); n != 1 {
- t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 1)
- fmt.Println(snaps.layers)
- }
-}
-
-// Tests that if a disk layer becomes stale, no active external references will
-// be returned with junk data. This version of the test retains the bottom diff
-// layer to check the usual mode of operation where the accumulator is retained.
-func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Retrieve a reference to the base and commit two diffs on top
- ref := snaps.Snapshot(base.root)
-
- accounts := map[common.Hash][]byte{
- common.HexToHash("0xa1"): randomAccount(),
- }
- if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
- t.Fatalf("failed to create a diff layer: %v", err)
- }
- if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
- t.Fatalf("failed to create a diff layer: %v", err)
- }
- if n := len(snaps.layers); n != 3 {
- t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 3)
- }
- // Commit the diff layer onto the disk and ensure it's persisted
- defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit)
- aggregatorMemoryLimit = 0
-
- if err := snaps.Cap(common.HexToHash("0x03"), 2); err != nil {
- t.Fatalf("failed to merge diff layer onto disk: %v", err)
- }
-	// Since the base layer was modified, ensure that data retrieval on the external reference fails
- if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
- t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
- }
- if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
- t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
- }
- if n := len(snaps.layers); n != 2 {
- t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 2)
- fmt.Println(snaps.layers)
- }
-}
-
-// Tests that if a diff layer becomes stale, no active external references will
-// be returned with junk data. This version of the test flattens every diff layer
-// to check internal corner case around the bottom-most memory accumulator.
-func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Commit two diffs on top and retrieve a reference to the bottommost
- accounts := map[common.Hash][]byte{
- common.HexToHash("0xa1"): randomAccount(),
- }
- if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
- t.Fatalf("failed to create a diff layer: %v", err)
- }
- if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
- t.Fatalf("failed to create a diff layer: %v", err)
- }
- if n := len(snaps.layers); n != 3 {
- t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 3)
- }
- ref := snaps.Snapshot(common.HexToHash("0x02"))
-
- // Flatten the diff layer into the bottom accumulator
- if err := snaps.Cap(common.HexToHash("0x03"), 1); err != nil {
- t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
- }
-	// Since the accumulator diff layer was modified, ensure that data retrieval on the external reference fails
- if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
- t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
- }
- if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
- t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
- }
- if n := len(snaps.layers); n != 2 {
- t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 2)
- fmt.Println(snaps.layers)
- }
-}
-
-// Tests that if a diff layer becomes stale, no active external references will
-// be returned with junk data. This version of the test retains the bottom diff
-// layer to check the usual mode of operation where the accumulator is retained.
-func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Commit three diffs on top and retrieve a reference to the bottommost
- accounts := map[common.Hash][]byte{
- common.HexToHash("0xa1"): randomAccount(),
- }
- if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
- t.Fatalf("failed to create a diff layer: %v", err)
- }
- if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
- t.Fatalf("failed to create a diff layer: %v", err)
- }
- if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
- t.Fatalf("failed to create a diff layer: %v", err)
- }
- if n := len(snaps.layers); n != 4 {
- t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 4)
- }
- ref := snaps.Snapshot(common.HexToHash("0x02"))
-
- // Doing a Cap operation with many allowed layers should be a no-op
- exp := len(snaps.layers)
- if err := snaps.Cap(common.HexToHash("0x04"), 2000); err != nil {
- t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
- }
- if got := len(snaps.layers); got != exp {
- t.Errorf("layers modified, got %d exp %d", got, exp)
- }
- // Flatten the diff layer into the bottom accumulator
- if err := snaps.Cap(common.HexToHash("0x04"), 2); err != nil {
- t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
- }
- // Since the accumulator diff layer was modified, ensure that data retrievals on the external reference fail
- if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
- t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
- }
- if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
- t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
- }
- if n := len(snaps.layers); n != 3 {
- t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 3)
- fmt.Println(snaps.layers)
- }
-}
-
- // TestPostCapBasicDataAccess tests that account reads against the layer tree keep behaving correctly after capping/flattening.
-func TestPostCapBasicDataAccess(t *testing.T) {
- // setAccount is a helper to construct a random account entry and assign it to
- // an account slot in a snapshot
- setAccount := func(accKey string) map[common.Hash][]byte {
- return map[common.Hash][]byte{
- common.HexToHash(accKey): randomAccount(),
- }
- }
- // Create a starting base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // The lowest difflayer
- snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
- snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
- snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
-
- snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
- snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
-
- // checkExist verifies that an account exists in a snapshot
- checkExist := func(layer *diffLayer, key string) error {
- if data, _ := layer.Account(common.HexToHash(key)); data == nil {
- return fmt.Errorf("expected %x to exist, got nil", common.HexToHash(key))
- }
- return nil
- }
- // shouldErr checks that an account access errors as expected
- shouldErr := func(layer *diffLayer, key string) error {
- if data, err := layer.Account(common.HexToHash(key)); err == nil {
- return fmt.Errorf("expected error, got data %x", data)
- }
- return nil
- }
- // check basics
- snap := snaps.Snapshot(common.HexToHash("0xb3")).(*diffLayer)
-
- if err := checkExist(snap, "0xa1"); err != nil {
- t.Error(err)
- }
- if err := checkExist(snap, "0xb2"); err != nil {
- t.Error(err)
- }
- if err := checkExist(snap, "0xb3"); err != nil {
- t.Error(err)
- }
- // Cap to a bad root should fail
- if err := snaps.Cap(common.HexToHash("0x1337"), 0); err == nil {
- t.Errorf("expected error, got none")
- }
- // Now, merge the a-chain
- snaps.Cap(common.HexToHash("0xa3"), 0)
-
- // At this point, a2 got merged into a1. Thus, a1 is now modified, and as a1 is
- // the parent of b2, b2 should no longer be able to iterate into parent.
-
- // These should still be accessible
- if err := checkExist(snap, "0xb2"); err != nil {
- t.Error(err)
- }
- if err := checkExist(snap, "0xb3"); err != nil {
- t.Error(err)
- }
- // But these would need iteration into the modified parent
- if err := shouldErr(snap, "0xa1"); err != nil {
- t.Error(err)
- }
- if err := shouldErr(snap, "0xa2"); err != nil {
- t.Error(err)
- }
- if err := shouldErr(snap, "0xa3"); err != nil {
- t.Error(err)
- }
- // Now, merge it again, just for fun. It should now error, since a3
- // is a disk layer
- if err := snaps.Cap(common.HexToHash("0xa3"), 0); err == nil {
- t.Error("expected error capping the disk layer, got none")
- }
-}
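All three deleted tests above pin down the same contract: once Cap folds a diff layer away, a reference obtained earlier from Snapshot must answer every read with ErrSnapshotStale rather than stale data. A condensed sketch of that contract, not part of the diff, with illustrative hashes and a single-account update:

// sketch: hold a reference, flatten beneath it, and expect ErrSnapshotStale.
func TestStaleReferenceSketch(t *testing.T) {
	base := &diskLayer{
		diskdb: rawdb.NewMemoryDatabase(),
		root:   common.HexToHash("0x01"),
		cache:  fastcache.New(1024 * 500),
	}
	snaps := &Tree{layers: map[common.Hash]snapshot{base.root: base}}
	accounts := map[common.Hash][]byte{common.HexToHash("0xa1"): randomAccount()}
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil)
	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil)

	ref := snaps.Snapshot(common.HexToHash("0x02")) // external reference into the tree
	if err := snaps.Cap(common.HexToHash("0x03"), 1); err != nil {
		t.Fatalf("failed to flatten diff layers: %v", err)
	}
	// The referenced layer was folded into the accumulator, so reads must fail.
	if _, err := ref.Account(common.HexToHash("0xa1")); err != ErrSnapshotStale {
		t.Errorf("stale reference still served data, err: %v", err)
	}
}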
diff --git a/core/state/snapshot/wipe_test.go b/core/state/snapshot/wipe_test.go
deleted file mode 100644
index a656982..0000000
--- a/core/state/snapshot/wipe_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package snapshot
-
-import (
- "math/rand"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
-)
-
-// Tests that given a database with random data content, all parts of a snapshot
- // can be correctly wiped without touching anything else.
-func TestWipe(t *testing.T) {
- // Create a database with some random snapshot data
- db := memorydb.New()
-
- for i := 0; i < 128; i++ {
- account := randomHash()
- rawdb.WriteAccountSnapshot(db, account, randomHash().Bytes())
- for j := 0; j < 1024; j++ {
- rawdb.WriteStorageSnapshot(db, account, randomHash(), randomHash().Bytes())
- }
- }
- rawdb.WriteSnapshotRoot(db, randomHash())
-
- // Add some random non-snapshot data too to make wiping harder
- for i := 0; i < 65536; i++ {
- // Generate a key that's the wrong length for a state snapshot item
- var keysize int
- for keysize == 0 || keysize == 32 || keysize == 64 {
- keysize = 8 + rand.Intn(64) // +8 to ensure we will "never" randomize duplicates
- }
- // Randomize the suffix, dedup and inject it under the snapshot namespace
- keysuffix := make([]byte, keysize)
- rand.Read(keysuffix)
-
- if rand.Int31n(2) == 0 {
- db.Put(append(rawdb.SnapshotAccountPrefix, keysuffix...), randomHash().Bytes())
- } else {
- db.Put(append(rawdb.SnapshotStoragePrefix, keysuffix...), randomHash().Bytes())
- }
- }
- // Sanity check that all the keys are present
- var items int
-
- it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
- defer it.Release()
-
- for it.Next() {
- key := it.Key()
- if len(key) == len(rawdb.SnapshotAccountPrefix)+common.HashLength {
- items++
- }
- }
- it = db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
- defer it.Release()
-
- for it.Next() {
- key := it.Key()
- if len(key) == len(rawdb.SnapshotStoragePrefix)+2*common.HashLength {
- items++
- }
- }
- if items != 128+128*1024 {
- t.Fatalf("snapshot size mismatch: have %d, want %d", items, 128+128*1024)
- }
- if hash := rawdb.ReadSnapshotRoot(db); hash == (common.Hash{}) {
- t.Errorf("snapshot block marker mismatch: have %#x, want <not-nil>", hash)
- }
- // Wipe all snapshot entries from the database
- <-wipeSnapshot(db, true)
-
- // Iterate over the database and ensure no snapshot information remains
- it = db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
- defer it.Release()
-
- for it.Next() {
- key := it.Key()
- if len(key) == len(rawdb.SnapshotAccountPrefix)+common.HashLength {
- t.Errorf("snapshot entry remained after wipe: %x", key)
- }
- }
- it = db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
- defer it.Release()
-
- for it.Next() {
- key := it.Key()
- if len(key) == len(rawdb.SnapshotStoragePrefix)+2*common.HashLength {
- t.Errorf("snapshot entry remained after wipe: %x", key)
- }
- }
- if hash := rawdb.ReadSnapshotRoot(db); hash != (common.Hash{}) {
- t.Errorf("snapshot block marker remained after wipe: %#x", hash)
- }
- // Iterate over the database and ensure miscellaneous items are present
- items = 0
-
- it = db.NewIterator(nil, nil)
- defer it.Release()
-
- for it.Next() {
- items++
- }
- if items != 65536 {
- t.Fatalf("misc item count mismatch: have %d, want %d", items, 65536)
- }
-}
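The deleted wipe test leans on the snapshot key layout: an account entry is the account prefix followed by one 32-byte hash, a storage entry is the storage prefix followed by two 32-byte hashes, and anything else under those prefixes is junk that wiping must leave alone. A sketch of counting well-formed entries under those rules; countSnapshotEntries is an illustrative helper, not part of the diff:

// sketch: classify keys by the length rules the deleted test relied on.
func countSnapshotEntries(db ethdb.Iteratee) (accounts, slots int) {
	it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
	for it.Next() {
		if len(it.Key()) == len(rawdb.SnapshotAccountPrefix)+common.HashLength {
			accounts++ // prefix + account hash
		}
	}
	it.Release()

	it = db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
	for it.Next() {
		if len(it.Key()) == len(rawdb.SnapshotStoragePrefix)+2*common.HashLength {
			slots++ // prefix + account hash + slot hash
		}
	}
	it.Release()
	return accounts, slots
}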
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 35ce39f..abfa2aa 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -17,6 +17,7 @@
package vm
import (
+ "errors"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
"github.com/ethereum/go-ethereum/common"
@@ -265,7 +266,12 @@ func opBalance(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([
func opBalanceMultiCoin(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) {
addr, cid := callContext.stack.pop(), callContext.stack.pop()
- callContext.stack.push(interpreter.evm.StateDB.GetBalanceMultiCoin(common.BigToAddress(addr), common.BigToHash(cid)))
+ res, overflow := uint256.FromBig(interpreter.evm.StateDB.GetBalanceMultiCoin(
+ common.BigToAddress(addr.ToBig()), common.BigToHash(cid.ToBig())))
+ if overflow {
+ return nil, errors.New("balance overflow")
+ }
+ callContext.stack.push(res)
return nil, nil
}
@@ -725,7 +731,7 @@ func opCallExpert(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx)
// Pop other call parameters.
addr, value, cid, value2, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20())
- coinID := common.BigToHash(cid)
+ coinID := common.BigToHash(cid.ToBig())
// Get the arguments from the memory.
args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64()))
@@ -875,8 +881,8 @@ func opSuicide(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([
return nil, nil
}
-func opEMC(pc *uint64, interpreter *EVMInterpreter, contract *Contract, callContext *callCtx) ([]byte, error) {
- return nil, interpreter.evm.StateDB.EnableMultiCoin(contract.Address())
+func opEMC(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) {
+ return nil, interpreter.evm.StateDB.EnableMultiCoin(callContext.contract.Address())
}
// following functions are used by the instruction jump table
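The instructions.go hunks track upstream's switch of the EVM stack from big.Int to uint256.Int: popped words are converted with ToBig() before hitting common.BigToAddress/BigToHash, and big.Int results from the StateDB have to be re-encoded as 256-bit words, with truncation surfaced as an error. A minimal sketch of that conversion pattern; toStackWord is an illustrative helper, not part of the diff:

import (
	"errors"
	"math/big"

	"github.com/holiman/uint256"
)

// toStackWord converts a big.Int into a 256-bit EVM stack word, reporting
// an error instead of silently truncating values wider than 256 bits.
func toStackWord(v *big.Int) (*uint256.Int, error) {
	word, overflow := uint256.FromBig(v)
	if overflow {
		return nil, errors.New("value does not fit in 256 bits")
	}
	return word, nil
}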
diff --git a/coreth.go b/coreth.go
index 1fe2613..4d0c2ee 100644
--- a/coreth.go
+++ b/coreth.go
@@ -2,7 +2,7 @@ package coreth
import (
"crypto/ecdsa"
- "fmt"
+ //"fmt"
"io"
"os"
@@ -17,7 +17,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/trie"
+ //"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/mattn/go-isatty"
)
@@ -52,18 +53,18 @@ func NewETHChain(config *eth.Config, nodecfg *node.Config, etherBase *common.Add
if nodecfg == nil {
nodecfg = &node.Config{}
}
- mux := new(event.TypeMux)
- ctx, ep, err := node.NewServiceContext(nodecfg, mux)
+ //mux := new(event.TypeMux)
+ node, err := node.New(nodecfg)
if err != nil {
panic(err)
}
- if ep != "" {
- log.Info(fmt.Sprintf("temporary keystore = %s", ep))
- }
+ //if ep != "" {
+ // log.Info(fmt.Sprintf("temporary keystore = %s", ep))
+ //}
cb := new(dummy.ConsensusCallbacks)
mcb := new(miner.MinerCallbacks)
bcb := new(eth.BackendCallbacks)
- backend, _ := eth.New(&ctx, config, cb, mcb, bcb, chainDB)
+ backend, _ := eth.New(node, config, cb, mcb, bcb, chainDB)
chain := &ETHChain{backend: backend, cb: cb, mcb: mcb, bcb: bcb}
if etherBase == nil {
etherBase = &BlackholeAddr
@@ -85,7 +86,7 @@ func (self *ETHChain) GenBlock() {
}
func (self *ETHChain) VerifyBlock(block *types.Block) bool {
- txnHash := types.DeriveSha(block.Transactions())
+ txnHash := types.DeriveSha(block.Transactions(), new(trie.Trie))
uncleHash := types.CalcUncleHash(block.Uncles())
ethHeader := block.Header()
if txnHash != ethHeader.TxHash || uncleHash != ethHeader.UncleHash {
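DeriveSha no longer builds its own hasher, so callers pass one in; VerifyBlock above does this with a fresh trie.Trie. A compact sketch of the resulting check, with txRootMatches as an illustrative helper:

// sketch: recompute the transaction root with an explicit trie hasher and
// compare it against the header, as VerifyBlock now does.
func txRootMatches(block *types.Block) bool {
	return types.DeriveSha(block.Transactions(), new(trie.Trie)) == block.Header().TxHash
}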
diff --git a/eth/api.go b/eth/api.go
index 92ac928..ada2a97 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -371,7 +371,7 @@ func (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, sta
var block *types.Block
if number == rpc.LatestBlockNumber {
block = api.eth.blockchain.CurrentBlock()
- } else if blockNr == rpc.AcceptedBlockNumber {
+ } else if number == rpc.AcceptedBlockNumber {
block = api.eth.AcceptedBlock()
} else {
block = api.eth.blockchain.GetBlockByNumber(uint64(number))
diff --git a/eth/backend.go b/eth/backend.go
index 9222181..b1c29a5 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -41,7 +41,6 @@ import (
"github.com/ava-labs/coreth/node"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/rpc"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/bloombits"
@@ -50,6 +49,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -127,7 +127,7 @@ func New(stack *node.Node, config *Config,
var err error
if chainDb == nil {
// Assemble the Ethereum object
- chainDb, err = ctx.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/")
+ chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/")
if err != nil {
return nil, err
}
@@ -223,7 +223,7 @@ func New(stack *node.Node, config *Config,
}
eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)
- eth.dialCandidates, err = eth.setupDiscovery(&stack.Config().P2P)
+ //eth.dialCandidates, err = eth.setupDiscovery(&stack.Config().P2P)
if err != nil {
return nil, err
}
@@ -233,8 +233,8 @@ func New(stack *node.Node, config *Config,
// Register the backend on the node
stack.RegisterAPIs(eth.APIs())
- stack.RegisterProtocols(eth.Protocols())
- stack.RegisterLifecycle(eth)
+ //stack.RegisterProtocols(eth.Protocols())
+ //stack.RegisterLifecycle(eth)
return eth, nil
}
diff --git a/eth/protocol.go b/eth/protocol.go
index 94260c2..ef5dcde 100644
--- a/eth/protocol.go
+++ b/eth/protocol.go
@@ -22,6 +22,7 @@ import (
"math/big"
"github.com/ava-labs/coreth/core"
+ "github.com/ava-labs/coreth/core/forkid"
"github.com/ava-labs/coreth/core/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/event"
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 8753c05..9915ad4 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -30,7 +30,6 @@ import (
"github.com/ava-labs/coreth/accounts/scwallet"
"github.com/ava-labs/coreth/consensus/ethash"
"github.com/ava-labs/coreth/core"
- "github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/core/vm"
"github.com/ava-labs/coreth/params"
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index ae4196f..cee6b57 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -22,6 +22,7 @@ import (
"math/big"
"github.com/ava-labs/coreth/accounts"
+ "github.com/ava-labs/coreth/consensus"
"github.com/ava-labs/coreth/core"
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
diff --git a/miner/miner.go b/miner/miner.go
index 57938e1..e8e59a4 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -30,7 +30,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/log"
)
// Backend wraps all methods required for mining.
@@ -60,7 +59,7 @@ type Miner struct {
func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(block *types.Block) bool, mcb *MinerCallbacks) *Miner {
return &Miner{
- worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, mcb),
+ worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true, mcb),
}
}
diff --git a/node/api.go b/node/api.go
index e74d10b..4589d25 100644
--- a/node/api.go
+++ b/node/api.go
@@ -21,6 +21,7 @@ import (
"fmt"
//"strings"
+ "github.com/ava-labs/coreth/internal/debug"
"github.com/ava-labs/coreth/rpc"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
diff --git a/node/node.go b/node/node.go
index c6e4181..3ed89ed 100644
--- a/node/node.go
+++ b/node/node.go
@@ -18,14 +18,13 @@ package node
import (
"errors"
+ "os"
"path/filepath"
- "reflect"
"strings"
"sync"
"github.com/ava-labs/coreth/accounts"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/internal/debug"
"github.com/ava-labs/coreth/rpc"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -48,7 +47,6 @@ type Node struct {
state int // Tracks state of node lifecycle
lock sync.Mutex
- lifecycles []Lifecycle // All registered backends, services, and auxiliary services that have a lifecycle
rpcAPIs []rpc.API // List of APIs currently provided by the node
inprocHandler *rpc.Server // In-process RPC request handler to process the API requests
@@ -61,6 +59,25 @@ const (
closedState
)
+func (n *Node) openDataDir() error {
+ if n.config.DataDir == "" {
+ return nil // ephemeral
+ }
+
+ instdir := filepath.Join(n.config.DataDir, n.config.name())
+ if err := os.MkdirAll(instdir, 0700); err != nil {
+ return err
+ }
+ // Lock the instance directory to prevent concurrent use by another instance as well as
+ // accidental use of the instance directory as a database.
+ release, _, err := fileutil.Flock(filepath.Join(instdir, "LOCK"))
+ if err != nil {
+ return convertFileLockError(err)
+ }
+ n.dirLock = release
+ return nil
+}
+
// New creates a new P2P node, ready for protocol registration.
func New(conf *Config) (*Node, error) {
// Copy config and resolve the datadir so future changes to the current
@@ -266,3 +283,14 @@ func (n *Node) closeDatabases() (errors []error) {
}
return errors
}
+
+// RegisterAPIs registers the APIs a service provides on the node.
+func (n *Node) RegisterAPIs(apis []rpc.API) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ if n.state != initializingState {
+ panic("can't register APIs on running/stopped node")
+ }
+ n.rpcAPIs = append(n.rpcAPIs, apis...)
+}
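RegisterAPIs is only legal while the node is still in its initializing state, which is why the backend constructor above calls it before the node is started; an empty DataDir keeps the node ephemeral and skips the directory lock taken in openDataDir. A hedged caller-side sketch, where the "example" namespace and ExampleService are illustrative:

// sketch: a service exposing its RPC namespace on a freshly created node.
// Assumes imports of the coreth node and rpc packages.
type ExampleService struct{} // illustrative receiver; its exported methods become RPC methods

func newStackWithAPIs() (*node.Node, error) {
	stack, err := node.New(&node.Config{DataDir: ""}) // "" = ephemeral, no dir lock
	if err != nil {
		return nil, err
	}
	stack.RegisterAPIs([]rpc.API{{
		Namespace: "example",
		Version:   "1.0",
		Service:   new(ExampleService),
		Public:    true,
	}})
	return stack, nil
}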
diff --git a/rpc/metrics.go b/rpc/metrics.go
new file mode 100644
index 0000000..7fb6fc0
--- /dev/null
+++ b/rpc/metrics.go
@@ -0,0 +1,39 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rpc
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/metrics"
+)
+
+var (
+ rpcRequestGauge = metrics.NewRegisteredGauge("rpc/requests", nil)
+ successfulRequestGauge = metrics.NewRegisteredGauge("rpc/success", nil)
+ failedReqeustGauge = metrics.NewRegisteredGauge("rpc/failure", nil)
+ rpcServingTimer = metrics.NewRegisteredTimer("rpc/duration/all", nil)
+)
+
+func newRPCServingTimer(method string, valid bool) metrics.Timer {
+ flag := "success"
+ if !valid {
+ flag = "failure"
+ }
+ m := fmt.Sprintf("rpc/duration/%s/%s", method, flag)
+ return metrics.GetOrRegisterTimer(m, nil)
+}
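The new metrics file only declares the instruments; the RPC handler (not shown in this diff) is what drives them per request. A hedged sketch of the intended usage pattern, where serveRequest and method are illustrative placeholders for the handler's dispatch path:

// sketch: driving the gauges and timers declared above around one RPC call.
start := time.Now()
rpcRequestGauge.Inc(1)

err := serveRequest(method) // hypothetical dispatch of a single RPC method
if err == nil {
	successfulRequestGauge.Inc(1)
} else {
	failedReqeustGauge.Inc(1) // identifier kept as declared above
}
rpcServingTimer.UpdateSince(start)
newRPCServingTimer(method, err == nil).UpdateSince(start)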