From 78745551c077bf54151202138c2629f288769561 Mon Sep 17 00:00:00 2001 From: Determinant Date: Tue, 15 Sep 2020 23:55:34 -0400 Subject: WIP: geth-tavum --- consensus/clique/api.go | 62 +++++++- consensus/clique/clique.go | 58 +++---- consensus/clique/snapshot.go | 6 +- consensus/consensus.go | 32 ++-- consensus/errors.go | 2 +- consensus/ethash/algorithm.go | 8 +- consensus/ethash/api.go | 30 ++-- consensus/ethash/consensus.go | 44 +++--- consensus/ethash/ethash.go | 175 +++++++++------------ consensus/ethash/sealer.go | 357 +++++++++++++++++++++++++----------------- consensus/misc/dao.go | 2 +- consensus/misc/forks.go | 2 +- 12 files changed, 439 insertions(+), 339 deletions(-) (limited to 'consensus') diff --git a/consensus/clique/api.go b/consensus/clique/api.go index 04e74eb..4776b97 100644 --- a/consensus/clique/api.go +++ b/consensus/clique/api.go @@ -17,16 +17,18 @@ package clique import ( + "fmt" + "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/rpc" - "github.com/ava-labs/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" ) // API is a user facing RPC API to allow controlling the signer and voting // mechanisms of the proof-of-authority scheme. type API struct { - chain consensus.ChainReader + chain consensus.ChainHeaderReader clique *Clique } @@ -117,3 +119,59 @@ func (api *API) Discard(address common.Address) { delete(api.clique.proposals, address) } + +type status struct { + InturnPercent float64 `json:"inturnPercent"` + SigningStatus map[common.Address]int `json:"sealerActivity"` + NumBlocks uint64 `json:"numBlocks"` +} + +// Status returns the status of the last N blocks, +// - the number of active signers, +// - the number of signers, +// - the percentage of in-turn blocks +func (api *API) Status() (*status, error) { + var ( + numBlocks = uint64(64) + header = api.chain.CurrentHeader() + diff = uint64(0) + optimals = 0 + ) + snap, err := api.clique.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil) + if err != nil { + return nil, err + } + var ( + signers = snap.signers() + end = header.Number.Uint64() + start = end - numBlocks + ) + if numBlocks > end { + start = 1 + numBlocks = end - start + } + signStatus := make(map[common.Address]int) + for _, s := range signers { + signStatus[s] = 0 + } + for n := start; n < end; n++ { + h := api.chain.GetHeaderByNumber(n) + if h == nil { + return nil, fmt.Errorf("missing block %d", n) + } + if h.Difficulty.Cmp(diffInTurn) == 0 { + optimals++ + } + diff += h.Difficulty.Uint64() + sealer, err := api.clique.Author(h) + if err != nil { + return nil, err + } + signStatus[sealer]++ + } + return &status{ + InturnPercent: float64(100*optimals) / float64(numBlocks), + SigningStatus: signStatus, + NumBlocks: numBlocks, + }, nil +} diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 3714733..d239aca 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -33,12 +33,13 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/ethdb" - "github.com/ava-labs/go-ethereum/log" - "github.com/ava-labs/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + 
"github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" lru "github.com/hashicorp/golang-lru" "golang.org/x/crypto/sha3" ) @@ -121,9 +122,9 @@ var ( // turn of the signer. errWrongDifficulty = errors.New("wrong difficulty") - // ErrInvalidTimestamp is returned if the timestamp of a block is lower than + // errInvalidTimestamp is returned if the timestamp of a block is lower than // the previous block's timestamp + the minimum block period. - ErrInvalidTimestamp = errors.New("invalid timestamp") + errInvalidTimestamp = errors.New("invalid timestamp") // errInvalidVotingChain is returned if an authorization list is attempted to // be modified via out-of-range or non-contiguous headers. @@ -137,9 +138,8 @@ var ( errRecentlySigned = errors.New("recently signed") ) -// SignerFn is a signer callback function to request a header to be signed by a -// backing account. -type SignerFn func(accounts.Account, string, []byte) ([]byte, error) +// SignerFn hashes and signs the data to be signed by a backing account. +type SignerFn func(signer accounts.Account, mimeType string, message []byte) ([]byte, error) // ecrecover extracts the Ethereum account address from a signed header. func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, error) { @@ -213,14 +213,14 @@ func (c *Clique) Author(header *types.Header) (common.Address, error) { } // VerifyHeader checks whether a header conforms to the consensus rules. -func (c *Clique) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error { +func (c *Clique) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { return c.verifyHeader(chain, header, nil) } // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The // method returns a quit channel to abort the operations and a results channel to // retrieve the async verifications (the order is that of the input slice). -func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { +func (c *Clique) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { abort := make(chan struct{}) results := make(chan error, len(headers)) @@ -242,7 +242,7 @@ func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Hea // caller may optionally pass in a batch of parents (ascending order) to avoid // looking those up from the database. This is useful for concurrently verifying // a batch of new headers. -func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error { +func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error { if header.Number == nil { return errUnknownBlock } @@ -305,13 +305,13 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header, // rather depend on a batch of previous headers. The caller may optionally pass // in a batch of parents (ascending order) to avoid looking those up from the // database. This is useful for concurrently verifying a batch of new headers. 
-func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error { +func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error { // The genesis block is the always valid dead-end number := header.Number.Uint64() if number == 0 { return nil } - // Ensure that the block's timestamp isn't too close to it's parent + // Ensure that the block's timestamp isn't too close to its parent var parent *types.Header if len(parents) > 0 { parent = parents[len(parents)-1] @@ -322,7 +322,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type return consensus.ErrUnknownAncestor } if parent.Time+c.config.Period > header.Time { - return ErrInvalidTimestamp + return errInvalidTimestamp } // Retrieve the snapshot needed to verify this header and cache it snap, err := c.snapshot(chain, number-1, header.ParentHash, parents) @@ -345,7 +345,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type } // snapshot retrieves the authorization snapshot at a given point in time. -func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) { +func (c *Clique) snapshot(chain consensus.ChainHeaderReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) { // Search for a snapshot in memory or on disk for checkpoints var ( headers []*types.Header @@ -369,7 +369,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo // at a checkpoint block without a parent (light client CHT), or we have piled // up more headers than allowed to be reorged (chain reinit from a freezer), // consider the checkpoint trusted and snapshot it. - if number == 0 || (number%c.config.Epoch == 0 && (len(headers) > params.ImmutabilityThreshold || chain.GetHeaderByNumber(number-1) == nil)) { + if number == 0 || (number%c.config.Epoch == 0 && (len(headers) > params.FullImmutabilityThreshold || chain.GetHeaderByNumber(number-1) == nil)) { checkpoint := chain.GetHeaderByNumber(number) if checkpoint != nil { hash := checkpoint.Hash() @@ -436,7 +436,7 @@ func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) e // VerifySeal implements consensus.Engine, checking whether the signature contained // in the header satisfies the consensus protocol requirements. -func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) error { +func (c *Clique) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { return c.verifySeal(chain, header, nil) } @@ -444,7 +444,7 @@ func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) e // consensus protocol requirements. The method accepts an optional list of parent // headers that aren't yet part of the local blockchain to generate the snapshots // from. -func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error { +func (c *Clique) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error { // Verifying the genesis block is not supported number := header.Number.Uint64() if number == 0 { @@ -487,7 +487,7 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p // Prepare implements consensus.Engine, preparing all the consensus fields of the // header for running the transactions on top. 
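For reference, the checkpoint rule in the snapshot change above (now keyed on params.FullImmutabilityThreshold, 90,000 blocks upstream, replacing the old ImmutabilityThreshold) restates as a small predicate; this is a sketch with illustrative names:

	package example

	import "github.com/ava-labs/coreth/params"

	// checkpointTrusted reports whether a snapshot may be taken directly at a
	// checkpoint header rather than replayed from an ancestor: at genesis, or
	// on an epoch boundary once more headers have piled up than could ever be
	// reorged (or when the parent is simply unavailable, as for a light
	// client CHT).
	func checkpointTrusted(number, epoch uint64, gathered int, parentKnown bool) bool {
		return number == 0 ||
			(number%epoch == 0 && (gathered > params.FullImmutabilityThreshold || !parentKnown))
	}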
-func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) error { +func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { // If the block isn't a checkpoint, cast a random vote (good enough for now) header.Coinbase = common.Address{} header.Nonce = types.BlockNonce{} @@ -522,7 +522,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro // Set the correct difficulty header.Difficulty = CalcDifficulty(snap, c.signer) - // Ensure the extra data has all it's components + // Ensure the extra data has all its components if len(header.Extra) < extraVanity { header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...) } @@ -552,7 +552,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro // Finalize implements consensus.Engine, ensuring no uncles are set, nor block // rewards given. -func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { +func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { // No block rewards in PoA, so the state remains as is and uncles are dropped header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) header.UncleHash = types.CalcUncleHash(nil) @@ -560,13 +560,13 @@ func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, sta // FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set, // nor block rewards given, and returns the final block. -func (c *Clique) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { +func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { // No block rewards in PoA, so the state remains as is and uncles are dropped header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) header.UncleHash = types.CalcUncleHash(nil) // Assemble and return the final block for sealing - return types.NewBlock(header, txs, nil, receipts, nil), nil + return types.NewBlock(header, txs, nil, receipts, new(trie.Trie)), nil } // Authorize injects a private key into the consensus engine to mint new blocks @@ -581,7 +581,7 @@ func (c *Clique) Authorize(signer common.Address, signFn SignerFn) { // Seal implements consensus.Engine, attempting to create a sealed block using // the local signing credentials. -func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { +func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { header := block.Header() // Sealing the genesis block is not supported @@ -654,7 +654,7 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty // that a new block should have based on the previous blocks in the chain and the // current signer. 
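A related change above: types.NewBlock now takes an explicit hasher for deriving the transaction and receipt tries, and the patch passes new(trie.Trie) where nil used to be. The call shape, wrapped as a sketch (assemble is an illustrative name):

	package example

	import (
		"github.com/ava-labs/coreth/core/types"
		"github.com/ethereum/go-ethereum/trie"
	)

	// assemble mirrors the tail of FinalizeAndAssemble: no uncles in PoA, and
	// a fresh trie supplied as the hasher for the tx/receipt trie roots.
	func assemble(header *types.Header, txs []*types.Transaction, receipts []*types.Receipt) *types.Block {
		return types.NewBlock(header, txs, nil, receipts, new(trie.Trie))
	}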
-func (c *Clique) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int { +func (c *Clique) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { snap, err := c.snapshot(chain, parent.Number.Uint64(), parent.Hash(), nil) if err != nil { return nil @@ -684,7 +684,7 @@ func (c *Clique) Close() error { // APIs implements consensus.Engine, returning the user facing RPC API to allow // controlling the signer voting. -func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API { +func (c *Clique) APIs(chain consensus.ChainHeaderReader) []rpc.API { return []rpc.API{{ Namespace: "clique", Version: "1.0", diff --git a/consensus/clique/snapshot.go b/consensus/clique/snapshot.go index 6660c0f..2bf7430 100644 --- a/consensus/clique/snapshot.go +++ b/consensus/clique/snapshot.go @@ -24,9 +24,9 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/ethdb" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" lru "github.com/hashicorp/golang-lru" ) diff --git a/consensus/consensus.go b/consensus/consensus.go index 603a3e9..b29abac 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -24,12 +24,12 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" - "github.com/ava-labs/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" ) -// ChainReader defines a small collection of methods needed to access the local -// blockchain during header and/or uncle verification. -type ChainReader interface { +// ChainHeaderReader defines a small collection of methods needed to access the local +// blockchain during header verification. +type ChainHeaderReader interface { // Config retrieves the blockchain's chain configuration. Config() *params.ChainConfig @@ -44,6 +44,12 @@ type ChainReader interface { // GetHeaderByHash retrieves a block header from the database by its hash. GetHeaderByHash(hash common.Hash) *types.Header +} + +// ChainReader defines a small collection of methods needed to access the local +// blockchain during header and/or uncle verification. +type ChainReader interface { + ChainHeaderReader // GetBlock retrieves a block from the database by hash and number. GetBlock(hash common.Hash, number uint64) *types.Block @@ -59,13 +65,13 @@ type Engine interface { // VerifyHeader checks whether a header conforms to the consensus rules of a // given engine. Verifying the seal may be done optionally here, or explicitly // via the VerifySeal method. - VerifyHeader(chain ChainReader, header *types.Header, seal bool) error + VerifyHeader(chain ChainHeaderReader, header *types.Header, seal bool) error // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers // concurrently. The method returns a quit channel to abort the operations and // a results channel to retrieve the async verifications (the order is that of // the input slice). - VerifyHeaders(chain ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) + VerifyHeaders(chain ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) // VerifyUncles verifies that the given block's uncles conform to the consensus // rules of a given engine. 
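Because ChainReader embeds ChainHeaderReader, every full chain reader still satisfies the narrower interface, which is why the engine methods can migrate to ChainHeaderReader without breaking callers that hold a ChainReader. A compile-time sketch:

	package example

	import "github.com/ava-labs/coreth/consensus"

	// Any ChainReader is usable wherever only header access is required.
	var _ consensus.ChainHeaderReader = consensus.ChainReader(nil)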
@@ -73,18 +79,18 @@ type Engine interface { // VerifySeal checks whether the crypto seal on a header is valid according to // the consensus rules of the given engine. - VerifySeal(chain ChainReader, header *types.Header) error + VerifySeal(chain ChainHeaderReader, header *types.Header) error // Prepare initializes the consensus fields of a block header according to the // rules of a particular engine. The changes are executed inline. - Prepare(chain ChainReader, header *types.Header) error + Prepare(chain ChainHeaderReader, header *types.Header) error // Finalize runs any post-transaction state modifications (e.g. block rewards) // but does not assemble the block. // // Note: The block header and state database might be updated to reflect any // consensus rules that happen at finalization (e.g. block rewards). - Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, + Finalize(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) // FinalizeAndAssemble runs any post-transaction state modifications (e.g. block @@ -92,7 +98,7 @@ type Engine interface { // // Note: The block header and state database might be updated to reflect any // consensus rules that happen at finalization (e.g. block rewards). - FinalizeAndAssemble(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, + FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) // Seal generates a new sealing request for the given input block and pushes @@ -100,17 +106,17 @@ type Engine interface { // // Note, the method returns immediately and will send the result async. More // than one result may also be returned depending on the consensus algorithm. - Seal(chain ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error + Seal(chain ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error // SealHash returns the hash of a block prior to it being sealed. SealHash(header *types.Header) common.Hash // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty // that a new block should have. - CalcDifficulty(chain ChainReader, time uint64, parent *types.Header) *big.Int + CalcDifficulty(chain ChainHeaderReader, time uint64, parent *types.Header) *big.Int // APIs returns the RPC APIs this consensus engine provides. - APIs(chain ChainReader) []rpc.API + APIs(chain ChainHeaderReader) []rpc.API // Close terminates any background threads maintained by the consensus engine. Close() error diff --git a/consensus/errors.go b/consensus/errors.go index a005c5f..ac5242f 100644 --- a/consensus/errors.go +++ b/consensus/errors.go @@ -31,7 +31,7 @@ var ( // to the current node. ErrFutureBlock = errors.New("block in the future") - // ErrInvalidNumber is returned if a block's number doesn't equal it's parent's + // ErrInvalidNumber is returned if a block's number doesn't equal its parent's // plus one. 
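The "plus one" rule this error names is enforced in ethash's verifyHeader; restated as a standalone check (checkNumber is an illustrative name):

	package example

	import (
		"math/big"

		"github.com/ava-labs/coreth/consensus"
		"github.com/ava-labs/coreth/core/types"
	)

	// checkNumber fails unless header.Number is exactly parent.Number + 1.
	func checkNumber(parent, header *types.Header) error {
		if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
			return consensus.ErrInvalidNumber
		}
		return nil
	}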
ErrInvalidNumber = errors.New("invalid block number") ) diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go index 2b66c33..d6c8710 100644 --- a/consensus/ethash/algorithm.go +++ b/consensus/ethash/algorithm.go @@ -27,10 +27,10 @@ import ( "time" "unsafe" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/bitutil" - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/bitutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "golang.org/x/crypto/sha3" ) diff --git a/consensus/ethash/api.go b/consensus/ethash/api.go index 34ed386..c983504 100644 --- a/consensus/ethash/api.go +++ b/consensus/ethash/api.go @@ -20,15 +20,15 @@ import ( "errors" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" ) var errEthashStopped = errors.New("ethash stopped") // API exposes ethash related methods for the RPC interface. type API struct { - ethash *Ethash // Make sure the mode of ethash is normal. + ethash *Ethash } // GetWork returns a work package for external miner. @@ -39,7 +39,7 @@ type API struct { // result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty // result[3] - hex encoded block number func (api *API) GetWork() ([4]string, error) { - if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest { + if api.ethash.remote == nil { return [4]string{}, errors.New("not supported") } @@ -47,13 +47,11 @@ func (api *API) GetWork() ([4]string, error) { workCh = make(chan [4]string, 1) errc = make(chan error, 1) ) - select { - case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}: - case <-api.ethash.exitCh: + case api.ethash.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}: + case <-api.ethash.remote.exitCh: return [4]string{}, errEthashStopped } - select { case work := <-workCh: return work, nil @@ -66,23 +64,21 @@ func (api *API) GetWork() ([4]string, error) { // It returns an indication if the work was accepted. // Note either an invalid solution, a stale work a non-existent work will return false. func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool { - if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest { + if api.ethash.remote == nil { return false } var errc = make(chan error, 1) - select { - case api.ethash.submitWorkCh <- &mineResult{ + case api.ethash.remote.submitWorkCh <- &mineResult{ nonce: nonce, mixDigest: digest, hash: hash, errc: errc, }: - case <-api.ethash.exitCh: + case <-api.ethash.remote.exitCh: return false } - err := <-errc return err == nil } @@ -94,21 +90,19 @@ func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) boo // It accepts the miner hash rate and an identifier which must be unique // between nodes. 
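On the consuming side, result[2] of the GetWork package above decodes back into the integer boundary an external miner tests against; a sketch (decodeTarget is an illustrative name):

	package example

	import (
		"math/big"

		"github.com/ethereum/go-ethereum/common"
	)

	// decodeTarget recovers the boundary from a GetWork result: a nonce is a
	// valid solution when the PoW hash, read big-endian, is at most this
	// target (2^256 / difficulty).
	func decodeTarget(work [4]string) *big.Int {
		return new(big.Int).SetBytes(common.HexToHash(work[2]).Bytes())
	}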
func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool { - if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest { + if api.ethash.remote == nil { return false } var done = make(chan struct{}, 1) - select { - case api.ethash.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}: - case <-api.ethash.exitCh: + case api.ethash.remote.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}: + case <-api.ethash.remote.exitCh: return false } // Block until hash rate submitted successfully. <-done - return true } diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index dc88a79..151761c 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -29,10 +29,11 @@ import ( "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/math" - "github.com/ava-labs/go-ethereum/rlp" mapset "github.com/deckarep/golang-set" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" "golang.org/x/crypto/sha3" ) @@ -44,6 +45,11 @@ var ( maxUncles = 2 // Maximum number of uncles allowed in a single block allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks + // calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384. + // It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks. + // Specification EIP-2384: https://eips.ethereum.org/EIPS/eip-2384 + calcDifficultyEip2384 = makeDifficultyCalculator(big.NewInt(9000000)) + // calcDifficultyConstantinople is the difficulty adjustment algorithm for Constantinople. // It returns the difficulty that a new block should have when created at time given the // parent block's time and difficulty. The calculation uses the Byzantium rules, but with @@ -63,7 +69,7 @@ var ( // codebase, inherently breaking if the engine is swapped out. Please put common // error types into the consensus package. var ( - errZeroBlockTime = errors.New("timestamp equals parent's") + errOlderBlockTime = errors.New("timestamp older than parent") errTooManyUncles = errors.New("too many uncles") errDuplicateUncle = errors.New("duplicate uncle") errUncleIsAncestor = errors.New("uncle is ancestor") @@ -81,12 +87,12 @@ func (ethash *Ethash) Author(header *types.Header) (common.Address, error) { // VerifyHeader checks whether a header conforms to the consensus rules of the // stock Ethereum ethash engine. -func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error { +func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { // If we're running a full engine faking, accept any input as valid if ethash.config.PowMode == ModeFullFake { return nil } - // Short circuit if the header is known, or it's parent not + // Short circuit if the header is known, or its parent not number := header.Number.Uint64() if chain.GetHeader(header.Hash(), number) != nil { return nil @@ -102,7 +108,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers // concurrently. 
The method returns a quit channel to abort the operations and // a results channel to retrieve the async verifications. -func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { +func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { // If we're running a full engine faking, accept any input as valid if ethash.config.PowMode == ModeFullFake || len(headers) == 0 { abort, results := make(chan struct{}), make(chan error, len(headers)) @@ -164,7 +170,7 @@ func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*type return abort, errorsOut } -func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainReader, headers []*types.Header, seals []bool, index int) error { +func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int) error { var parent *types.Header if index == 0 { parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1) @@ -238,7 +244,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo // verifyHeader checks whether a header conforms to the consensus rules of the // stock Ethereum ethash engine. // See YP section 4.3.4. "Block Header Validity" -func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error { +func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool) error { // Ensure that the header's extra-data section is of a reasonable size if uint64(len(header.Extra)) > params.MaximumExtraDataSize { return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize) @@ -250,9 +256,9 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent * } } if header.Time <= parent.Time { - return errZeroBlockTime + return errOlderBlockTime } - // Verify the block's difficulty based in it's timestamp and parent's difficulty + // Verify the block's difficulty based on its timestamp and parent's difficulty expected := ethash.CalcDifficulty(chain, header.Time, parent) if expected.Cmp(header.Difficulty) != 0 { @@ -301,7 +307,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent * // CalcDifficulty is the difficulty adjustment algorithm. It returns // the difficulty that a new block should have when created at time // given the parent block's time and difficulty. 
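A typical driver for the batch verifier, matching the contract documented above (one error per header, in input order; closing abort cancels outstanding work); a usage sketch, not patch code:

	package example

	import (
		"fmt"

		"github.com/ava-labs/coreth/consensus"
		"github.com/ava-labs/coreth/core/types"
	)

	// verifyAll drains VerifyHeaders and stops the workers early on the first
	// failure by closing the abort channel.
	func verifyAll(engine consensus.Engine, chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) error {
		abort, results := engine.VerifyHeaders(chain, headers, seals)
		defer close(abort)
		for i := range headers {
			if err := <-results; err != nil {
				return fmt.Errorf("header %d invalid: %v", i, err)
			}
		}
		return nil
	}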
-func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int { +func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { return CalcDifficulty(chain.Config(), time, parent) } @@ -311,6 +317,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, p func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int { next := new(big.Int).Add(parent.Number, big1) switch { + case config.IsMuirGlacier(next): + return calcDifficultyEip2384(time, parent) case config.IsConstantinople(next): return calcDifficultyConstantinople(time, parent) case config.IsByzantium(next): @@ -479,14 +487,14 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int { // VerifySeal implements consensus.Engine, checking whether the given block satisfies // the PoW difficulty requirements. -func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error { +func (ethash *Ethash) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { return ethash.verifySeal(chain, header, false) } // verifySeal checks whether a block satisfies the PoW difficulty requirements, // either using the usual ethash cache for it, or alternatively using a full DAG // to make remote mining fast. -func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Header, fulldag bool) error { +func (ethash *Ethash) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, fulldag bool) error { // If we're running a fake PoW, accept any seal as valid if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake { time.Sleep(ethash.fakeDelay) @@ -551,7 +559,7 @@ func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Head // Prepare implements consensus.Engine, initializing the difficulty field of a // header to conform to the ethash protocol. The changes are done inline. -func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header) error { +func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) if parent == nil { return consensus.ErrUnknownAncestor @@ -562,7 +570,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header) // Finalize implements consensus.Engine, accumulating the block and uncle rewards, // setting the final state on the header -func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { +func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { // Accumulate any block and uncle rewards and commit the final state root accumulateRewards(chain.Config(), state, header, uncles) header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) @@ -570,13 +578,13 @@ func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header // FinalizeAndAssemble implements consensus.Engine, accumulating the block and // uncle rewards, setting the final state and assembling the block. 
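The Muir Glacier hook added above, calcDifficultyEip2384 = makeDifficultyCalculator(big.NewInt(9000000)), delays the difficulty bomb by computing its exponential term against a block number pushed back by the delay. A sketch of that offset arithmetic, assuming the upstream shape of makeDifficultyCalculator (subtract delay-1 from the parent number, floor at zero):

	package example

	import "math/big"

	// fakeBombNumber is the block number the bomb "sees" under a delay:
	// max(0, parentNumber - (delay - 1)). With delay = 9,000,000 the bomb's
	// period count (fake / 100000) stays at zero until far past the fork.
	func fakeBombNumber(parentNumber, delay *big.Int) *big.Int {
		fake := new(big.Int).Sub(parentNumber, new(big.Int).Sub(delay, big.NewInt(1)))
		if fake.Sign() < 0 {
			fake.SetUint64(0)
		}
		return fake
	}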
-func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { +func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { // Accumulate any block and uncle rewards and commit the final state root accumulateRewards(chain.Config(), state, header, uncles) header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts, nil), nil + return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil } // SealHash returns the hash of a block prior to it being sealed. diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index 53420d0..4a3912d 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -34,12 +34,10 @@ import ( "unsafe" "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/rpc" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/log" - "github.com/ava-labs/go-ethereum/metrics" mmap "github.com/edsrzf/mmap-go" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/hashicorp/golang-lru/simplelru" ) @@ -50,7 +48,7 @@ var ( two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) // sharedEthash is a full instance that can be shared between multiple users. - sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil, false) + sharedEthash = New(Config{"", 3, 0, false, "", 1, 0, false, ModeNormal, nil}, nil, false) // algorithmRevision is the data structure version used for file naming. algorithmRevision = 23 @@ -67,7 +65,7 @@ func isLittleEndian() bool { } // memoryMap tries to memory map a file of uint32s for read only access. -func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) { +func memoryMap(path string, lock bool) (*os.File, mmap.MMap, []uint32, error) { file, err := os.OpenFile(path, os.O_RDONLY, 0644) if err != nil { return nil, nil, nil, err @@ -84,6 +82,13 @@ func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) { return nil, nil, nil, ErrInvalidDumpMagic } } + if lock { + if err := mem.Lock(); err != nil { + mem.Unmap() + file.Close() + return nil, nil, nil, err + } + } return file, mem, buffer[len(dumpMagic):], err } @@ -109,7 +114,7 @@ func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) { // memoryMapAndGenerate tries to memory map a temporary file of uint32s for write // access, fill it with the data from a generator and then move it into the final // path requested. 
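The new lock flag threaded through memoryMap above mlocks the mapping so the OS cannot page out a hot cache or DAG; on failure both the mapping and the file must be unwound. The pattern with edsrzf/mmap-go, extracted as a sketch (mapLocked is an illustrative name):

	package example

	import (
		"os"

		mmap "github.com/edsrzf/mmap-go"
	)

	// mapLocked maps a file read-only and optionally pins it in RAM. Lock can
	// fail (e.g. a low RLIMIT_MEMLOCK), in which case everything is released.
	func mapLocked(file *os.File, lock bool) (mmap.MMap, error) {
		mem, err := mmap.Map(file, mmap.RDONLY, 0)
		if err != nil {
			return nil, err
		}
		if lock {
			if err := mem.Lock(); err != nil {
				mem.Unmap()
				return nil, err
			}
		}
		return mem, nil
	}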
-func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) { +func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) { // Ensure the data folder exists if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { return nil, nil, nil, err @@ -144,7 +149,7 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint if err := os.Rename(temp, path); err != nil { return nil, nil, nil, err } - return memoryMap(path) + return memoryMap(path, lock) } // lru tracks caches or datasets by their last use time, keeping at most N of them. @@ -215,7 +220,7 @@ func newCache(epoch uint64) interface{} { } // generate ensures that the cache content is generated before use. -func (c *cache) generate(dir string, limit int, test bool) { +func (c *cache) generate(dir string, limit int, lock bool, test bool) { c.once.Do(func() { size := cacheSize(c.epoch*epochLength + 1) seed := seedHash(c.epoch*epochLength + 1) @@ -242,7 +247,7 @@ func (c *cache) generate(dir string, limit int, test bool) { // Try to load the file from disk and memory map it var err error - c.dump, c.mmap, c.cache, err = memoryMap(path) + c.dump, c.mmap, c.cache, err = memoryMap(path, lock) if err == nil { logger.Debug("Loaded old ethash cache from disk") return @@ -250,7 +255,7 @@ func (c *cache) generate(dir string, limit int, test bool) { logger.Debug("Failed to load old ethash cache", "err", err) // No previous cache available, create a new cache file to fill - c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) }) + c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) }) if err != nil { logger.Error("Failed to generate mapped ethash cache", "err", err) @@ -292,7 +297,7 @@ func newDataset(epoch uint64) interface{} { } // generate ensures that the dataset content is generated before use. -func (d *dataset) generate(dir string, limit int, test bool) { +func (d *dataset) generate(dir string, limit int, lock bool, test bool) { d.once.Do(func() { // Mark the dataset generated after we're done. This is needed for remote defer atomic.StoreUint32(&d.done, 1) @@ -328,7 +333,7 @@ func (d *dataset) generate(dir string, limit int, test bool) { // Try to load the file from disk and memory map it var err error - d.dump, d.mmap, d.dataset, err = memoryMap(path) + d.dump, d.mmap, d.dataset, err = memoryMap(path, lock) if err == nil { logger.Debug("Loaded old ethash dataset from disk") return @@ -339,7 +344,7 @@ func (d *dataset) generate(dir string, limit int, test bool) { cache := make([]uint32, csize/4) generateCache(cache, d.epoch, seed) - d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) }) + d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) }) if err != nil { logger.Error("Failed to generate mapped ethash dataset", "err", err) @@ -374,13 +379,13 @@ func (d *dataset) finalizer() { // MakeCache generates a new ethash cache and optionally stores it to disk. 
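Both cache.generate and dataset.generate above lean on the same concurrency idiom: sync.Once lets the first caller build the data while concurrent callers block until it is ready, so generate is safe to call from every verification path. Distilled (onceCache is an illustrative name):

	package example

	import "sync"

	// onceCache builds its payload exactly once; concurrent get calls wait
	// for the first build and then share the result.
	type onceCache struct {
		once sync.Once
		data []uint32
	}

	func (c *onceCache) get(build func() []uint32) []uint32 {
		c.once.Do(func() { c.data = build() })
		return c.data
	}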
func MakeCache(block uint64, dir string) { c := cache{epoch: block / epochLength} - c.generate(dir, math.MaxInt32, false) + c.generate(dir, math.MaxInt32, false, false) } // MakeDataset generates a new ethash dataset and optionally stores it to disk. func MakeDataset(block uint64, dir string) { d := dataset{epoch: block / epochLength} - d.generate(dir, math.MaxInt32, false) + d.generate(dir, math.MaxInt32, false, false) } // Mode defines the type and amount of PoW verification an ethash engine makes. @@ -396,43 +401,17 @@ const ( // Config are the configuration parameters of the ethash. type Config struct { - CacheDir string - CachesInMem int - CachesOnDisk int - DatasetDir string - DatasetsInMem int - DatasetsOnDisk int - PowMode Mode -} + CacheDir string + CachesInMem int + CachesOnDisk int + CachesLockMmap bool + DatasetDir string + DatasetsInMem int + DatasetsOnDisk int + DatasetsLockMmap bool + PowMode Mode -// sealTask wraps a seal block with relative result channel for remote sealer thread. -type sealTask struct { - block *types.Block - results chan<- *types.Block -} - -// mineResult wraps the pow solution parameters for the specified block. -type mineResult struct { - nonce types.BlockNonce - mixDigest common.Hash - hash common.Hash - - errc chan error -} - -// hashrate wraps the hash rate submitted by the remote sealer. -type hashrate struct { - id common.Hash - ping time.Time - rate uint64 - - done chan struct{} -} - -// sealWork wraps a seal work package for remote sealer. -type sealWork struct { - errc chan error - res chan [4]string + Log log.Logger `toml:"-"` } // Ethash is a consensus engine based on proof-of-work implementing the ethash @@ -448,52 +427,42 @@ type Ethash struct { threads int // Number of threads to mine on if mining update chan struct{} // Notification channel to update mining parameters hashrate metrics.Meter // Meter tracking the average hashrate - - // Remote sealer related fields - workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer - fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work - submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result - fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer. - submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate + remote *remoteSealer // The fields below are hooks for testing shared *Ethash // Shared PoW verifier to avoid cache regeneration fakeFail uint64 // Block number which fails PoW check even in fake mode fakeDelay time.Duration // Time delay to sleep for before returning from verify - lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields - closeOnce sync.Once // Ensures exit channel will not be closed twice. - exitCh chan chan error // Notification channel to exiting backend threads + lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields + closeOnce sync.Once // Ensures exit channel will not be closed twice. } // New creates a full sized ethash PoW scheme and starts a background thread for // remote mining, also optionally notifying a batch of remote services of new work // packages. 
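With the reshaped Config above (per-instance logger plus the two mmap-lock switches), a caller might wire the engine as below; directory names and counts are illustrative only:

	package example

	import (
		"github.com/ava-labs/coreth/consensus/ethash"
		"github.com/ethereum/go-ethereum/log"
	)

	func newEngine() *ethash.Ethash {
		return ethash.New(ethash.Config{
			CacheDir:         "ethash",
			CachesInMem:      2,
			CachesOnDisk:     3,
			CachesLockMmap:   false, // true mlocks caches in RAM
			DatasetDir:       "ethash-dags",
			DatasetsInMem:    1,
			DatasetsOnDisk:   2,
			DatasetsLockMmap: false,
			PowMode:          ethash.ModeNormal,
			Log:              log.Root(), // optional: New defaults nil to log.Root()
		}, nil, false) // no notify URLs, seal verification on
	}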
func New(config Config, notify []string, noverify bool) *Ethash { + if config.Log == nil { + config.Log = log.Root() + } if config.CachesInMem <= 0 { - log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) + config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) config.CachesInMem = 1 } if config.CacheDir != "" && config.CachesOnDisk > 0 { - log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk) + config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk) } if config.DatasetDir != "" && config.DatasetsOnDisk > 0 { - log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk) + config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk) } ethash := &Ethash{ - config: config, - caches: newlru("cache", config.CachesInMem, newCache), - datasets: newlru("dataset", config.DatasetsInMem, newDataset), - update: make(chan struct{}), - hashrate: metrics.NewMeterForced(), - workCh: make(chan *sealTask), - fetchWorkCh: make(chan *sealWork), - submitWorkCh: make(chan *mineResult), - fetchRateCh: make(chan chan uint64), - submitRateCh: make(chan *hashrate), - exitCh: make(chan chan error), - } - go ethash.remote(notify, noverify) + config: config, + caches: newlru("cache", config.CachesInMem, newCache), + datasets: newlru("dataset", config.DatasetsInMem, newDataset), + update: make(chan struct{}), + hashrate: metrics.NewMeterForced(), + } + ethash.remote = startRemoteSealer(ethash, notify, noverify) return ethash } @@ -501,19 +470,13 @@ func New(config Config, notify []string, noverify bool) *Ethash { // purposes. func NewTester(notify []string, noverify bool) *Ethash { ethash := &Ethash{ - config: Config{PowMode: ModeTest}, - caches: newlru("cache", 1, newCache), - datasets: newlru("dataset", 1, newDataset), - update: make(chan struct{}), - hashrate: metrics.NewMeterForced(), - workCh: make(chan *sealTask), - fetchWorkCh: make(chan *sealWork), - submitWorkCh: make(chan *mineResult), - fetchRateCh: make(chan chan uint64), - submitRateCh: make(chan *hashrate), - exitCh: make(chan chan error), - } - go ethash.remote(notify, noverify) + config: Config{PowMode: ModeTest, Log: log.Root()}, + caches: newlru("cache", 1, newCache), + datasets: newlru("dataset", 1, newDataset), + update: make(chan struct{}), + hashrate: metrics.NewMeterForced(), + } + ethash.remote = startRemoteSealer(ethash, notify, noverify) return ethash } @@ -524,6 +487,7 @@ func NewFaker() *Ethash { return &Ethash{ config: Config{ PowMode: ModeFake, + Log: log.Root(), }, } } @@ -535,6 +499,7 @@ func NewFakeFailer(fail uint64) *Ethash { return &Ethash{ config: Config{ PowMode: ModeFake, + Log: log.Root(), }, fakeFail: fail, } @@ -547,6 +512,7 @@ func NewFakeDelayer(delay time.Duration) *Ethash { return &Ethash{ config: Config{ PowMode: ModeFake, + Log: log.Root(), }, fakeDelay: delay, } @@ -558,6 +524,7 @@ func NewFullFaker() *Ethash { return &Ethash{ config: Config{ PowMode: ModeFullFake, + Log: log.Root(), }, } } @@ -573,13 +540,11 @@ func (ethash *Ethash) Close() error { var err error ethash.closeOnce.Do(func() { // Short circuit if the exit channel is not allocated. 
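Close (below) now stops the sealer with a two-channel handshake instead of the old error round-trip: closing requestExit asks the loop to return, and the loop's deferred cleanup closes exitCh once everything is drained. The idiom in isolation, as a sketch:

	package example

	func handshake(serve func(stop <-chan struct{})) {
		requestExit := make(chan struct{})
		exitCh := make(chan struct{})

		go func() {
			defer close(exitCh) // signals "fully stopped" after cleanup ran
			serve(requestExit)  // returns once requestExit is closed
		}()

		close(requestExit) // ask the loop to stop...
		<-exitCh           // ...and wait until it actually has
	}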
- if ethash.exitCh == nil { + if ethash.remote == nil { return } - errc := make(chan error) - ethash.exitCh <- errc - err = <-errc - close(ethash.exitCh) + close(ethash.remote.requestExit) + <-ethash.remote.exitCh }) return err } @@ -593,12 +558,12 @@ func (ethash *Ethash) cache(block uint64) *cache { current := currentI.(*cache) // Wait for generation finish. - current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest) + current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest) // If we need a new future cache, now's a good time to regenerate it. if futureI != nil { future := futureI.(*cache) - go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest) + go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest) } return current } @@ -618,20 +583,20 @@ func (ethash *Ethash) dataset(block uint64, async bool) *dataset { // If async is specified, generate everything in a background thread if async && !current.generated() { go func() { - current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) if futureI != nil { future := futureI.(*dataset) - future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) } }() } else { // Either blocking generation was requested, or already done - current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) if futureI != nil { future := futureI.(*dataset) - go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) } } return current @@ -680,8 +645,8 @@ func (ethash *Ethash) Hashrate() float64 { var res = make(chan uint64, 1) select { - case ethash.fetchRateCh <- res: - case <-ethash.exitCh: + case ethash.remote.fetchRateCh <- res: + case <-ethash.remote.exitCh: // Return local hashrate only if ethash is stopped. return ethash.hashrate.Rate1() } @@ -691,7 +656,7 @@ func (ethash *Ethash) Hashrate() float64 { } // APIs implements consensus.Engine, returning the user facing RPC APIs. -func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API { +func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API { // In order to ensure backward compatibility, we exposes ethash RPC APIs // to both eth and ethash namespaces. 
return []rpc.API{ diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go index 799be05..cb8eed7 100644 --- a/consensus/ethash/sealer.go +++ b/consensus/ethash/sealer.go @@ -18,6 +18,7 @@ package ethash import ( "bytes" + "context" crand "crypto/rand" "encoding/json" "errors" @@ -31,9 +32,8 @@ import ( "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" ) const ( @@ -48,7 +48,7 @@ var ( // Seal implements consensus.Engine, attempting to find a nonce that satisfies // the block's difficulty requirements. -func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { +func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { // If we're running a fake PoW, simply return a 0 nonce immediately if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake { header := block.Header() @@ -56,7 +56,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu select { case results <- block.WithSeal(header): default: - log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header())) + ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header())) } return nil } @@ -85,8 +85,8 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu threads = 0 // Allows disabling local mining without extra logic around local/remote } // Push new work to remote sealer - if ethash.workCh != nil { - ethash.workCh <- &sealTask{block: block, results: results} + if ethash.remote != nil { + ethash.remote.workCh <- &sealTask{block: block, results: results} } var ( pend sync.WaitGroup @@ -111,14 +111,14 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu select { case results <- result: default: - log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header())) + ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header())) } close(abort) case <-ethash.update: // Thread count was changed on user request, restart close(abort) if err := ethash.Seal(chain, block, results, stop); err != nil { - log.Error("Failed to restart sealing after update", "err", err) + ethash.config.Log.Error("Failed to restart sealing after update", "err", err) } } // Wait for all miners to terminate and return the block @@ -143,7 +143,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s attempts = int64(0) nonce = seed ) - logger := log.New("miner", id) + logger := ethash.config.Log.New("miner", id) logger.Trace("Started ethash search for new nonces", "seed", seed) search: for { @@ -186,160 +186,128 @@ search: runtime.KeepAlive(dataset) } -// remote is a standalone goroutine to handle remote mining related stuff. -func (ethash *Ethash) remote(notify []string, noverify bool) { - var ( - works = make(map[common.Hash]*types.Block) - rates = make(map[common.Hash]hashrate) +// This is the timeout for HTTP requests to notify external miners. 
+const remoteSealerTimeout = 1 * time.Second - results chan<- *types.Block - currentBlock *types.Block - currentWork [4]string +type remoteSealer struct { + works map[common.Hash]*types.Block + rates map[common.Hash]hashrate + currentBlock *types.Block + currentWork [4]string + notifyCtx context.Context + cancelNotify context.CancelFunc // cancels all notification requests + reqWG sync.WaitGroup // tracks notification request goroutines - notifyTransport = &http.Transport{} - notifyClient = &http.Client{ - Transport: notifyTransport, - Timeout: time.Second, - } - notifyReqs = make([]*http.Request, len(notify)) - ) - // notifyWork notifies all the specified mining endpoints of the availability of - // new work to be processed. - notifyWork := func() { - work := currentWork - blob, _ := json.Marshal(work) - - for i, url := range notify { - // Terminate any previously pending request and create the new work - if notifyReqs[i] != nil { - notifyTransport.CancelRequest(notifyReqs[i]) - } - notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob)) - notifyReqs[i].Header.Set("Content-Type", "application/json") - - // Push the new work concurrently to all the remote nodes - go func(req *http.Request, url string) { - res, err := notifyClient.Do(req) - if err != nil { - log.Warn("Failed to notify remote miner", "err", err) - } else { - log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2]) - res.Body.Close() - } - }(notifyReqs[i], url) - } - } - // makeWork creates a work package for external miner. - // - // The work package consists of 3 strings: - // result[0], 32 bytes hex encoded current block header pow-hash - // result[1], 32 bytes hex encoded seed hash used for DAG - // result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty - // result[3], hex encoded block number - makeWork := func(block *types.Block) { - hash := ethash.SealHash(block.Header()) - - currentWork[0] = hash.Hex() - currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex() - currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex() - currentWork[3] = hexutil.EncodeBig(block.Number()) - - // Trace the seal work fetched by remote sealer. - currentBlock = block - works[hash] = block - } - // submitWork verifies the submitted pow solution, returning - // whether the solution was accepted or not (not can be both a bad pow as well as - // any other error, like no pending work or stale mining result). - submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool { - if currentBlock == nil { - log.Error("Pending work without block", "sealhash", sealhash) - return false - } - // Make sure the work submitted is present - block := works[sealhash] - if block == nil { - log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", currentBlock.NumberU64()) - return false - } - // Verify the correctness of submitted result. - header := block.Header() - header.Nonce = nonce - header.MixDigest = mixDigest - - start := time.Now() - if !noverify { - if err := ethash.verifySeal(nil, header, true); err != nil { - log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err) - return false - } - } - // Make sure the result channel is assigned. 
- if results == nil { - log.Warn("Ethash result channel is empty, submitted mining result is rejected") - return false - } - log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start))) + ethash *Ethash + noverify bool + notifyURLs []string + results chan<- *types.Block + workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer + fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work + submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result + fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer. + submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate + requestExit chan struct{} + exitCh chan struct{} +} - // Solutions seems to be valid, return to the miner and notify acceptance. - solution := block.WithSeal(header) +// sealTask wraps a seal block with relative result channel for remote sealer thread. +type sealTask struct { + block *types.Block + results chan<- *types.Block +} - // The submitted solution is within the scope of acceptance. - if solution.NumberU64()+staleThreshold > currentBlock.NumberU64() { - select { - case results <- solution: - log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash()) - return true - default: - log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash) - return false - } - } - // The submitted block is too old to accept, drop it. - log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash()) - return false +// mineResult wraps the pow solution parameters for the specified block. +type mineResult struct { + nonce types.BlockNonce + mixDigest common.Hash + hash common.Hash + + errc chan error +} + +// hashrate wraps the hash rate submitted by the remote sealer. +type hashrate struct { + id common.Hash + ping time.Time + rate uint64 + + done chan struct{} +} + +// sealWork wraps a seal work package for remote sealer. +type sealWork struct { + errc chan error + res chan [4]string +} + +func startRemoteSealer(ethash *Ethash, urls []string, noverify bool) *remoteSealer { + ctx, cancel := context.WithCancel(context.Background()) + s := &remoteSealer{ + ethash: ethash, + noverify: noverify, + notifyURLs: urls, + notifyCtx: ctx, + cancelNotify: cancel, + works: make(map[common.Hash]*types.Block), + rates: make(map[common.Hash]hashrate), + workCh: make(chan *sealTask), + fetchWorkCh: make(chan *sealWork), + submitWorkCh: make(chan *mineResult), + fetchRateCh: make(chan chan uint64), + submitRateCh: make(chan *hashrate), + requestExit: make(chan struct{}), + exitCh: make(chan struct{}), } + go s.loop() + return s +} + +func (s *remoteSealer) loop() { + defer func() { + s.ethash.config.Log.Trace("Ethash remote sealer is exiting") + s.cancelNotify() + s.reqWG.Wait() + close(s.exitCh) + }() ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() for { select { - case work := <-ethash.workCh: + case work := <-s.workCh: // Update current work with new received block. // Note same work can be past twice, happens when changing CPU threads. 
- results = work.results + s.results = work.results + s.makeWork(work.block) + s.notifyWork() - makeWork(work.block) - - // Notify and requested URLs of the new work availability - notifyWork() - - case work := <-ethash.fetchWorkCh: + case work := <-s.fetchWorkCh: // Return current mining work to remote miner. - if currentBlock == nil { + if s.currentBlock == nil { work.errc <- errNoMiningWork } else { - work.res <- currentWork + work.res <- s.currentWork } - case result := <-ethash.submitWorkCh: + case result := <-s.submitWorkCh: // Verify submitted PoW solution based on maintained mining blocks. - if submitWork(result.nonce, result.mixDigest, result.hash) { + if s.submitWork(result.nonce, result.mixDigest, result.hash) { result.errc <- nil } else { result.errc <- errInvalidSealResult } - case result := <-ethash.submitRateCh: + case result := <-s.submitRateCh: // Trace remote sealer's hash rate by submitted value. - rates[result.id] = hashrate{rate: result.rate, ping: time.Now()} + s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()} close(result.done) - case req := <-ethash.fetchRateCh: + case req := <-s.fetchRateCh: // Gather all hash rate submitted by remote sealer. var total uint64 - for _, rate := range rates { + for _, rate := range s.rates { // this could overflow total += rate.rate } @@ -347,25 +315,126 @@ func (ethash *Ethash) remote(notify []string, noverify bool) { case <-ticker.C: // Clear stale submitted hash rate. - for id, rate := range rates { + for id, rate := range s.rates { if time.Since(rate.ping) > 10*time.Second { - delete(rates, id) + delete(s.rates, id) } } // Clear stale pending blocks - if currentBlock != nil { - for hash, block := range works { - if block.NumberU64()+staleThreshold <= currentBlock.NumberU64() { - delete(works, hash) + if s.currentBlock != nil { + for hash, block := range s.works { + if block.NumberU64()+staleThreshold <= s.currentBlock.NumberU64() { + delete(s.works, hash) } } } - case errc := <-ethash.exitCh: - // Exit remote loop if ethash is closed and return relevant error. - errc <- nil - log.Trace("Ethash remote sealer is exiting") + case <-s.requestExit: return } } } + +// makeWork creates a work package for external miner. +// +// The work package consists of 4 strings: +// result[0], 32 bytes hex encoded current block header pow-hash +// result[1], 32 bytes hex encoded seed hash used for DAG +// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty +// result[3], hex encoded block number +func (s *remoteSealer) makeWork(block *types.Block) { + hash := s.ethash.SealHash(block.Header()) + s.currentWork[0] = hash.Hex() + s.currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex() + s.currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex() + s.currentWork[3] = hexutil.EncodeBig(block.Number()) + + // Trace the seal work fetched by remote sealer. + s.currentBlock = block + s.works[hash] = block +} + +// notifyWork notifies all the specified mining endpoints of the availability of +// new work to be processed.
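+// Each endpoint is notified on its own goroutine, so one slow or unreachable
+// miner cannot stall the sealer loop; reqWG tracks these goroutines and
+// notifyCtx lets the loop's shutdown path cancel any in-flight requests.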
+func (s *remoteSealer) notifyWork() { + work := s.currentWork + blob, _ := json.Marshal(work) + s.reqWG.Add(len(s.notifyURLs)) + for _, url := range s.notifyURLs { + go s.sendNotification(s.notifyCtx, url, blob, work) + } +} + +func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) { + defer s.reqWG.Done() + + req, err := http.NewRequest("POST", url, bytes.NewReader(json)) + if err != nil { + s.ethash.config.Log.Warn("Can't create remote miner notification", "err", err) + return + } + ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout) + defer cancel() + req = req.WithContext(ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + s.ethash.config.Log.Warn("Failed to notify remote miner", "err", err) + } else { + s.ethash.config.Log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2]) + resp.Body.Close() + } +} + +// submitWork verifies the submitted pow solution, returning +// whether the solution was accepted or not (a rejection can be caused by a bad pow +// as well as any other error, like no pending work or a stale mining result). +func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool { + if s.currentBlock == nil { + s.ethash.config.Log.Error("Pending work without block", "sealhash", sealhash) + return false + } + // Make sure the work submitted is present + block := s.works[sealhash] + if block == nil { + s.ethash.config.Log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentBlock.NumberU64()) + return false + } + // Verify the correctness of submitted result. + header := block.Header() + header.Nonce = nonce + header.MixDigest = mixDigest + + start := time.Now() + if !s.noverify { + if err := s.ethash.verifySeal(nil, header, true); err != nil { + s.ethash.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err) + return false + } + } + // Make sure the result channel is assigned. + if s.results == nil { + s.ethash.config.Log.Warn("Ethash result channel is empty, submitted mining result is rejected") + return false + } + s.ethash.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start))) + + // Solution seems to be valid, return to the miner and notify acceptance. + solution := block.WithSeal(header) + + // The submitted solution is within the scope of acceptance. + if solution.NumberU64()+staleThreshold > s.currentBlock.NumberU64() { + select { + case s.results <- solution: + s.ethash.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash()) + return true + default: + s.ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash) + return false + } + } + // The submitted block is too old to accept, drop it.
+ s.ethash.config.Log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash()) + return false +} diff --git a/consensus/misc/dao.go b/consensus/misc/dao.go index 8ce79a2..4c80a59 100644 --- a/consensus/misc/dao.go +++ b/consensus/misc/dao.go @@ -27,7 +27,7 @@ import ( ) var ( - // ErrBadProDAOExtra is returned if a header doens't support the DAO fork on a + // ErrBadProDAOExtra is returned if a header doesn't support the DAO fork on a // pro-fork client. ErrBadProDAOExtra = errors.New("bad DAO pro-fork extra-data") diff --git a/consensus/misc/forks.go b/consensus/misc/forks.go index 06792b0..72b7124 100644 --- a/consensus/misc/forks.go +++ b/consensus/misc/forks.go @@ -21,7 +21,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" ) // VerifyForkHashes verifies that blocks conforming to network hard-forks do have -- cgit v1.2.3-70-g09d2 From 8478802ddacc027a8d8c866da9365f6739d9d9d4 Mon Sep 17 00:00:00 2001 From: Determinant Date: Wed, 16 Sep 2020 18:26:55 -0400 Subject: ... --- accounts/accounts.go | 8 ++++---- accounts/external/backend.go | 14 +++++++------- accounts/keystore/account_cache.go | 4 ++-- accounts/keystore/file_cache.go | 2 +- accounts/keystore/key.go | 4 ++-- accounts/keystore/keystore.go | 6 +++--- accounts/keystore/passphrase.go | 8 ++++---- accounts/keystore/plain.go | 2 +- accounts/keystore/presale.go | 2 +- accounts/keystore/wallet.go | 4 ++-- accounts/keystore/watch.go | 2 +- accounts/manager.go | 4 ++-- accounts/scwallet/hub.go | 6 +++--- accounts/scwallet/securechannel.go | 2 +- accounts/scwallet/wallet.go | 8 ++++---- consensus/dummy/consensus.go | 4 ++-- core/blockchain.go | 2 +- core/gen_genesis.go | 6 +++--- core/gen_genesis_account.go | 6 +++--- core/state/journal.go | 2 +- core/types/receipt.go | 2 +- core/types/transaction_signing.go | 2 +- coreth.go | 10 +++++----- examples/block/main.go | 8 ++++---- examples/chain/main.go | 8 ++++---- examples/counter/main.go | 8 ++++---- examples/multicoin/main.go | 10 +++++----- examples/payments/main.go | 6 +++--- node/api.go | 8 ++++---- plugin/evm/block.go | 2 +- plugin/evm/database.go | 2 +- plugin/evm/export_tx.go | 2 +- plugin/evm/import_tx.go | 2 +- plugin/evm/service.go | 6 +++--- plugin/evm/tx.go | 2 +- plugin/evm/user.go | 2 +- plugin/evm/vm.go | 8 ++++---- 37 files changed, 92 insertions(+), 92 deletions(-) (limited to 'consensus') diff --git a/accounts/accounts.go b/accounts/accounts.go index 2f39684..17978d3 100644 --- a/accounts/accounts.go +++ b/accounts/accounts.go @@ -22,10 +22,10 @@ import ( "math/big" "github.com/ava-labs/coreth/core/types" - ethereum "github.com/ava-labs/go-ethereum" - gethaccounts "github.com/ava-labs/go-ethereum/accounts" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/event" + ethereum "github.com/ethereum/go-ethereum" + gethaccounts "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/event" "golang.org/x/crypto/sha3" ) diff --git a/accounts/external/backend.go b/accounts/external/backend.go index 41329d3..f3744cf 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -23,13 +23,13 @@ import ( "github.com/ava-labs/coreth/accounts" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/go-ethereum" - "github.com/ava-labs/go-ethereum/common" - 
"github.com/ava-labs/go-ethereum/common/hexutil" - "github.com/ava-labs/go-ethereum/event" - "github.com/ava-labs/go-ethereum/log" - "github.com/ava-labs/go-ethereum/rpc" - "github.com/ava-labs/go-ethereum/signer/core" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/signer/core" ) type ExternalBackend struct { diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go index 2067ccb..76bd552 100644 --- a/accounts/keystore/account_cache.go +++ b/accounts/keystore/account_cache.go @@ -28,8 +28,8 @@ import ( "time" "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" mapset "github.com/deckarep/golang-set" ) diff --git a/accounts/keystore/file_cache.go b/accounts/keystore/file_cache.go index ac87f0c..73ff6ae 100644 --- a/accounts/keystore/file_cache.go +++ b/accounts/keystore/file_cache.go @@ -25,7 +25,7 @@ import ( "time" mapset "github.com/deckarep/golang-set" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/log" ) // fileCache is a cache of files seen during scan of keystore. diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go index 3654daa..dc54adb 100644 --- a/accounts/keystore/key.go +++ b/accounts/keystore/key.go @@ -30,8 +30,8 @@ import ( "time" "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/pborman/uuid" ) diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index 1bcaf13..cb4df04 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -34,9 +34,9 @@ import ( "github.com/ava-labs/coreth/accounts" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/event" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/event" ) var ( diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go index 04f31ff..f8a62c6 100644 --- a/accounts/keystore/passphrase.go +++ b/accounts/keystore/passphrase.go @@ -39,9 +39,9 @@ import ( "path/filepath" "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/math" - "github.com/ava-labs/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" "github.com/pborman/uuid" "golang.org/x/crypto/pbkdf2" "golang.org/x/crypto/scrypt" @@ -121,7 +121,7 @@ func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) er "This indicates that the keystore is corrupted. \n" + "The corrupted file is stored at \n%v\n" + "Please file a ticket at:\n\n" + - "https://github.com/ava-labs/go-ethereum/issues." + + "https://github.com/ethereum/go-ethereum/issues." 
+ "The error was : %s" //lint:ignore ST1005 This is a message for the user return fmt.Errorf(msg, tmpName, err) diff --git a/accounts/keystore/plain.go b/accounts/keystore/plain.go index bc5b377..f62a133 100644 --- a/accounts/keystore/plain.go +++ b/accounts/keystore/plain.go @@ -22,7 +22,7 @@ import ( "os" "path/filepath" - "github.com/ava-labs/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" ) type keyStorePlain struct { diff --git a/accounts/keystore/presale.go b/accounts/keystore/presale.go index 5122b0d..4833c6c 100644 --- a/accounts/keystore/presale.go +++ b/accounts/keystore/presale.go @@ -26,7 +26,7 @@ import ( "fmt" "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto" "github.com/pborman/uuid" "golang.org/x/crypto/pbkdf2" ) diff --git a/accounts/keystore/wallet.go b/accounts/keystore/wallet.go index 3be901b..7092385 100644 --- a/accounts/keystore/wallet.go +++ b/accounts/keystore/wallet.go @@ -21,8 +21,8 @@ import ( "github.com/ava-labs/coreth/accounts" "github.com/ava-labs/coreth/core/types" - ethereum "github.com/ava-labs/go-ethereum" - "github.com/ava-labs/go-ethereum/crypto" + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/crypto" ) // keystoreWallet implements the accounts.Wallet interface for the original diff --git a/accounts/keystore/watch.go b/accounts/keystore/watch.go index 7fa5b3c..d6ef533 100644 --- a/accounts/keystore/watch.go +++ b/accounts/keystore/watch.go @@ -21,7 +21,7 @@ package keystore import ( "time" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/log" "github.com/rjeczalik/notify" ) diff --git a/accounts/manager.go b/accounts/manager.go index 87d3df8..acf41ed 100644 --- a/accounts/manager.go +++ b/accounts/manager.go @@ -21,8 +21,8 @@ import ( "sort" "sync" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/event" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/event" ) // Config contains the settings of the global account manager. 
diff --git a/accounts/scwallet/hub.go b/accounts/scwallet/hub.go index 67483b8..1d0c124 100644 --- a/accounts/scwallet/hub.go +++ b/accounts/scwallet/hub.go @@ -42,9 +42,9 @@ import ( "time" "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/event" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" pcsc "github.com/gballet/go-libpcsclite" ) diff --git a/accounts/scwallet/securechannel.go b/accounts/scwallet/securechannel.go index a5666c9..9b70c69 100644 --- a/accounts/scwallet/securechannel.go +++ b/accounts/scwallet/securechannel.go @@ -25,7 +25,7 @@ import ( "crypto/sha512" "fmt" - "github.com/ava-labs/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto" pcsc "github.com/gballet/go-libpcsclite" "github.com/wsddn/go-ecdh" "golang.org/x/crypto/pbkdf2" diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go index 5d2aff8..2f08c17 100644 --- a/accounts/scwallet/wallet.go +++ b/accounts/scwallet/wallet.go @@ -35,10 +35,10 @@ import ( "github.com/ava-labs/coreth/accounts" "github.com/ava-labs/coreth/core/types" - ethereum "github.com/ava-labs/go-ethereum" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/log" + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" pcsc "github.com/gballet/go-libpcsclite" "github.com/status-im/keycard-go/derivationpath" ) diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go index 494e4be..2108684 100644 --- a/consensus/dummy/consensus.go +++ b/consensus/dummy/consensus.go @@ -13,8 +13,8 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" mapset "github.com/deckarep/golang-set" ) diff --git a/core/blockchain.go b/core/blockchain.go index 342790e..b861220 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -31,10 +31,10 @@ import ( "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/state/snapshot" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/prque" diff --git a/core/gen_genesis.go b/core/gen_genesis.go index 97175f7..b683ace 100644 --- a/core/gen_genesis.go +++ b/core/gen_genesis.go @@ -8,9 +8,9 @@ import ( "math/big" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" - "github.com/ava-labs/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" ) var _ = (*genesisSpecMarshaling)(nil) diff --git a/core/gen_genesis_account.go b/core/gen_genesis_account.go index b90b658..a2f503f 100644 --- a/core/gen_genesis_account.go +++ b/core/gen_genesis_account.go @@ -7,9 +7,9 @@ import ( "errors" "math/big" - 
"github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" - "github.com/ava-labs/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" ) var _ = (*genesisAccountMarshaling)(nil) diff --git a/core/state/journal.go b/core/state/journal.go index cfa1a4a..0cc556b 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -19,7 +19,7 @@ package state import ( "math/big" - "github.com/ava-labs/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" ) // journalEntry is a modification entry in the state change journal that can be diff --git a/core/types/receipt.go b/core/types/receipt.go index 4d57f59..21ac8a1 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -24,7 +24,7 @@ import ( "math/big" "unsafe" - "github.com/ethereum/coreth/params" + "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index ecc65d5..819c680 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -22,7 +22,7 @@ import ( "fmt" "math/big" - "github.com/ethereum/coreth/params" + "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" ) diff --git a/coreth.go b/coreth.go index c8a0e41..1fe2613 100644 --- a/coreth.go +++ b/coreth.go @@ -14,11 +14,11 @@ import ( "github.com/ava-labs/coreth/miner" "github.com/ava-labs/coreth/node" "github.com/ava-labs/coreth/rpc" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/ethdb" - "github.com/ava-labs/go-ethereum/event" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" "github.com/mattn/go-isatty" ) diff --git a/examples/block/main.go b/examples/block/main.go index 45d7586..89540c6 100644 --- a/examples/block/main.go +++ b/examples/block/main.go @@ -8,10 +8,10 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" "math/big" ) diff --git a/examples/chain/main.go b/examples/chain/main.go index 26fc5ce..17003b5 100644 --- a/examples/chain/main.go +++ b/examples/chain/main.go @@ -8,10 +8,10 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" - "github.com/ava-labs/go-ethereum/log" - "github.com/ava-labs/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" "math/big" "sync" ) diff --git a/examples/counter/main.go b/examples/counter/main.go index 
85aa9d1..9e18cd8 100644 --- a/examples/counter/main.go +++ b/examples/counter/main.go @@ -10,10 +10,10 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/compiler" - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/compiler" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "go/build" "math/big" "os" diff --git a/examples/multicoin/main.go b/examples/multicoin/main.go index 792a5b5..1f96647 100644 --- a/examples/multicoin/main.go +++ b/examples/multicoin/main.go @@ -10,11 +10,11 @@ import ( "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/go-ethereum/accounts/abi" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/compiler" - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/compiler" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "go/build" "math/big" "os" diff --git a/examples/payments/main.go b/examples/payments/main.go index 40a90c2..fee2060 100644 --- a/examples/payments/main.go +++ b/examples/payments/main.go @@ -8,9 +8,9 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" "math/big" ) diff --git a/node/api.go b/node/api.go index 4196a6b..e74d10b 100644 --- a/node/api.go +++ b/node/api.go @@ -22,10 +22,10 @@ import ( //"strings" "github.com/ava-labs/coreth/rpc" - "github.com/ava-labs/go-ethereum/common/hexutil" - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/p2p" - "github.com/ava-labs/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" ) // apis returns the collection of built-in RPC APIs. 
diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 134fa29..98280f0 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -8,7 +8,7 @@ import ( "fmt" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rlp" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" diff --git a/plugin/evm/database.go b/plugin/evm/database.go index aedbc9b..38b21d0 100644 --- a/plugin/evm/database.go +++ b/plugin/evm/database.go @@ -6,7 +6,7 @@ package evm import ( "errors" - "github.com/ava-labs/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ava-labs/avalanchego/database" ) diff --git a/plugin/evm/export_tx.go b/plugin/evm/export_tx.go index 43f858c..0487c44 100644 --- a/plugin/evm/export_tx.go +++ b/plugin/evm/export_tx.go @@ -8,7 +8,7 @@ import ( "math/big" "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/log" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" diff --git a/plugin/evm/import_tx.go b/plugin/evm/import_tx.go index c6ad2d9..7d17c4e 100644 --- a/plugin/evm/import_tx.go +++ b/plugin/evm/import_tx.go @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - "github.com/ava-labs/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" ) // UnsignedImportTx is an unsigned ImportTx diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 33429b5..65d389d 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -20,9 +20,9 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" - ethcrypto "github.com/ava-labs/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + ethcrypto "github.com/ethereum/go-ethereum/crypto" ) const ( diff --git a/plugin/evm/tx.go b/plugin/evm/tx.go index e573580..e39a053 100644 --- a/plugin/evm/tx.go +++ b/plugin/evm/tx.go @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - "github.com/ava-labs/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" ) // Max size of memo field diff --git a/plugin/evm/user.go b/plugin/evm/user.go index cfa302b..b751634 100644 --- a/plugin/evm/user.go +++ b/plugin/evm/user.go @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto" - "github.com/ava-labs/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" ) // Key in the database whose corresponding value is the list of diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 9ab4781..5c5a5b5 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -22,11 +22,11 @@ import ( "github.com/ava-labs/coreth/node" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/rlp" - "github.com/ava-labs/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rpc" - ethcrypto 
"github.com/ava-labs/go-ethereum/crypto" + ethcrypto "github.com/ethereum/go-ethereum/crypto" avalancheRPC "github.com/gorilla/rpc/v2" -- cgit v1.2.3-70-g09d2 From b1ac5e6ce73c37378e575d6291e3c5f1ee170430 Mon Sep 17 00:00:00 2001 From: Determinant Date: Wed, 16 Sep 2020 19:00:25 -0400 Subject: build success --- consensus/clique/clique.go | 2 +- consensus/dummy/consensus.go | 31 +- consensus/ethash/consensus.go | 2 +- core/blockchain.go | 2 +- core/forkid/forkid.go | 258 ++++++++ core/rawdb/accessors_state.go | 96 +++ core/rawdb/chain_iterator.go | 304 ++++++++++ core/state/snapshot/difflayer_test.go | 400 ------------- core/state/snapshot/disklayer_test.go | 511 ---------------- core/state/snapshot/iterator_test.go | 1046 --------------------------------- core/state/snapshot/snapshot_test.go | 371 ------------ core/state/snapshot/wipe_test.go | 124 ---- core/vm/instructions.go | 14 +- coreth.go | 19 +- eth/api.go | 2 +- eth/backend.go | 10 +- eth/protocol.go | 1 + internal/ethapi/api.go | 1 - internal/ethapi/backend.go | 1 + miner/miner.go | 3 +- node/api.go | 1 + node/node.go | 34 +- rpc/metrics.go | 39 ++ 23 files changed, 777 insertions(+), 2495 deletions(-) create mode 100644 core/forkid/forkid.go create mode 100644 core/rawdb/accessors_state.go create mode 100644 core/rawdb/chain_iterator.go delete mode 100644 core/state/snapshot/difflayer_test.go delete mode 100644 core/state/snapshot/disklayer_test.go delete mode 100644 core/state/snapshot/iterator_test.go delete mode 100644 core/state/snapshot/snapshot_test.go delete mode 100644 core/state/snapshot/wipe_test.go create mode 100644 rpc/metrics.go (limited to 'consensus') diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index d239aca..b27525b 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -566,7 +566,7 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header * header.UncleHash = types.CalcUncleHash(nil) // Assemble and return the final block for sealing - return types.NewBlock(header, txs, nil, receipts, new(trie.Trie)), nil + return types.NewBlock(header, txs, nil, receipts, new(trie.Trie), nil), nil } // Authorize injects a private key into the consensus engine to mint new blocks diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go index 2108684..da63673 100644 --- a/consensus/dummy/consensus.go +++ b/consensus/dummy/consensus.go @@ -13,14 +13,15 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" + mapset "github.com/deckarep/golang-set" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" - mapset "github.com/deckarep/golang-set" + "github.com/ethereum/go-ethereum/trie" ) -type OnFinalizeCallbackType = func(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) +type OnFinalizeCallbackType = func(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) type OnFinalizeAndAssembleCallbackType = func(state *state.StateDB, txs []*types.Transaction) ([]byte, error) -type OnAPIsCallbackType = func(consensus.ChainReader) []rpc.API +type OnAPIsCallbackType = func(consensus.ChainHeaderReader) []rpc.API type OnExtraStateChangeType = func(block *types.Block, statedb *state.StateDB) error type ConsensusCallbacks struct { @@ -55,7 +56,7 @@ var ( ) // modified from consensus.go -func (self *DummyEngine) verifyHeader(chain 
consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error { +func (self *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool) error { // Ensure that the header's extra-data section is of a reasonable size if uint64(len(header.Extra)) > params.MaximumExtraDataSize { return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize) @@ -103,7 +104,7 @@ func (self *DummyEngine) verifyHeader(chain consensus.ChainReader, header, paren return nil } -func (self *DummyEngine) verifyHeaderWorker(chain consensus.ChainReader, headers []*types.Header, seals []bool, index int) error { +func (self *DummyEngine) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int) error { var parent *types.Header if index == 0 { parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1) @@ -123,7 +124,7 @@ func (self *DummyEngine) Author(header *types.Header) (common.Address, error) { return header.Coinbase, nil } -func (self *DummyEngine) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error { +func (self *DummyEngine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { // Short circuit if the header is known, or its parent not number := header.Number.Uint64() if chain.GetHeader(header.Hash(), number) != nil { @@ -137,7 +138,7 @@ func (self *DummyEngine) VerifyHeader(chain consensus.ChainReader, header *types return self.verifyHeader(chain, header, parent, false, seal) } -func (self *DummyEngine) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { +func (self *DummyEngine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { // Spawn as many workers as allowed threads workers := runtime.GOMAXPROCS(0) if len(headers) < workers { @@ -239,17 +240,17 @@ func (self *DummyEngine) VerifyUncles(chain consensus.ChainReader, block *types.
return nil } -func (self *DummyEngine) VerifySeal(chain consensus.ChainReader, header *types.Header) error { +func (self *DummyEngine) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { return nil } -func (self *DummyEngine) Prepare(chain consensus.ChainReader, header *types.Header) error { +func (self *DummyEngine) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { header.Difficulty = big.NewInt(1) return nil } func (self *DummyEngine) Finalize( - chain consensus.ChainReader, header *types.Header, + chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { if self.cb.OnFinalize != nil { @@ -259,7 +260,7 @@ func (self *DummyEngine) Finalize( header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) } -func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, +func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { var extdata []byte if self.cb.OnFinalizeAndAssemble != nil { @@ -273,10 +274,10 @@ func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainReader, header header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts, extdata), nil + return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie), extdata), nil } -func (self *DummyEngine) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) (err error) { +func (self *DummyEngine) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) (err error) { if self.cb.OnSeal != nil { err = self.cb.OnSeal(block) } else { @@ -314,11 +315,11 @@ func (self *DummyEngine) SealHash(header *types.Header) (hash common.Hash) { return hash } -func (self *DummyEngine) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int { +func (self *DummyEngine) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { return big.NewInt(1) } -func (self *DummyEngine) APIs(chain consensus.ChainReader) (res []rpc.API) { +func (self *DummyEngine) APIs(chain consensus.ChainHeaderReader) (res []rpc.API) { res = nil if self.cb.OnAPIs != nil { res = self.cb.OnAPIs(chain) diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 151761c..dc56b6f 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -584,7 +584,7 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil + return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie), nil), nil } // SealHash returns the hash of a block prior to it being sealed. 
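The NewBlock call sites in this commit (clique and dummy earlier, ethash just above) all pick up the same change: a trie hasher argument for deriving the transaction and receipt trie roots, plus coreth's trailing extra-data payload. Below is a minimal sketch of an updated call site, assuming the post-patch coreth signature shown in the hunks; assembleBlock and its inputs are hypothetical helpers, not part of the patch.

package main

import (
	"fmt"
	"math/big"

	"github.com/ava-labs/coreth/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// assembleBlock mirrors the post-patch call shape: the fifth argument is the
// hasher used to derive the tx/receipt trie roots, the sixth is coreth's
// optional extra-data payload (nil when unused).
func assembleBlock(header *types.Header, txs []*types.Transaction,
	receipts []*types.Receipt, extdata []byte) *types.Block {
	return types.NewBlock(header, txs, nil, receipts, new(trie.Trie), extdata)
}

func main() {
	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1)}
	fmt.Println(assembleBlock(header, nil, nil, nil).NumberU64())
}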
diff --git a/core/blockchain.go b/core/blockchain.go index b861220..82e3b6c 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2498,6 +2498,6 @@ func (bc *BlockChain) ManualHead(hash common.Hash) error { } bc.chainmu.Lock() defer bc.chainmu.Unlock() - bc.insert(block) + bc.writeHeadBlock(block) return nil } diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go new file mode 100644 index 0000000..1d6563d --- /dev/null +++ b/core/forkid/forkid.go @@ -0,0 +1,258 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package forkid implements EIP-2124 (https://eips.ethereum.org/EIPS/eip-2124). +package forkid + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "math" + "math/big" + "reflect" + "strings" + + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +var ( + // ErrRemoteStale is returned by the validator if a remote fork checksum is a + // subset of our already applied forks, but the announced next fork block is + // not on our already passed chain. + ErrRemoteStale = errors.New("remote needs update") + + // ErrLocalIncompatibleOrStale is returned by the validator if a remote fork + // checksum does not match any local checksum variation, signalling that the + // two chains have diverged in the past at some point (possibly at genesis). + ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update") +) + +// Blockchain defines all necessary methods to build a forkID. +type Blockchain interface { + // Config retrieves the chain's fork configuration. + Config() *params.ChainConfig + + // Genesis retrieves the chain's genesis block. + Genesis() *types.Block + + // CurrentHeader retrieves the current head header of the canonical chain. + CurrentHeader() *types.Header +} + +// ID is a fork identifier as defined by EIP-2124. +type ID struct { + Hash [4]byte // CRC32 checksum of the genesis block and passed fork block numbers + Next uint64 // Block number of the next upcoming fork, or 0 if no forks are known +} + +// Filter is a fork id filter to validate a remotely advertised ID. +type Filter func(id ID) error + +// NewID calculates the Ethereum fork ID from the chain config and head. +func NewID(chain Blockchain) ID { + return newID( + chain.Config(), + chain.Genesis().Hash(), + chain.CurrentHeader().Number.Uint64(), + ) +} + +// newID is the internal version of NewID, which takes extracted values as its +// arguments instead of a chain. The reason is to allow testing the IDs without +// having to simulate an entire blockchain.
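+// (Illustrative numbers only: a chain whose single passed fork is at block
+// 1,150,000 yields checksumToBytes(checksumUpdate(crc32(genesis), 1150000)),
+// with Next set to the following scheduled fork, or 0 if none is known.)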
+func newID(config *params.ChainConfig, genesis common.Hash, head uint64) ID { + // Calculate the starting checksum from the genesis hash + hash := crc32.ChecksumIEEE(genesis[:]) + + // Calculate the current fork checksum and the next fork block + var next uint64 + for _, fork := range gatherForks(config) { + if fork <= head { + // Fork already passed, checksum the previous hash and the fork number + hash = checksumUpdate(hash, fork) + continue + } + next = fork + break + } + return ID{Hash: checksumToBytes(hash), Next: next} +} + +// NewFilter creates a filter that returns if a fork ID should be rejected or not +// based on the local chain's status. +func NewFilter(chain Blockchain) Filter { + return newFilter( + chain.Config(), + chain.Genesis().Hash(), + func() uint64 { + return chain.CurrentHeader().Number.Uint64() + }, + ) +} + +// NewStaticFilter creates a filter at block zero. +func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter { + head := func() uint64 { return 0 } + return newFilter(config, genesis, head) +} + +// newFilter is the internal version of NewFilter, taking closures as its arguments +// instead of a chain. The reason is to allow testing it without having to simulate +// an entire blockchain. +func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter { + // Calculate all the valid fork hash and fork next combos + var ( + forks = gatherForks(config) + sums = make([][4]byte, len(forks)+1) // 0th is the genesis + ) + hash := crc32.ChecksumIEEE(genesis[:]) + sums[0] = checksumToBytes(hash) + for i, fork := range forks { + hash = checksumUpdate(hash, fork) + sums[i+1] = checksumToBytes(hash) + } + // Add a sentry to simplify the fork checks and don't require special + // casing the last one. + forks = append(forks, math.MaxUint64) // Last fork will never be passed + + // Create a validator that will filter out incompatible chains + return func(id ID) error { + // Run the fork checksum validation ruleset: + // 1. If local and remote FORK_CSUM matches, compare local head to FORK_NEXT. + // The two nodes are in the same fork state currently. They might know + // of differing future forks, but that's not relevant until the fork + // triggers (might be postponed, nodes might be updated to match). + // 1a. A remotely announced but remotely not passed block is already passed + // locally, disconnect, since the chains are incompatible. + // 1b. No remotely announced fork; or not yet passed locally, connect. + // 2. If the remote FORK_CSUM is a subset of the local past forks and the + // remote FORK_NEXT matches with the locally following fork block number, + // connect. + // Remote node is currently syncing. It might eventually diverge from + // us, but at this current point in time we don't have enough information. + // 3. If the remote FORK_CSUM is a superset of the local past forks and can + // be completed with locally known future forks, connect. + // Local node is currently syncing. It might eventually diverge from + // the remote, but at this current point in time we don't have enough + // information. + // 4. Reject in all other cases. + head := headfn() + for i, fork := range forks { + // If our head is beyond this fork, continue to the next (we have a dummy + // fork of maxuint64 as the last item to always fail this check eventually). + if head > fork { + continue + } + // Found the first unpassed fork block, check if our current state matches + // the remote checksum (rule #1).
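+ // (Illustrative case: if the only locally passed fork is F1 and head has
+ // not reached F2, sums[i] covers {genesis, F1}; a remote with the same
+ // checksum is rejected only if it announces a Next we have already passed.)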
+ if sums[i] == id.Hash { + // Fork checksum matched, check if a remote future fork block already passed + // locally without the local node being aware of it (rule #1a). + if id.Next > 0 && head >= id.Next { + return ErrLocalIncompatibleOrStale + } + // Haven't passed locally a remote-only fork, accept the connection (rule #1b). + return nil + } + // The local and remote nodes are in different forks currently, check if the + // remote checksum is a subset of our local forks (rule #2). + for j := 0; j < i; j++ { + if sums[j] == id.Hash { + // Remote checksum is a subset, validate based on the announced next fork + if forks[j] != id.Next { + return ErrRemoteStale + } + return nil + } + } + // Remote chain is not a subset of our local one, check if it's a superset by + // any chance, signalling that we're simply out of sync (rule #3). + for j := i + 1; j < len(sums); j++ { + if sums[j] == id.Hash { + // Yay, remote checksum is a superset, ignore upcoming forks + return nil + } + } + // No exact, subset or superset match. We are on differing chains, reject. + return ErrLocalIncompatibleOrStale + } + log.Error("Impossible fork ID validation", "id", id) + return nil // Something's very wrong, accept rather than reject + } +} + +// checksumUpdate calculates the next IEEE CRC32 checksum based on the previous +// one and a fork block number (equivalent to CRC32(original-blob || fork)). +func checksumUpdate(hash uint32, fork uint64) uint32 { + var blob [8]byte + binary.BigEndian.PutUint64(blob[:], fork) + return crc32.Update(hash, crc32.IEEETable, blob[:]) +} + +// checksumToBytes converts a uint32 checksum into a [4]byte array. +func checksumToBytes(hash uint32) [4]byte { + var blob [4]byte + binary.BigEndian.PutUint32(blob[:], hash) + return blob +} + +// gatherForks gathers all the known forks and creates a sorted list out of them. +func gatherForks(config *params.ChainConfig) []uint64 { + // Gather all the fork block numbers via reflection + kind := reflect.TypeOf(params.ChainConfig{}) + conf := reflect.ValueOf(config).Elem() + + var forks []uint64 + for i := 0; i < kind.NumField(); i++ { + // Fetch the next field and skip non-fork rules + field := kind.Field(i) + if !strings.HasSuffix(field.Name, "Block") { + continue + } + if field.Type != reflect.TypeOf(new(big.Int)) { + continue + } + // Extract the fork rule block number and aggregate it + rule := conf.Field(i).Interface().(*big.Int) + if rule != nil { + forks = append(forks, rule.Uint64()) + } + } + // Sort the fork block numbers to permit chronological XOR + for i := 0; i < len(forks); i++ { + for j := i + 1; j < len(forks); j++ { + if forks[i] > forks[j] { + forks[i], forks[j] = forks[j], forks[i] + } + } + } + // Deduplicate block numbers applying multiple forks + for i := 1; i < len(forks); i++ { + if forks[i] == forks[i-1] { + forks = append(forks[:i], forks[i+1:]...) + i-- + } + } + // Skip any forks in block 0, that's the genesis ruleset + if len(forks) > 0 && forks[0] == 0 { + forks = forks[1:] + } + return forks +} diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go new file mode 100644 index 0000000..6112de0 --- /dev/null +++ b/core/rawdb/accessors_state.go @@ -0,0 +1,96 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" +) + +// ReadPreimage retrieves a single preimage of the provided hash. +func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, _ := db.Get(preimageKey(hash)) + return data +} + +// WritePreimages writes the provided set of preimages to the database. +func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) { + for hash, preimage := range preimages { + if err := db.Put(preimageKey(hash), preimage); err != nil { + log.Crit("Failed to store trie preimage", "err", err) + } + } + preimageCounter.Inc(int64(len(preimages))) + preimageHitCounter.Inc(int64(len(preimages))) +} + +// ReadCode retrieves the contract code of the provided code hash. +func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte { + // Try with the legacy code scheme first; if not found, then try with the + // current scheme. Since most of the code will be found with legacy scheme. + // + // todo(rjl493456442) change the order when we forcibly upgrade the code + // scheme with snapshot. + data, _ := db.Get(hash[:]) + if len(data) != 0 { + return data + } + return ReadCodeWithPrefix(db, hash) +} + +// ReadCodeWithPrefix retrieves the contract code of the provided code hash. +// The main difference between this function and ReadCode is this function +// will only check the existence with the latest scheme (with prefix). +func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, _ := db.Get(codeKey(hash)) + return data +} + +// WriteCode writes the provided contract code to the database. +func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { + if err := db.Put(codeKey(hash), code); err != nil { + log.Crit("Failed to store contract code", "err", err) + } +} + +// DeleteCode deletes the specified contract code from the database. +func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Delete(codeKey(hash)); err != nil { + log.Crit("Failed to delete contract code", "err", err) + } +} + +// ReadTrieNode retrieves the trie node of the provided hash. +func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, _ := db.Get(hash.Bytes()) + return data +} + +// WriteTrieNode writes the provided trie node to the database. +func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) { + if err := db.Put(hash.Bytes(), node); err != nil { + log.Crit("Failed to store trie node", "err", err) + } +} + +// DeleteTrieNode deletes the specified trie node from the database.
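+// Like ReadTrieNode and WriteTrieNode above, it keys the node by its raw
+// 32-byte hash with no table prefix.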
+func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Delete(hash.Bytes()); err != nil { + log.Crit("Failed to delete trie node", "err", err) + } +} diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go new file mode 100644 index 0000000..3130e92 --- /dev/null +++ b/core/rawdb/chain_iterator.go @@ -0,0 +1,304 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "runtime" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/crypto/sha3" +) + +// InitDatabaseFromFreezer reinitializes an empty database from a previous batch +// of frozen ancient blocks. The method iterates over all the frozen blocks and +// injects into the database the block hash->number mappings. +func InitDatabaseFromFreezer(db ethdb.Database) { + // If we can't access the freezer or it's empty, abort + frozen, err := db.Ancients() + if err != nil || frozen == 0 { + return + } + var ( + batch = db.NewBatch() + start = time.Now() + logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log + hash common.Hash + ) + for i := uint64(0); i < frozen; i++ { + // Since the freezer has all data in sequential order on a file, + // it would be 'neat' to read more data in one go, and let the + // freezerdb return N items (e.g up to 1000 items per go) + // That would require an API change in Ancients though + if h, err := db.Ancient(freezerHashTable, i); err != nil { + log.Crit("Failed to init database from freezer", "err", err) + } else { + hash = common.BytesToHash(h) + } + WriteHeaderNumber(batch, hash, i) + // If enough data was accumulated in memory or we're at the last block, dump to disk + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + log.Crit("Failed to write data to db", "err", err) + } + batch.Reset() + } + // If we've spent too much time already, notify the user of what we're doing + if time.Since(logged) > 8*time.Second { + log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start))) + logged = time.Now() + } + } + if err := batch.Write(); err != nil { + log.Crit("Failed to write data to db", "err", err) + } + batch.Reset() + + WriteHeadHeaderHash(db, hash) + WriteHeadFastBlockHash(db, hash) + log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start))) +} + +type blockTxHashes struct { + number uint64 + hashes []common.Hash +} + +// iterateTransactions iterates over all transactions in the (canon) block +// number(s) given, and 
yields the hashes on a channel +func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool) (chan *blockTxHashes, chan struct{}) { + // One thread sequentially reads data from db + type numberRlp struct { + number uint64 + rlp rlp.RawValue + } + if to == from { + return nil, nil + } + threads := to - from + if cpus := runtime.NumCPU(); threads > uint64(cpus) { + threads = uint64(cpus) + } + var ( + rlpCh = make(chan *numberRlp, threads*2) // we send raw rlp over this channel + hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh + abortCh = make(chan struct{}) + ) + // lookup runs in one instance + lookup := func() { + n, end := from, to + if reverse { + n, end = to-1, from-1 + } + defer close(rlpCh) + for n != end { + data := ReadCanonicalBodyRLP(db, n) + // Feed the block to the aggregator, or abort on interrupt + select { + case rlpCh <- &numberRlp{n, data}: + case <-abortCh: + return + } + if reverse { + n-- + } else { + n++ + } + } + } + // process runs in parallel + nThreadsAlive := int32(threads) + process := func() { + defer func() { + // Last processor closes the result channel + if atomic.AddInt32(&nThreadsAlive, -1) == 0 { + close(hashesCh) + } + }() + + var hasher = sha3.NewLegacyKeccak256() + for data := range rlpCh { + it, err := rlp.NewListIterator(data.rlp) + if err != nil { + log.Warn("tx iteration error", "error", err) + return + } + it.Next() + txs := it.Value() + txIt, err := rlp.NewListIterator(txs) + if err != nil { + log.Warn("tx iteration error", "error", err) + return + } + var hashes []common.Hash + for txIt.Next() { + if err := txIt.Err(); err != nil { + log.Warn("tx iteration error", "error", err) + return + } + var txHash common.Hash + hasher.Reset() + hasher.Write(txIt.Value()) + hasher.Sum(txHash[:0]) + hashes = append(hashes, txHash) + } + result := &blockTxHashes{ + hashes: hashes, + number: data.number, + } + // Feed the block to the aggregator, or abort on interrupt + select { + case hashesCh <- result: + case <-abortCh: + return + } + } + } + go lookup() // start the sequential db accessor + for i := 0; i < int(threads); i++ { + go process() + } + return hashesCh, abortCh +} + +// IndexTransactions creates txlookup indices of the specified block range. +// +// This function iterates over the canonical chain in reverse order; this has one +// main advantage: we can write the tx index tail flag periodically even before +// the whole indexing procedure has finished, so we can resume it quickly next time. +func IndexTransactions(db ethdb.Database, from uint64, to uint64) { + // short circuit for invalid range + if from >= to { + return + } + var ( + hashesCh, abortCh = iterateTransactions(db, from, to, true) + batch = db.NewBatch() + start = time.Now() + logged = start.Add(-7 * time.Second) + // Since we iterate in reverse, we expect the first number to come + // in to be [to-1]. Therefore, setting lastNum to `to` means that the + // prque gap-evaluation will work correctly + lastNum = to + queue = prque.New(nil) + // for stats reporting + blocks, txs = 0, 0 + ) + defer close(abortCh) + + for chanDelivery := range hashesCh { + // Push the delivery into the queue and process contiguous ranges.
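+ // (prque pops the highest priority first, so using block numbers as
+ // priorities releases deliveries in descending, contiguous order,
+ // matching the reverse iteration above.)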
+		// Since we iterate in reverse, lower numbers have lower priority, and
+		// we can use the number directly as the priority marker
+		queue.Push(chanDelivery, int64(chanDelivery.number))
+		for !queue.Empty() {
+			// If the next available item is gapped, stop draining and wait for more
+			if _, priority := queue.Peek(); priority != int64(lastNum-1) {
+				break
+			}
+			// Next block available, pop it off and index it
+			delivery := queue.PopItem().(*blockTxHashes)
+			lastNum = delivery.number
+			WriteTxLookupEntries(batch, delivery.number, delivery.hashes)
+			blocks++
+			txs += len(delivery.hashes)
+			// If enough data was accumulated in memory or we're at the last block, dump to disk
+			if batch.ValueSize() > ethdb.IdealBatchSize {
+				// Also write the tail there
+				WriteTxIndexTail(batch, lastNum)
+				if err := batch.Write(); err != nil {
+					log.Crit("Failed writing batch to db", "error", err)
+					return
+				}
+				batch.Reset()
+			}
+			// If we've spent too much time already, notify the user of what we're doing
+			if time.Since(logged) > 8*time.Second {
+				log.Info("Indexing transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+				logged = time.Now()
+			}
+		}
+	}
+	if lastNum < to {
+		WriteTxIndexTail(batch, lastNum)
+		// No need to write the batch if we never entered the loop above...
+		if err := batch.Write(); err != nil {
+			log.Crit("Failed writing batch to db", "error", err)
+			return
+		}
+	}
+	log.Info("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+}
+
+// UnindexTransactions removes txlookup indices of the specified block range.
+func UnindexTransactions(db ethdb.Database, from uint64, to uint64) {
+	// short circuit for invalid range
+	if from >= to {
+		return
+	}
+	// Write the flag first and then unindex the transaction indices. Some
+	// indices will be left in the database if a crash happens, but that's fine.
+	WriteTxIndexTail(db, to)
+	// If only one block is unindexed, do it directly
+	//if from+1 == to {
+	//	data := ReadCanonicalBodyRLP(db, uint64(from))
+	//	DeleteTxLookupEntries(db, ReadBlock(db, ReadCanonicalHash(db, from), from))
+	//	log.Info("Unindexed transactions", "blocks", 1, "tail", to)
+	//	return
+	//}
+	// TODO @holiman, add this back (if we want it)
+	var (
+		hashesCh, abortCh = iterateTransactions(db, from, to, false)
+		batch             = db.NewBatch()
+		start             = time.Now()
+		logged            = start.Add(-7 * time.Second)
+	)
+	defer close(abortCh)
+	// Otherwise spin up the concurrent iterator and unindexer
+	blocks, txs := 0, 0
+	for delivery := range hashesCh {
+		DeleteTxLookupEntries(batch, delivery.hashes)
+		txs += len(delivery.hashes)
+		blocks++
+
+		// If enough data was accumulated in memory or we're at the last block, dump to disk.
+		// A batch counts the size of a deletion as '1', so we need to flush more
+		// often than that.
+		if blocks%1000 == 0 {
+			if err := batch.Write(); err != nil {
+				log.Crit("Failed writing batch to db", "error", err)
+				return
+			}
+			batch.Reset()
+		}
+		// If we've spent too much time already, notify the user of what we're doing
+		if time.Since(logged) > 8*time.Second {
+			log.Info("Unindexing transactions", "blocks", blocks, "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+			logged = time.Now()
+		}
+	}
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed writing batch to db", "error", err)
+		return
+	}
+	log.Info("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
+}
diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go
deleted file mode 100644
index 31636ee..0000000
--- a/core/state/snapshot/difflayer_test.go
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
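[editor's note] The indexing loop in chain_iterator.go above is a "commit the contiguous prefix" pattern: parallel workers deliver block hashes out of order, and a priority queue holds gapped results back so the tx index tail only ever advances over fully indexed ranges. Below is a minimal, self-contained sketch of that pattern, not part of the patch: it uses the stdlib container/heap instead of go-ethereum's prque, commits in ascending rather than reverse order, and replaces the abort channel and batch writes with channel close and prints. All names in it are made up for illustration.

```go
package main

import (
	"container/heap"
	"fmt"
)

type result struct {
	number  uint64
	payload string
}

// resultHeap is a min-heap ordered by block number, standing in for prque.
type resultHeap []result

func (h resultHeap) Len() int            { return len(h) }
func (h resultHeap) Less(i, j int) bool  { return h[i].number < h[j].number }
func (h resultHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *resultHeap) Push(x interface{}) { *h = append(*h, x.(result)) }
func (h *resultHeap) Pop() interface{} {
	old := *h
	item := old[len(old)-1]
	*h = old[:len(old)-1]
	return item
}

func main() {
	results := make(chan result)
	go func() { // pretend these arrive from parallel workers, out of order
		for _, n := range []uint64{2, 0, 3, 1, 4} {
			results <- result{n, fmt.Sprintf("block %d", n)}
		}
		close(results)
	}()
	pending := &resultHeap{}
	next := uint64(0) // the next number we are allowed to commit
	for r := range results {
		heap.Push(pending, r)
		// Drain every contiguous result; stop at the first gap.
		for pending.Len() > 0 && (*pending)[0].number == next {
			item := heap.Pop(pending).(result)
			fmt.Println("commit", item.payload) // the real code batch-writes and bumps the tail flag here
			next++
		}
	}
}
```

Because the tail flag only moves once a contiguous range has been committed, a crash mid-run leaves the index resumable, which is exactly the advantage the IndexTransactions doc comment claims for reverse iteration.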
copyStorage(storage)) - child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) - child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) - child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) - // And flatten - merged := (child.flatten()).(*diffLayer) - - { // Check account lists - if have, want := len(merged.accountList), 0; have != want { - t.Errorf("accountList wrong: have %v, want %v", have, want) - } - if have, want := len(merged.AccountList()), len(accounts); have != want { - t.Errorf("AccountList() wrong: have %v, want %v", have, want) - } - if have, want := len(merged.accountList), len(accounts); have != want { - t.Errorf("accountList [2] wrong: have %v, want %v", have, want) - } - } - { // Check account drops - if have, want := len(merged.destructSet), len(destructs); have != want { - t.Errorf("accountDrop wrong: have %v, want %v", have, want) - } - } - { // Check storage lists - i := 0 - for aHash, sMap := range storage { - if have, want := len(merged.storageList), i; have != want { - t.Errorf("[1] storageList wrong: have %v, want %v", have, want) - } - list, _ := merged.StorageList(aHash) - if have, want := len(list), len(sMap); have != want { - t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want) - } - if have, want := len(merged.storageList[aHash]), len(sMap); have != want { - t.Errorf("storageList wrong: have %v, want %v", have, want) - } - i++ - } - } -} - -// TestMergeDelete tests some deletion -func TestMergeDelete(t *testing.T) { - var ( - storage = make(map[common.Hash]map[common.Hash][]byte) - ) - // Fill up a parent - h1 := common.HexToHash("0x01") - h2 := common.HexToHash("0x02") - - flipDrops := func() map[common.Hash]struct{} { - return map[common.Hash]struct{}{ - h2: {}, - } - } - flipAccs := func() map[common.Hash][]byte { - return map[common.Hash][]byte{ - h1: randomAccount(), - } - } - flopDrops := func() map[common.Hash]struct{} { - return map[common.Hash]struct{}{ - h1: {}, - } - } - flopAccs := func() map[common.Hash][]byte { - return map[common.Hash][]byte{ - h2: randomAccount(), - } - } - // Add some flipAccs-flopping layers on top - parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage) - child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage) - child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) - child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage) - child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) - child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage) - child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) - - if data, _ := child.Account(h1); data == nil { - t.Errorf("last diff layer: expected %x account to be non-nil", h1) - } - if data, _ := child.Account(h2); data != nil { - t.Errorf("last diff layer: expected %x account to be nil", h2) - } - if _, ok := child.destructSet[h1]; ok { - t.Errorf("last diff layer: expected %x drop to be missing", h1) - } - if _, ok := child.destructSet[h2]; !ok { - t.Errorf("last diff layer: expected %x drop to be present", h1) - } - // And flatten - merged := (child.flatten()).(*diffLayer) - - if data, _ := merged.Account(h1); data == nil { - t.Errorf("merged layer: expected %x account to be non-nil", h1) - } - if data, _ := merged.Account(h2); data != nil { - t.Errorf("merged layer: expected %x 
account to be nil", h2) - } - if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk! - t.Errorf("merged diff layer: expected %x drop to be present", h1) - } - if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk! - t.Errorf("merged diff layer: expected %x drop to be present", h1) - } - // If we add more granular metering of memory, we can enable this again, - // but it's not implemented for now - //if have, want := merged.memory, child.memory; have != want { - // t.Errorf("mem wrong: have %d, want %d", have, want) - //} -} - -// This tests that if we create a new account, and set a slot, and then merge -// it, the lists will be correct. -func TestInsertAndMerge(t *testing.T) { - // Fill up a parent - var ( - acc = common.HexToHash("0x01") - slot = common.HexToHash("0x02") - parent *diffLayer - child *diffLayer - ) - { - var ( - destructs = make(map[common.Hash]struct{}) - accounts = make(map[common.Hash][]byte) - storage = make(map[common.Hash]map[common.Hash][]byte) - ) - parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage) - } - { - var ( - destructs = make(map[common.Hash]struct{}) - accounts = make(map[common.Hash][]byte) - storage = make(map[common.Hash]map[common.Hash][]byte) - ) - accounts[acc] = randomAccount() - storage[acc] = make(map[common.Hash][]byte) - storage[acc][slot] = []byte{0x01} - child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) - } - // And flatten - merged := (child.flatten()).(*diffLayer) - { // Check that slot value is present - have, _ := merged.Storage(acc, slot) - if want := []byte{0x01}; !bytes.Equal(have, want) { - t.Errorf("merged slot value wrong: have %x, want %x", have, want) - } - } -} - -func emptyLayer() *diskLayer { - return &diskLayer{ - diskdb: memorydb.New(), - cache: fastcache.New(500 * 1024), - } -} - -// BenchmarkSearch checks how long it takes to find a non-existing key -// BenchmarkSearch-6 200000 10481 ns/op (1K per layer) -// BenchmarkSearch-6 200000 10760 ns/op (10K per layer) -// BenchmarkSearch-6 100000 17866 ns/op -// -// BenchmarkSearch-6 500000 3723 ns/op (10k per layer, only top-level RLock() -func BenchmarkSearch(b *testing.B) { - // First, we set up 128 diff layers, with 1K items each - fill := func(parent snapshot) *diffLayer { - var ( - destructs = make(map[common.Hash]struct{}) - accounts = make(map[common.Hash][]byte) - storage = make(map[common.Hash]map[common.Hash][]byte) - ) - for i := 0; i < 10000; i++ { - accounts[randomHash()] = randomAccount() - } - return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) - } - var layer snapshot - layer = emptyLayer() - for i := 0; i < 128; i++ { - layer = fill(layer) - } - key := crypto.Keccak256Hash([]byte{0x13, 0x38}) - b.ResetTimer() - for i := 0; i < b.N; i++ { - layer.AccountRLP(key) - } -} - -// BenchmarkSearchSlot checks how long it takes to find a non-existing key -// - Number of layers: 128 -// - Each layers contains the account, with a couple of storage slots -// BenchmarkSearchSlot-6 100000 14554 ns/op -// BenchmarkSearchSlot-6 100000 22254 ns/op (when checking parent root using mutex) -// BenchmarkSearchSlot-6 100000 14551 ns/op (when checking parent number using atomic) -// With bloom filter: -// BenchmarkSearchSlot-6 3467835 351 ns/op -func BenchmarkSearchSlot(b *testing.B) { - // First, we set up 128 diff layers, with 1K items each - accountKey := crypto.Keccak256Hash([]byte{0x13, 0x37}) - storageKey := 
crypto.Keccak256Hash([]byte{0x13, 0x37}) - accountRLP := randomAccount() - fill := func(parent snapshot) *diffLayer { - var ( - destructs = make(map[common.Hash]struct{}) - accounts = make(map[common.Hash][]byte) - storage = make(map[common.Hash]map[common.Hash][]byte) - ) - accounts[accountKey] = accountRLP - - accStorage := make(map[common.Hash][]byte) - for i := 0; i < 5; i++ { - value := make([]byte, 32) - rand.Read(value) - accStorage[randomHash()] = value - storage[accountKey] = accStorage - } - return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) - } - var layer snapshot - layer = emptyLayer() - for i := 0; i < 128; i++ { - layer = fill(layer) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - layer.Storage(accountKey, storageKey) - } -} - -// With accountList and sorting -// BenchmarkFlatten-6 50 29890856 ns/op -// -// Without sorting and tracking accountlist -// BenchmarkFlatten-6 300 5511511 ns/op -func BenchmarkFlatten(b *testing.B) { - fill := func(parent snapshot) *diffLayer { - var ( - destructs = make(map[common.Hash]struct{}) - accounts = make(map[common.Hash][]byte) - storage = make(map[common.Hash]map[common.Hash][]byte) - ) - for i := 0; i < 100; i++ { - accountKey := randomHash() - accounts[accountKey] = randomAccount() - - accStorage := make(map[common.Hash][]byte) - for i := 0; i < 20; i++ { - value := make([]byte, 32) - rand.Read(value) - accStorage[randomHash()] = value - - } - storage[accountKey] = accStorage - } - return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.StopTimer() - var layer snapshot - layer = emptyLayer() - for i := 1; i < 128; i++ { - layer = fill(layer) - } - b.StartTimer() - - for i := 1; i < 128; i++ { - dl, ok := layer.(*diffLayer) - if !ok { - break - } - layer = dl.flatten() - } - b.StopTimer() - } -} - -// This test writes ~324M of diff layers to disk, spread over -// - 128 individual layers, -// - each with 200 accounts -// - containing 200 slots -// -// BenchmarkJournal-6 1 1471373923 ns/ops -// BenchmarkJournal-6 1 1208083335 ns/op // bufio writer -func BenchmarkJournal(b *testing.B) { - fill := func(parent snapshot) *diffLayer { - var ( - destructs = make(map[common.Hash]struct{}) - accounts = make(map[common.Hash][]byte) - storage = make(map[common.Hash]map[common.Hash][]byte) - ) - for i := 0; i < 200; i++ { - accountKey := randomHash() - accounts[accountKey] = randomAccount() - - accStorage := make(map[common.Hash][]byte) - for i := 0; i < 200; i++ { - value := make([]byte, 32) - rand.Read(value) - accStorage[randomHash()] = value - - } - storage[accountKey] = accStorage - } - return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) - } - layer := snapshot(new(diskLayer)) - for i := 1; i < 128; i++ { - layer = fill(layer) - } - b.ResetTimer() - - for i := 0; i < b.N; i++ { - layer.Journal(new(bytes.Buffer)) - } -} diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go deleted file mode 100644 index 5df5efc..0000000 --- a/core/state/snapshot/disklayer_test.go +++ /dev/null @@ -1,511 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package snapshot - -import ( - "bytes" - "io/ioutil" - "os" - "testing" - - "github.com/VictoriaMetrics/fastcache" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/ethdb/leveldb" - "github.com/ethereum/go-ethereum/ethdb/memorydb" -) - -// reverse reverses the contents of a byte slice. It's used to update random accs -// with deterministic changes. -func reverse(blob []byte) []byte { - res := make([]byte, len(blob)) - for i, b := range blob { - res[len(blob)-1-i] = b - } - return res -} - -// Tests that merging something into a disk layer persists it into the database -// and invalidates any previously written and cached values. -func TestDiskMerge(t *testing.T) { - // Create some accounts in the disk layer - db := memorydb.New() - - var ( - accNoModNoCache = common.Hash{0x1} - accNoModCache = common.Hash{0x2} - accModNoCache = common.Hash{0x3} - accModCache = common.Hash{0x4} - accDelNoCache = common.Hash{0x5} - accDelCache = common.Hash{0x6} - conNoModNoCache = common.Hash{0x7} - conNoModNoCacheSlot = common.Hash{0x70} - conNoModCache = common.Hash{0x8} - conNoModCacheSlot = common.Hash{0x80} - conModNoCache = common.Hash{0x9} - conModNoCacheSlot = common.Hash{0x90} - conModCache = common.Hash{0xa} - conModCacheSlot = common.Hash{0xa0} - conDelNoCache = common.Hash{0xb} - conDelNoCacheSlot = common.Hash{0xb0} - conDelCache = common.Hash{0xc} - conDelCacheSlot = common.Hash{0xc0} - conNukeNoCache = common.Hash{0xd} - conNukeNoCacheSlot = common.Hash{0xd0} - conNukeCache = common.Hash{0xe} - conNukeCacheSlot = common.Hash{0xe0} - baseRoot = randomHash() - diffRoot = randomHash() - ) - - rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:]) - rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:]) - rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:]) - rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:]) - rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:]) - rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:]) - - rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:]) - rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) - rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:]) - rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) - rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:]) - rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:]) - rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:]) - rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:]) - rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:]) - rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:]) - rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:]) - rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:]) - - rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:]) - rawdb.WriteStorageSnapshot(db, 
conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:]) - rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:]) - rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:]) - - rawdb.WriteSnapshotRoot(db, baseRoot) - - // Create a disk layer based on the above and cache in some data - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - baseRoot: &diskLayer{ - diskdb: db, - cache: fastcache.New(500 * 1024), - root: baseRoot, - }, - }, - } - base := snaps.Snapshot(baseRoot) - base.AccountRLP(accNoModCache) - base.AccountRLP(accModCache) - base.AccountRLP(accDelCache) - base.Storage(conNoModCache, conNoModCacheSlot) - base.Storage(conModCache, conModCacheSlot) - base.Storage(conDelCache, conDelCacheSlot) - base.Storage(conNukeCache, conNukeCacheSlot) - - // Modify or delete some accounts, flatten everything onto disk - if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{ - accDelNoCache: {}, - accDelCache: {}, - conNukeNoCache: {}, - conNukeCache: {}, - }, map[common.Hash][]byte{ - accModNoCache: reverse(accModNoCache[:]), - accModCache: reverse(accModCache[:]), - }, map[common.Hash]map[common.Hash][]byte{ - conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])}, - conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])}, - conDelNoCache: {conDelNoCacheSlot: nil}, - conDelCache: {conDelCacheSlot: nil}, - }); err != nil { - t.Fatalf("failed to update snapshot tree: %v", err) - } - if err := snaps.Cap(diffRoot, 0); err != nil { - t.Fatalf("failed to flatten snapshot tree: %v", err) - } - // Retrieve all the data through the disk layer and validate it - base = snaps.Snapshot(diffRoot) - if _, ok := base.(*diskLayer); !ok { - t.Fatalf("update not flattend into the disk layer") - } - - // assertAccount ensures that an account matches the given blob. - assertAccount := func(account common.Hash, data []byte) { - t.Helper() - blob, err := base.AccountRLP(account) - if err != nil { - t.Errorf("account access (%x) failed: %v", account, err) - } else if !bytes.Equal(blob, data) { - t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data) - } - } - assertAccount(accNoModNoCache, accNoModNoCache[:]) - assertAccount(accNoModCache, accNoModCache[:]) - assertAccount(accModNoCache, reverse(accModNoCache[:])) - assertAccount(accModCache, reverse(accModCache[:])) - assertAccount(accDelNoCache, nil) - assertAccount(accDelCache, nil) - - // assertStorage ensures that a storage slot matches the given blob. 
- assertStorage := func(account common.Hash, slot common.Hash, data []byte) { - t.Helper() - blob, err := base.Storage(account, slot) - if err != nil { - t.Errorf("storage access (%x:%x) failed: %v", account, slot, err) - } else if !bytes.Equal(blob, data) { - t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data) - } - } - assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) - assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) - assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:])) - assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:])) - assertStorage(conDelNoCache, conDelNoCacheSlot, nil) - assertStorage(conDelCache, conDelCacheSlot, nil) - assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil) - assertStorage(conNukeCache, conNukeCacheSlot, nil) - - // Retrieve all the data directly from the database and validate it - - // assertDatabaseAccount ensures that an account from the database matches the given blob. - assertDatabaseAccount := func(account common.Hash, data []byte) { - t.Helper() - if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) { - t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data) - } - } - assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:]) - assertDatabaseAccount(accNoModCache, accNoModCache[:]) - assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:])) - assertDatabaseAccount(accModCache, reverse(accModCache[:])) - assertDatabaseAccount(accDelNoCache, nil) - assertDatabaseAccount(accDelCache, nil) - - // assertDatabaseStorage ensures that a storage slot from the database matches the given blob. - assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) { - t.Helper() - if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) { - t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data) - } - } - assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) - assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) - assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:])) - assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:])) - assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil) - assertDatabaseStorage(conDelCache, conDelCacheSlot, nil) - assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil) - assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil) -} - -// Tests that merging something into a disk layer persists it into the database -// and invalidates any previously written and cached values, discarding anything -// after the in-progress generation marker. -func TestDiskPartialMerge(t *testing.T) { - // Iterate the test a few times to ensure we pick various internal orderings - // for the data slots as well as the progress marker. 
- for i := 0; i < 1024; i++ { - // Create some accounts in the disk layer - db := memorydb.New() - - var ( - accNoModNoCache = randomHash() - accNoModCache = randomHash() - accModNoCache = randomHash() - accModCache = randomHash() - accDelNoCache = randomHash() - accDelCache = randomHash() - conNoModNoCache = randomHash() - conNoModNoCacheSlot = randomHash() - conNoModCache = randomHash() - conNoModCacheSlot = randomHash() - conModNoCache = randomHash() - conModNoCacheSlot = randomHash() - conModCache = randomHash() - conModCacheSlot = randomHash() - conDelNoCache = randomHash() - conDelNoCacheSlot = randomHash() - conDelCache = randomHash() - conDelCacheSlot = randomHash() - conNukeNoCache = randomHash() - conNukeNoCacheSlot = randomHash() - conNukeCache = randomHash() - conNukeCacheSlot = randomHash() - baseRoot = randomHash() - diffRoot = randomHash() - genMarker = append(randomHash().Bytes(), randomHash().Bytes()...) - ) - - // insertAccount injects an account into the database if it's after the - // generator marker, drops the op otherwise. This is needed to seed the - // database with a valid starting snapshot. - insertAccount := func(account common.Hash, data []byte) { - if bytes.Compare(account[:], genMarker) <= 0 { - rawdb.WriteAccountSnapshot(db, account, data[:]) - } - } - insertAccount(accNoModNoCache, accNoModNoCache[:]) - insertAccount(accNoModCache, accNoModCache[:]) - insertAccount(accModNoCache, accModNoCache[:]) - insertAccount(accModCache, accModCache[:]) - insertAccount(accDelNoCache, accDelNoCache[:]) - insertAccount(accDelCache, accDelCache[:]) - - // insertStorage injects a storage slot into the database if it's after - // the generator marker, drops the op otherwise. This is needed to seed - // the database with a valid starting snapshot. - insertStorage := func(account common.Hash, slot common.Hash, data []byte) { - if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 { - rawdb.WriteStorageSnapshot(db, account, slot, data[:]) - } - } - insertAccount(conNoModNoCache, conNoModNoCache[:]) - insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) - insertAccount(conNoModCache, conNoModCache[:]) - insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) - insertAccount(conModNoCache, conModNoCache[:]) - insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:]) - insertAccount(conModCache, conModCache[:]) - insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:]) - insertAccount(conDelNoCache, conDelNoCache[:]) - insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:]) - insertAccount(conDelCache, conDelCache[:]) - insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:]) - - insertAccount(conNukeNoCache, conNukeNoCache[:]) - insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:]) - insertAccount(conNukeCache, conNukeCache[:]) - insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:]) - - rawdb.WriteSnapshotRoot(db, baseRoot) - - // Create a disk layer based on the above using a random progress marker - // and cache in some data. - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - baseRoot: &diskLayer{ - diskdb: db, - cache: fastcache.New(500 * 1024), - root: baseRoot, - }, - }, - } - snaps.layers[baseRoot].(*diskLayer).genMarker = genMarker - base := snaps.Snapshot(baseRoot) - - // assertAccount ensures that an account matches the given blob if it's - // already covered by the disk snapshot, and errors out otherwise. 
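[editor's note] The seeding helpers in the deleted test above gate every write on a byte-wise comparison against the in-progress generation marker: a key counts as covered only if it sorts at or before the marker, and storage keys compare as account-hash ++ slot-hash. A minimal sketch of that coverage rule follows (illustrative only, not part of the patch; the marker and key values are made up):

```go
package main

import (
	"bytes"
	"fmt"
)

// covered reports whether a flat key has already been passed by the
// snapshot generator, i.e. whether it sorts at or before the marker.
func covered(key, genMarker []byte) bool {
	return bytes.Compare(key, genMarker) <= 0
}

func main() {
	genMarker := []byte{0x80} // generator is halfway through the key space
	for _, key := range [][]byte{{0x10}, {0x80}, {0xf0}} {
		fmt.Printf("key %x covered: %v\n", key, covered(key, genMarker)) // true, true, false
	}
	// Storage slots concatenate the account hash and the slot hash
	// before comparing against the marker, as in the test above.
	account, slot := []byte{0x7f}, []byte{0xaa}
	fmt.Println(covered(append(account, slot...), genMarker)) // true: 0x7faa < 0x80
}
```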
- assertAccount := func(account common.Hash, data []byte) { - t.Helper() - blob, err := base.AccountRLP(account) - if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet { - t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob) - } - if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) { - t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data) - } - } - assertAccount(accNoModCache, accNoModCache[:]) - assertAccount(accModCache, accModCache[:]) - assertAccount(accDelCache, accDelCache[:]) - - // assertStorage ensures that a storage slot matches the given blob if - // it's already covered by the disk snapshot, and errors out otherwise. - assertStorage := func(account common.Hash, slot common.Hash, data []byte) { - t.Helper() - blob, err := base.Storage(account, slot) - if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet { - t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob) - } - if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) { - t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data) - } - } - assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) - assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:]) - assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:]) - assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:]) - - // Modify or delete some accounts, flatten everything onto disk - if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{ - accDelNoCache: {}, - accDelCache: {}, - conNukeNoCache: {}, - conNukeCache: {}, - }, map[common.Hash][]byte{ - accModNoCache: reverse(accModNoCache[:]), - accModCache: reverse(accModCache[:]), - }, map[common.Hash]map[common.Hash][]byte{ - conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])}, - conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])}, - conDelNoCache: {conDelNoCacheSlot: nil}, - conDelCache: {conDelCacheSlot: nil}, - }); err != nil { - t.Fatalf("test %d: failed to update snapshot tree: %v", i, err) - } - if err := snaps.Cap(diffRoot, 0); err != nil { - t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err) - } - // Retrieve all the data through the disk layer and validate it - base = snaps.Snapshot(diffRoot) - if _, ok := base.(*diskLayer); !ok { - t.Fatalf("test %d: update not flattend into the disk layer", i) - } - assertAccount(accNoModNoCache, accNoModNoCache[:]) - assertAccount(accNoModCache, accNoModCache[:]) - assertAccount(accModNoCache, reverse(accModNoCache[:])) - assertAccount(accModCache, reverse(accModCache[:])) - assertAccount(accDelNoCache, nil) - assertAccount(accDelCache, nil) - - assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) - assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) - assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:])) - assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:])) - assertStorage(conDelNoCache, conDelNoCacheSlot, nil) - assertStorage(conDelCache, conDelCacheSlot, nil) - assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil) - assertStorage(conNukeCache, conNukeCacheSlot, nil) - - // Retrieve all the data directly from the database and validate it - - // 
assertDatabaseAccount ensures that an account inside the database matches - // the given blob if it's already covered by the disk snapshot, and does not - // exist otherwise. - assertDatabaseAccount := func(account common.Hash, data []byte) { - t.Helper() - blob := rawdb.ReadAccountSnapshot(db, account) - if bytes.Compare(account[:], genMarker) > 0 && blob != nil { - t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob) - } - if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) { - t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data) - } - } - assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:]) - assertDatabaseAccount(accNoModCache, accNoModCache[:]) - assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:])) - assertDatabaseAccount(accModCache, reverse(accModCache[:])) - assertDatabaseAccount(accDelNoCache, nil) - assertDatabaseAccount(accDelCache, nil) - - // assertDatabaseStorage ensures that a storage slot inside the database - // matches the given blob if it's already covered by the disk snapshot, - // and does not exist otherwise. - assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) { - t.Helper() - blob := rawdb.ReadStorageSnapshot(db, account, slot) - if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil { - t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob) - } - if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) { - t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data) - } - } - assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) - assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) - assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:])) - assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:])) - assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil) - assertDatabaseStorage(conDelCache, conDelCacheSlot, nil) - assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil) - assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil) - } -} - -// Tests that merging something into a disk layer persists it into the database -// and invalidates any previously written and cached values, discarding anything -// after the in-progress generation marker. -// -// This test case is a tiny specialized case of TestDiskPartialMerge, which tests -// some very specific cornercases that random tests won't ever trigger. -func TestDiskMidAccountPartialMerge(t *testing.T) { - // TODO(@karalabe) ? -} - -// TestDiskSeek tests that seek-operations work on the disk layer -func TestDiskSeek(t *testing.T) { - // Create some accounts in the disk layer - var db ethdb.Database - - if dir, err := ioutil.TempDir("", "disklayer-test"); err != nil { - t.Fatal(err) - } else { - defer os.RemoveAll(dir) - diskdb, err := leveldb.New(dir, 256, 0, "") - if err != nil { - t.Fatal(err) - } - db = rawdb.NewDatabase(diskdb) - } - // Fill even keys [0,2,4...] 
- for i := 0; i < 0xff; i += 2 { - acc := common.Hash{byte(i)} - rawdb.WriteAccountSnapshot(db, acc, acc[:]) - } - // Add an 'higher' key, with incorrect (higher) prefix - highKey := []byte{rawdb.SnapshotAccountPrefix[0] + 1} - db.Put(highKey, []byte{0xff, 0xff}) - - baseRoot := randomHash() - rawdb.WriteSnapshotRoot(db, baseRoot) - - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - baseRoot: &diskLayer{ - diskdb: db, - cache: fastcache.New(500 * 1024), - root: baseRoot, - }, - }, - } - // Test some different seek positions - type testcase struct { - pos byte - expkey byte - } - var cases = []testcase{ - {0xff, 0x55}, // this should exit immediately without checking key - {0x01, 0x02}, - {0xfe, 0xfe}, - {0xfd, 0xfe}, - {0x00, 0x00}, - } - for i, tc := range cases { - it, err := snaps.AccountIterator(baseRoot, common.Hash{tc.pos}) - if err != nil { - t.Fatalf("case %d, error: %v", i, err) - } - count := 0 - for it.Next() { - k, v, err := it.Hash()[0], it.Account()[0], it.Error() - if err != nil { - t.Fatalf("test %d, item %d, error: %v", i, count, err) - } - // First item in iterator should have the expected key - if count == 0 && k != tc.expkey { - t.Fatalf("test %d, item %d, got %v exp %v", i, count, k, tc.expkey) - } - count++ - if v != k { - t.Fatalf("test %d, item %d, value wrong, got %v exp %v", i, count, v, k) - } - } - } -} diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go deleted file mode 100644 index ef4859c..0000000 --- a/core/state/snapshot/iterator_test.go +++ /dev/null @@ -1,1046 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
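[editor's note] The deleted TestDiskSeek above pins down the seek contract: starting an iterator at position p must land on the smallest stored key >= p (and terminate cleanly when p is past the end). A tiny sketch of the same ceiling computation over the test's even-key fill, using the stdlib instead of a real snapshot tree (illustrative only, not part of the patch):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	var keys []int
	for i := 0; i < 0xff; i += 2 { // same even-key fill as the test: 0x00, 0x02, ..., 0xfe
		keys = append(keys, i)
	}
	for _, pos := range []int{0x01, 0xfd, 0xfe, 0x00} {
		// Find the first key >= pos, mirroring the iterator's seek semantics.
		idx := sort.Search(len(keys), func(i int) bool { return keys[i] >= pos })
		if idx == len(keys) {
			fmt.Printf("seek 0x%02x: exhausted\n", pos) // the 0xff case exits without yielding
			continue
		}
		fmt.Printf("seek 0x%02x -> first key 0x%02x\n", pos, keys[idx])
	}
}
```

This reproduces the test's case table: 0x01 -> 0x02, 0xfd -> 0xfe, 0xfe -> 0xfe, 0x00 -> 0x00.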
- -package snapshot - -import ( - "bytes" - "encoding/binary" - "fmt" - "math/rand" - "testing" - - "github.com/VictoriaMetrics/fastcache" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ethereum/go-ethereum/common" -) - -// TestAccountIteratorBasics tests some simple single-layer(diff and disk) iteration -func TestAccountIteratorBasics(t *testing.T) { - var ( - destructs = make(map[common.Hash]struct{}) - accounts = make(map[common.Hash][]byte) - storage = make(map[common.Hash]map[common.Hash][]byte) - ) - // Fill up a parent - for i := 0; i < 100; i++ { - h := randomHash() - data := randomAccount() - - accounts[h] = data - if rand.Intn(4) == 0 { - destructs[h] = struct{}{} - } - if rand.Intn(2) == 0 { - accStorage := make(map[common.Hash][]byte) - value := make([]byte, 32) - rand.Read(value) - accStorage[randomHash()] = value - storage[h] = accStorage - } - } - // Add some (identical) layers on top - diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) - it := diffLayer.AccountIterator(common.Hash{}) - verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator - - diskLayer := diffToDisk(diffLayer) - it = diskLayer.AccountIterator(common.Hash{}) - verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator -} - -// TestStorageIteratorBasics tests some simple single-layer(diff and disk) iteration for storage -func TestStorageIteratorBasics(t *testing.T) { - var ( - nilStorage = make(map[common.Hash]int) - accounts = make(map[common.Hash][]byte) - storage = make(map[common.Hash]map[common.Hash][]byte) - ) - // Fill some random data - for i := 0; i < 10; i++ { - h := randomHash() - accounts[h] = randomAccount() - - accStorage := make(map[common.Hash][]byte) - value := make([]byte, 32) - - var nilstorage int - for i := 0; i < 100; i++ { - rand.Read(value) - if rand.Intn(2) == 0 { - accStorage[randomHash()] = common.CopyBytes(value) - } else { - accStorage[randomHash()] = nil // delete slot - nilstorage += 1 - } - } - storage[h] = accStorage - nilStorage[h] = nilstorage - } - // Add some (identical) layers on top - diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage)) - for account := range accounts { - it, _ := diffLayer.StorageIterator(account, common.Hash{}) - verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator - } - - diskLayer := diffToDisk(diffLayer) - for account := range accounts { - it, _ := diskLayer.StorageIterator(account, common.Hash{}) - verifyIterator(t, 100-nilStorage[account], it, verifyNothing) // Nil is allowed for single layer iterator - } -} - -type testIterator struct { - values []byte -} - -func newTestIterator(values ...byte) *testIterator { - return &testIterator{values} -} - -func (ti *testIterator) Seek(common.Hash) { - panic("implement me") -} - -func (ti *testIterator) Next() bool { - ti.values = ti.values[1:] - return len(ti.values) > 0 -} - -func (ti *testIterator) Error() error { - return nil -} - -func (ti *testIterator) Hash() common.Hash { - return common.BytesToHash([]byte{ti.values[0]}) -} - -func (ti *testIterator) Account() []byte { - return nil -} - -func (ti *testIterator) Slot() []byte { - return nil -} - -func (ti *testIterator) Release() {} - -func TestFastIteratorBasics(t *testing.T) { - type testCase struct { - lists [][]byte - expKeys []byte - } - for i, tc := range []testCase{ - {lists: [][]byte{{0, 1, 8}, {1, 2, 8}, {2, 9}, 
{4}, - {7, 14, 15}, {9, 13, 15, 16}}, - expKeys: []byte{0, 1, 2, 4, 7, 8, 9, 13, 14, 15, 16}}, - {lists: [][]byte{{0, 8}, {1, 2, 8}, {7, 14, 15}, {8, 9}, - {9, 10}, {10, 13, 15, 16}}, - expKeys: []byte{0, 1, 2, 7, 8, 9, 10, 13, 14, 15, 16}}, - } { - var iterators []*weightedIterator - for i, data := range tc.lists { - it := newTestIterator(data...) - iterators = append(iterators, &weightedIterator{it, i}) - } - fi := &fastIterator{ - iterators: iterators, - initiated: false, - } - count := 0 - for fi.Next() { - if got, exp := fi.Hash()[31], tc.expKeys[count]; exp != got { - t.Errorf("tc %d, [%d]: got %d exp %d", i, count, got, exp) - } - count++ - } - } -} - -type verifyContent int - -const ( - verifyNothing verifyContent = iota - verifyAccount - verifyStorage -) - -func verifyIterator(t *testing.T, expCount int, it Iterator, verify verifyContent) { - t.Helper() - - var ( - count = 0 - last = common.Hash{} - ) - for it.Next() { - hash := it.Hash() - if bytes.Compare(last[:], hash[:]) >= 0 { - t.Errorf("wrong order: %x >= %x", last, hash) - } - count++ - if verify == verifyAccount && len(it.(AccountIterator).Account()) == 0 { - t.Errorf("iterator returned nil-value for hash %x", hash) - } else if verify == verifyStorage && len(it.(StorageIterator).Slot()) == 0 { - t.Errorf("iterator returned nil-value for hash %x", hash) - } - last = hash - } - if count != expCount { - t.Errorf("iterator count mismatch: have %d, want %d", count, expCount) - } - if err := it.Error(); err != nil { - t.Errorf("iterator failed: %v", err) - } -} - -// TestAccountIteratorTraversal tests some simple multi-layer iteration. -func TestAccountIteratorTraversal(t *testing.T) { - // Create an empty base layer and a snapshot tree out of it - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - // Stack three diff layers on top with various overlaps - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) - - snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xbb", "0xdd", "0xf0"), nil) - - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xcc", "0xf0", "0xff"), nil) - - // Verify the single and multi-layer iterators - head := snaps.Snapshot(common.HexToHash("0x04")) - - verifyIterator(t, 3, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing) - verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount) - - it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) - verifyIterator(t, 7, it, verifyAccount) - it.Release() - - // Test after persist some bottom-most layers into the disk, - // the functionalities still work. 
- limit := aggregatorMemoryLimit - defer func() { - aggregatorMemoryLimit = limit - }() - aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk - snaps.Cap(common.HexToHash("0x04"), 2) - verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount) - - it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) - verifyIterator(t, 7, it, verifyAccount) - it.Release() -} - -func TestStorageIteratorTraversal(t *testing.T) { - // Create an empty base layer and a snapshot tree out of it - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - // Stack three diff layers on top with various overlaps - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil)) - - snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil)) - - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil)) - - // Verify the single and multi-layer iterators - head := snaps.Snapshot(common.HexToHash("0x04")) - - diffIter, _ := head.(snapshot).StorageIterator(common.HexToHash("0xaa"), common.Hash{}) - verifyIterator(t, 3, diffIter, verifyNothing) - verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage) - - it, _ := snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{}) - verifyIterator(t, 6, it, verifyStorage) - it.Release() - - // Test after persist some bottom-most layers into the disk, - // the functionalities still work. - limit := aggregatorMemoryLimit - defer func() { - aggregatorMemoryLimit = limit - }() - aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk - snaps.Cap(common.HexToHash("0x04"), 2) - verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage) - - it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{}) - verifyIterator(t, 6, it, verifyStorage) - it.Release() -} - -// TestAccountIteratorTraversalValues tests some multi-layer iteration, where we -// also expect the correct values to show up. 
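[editor's note] The traversal tests above stack three overlapping diff layers and expect the merged iterator to yield 7 unique accounts, with each key visited once in sorted order and the newest layer's value shadowing older ones. A minimal eager-flattening sketch of that invariant (illustrative only, not part of the patch; the real iterators stream lazily rather than materializing a map):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Oldest to newest, mirroring the 0x02 -> 0x04 updates in the test above.
	layers := []map[string]string{
		{"aa": "L1", "ee": "L1", "ff": "L1", "f0": "L1"},
		{"bb": "L2", "dd": "L2", "f0": "L2"},
		{"cc": "L3", "f0": "L3", "ff": "L3"},
	}
	merged := make(map[string]string)
	for _, layer := range layers { // later (newer) layers overwrite earlier ones
		for k, v := range layer {
			merged[k] = v
		}
	}
	keys := make([]string, 0, len(merged))
	for k := range merged {
		keys = append(keys, k)
	}
	sort.Strings(keys) // iterator order is ascending by hash
	for _, k := range keys {
		fmt.Printf("%s -> %s\n", k, merged[k]) // 7 unique keys; f0 and ff carry L3's values
	}
}
```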
-func TestAccountIteratorTraversalValues(t *testing.T) { - // Create an empty base layer and a snapshot tree out of it - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - // Create a batch of account sets to seed subsequent layers with - var ( - a = make(map[common.Hash][]byte) - b = make(map[common.Hash][]byte) - c = make(map[common.Hash][]byte) - d = make(map[common.Hash][]byte) - e = make(map[common.Hash][]byte) - f = make(map[common.Hash][]byte) - g = make(map[common.Hash][]byte) - h = make(map[common.Hash][]byte) - ) - for i := byte(2); i < 0xff; i++ { - a[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 0, i)) - if i > 20 && i%2 == 0 { - b[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 1, i)) - } - if i%4 == 0 { - c[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 2, i)) - } - if i%7 == 0 { - d[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 3, i)) - } - if i%8 == 0 { - e[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 4, i)) - } - if i > 50 || i < 85 { - f[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 5, i)) - } - if i%64 == 0 { - g[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 6, i)) - } - if i%128 == 0 { - h[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 7, i)) - } - } - // Assemble a stack of snapshots from the account layers - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil) - snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil) - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil) - snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil) - snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil) - snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil) - snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil) - snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil) - - it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{}) - head := snaps.Snapshot(common.HexToHash("0x09")) - for it.Next() { - hash := it.Hash() - want, err := head.AccountRLP(hash) - if err != nil { - t.Fatalf("failed to retrieve expected account: %v", err) - } - if have := it.Account(); !bytes.Equal(want, have) { - t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want) - } - } - it.Release() - - // Test after persist some bottom-most layers into the disk, - // the functionalities still work. 
- limit := aggregatorMemoryLimit - defer func() { - aggregatorMemoryLimit = limit - }() - aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk - snaps.Cap(common.HexToHash("0x09"), 2) - - it, _ = snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{}) - for it.Next() { - hash := it.Hash() - want, err := head.AccountRLP(hash) - if err != nil { - t.Fatalf("failed to retrieve expected account: %v", err) - } - if have := it.Account(); !bytes.Equal(want, have) { - t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want) - } - } - it.Release() -} - -func TestStorageIteratorTraversalValues(t *testing.T) { - // Create an empty base layer and a snapshot tree out of it - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - wrapStorage := func(storage map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte { - return map[common.Hash]map[common.Hash][]byte{ - common.HexToHash("0xaa"): storage, - } - } - // Create a batch of storage sets to seed subsequent layers with - var ( - a = make(map[common.Hash][]byte) - b = make(map[common.Hash][]byte) - c = make(map[common.Hash][]byte) - d = make(map[common.Hash][]byte) - e = make(map[common.Hash][]byte) - f = make(map[common.Hash][]byte) - g = make(map[common.Hash][]byte) - h = make(map[common.Hash][]byte) - ) - for i := byte(2); i < 0xff; i++ { - a[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 0, i)) - if i > 20 && i%2 == 0 { - b[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 1, i)) - } - if i%4 == 0 { - c[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 2, i)) - } - if i%7 == 0 { - d[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 3, i)) - } - if i%8 == 0 { - e[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 4, i)) - } - if i > 50 || i < 85 { - f[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 5, i)) - } - if i%64 == 0 { - g[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 6, i)) - } - if i%128 == 0 { - h[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 7, i)) - } - } - // Assemble a stack of snapshots from the account layers - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a)) - snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b)) - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c)) - snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d)) - snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e)) - snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e)) - snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g)) - snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h)) - - it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{}) - head := snaps.Snapshot(common.HexToHash("0x09")) - for it.Next() { - hash := it.Hash() - want, err := head.Storage(common.HexToHash("0xaa"), hash) - if err != nil { - t.Fatalf("failed to retrieve expected storage slot: %v", err) - } - if have := it.Slot(); 
!bytes.Equal(want, have) { - t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want) - } - } - it.Release() - - // Test after persist some bottom-most layers into the disk, - // the functionalities still work. - limit := aggregatorMemoryLimit - defer func() { - aggregatorMemoryLimit = limit - }() - aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk - snaps.Cap(common.HexToHash("0x09"), 2) - - it, _ = snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{}) - for it.Next() { - hash := it.Hash() - want, err := head.Storage(common.HexToHash("0xaa"), hash) - if err != nil { - t.Fatalf("failed to retrieve expected slot: %v", err) - } - if have := it.Slot(); !bytes.Equal(want, have) { - t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want) - } - } - it.Release() -} - -// This testcase is notorious, all layers contain the exact same 200 accounts. -func TestAccountIteratorLargeTraversal(t *testing.T) { - // Create a custom account factory to recreate the same addresses - makeAccounts := func(num int) map[common.Hash][]byte { - accounts := make(map[common.Hash][]byte) - for i := 0; i < num; i++ { - h := common.Hash{} - binary.BigEndian.PutUint64(h[:], uint64(i+1)) - accounts[h] = randomAccount() - } - return accounts - } - // Build up a large stack of snapshots - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - for i := 1; i < 128; i++ { - snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil) - } - // Iterate the entire stack and ensure everything is hit only once - head := snaps.Snapshot(common.HexToHash("0x80")) - verifyIterator(t, 200, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing) - verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount) - - it, _ := snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{}) - verifyIterator(t, 200, it, verifyAccount) - it.Release() - - // Test after persist some bottom-most layers into the disk, - // the functionalities still work. - limit := aggregatorMemoryLimit - defer func() { - aggregatorMemoryLimit = limit - }() - aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk - snaps.Cap(common.HexToHash("0x80"), 2) - - verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount) - - it, _ = snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{}) - verifyIterator(t, 200, it, verifyAccount) - it.Release() -} - -// TestAccountIteratorFlattening tests what happens when we -// - have a live iterator on child C (parent C1 -> C2 .. 
CN) -// - flattens C2 all the way into CN -// - continues iterating -func TestAccountIteratorFlattening(t *testing.T) { - // Create an empty base layer and a snapshot tree out of it - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - // Create a stack of diffs on top - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) - - snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xbb", "0xdd", "0xf0"), nil) - - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xcc", "0xf0", "0xff"), nil) - - // Create an iterator and flatten the data from underneath it - it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) - defer it.Release() - - if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil { - t.Fatalf("failed to flatten snapshot stack: %v", err) - } - //verifyIterator(t, 7, it) -} - -func TestAccountIteratorSeek(t *testing.T) { - // Create a snapshot stack with some initial data - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) - - snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xbb", "0xdd", "0xf0"), nil) - - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xcc", "0xf0", "0xff"), nil) - - // Account set is now - // 02: aa, ee, f0, ff - // 03: aa, bb, dd, ee, f0 (, f0), ff - // 04: aa, bb, cc, dd, ee, f0 (, f0), ff (, ff) - // Construct various iterators and ensure their traversal is correct - it, _ := snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xdd")) - defer it.Release() - verifyIterator(t, 3, it, verifyAccount) // expected: ee, f0, ff - - it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xaa")) - defer it.Release() - verifyIterator(t, 4, it, verifyAccount) // expected: aa, ee, f0, ff - - it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff")) - defer it.Release() - verifyIterator(t, 1, it, verifyAccount) // expected: ff - - it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff1")) - defer it.Release() - verifyIterator(t, 0, it, verifyAccount) // expected: nothing - - it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xbb")) - defer it.Release() - verifyIterator(t, 6, it, verifyAccount) // expected: bb, cc, dd, ee, f0, ff - - it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xef")) - defer it.Release() - verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff - - it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xf0")) - defer it.Release() - verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff - - it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff")) - defer it.Release() - verifyIterator(t, 1, it, verifyAccount) // expected: ff - - it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff1")) - defer it.Release() - verifyIterator(t, 0, it, verifyAccount) // 
expected: nothing -} - -func TestStorageIteratorSeek(t *testing.T) { - // Create a snapshot stack with some initial data - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - // Stack three diff layers on top with various overlaps - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil)) - - snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil)) - - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil)) - - // Account set is now - // 02: 01, 03, 05 - // 03: 01, 02, 03, 05 (, 05), 06 - // 04: 01(, 01), 02, 03, 05(, 05, 05), 06, 08 - // Construct various iterators and ensure their traversal is correct - it, _ := snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x01")) - defer it.Release() - verifyIterator(t, 3, it, verifyStorage) // expected: 01, 03, 05 - - it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x02")) - defer it.Release() - verifyIterator(t, 2, it, verifyStorage) // expected: 03, 05 - - it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x5")) - defer it.Release() - verifyIterator(t, 1, it, verifyStorage) // expected: 05 - - it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x6")) - defer it.Release() - verifyIterator(t, 0, it, verifyStorage) // expected: nothing - - it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x01")) - defer it.Release() - verifyIterator(t, 6, it, verifyStorage) // expected: 01, 02, 03, 05, 06, 08 - - it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x05")) - defer it.Release() - verifyIterator(t, 3, it, verifyStorage) // expected: 05, 06, 08 - - it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x08")) - defer it.Release() - verifyIterator(t, 1, it, verifyStorage) // expected: 08 - - it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x09")) - defer it.Release() - verifyIterator(t, 0, it, verifyStorage) // expected: nothing -} - -// TestAccountIteratorDeletions tests that the iterator behaves correct when there are -// deleted accounts (where the Account() value is nil). The iterator -// should not output any accounts or nil-values for those cases. 
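
As context for the deletion tests that follow: a destruct marker recorded in a higher diff layer must hide any value the same account still holds in a lower layer, while a value written in the same layer wins (re-creation). A minimal stand-alone Go sketch of that resolution order (invented names, not the snapshot API):

package main

import "fmt"

// layer is a toy diff layer: destructed marks an account as deleted in this
// layer, accounts holds the values (re-)written in this layer.
type layer struct {
	parent     *layer
	destructed map[string]bool
	accounts   map[string]string
}

// get resolves top-down: a value written in a layer wins over that layer's
// own destruct marker (re-creation), while a destruct marker hides anything
// still stored in lower layers.
func (l *layer) get(key string) (string, bool) {
	for ; l != nil; l = l.parent {
		if v, ok := l.accounts[key]; ok {
			return v, true
		}
		if l.destructed[key] {
			return "", false
		}
	}
	return "", false
}

func main() {
	base := &layer{accounts: map[string]string{"0x22": "old"}}
	top := &layer{parent: base, destructed: map[string]bool{"0x22": true}}
	if _, ok := top.get("0x22"); !ok {
		fmt.Println("0x22 is hidden by the destruct marker")
	}
}
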
-func TestAccountIteratorDeletions(t *testing.T) { - // Create an empty base layer and a snapshot tree out of it - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - // Stack three diff layers on top with various overlaps - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), - nil, randomAccountSet("0x11", "0x22", "0x33"), nil) - - deleted := common.HexToHash("0x22") - destructed := map[common.Hash]struct{}{ - deleted: {}, - } - snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), - destructed, randomAccountSet("0x11", "0x33"), nil) - - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), - nil, randomAccountSet("0x33", "0x44", "0x55"), nil) - - // The output should be 11,33,44,55 - it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) - // Do a quick check - verifyIterator(t, 4, it, verifyAccount) - it.Release() - - // And a more detailed verification that we indeed do not see '0x22' - it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) - defer it.Release() - for it.Next() { - hash := it.Hash() - if it.Account() == nil { - t.Errorf("iterator returned nil-value for hash %x", hash) - } - if hash == deleted { - t.Errorf("expected deleted elem %x to not be returned by iterator", deleted) - } - } -} - -func TestStorageIteratorDeletions(t *testing.T) { - // Create an empty base layer and a snapshot tree out of it - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - // Stack three diff layers on top with various overlaps - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil)) - - snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}})) - - // The output should be 02,04,05,06 - it, _ := snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{}) - verifyIterator(t, 4, it, verifyStorage) - it.Release() - - // The output should be 04,05,06 - it, _ = snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.HexToHash("0x03")) - verifyIterator(t, 3, it, verifyStorage) - it.Release() - - // Destruct the whole storage - destructed := map[common.Hash]struct{}{ - common.HexToHash("0xaa"): {}, - } - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil) - - it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{}) - verifyIterator(t, 0, it, verifyStorage) - it.Release() - - // Re-insert the slots of the same account - snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil)) - - // The output should be 07,08,09 - it, _ = snaps.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{}) - verifyIterator(t, 3, it, verifyStorage) - it.Release() - - // Destruct the whole storage but re-create the account in the same layer - snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 
destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
-	it, _ = snaps.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
-	verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
-	it.Release()
-
-	verifyIterator(t, 2, snaps.Snapshot(common.HexToHash("0x06")).(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
-}
-
-// BenchmarkAccountIteratorTraversal is a bit notorious -- all layers contain the
-// exact same 200 accounts. That means that we need to process 2000 items, but
-// only spit out 200 values eventually.
-//
-// The value-fetching benchmark is easy on the binary iterator, since it never has to reach
-// down at any depth for retrieving the values -- all are on the topmost layer
-//
-// BenchmarkAccountIteratorTraversal/binary_iterator_keys-6		2239	483674 ns/op
-// BenchmarkAccountIteratorTraversal/binary_iterator_values-6		2403	501810 ns/op
-// BenchmarkAccountIteratorTraversal/fast_iterator_keys-6		1923	677966 ns/op
-// BenchmarkAccountIteratorTraversal/fast_iterator_values-6		1741	649967 ns/op
-func BenchmarkAccountIteratorTraversal(b *testing.B) {
-	// Create a custom account factory to recreate the same addresses
-	makeAccounts := func(num int) map[common.Hash][]byte {
-		accounts := make(map[common.Hash][]byte)
-		for i := 0; i < num; i++ {
-			h := common.Hash{}
-			binary.BigEndian.PutUint64(h[:], uint64(i+1))
-			accounts[h] = randomAccount()
-		}
-		return accounts
-	}
-	// Build up a large stack of snapshots
-	base := &diskLayer{
-		diskdb: rawdb.NewMemoryDatabase(),
-		root:   common.HexToHash("0x01"),
-		cache:  fastcache.New(1024 * 500),
-	}
-	snaps := &Tree{
-		layers: map[common.Hash]snapshot{
-			base.root: base,
-		},
-	}
-	for i := 1; i <= 100; i++ {
-		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
-	}
-	// We call this once before the benchmark, so the creation of
-	// sorted accountlists are not included in the results.
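
The comment above refers to a common testing.B idiom: pay one-off setup costs before the sub-benchmarks so only per-iteration work is timed. A generic sketch of the same pattern (hypothetical workload, to live in a _test.go file; not the snapshot API):

package bench

import "testing"

// buildIndex stands in for the one-off construction of sorted account lists.
func buildIndex() []int {
	idx := make([]int, 200)
	for i := range idx {
		idx[i] = i
	}
	return idx
}

func BenchmarkLookup(b *testing.B) {
	idx := buildIndex() // paid once, outside the measured loops
	b.Run("scan", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			sum := 0
			for _, v := range idx {
				sum += v
			}
			_ = sum
		}
	})
}
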
- head := snaps.Snapshot(common.HexToHash("0x65")) - head.(*diffLayer).newBinaryAccountIterator() - - b.Run("binary iterator keys", func(b *testing.B) { - for i := 0; i < b.N; i++ { - got := 0 - it := head.(*diffLayer).newBinaryAccountIterator() - for it.Next() { - got++ - } - if exp := 200; got != exp { - b.Errorf("iterator len wrong, expected %d, got %d", exp, got) - } - } - }) - b.Run("binary iterator values", func(b *testing.B) { - for i := 0; i < b.N; i++ { - got := 0 - it := head.(*diffLayer).newBinaryAccountIterator() - for it.Next() { - got++ - head.(*diffLayer).accountRLP(it.Hash(), 0) - } - if exp := 200; got != exp { - b.Errorf("iterator len wrong, expected %d, got %d", exp, got) - } - } - }) - b.Run("fast iterator keys", func(b *testing.B) { - for i := 0; i < b.N; i++ { - it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) - defer it.Release() - - got := 0 - for it.Next() { - got++ - } - if exp := 200; got != exp { - b.Errorf("iterator len wrong, expected %d, got %d", exp, got) - } - } - }) - b.Run("fast iterator values", func(b *testing.B) { - for i := 0; i < b.N; i++ { - it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) - defer it.Release() - - got := 0 - for it.Next() { - got++ - it.Account() - } - if exp := 200; got != exp { - b.Errorf("iterator len wrong, expected %d, got %d", exp, got) - } - } - }) -} - -// BenchmarkAccountIteratorLargeBaselayer is a pretty realistic benchmark, where -// the baselayer is a lot larger than the upper layer. -// -// This is heavy on the binary iterator, which in most cases will have to -// call recursively 100 times for the majority of the values -// -// BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(keys)-6 514 1971999 ns/op -// BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(values)-6 61 18997492 ns/op -// BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(keys)-6 10000 114385 ns/op -// BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(values)-6 4047 296823 ns/op -func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) { - // Create a custom account factory to recreate the same addresses - makeAccounts := func(num int) map[common.Hash][]byte { - accounts := make(map[common.Hash][]byte) - for i := 0; i < num; i++ { - h := common.Hash{} - binary.BigEndian.PutUint64(h[:], uint64(i+1)) - accounts[h] = randomAccount() - } - return accounts - } - // Build up a large stack of snapshots - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil) - for i := 2; i <= 100; i++ { - snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil) - } - // We call this once before the benchmark, so the creation of - // sorted accountlists are not included in the results. 
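
For the distinction the two benchmark families measure: the binary iterator resolves each key by recursing through parent layers, while the fast iterator merges the per-layer sorted key lists. A deliberately naive model of that merge, eager rather than heap-driven like the real iterator (invented names):

package main

import (
	"fmt"
	"sort"
)

// mergeKeys unions per-layer sorted key lists, newest layer first, keeping
// the first (newest) occurrence of every key. The real fast iterator does
// this lazily with a heap of per-layer iterators instead of materialising
// the whole union.
func mergeKeys(layers [][]string) []string {
	seen := make(map[string]bool)
	var out []string
	for _, keys := range layers {
		for _, k := range keys {
			if !seen[k] {
				seen[k] = true
				out = append(out, k)
			}
		}
	}
	sort.Strings(out)
	return out
}

func main() {
	fmt.Println(mergeKeys([][]string{{"a", "c"}, {"a", "b"}})) // [a b c]
}
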
- head := snaps.Snapshot(common.HexToHash("0x65")) - head.(*diffLayer).newBinaryAccountIterator() - - b.Run("binary iterator (keys)", func(b *testing.B) { - for i := 0; i < b.N; i++ { - got := 0 - it := head.(*diffLayer).newBinaryAccountIterator() - for it.Next() { - got++ - } - if exp := 2000; got != exp { - b.Errorf("iterator len wrong, expected %d, got %d", exp, got) - } - } - }) - b.Run("binary iterator (values)", func(b *testing.B) { - for i := 0; i < b.N; i++ { - got := 0 - it := head.(*diffLayer).newBinaryAccountIterator() - for it.Next() { - got++ - v := it.Hash() - head.(*diffLayer).accountRLP(v, 0) - } - if exp := 2000; got != exp { - b.Errorf("iterator len wrong, expected %d, got %d", exp, got) - } - } - }) - b.Run("fast iterator (keys)", func(b *testing.B) { - for i := 0; i < b.N; i++ { - it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) - defer it.Release() - - got := 0 - for it.Next() { - got++ - } - if exp := 2000; got != exp { - b.Errorf("iterator len wrong, expected %d, got %d", exp, got) - } - } - }) - b.Run("fast iterator (values)", func(b *testing.B) { - for i := 0; i < b.N; i++ { - it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) - defer it.Release() - - got := 0 - for it.Next() { - it.Account() - got++ - } - if exp := 2000; got != exp { - b.Errorf("iterator len wrong, expected %d, got %d", exp, got) - } - } - }) -} - -/* -func BenchmarkBinaryAccountIteration(b *testing.B) { - benchmarkAccountIteration(b, func(snap snapshot) AccountIterator { - return snap.(*diffLayer).newBinaryAccountIterator() - }) -} - -func BenchmarkFastAccountIteration(b *testing.B) { - benchmarkAccountIteration(b, newFastAccountIterator) -} - -func benchmarkAccountIteration(b *testing.B, iterator func(snap snapshot) AccountIterator) { - // Create a diff stack and randomize the accounts across them - layers := make([]map[common.Hash][]byte, 128) - for i := 0; i < len(layers); i++ { - layers[i] = make(map[common.Hash][]byte) - } - for i := 0; i < b.N; i++ { - depth := rand.Intn(len(layers)) - layers[depth][randomHash()] = randomAccount() - } - stack := snapshot(emptyLayer()) - for _, layer := range layers { - stack = stack.Update(common.Hash{}, layer, nil, nil) - } - // Reset the timers and report all the stats - it := iterator(stack) - - b.ResetTimer() - b.ReportAllocs() - - for it.Next() { - } -} -*/ diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go deleted file mode 100644 index 94e3610..0000000 --- a/core/state/snapshot/snapshot_test.go +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package snapshot - -import ( - "fmt" - "math/big" - "math/rand" - "testing" - - "github.com/VictoriaMetrics/fastcache" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" -) - -// randomHash generates a random blob of data and returns it as a hash. -func randomHash() common.Hash { - var hash common.Hash - if n, err := rand.Read(hash[:]); n != common.HashLength || err != nil { - panic(err) - } - return hash -} - -// randomAccount generates a random account and returns it RLP encoded. -func randomAccount() []byte { - root := randomHash() - a := Account{ - Balance: big.NewInt(rand.Int63()), - Nonce: rand.Uint64(), - Root: root[:], - CodeHash: emptyCode[:], - } - data, _ := rlp.EncodeToBytes(a) - return data -} - -// randomAccountSet generates a set of random accounts with the given strings as -// the account address hashes. -func randomAccountSet(hashes ...string) map[common.Hash][]byte { - accounts := make(map[common.Hash][]byte) - for _, hash := range hashes { - accounts[common.HexToHash(hash)] = randomAccount() - } - return accounts -} - -// randomStorageSet generates a set of random slots with the given strings as -// the slot addresses. -func randomStorageSet(accounts []string, hashes [][]string, nilStorage [][]string) map[common.Hash]map[common.Hash][]byte { - storages := make(map[common.Hash]map[common.Hash][]byte) - for index, account := range accounts { - storages[common.HexToHash(account)] = make(map[common.Hash][]byte) - - if index < len(hashes) { - hashes := hashes[index] - for _, hash := range hashes { - storages[common.HexToHash(account)][common.HexToHash(hash)] = randomHash().Bytes() - } - } - if index < len(nilStorage) { - nils := nilStorage[index] - for _, hash := range nils { - storages[common.HexToHash(account)][common.HexToHash(hash)] = nil - } - } - } - return storages -} - -// Tests that if a disk layer becomes stale, no active external references will -// be returned with junk data. This version of the test flattens every diff layer -// to check internal corner case around the bottom-most memory accumulator. 
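
The invalidation tests below all assert the same contract: once a layer has been flattened away, reads through a retained reference must fail with ErrSnapshotStale instead of returning junk. A minimal sketch of such a staleness guard (simplified, not the coreth types):

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errSnapshotStale = errors.New("snapshot stale")

// reader mimics the guarantee being tested: once a layer has been flattened
// away, every read through an old reference must fail with a stale error
// rather than return junk data.
type reader struct {
	mu    sync.Mutex
	stale bool
	data  map[string]string
}

func (r *reader) account(key string) (string, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.stale {
		return "", errSnapshotStale
	}
	return r.data[key], nil
}

func main() {
	r := &reader{data: map[string]string{"0xa1": "acc"}}
	r.stale = true // the layer was flattened into its parent
	_, err := r.account("0xa1")
	fmt.Println(err) // snapshot stale
}
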
-func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
-	// Create an empty base layer and a snapshot tree out of it
-	base := &diskLayer{
-		diskdb: rawdb.NewMemoryDatabase(),
-		root:   common.HexToHash("0x01"),
-		cache:  fastcache.New(1024 * 500),
-	}
-	snaps := &Tree{
-		layers: map[common.Hash]snapshot{
-			base.root: base,
-		},
-	}
-	// Retrieve a reference to the base and commit a diff on top
-	ref := snaps.Snapshot(base.root)
-
-	accounts := map[common.Hash][]byte{
-		common.HexToHash("0xa1"): randomAccount(),
-	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
-		t.Fatalf("failed to create a diff layer: %v", err)
-	}
-	if n := len(snaps.layers); n != 2 {
-		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 2)
-	}
-	// Commit the diff layer onto the disk and ensure it's persisted
-	if err := snaps.Cap(common.HexToHash("0x02"), 0); err != nil {
-		t.Fatalf("failed to merge diff layer onto disk: %v", err)
-	}
-	// Since the base layer was modified, ensure that data retrieval on the external reference fails
-	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
-		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
-	}
-	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
-		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
-	}
-	if n := len(snaps.layers); n != 1 {
-		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 1)
-		fmt.Println(snaps.layers)
-	}
-}
-
-// Tests that if a disk layer becomes stale, no active external references will
-// be returned with junk data. This version of the test retains the bottom diff
-// layer to check the usual mode of operation where the accumulator is retained.
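
The partial-flatten variant below forces the accumulator onto disk by zeroing aggregatorMemoryLimit and restoring it on exit. The save-and-restore idiom it uses, in isolation (illustrative stand-in variable):

package main

import "fmt"

// memoryLimit stands in for a package-level tunable such as the aggregator
// memory limit temporarily tweaked by these tests.
var memoryLimit = uint64(4 * 1024 * 1024)

func main() {
	func() {
		defer func(old uint64) { memoryLimit = old }(memoryLimit)
		memoryLimit = 0 // force the accumulator to be flushed immediately
		fmt.Println("inside:", memoryLimit)
	}()
	fmt.Println("restored:", memoryLimit)
}
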
-func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
-	// Create an empty base layer and a snapshot tree out of it
-	base := &diskLayer{
-		diskdb: rawdb.NewMemoryDatabase(),
-		root:   common.HexToHash("0x01"),
-		cache:  fastcache.New(1024 * 500),
-	}
-	snaps := &Tree{
-		layers: map[common.Hash]snapshot{
-			base.root: base,
-		},
-	}
-	// Retrieve a reference to the base and commit two diffs on top
-	ref := snaps.Snapshot(base.root)
-
-	accounts := map[common.Hash][]byte{
-		common.HexToHash("0xa1"): randomAccount(),
-	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
-		t.Fatalf("failed to create a diff layer: %v", err)
-	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
-		t.Fatalf("failed to create a diff layer: %v", err)
-	}
-	if n := len(snaps.layers); n != 3 {
-		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 3)
-	}
-	// Commit the diff layer onto the disk and ensure it's persisted
-	defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit)
-	aggregatorMemoryLimit = 0
-
-	if err := snaps.Cap(common.HexToHash("0x03"), 2); err != nil {
-		t.Fatalf("failed to merge diff layer onto disk: %v", err)
-	}
-	// Since the base layer was modified, ensure that data retrieval on the external reference fails
-	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
-		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
-	}
-	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
-		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
-	}
-	if n := len(snaps.layers); n != 2 {
-		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 2)
-		fmt.Println(snaps.layers)
-	}
-}
-
-// Tests that if a diff layer becomes stale, no active external references will
-// be returned with junk data. This version of the test flattens every diff layer
-// to check internal corner case around the bottom-most memory accumulator.
-func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) {
-	// Create an empty base layer and a snapshot tree out of it
-	base := &diskLayer{
-		diskdb: rawdb.NewMemoryDatabase(),
-		root:   common.HexToHash("0x01"),
-		cache:  fastcache.New(1024 * 500),
-	}
-	snaps := &Tree{
-		layers: map[common.Hash]snapshot{
-			base.root: base,
-		},
-	}
-	// Commit two diffs on top and retrieve a reference to the bottommost
-	accounts := map[common.Hash][]byte{
-		common.HexToHash("0xa1"): randomAccount(),
-	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
-		t.Fatalf("failed to create a diff layer: %v", err)
-	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
-		t.Fatalf("failed to create a diff layer: %v", err)
-	}
-	if n := len(snaps.layers); n != 3 {
-		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 3)
-	}
-	ref := snaps.Snapshot(common.HexToHash("0x02"))
-
-	// Flatten the diff layer into the bottom accumulator
-	if err := snaps.Cap(common.HexToHash("0x03"), 1); err != nil {
-		t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
-	}
-	// Since the accumulator diff layer was modified, ensure that data retrieval on the external reference fails
-	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
-		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
-	}
-	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
-		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
-	}
-	if n := len(snaps.layers); n != 2 {
-		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 2)
-		fmt.Println(snaps.layers)
-	}
-}
-
-// Tests that if a diff layer becomes stale, no active external references will
-// be returned with junk data. This version of the test retains the bottom diff
-// layer to check the usual mode of operation where the accumulator is retained.
-func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
-	// Create an empty base layer and a snapshot tree out of it
-	base := &diskLayer{
-		diskdb: rawdb.NewMemoryDatabase(),
-		root:   common.HexToHash("0x01"),
-		cache:  fastcache.New(1024 * 500),
-	}
-	snaps := &Tree{
-		layers: map[common.Hash]snapshot{
-			base.root: base,
-		},
-	}
-	// Commit three diffs on top and retrieve a reference to the bottommost
-	accounts := map[common.Hash][]byte{
-		common.HexToHash("0xa1"): randomAccount(),
-	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
-		t.Fatalf("failed to create a diff layer: %v", err)
-	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
-		t.Fatalf("failed to create a diff layer: %v", err)
-	}
-	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
-		t.Fatalf("failed to create a diff layer: %v", err)
-	}
-	if n := len(snaps.layers); n != 4 {
-		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 4)
-	}
-	ref := snaps.Snapshot(common.HexToHash("0x02"))
-
-	// Doing a Cap operation with many allowed layers should be a no-op
-	exp := len(snaps.layers)
-	if err := snaps.Cap(common.HexToHash("0x04"), 2000); err != nil {
-		t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
-	}
-	if got := len(snaps.layers); got != exp {
-		t.Errorf("layers modified, got %d exp %d", got, exp)
-	}
-	// Flatten the diff layer into the bottom accumulator
-	if err := snaps.Cap(common.HexToHash("0x04"), 2); err != nil {
-		t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
-	}
-	// Since the accumulator diff layer was modified, ensure that data retrieval on the external reference fails
-	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
-		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
-	}
-	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
-		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
-	}
-	if n := len(snaps.layers); n != 3 {
-		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 3)
-		fmt.Println(snaps.layers)
-	}
-}
-
-// TestPostCapBasicDataAccess tests basic data access through the snapshot tree after capping/flattening.
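
As a rough model of what Cap does in the test below: layers beyond the allowed depth are merged bottom-up into the base, newer writes winning. A simplification only; the real Tree also handles destructs, storage and staleness (invented names):

package main

import "fmt"

// flatten keeps at most `layers` diff layers on top of the base and merges
// everything older into the base, with newer entries overwriting older ones.
func flatten(base map[string]string, diffs []map[string]string, layers int) []map[string]string {
	for len(diffs) > layers {
		oldest := diffs[0]
		for k, v := range oldest {
			base[k] = v
		}
		diffs = diffs[1:]
	}
	return diffs
}

func main() {
	base := map[string]string{"0xa1": "v1"}
	diffs := []map[string]string{{"0xa2": "v2"}, {"0xa3": "v3"}}
	diffs = flatten(base, diffs, 1)
	fmt.Println(len(diffs), base["0xa2"]) // 1 v2
}
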
-func TestPostCapBasicDataAccess(t *testing.T) {
-	// setAccount is a helper to construct a random account entry and assign it to
-	// an account slot in a snapshot
-	setAccount := func(accKey string) map[common.Hash][]byte {
-		return map[common.Hash][]byte{
-			common.HexToHash(accKey): randomAccount(),
-		}
-	}
-	// Create a starting base layer and a snapshot tree out of it
-	base := &diskLayer{
-		diskdb: rawdb.NewMemoryDatabase(),
-		root:   common.HexToHash("0x01"),
-		cache:  fastcache.New(1024 * 500),
-	}
-	snaps := &Tree{
-		layers: map[common.Hash]snapshot{
-			base.root: base,
-		},
-	}
-	// The lowest difflayer
-	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
-	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
-	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
-
-	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
-	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
-
-	// checkExist verifies that an account exists in a snapshot
-	checkExist := func(layer *diffLayer, key string) error {
-		if data, _ := layer.Account(common.HexToHash(key)); data == nil {
-			return fmt.Errorf("expected %x to exist, got nil", common.HexToHash(key))
-		}
-		return nil
-	}
-	// shouldErr checks that an account access errors as expected
-	shouldErr := func(layer *diffLayer, key string) error {
-		if data, err := layer.Account(common.HexToHash(key)); err == nil {
-			return fmt.Errorf("expected error, got data %x", data)
-		}
-		return nil
-	}
-	// check basics
-	snap := snaps.Snapshot(common.HexToHash("0xb3")).(*diffLayer)
-
-	if err := checkExist(snap, "0xa1"); err != nil {
-		t.Error(err)
-	}
-	if err := checkExist(snap, "0xb2"); err != nil {
-		t.Error(err)
-	}
-	if err := checkExist(snap, "0xb3"); err != nil {
-		t.Error(err)
-	}
-	// Cap to a bad root should fail
-	if err := snaps.Cap(common.HexToHash("0x1337"), 0); err == nil {
-		t.Errorf("expected error, got none")
-	}
-	// Now, merge the a-chain
-	snaps.Cap(common.HexToHash("0xa3"), 0)
-
-	// At this point, a2 got merged into a1. Thus, a1 is now modified, and as a1 is
-	// the parent of b2, b2 should no longer be able to iterate into parent.
-
-	// These should still be accessible
-	if err := checkExist(snap, "0xb2"); err != nil {
-		t.Error(err)
-	}
-	if err := checkExist(snap, "0xb3"); err != nil {
-		t.Error(err)
-	}
-	// But these would need iteration into the modified parent
-	if err := shouldErr(snap, "0xa1"); err != nil {
-		t.Error(err)
-	}
-	if err := shouldErr(snap, "0xa2"); err != nil {
-		t.Error(err)
-	}
-	if err := shouldErr(snap, "0xa3"); err != nil {
-		t.Error(err)
-	}
-	// Now, merge it again, just for fun. It should now error, since a3
-	// is a disk layer
-	if err := snaps.Cap(common.HexToHash("0xa3"), 0); err == nil {
-		t.Error("expected error capping the disk layer, got none")
-	}
-}
diff --git a/core/state/snapshot/wipe_test.go b/core/state/snapshot/wipe_test.go
deleted file mode 100644
index a656982..0000000
--- a/core/state/snapshot/wipe_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package snapshot
-
-import (
-	"math/rand"
-	"testing"
-
-	"github.com/ava-labs/coreth/core/rawdb"
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/ethdb/memorydb"
-)
-
-// Tests that given a database with random data content, all parts of a snapshot
-// can be correctly wiped without touching anything else.
-func TestWipe(t *testing.T) {
-	// Create a database with some random snapshot data
-	db := memorydb.New()
-
-	for i := 0; i < 128; i++ {
-		account := randomHash()
-		rawdb.WriteAccountSnapshot(db, account, randomHash().Bytes())
-		for j := 0; j < 1024; j++ {
-			rawdb.WriteStorageSnapshot(db, account, randomHash(), randomHash().Bytes())
-		}
-	}
-	rawdb.WriteSnapshotRoot(db, randomHash())
-
-	// Add some random non-snapshot data too to make wiping harder
-	for i := 0; i < 65536; i++ {
-		// Generate a key that's the wrong length for a state snapshot item
-		var keysize int
-		for keysize == 0 || keysize == 32 || keysize == 64 {
-			keysize = 8 + rand.Intn(64) // +8 to ensure we will "never" randomize duplicates
-		}
-		// Randomize the suffix, dedup and inject it under the snapshot namespace
-		keysuffix := make([]byte, keysize)
-		rand.Read(keysuffix)
-
-		if rand.Int31n(2) == 0 {
-			db.Put(append(rawdb.SnapshotAccountPrefix, keysuffix...), randomHash().Bytes())
-		} else {
-			db.Put(append(rawdb.SnapshotStoragePrefix, keysuffix...), randomHash().Bytes())
-		}
-	}
-	// Sanity check that all the keys are present
-	var items int
-
-	it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
-	defer it.Release()
-
-	for it.Next() {
-		key := it.Key()
-		if len(key) == len(rawdb.SnapshotAccountPrefix)+common.HashLength {
-			items++
-		}
-	}
-	it = db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
-	defer it.Release()
-
-	for it.Next() {
-		key := it.Key()
-		if len(key) == len(rawdb.SnapshotStoragePrefix)+2*common.HashLength {
-			items++
-		}
-	}
-	if items != 128+128*1024 {
-		t.Fatalf("snapshot size mismatch: have %d, want %d", items, 128+128*1024)
-	}
-	if hash := rawdb.ReadSnapshotRoot(db); hash == (common.Hash{}) {
-		t.Errorf("snapshot block marker mismatch: have %#x, want <not-nil>", hash)
-	}
-	// Wipe all snapshot entries from the database
-	<-wipeSnapshot(db, true)
-
-	// Iterate over the database and ensure no snapshot information remains
-	it = db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
-	defer it.Release()
-
-	for it.Next() {
-		key := it.Key()
-		if len(key) == len(rawdb.SnapshotAccountPrefix)+common.HashLength {
-			t.Errorf("snapshot entry remained after wipe: %x", key)
-		}
-	}
-	it = db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
-	defer it.Release()
-
-	for it.Next() {
-		key := it.Key()
-		if len(key) == len(rawdb.SnapshotStoragePrefix)+2*common.HashLength {
-			t.Errorf("snapshot entry remained after wipe: %x", key)
-		}
-	}
-	if hash := rawdb.ReadSnapshotRoot(db); hash != (common.Hash{}) {
-		
t.Errorf("snapshot block marker remained after wipe: %#x", hash) - } - // Iterate over the database and ensure miscellaneous items are present - items = 0 - - it = db.NewIterator(nil, nil) - defer it.Release() - - for it.Next() { - items++ - } - if items != 65536 { - t.Fatalf("misc item count mismatch: have %d, want %d", items, 65536) - } -} diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 35ce39f..abfa2aa 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -17,6 +17,7 @@ package vm import ( + "errors" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" @@ -265,7 +266,12 @@ func opBalance(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([ func opBalanceMultiCoin(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { addr, cid := callContext.stack.pop(), callContext.stack.pop() - callContext.stack.push(interpreter.evm.StateDB.GetBalanceMultiCoin(common.BigToAddress(addr), common.BigToHash(cid))) + res, err := uint256.FromBig(interpreter.evm.StateDB.GetBalanceMultiCoin( + common.BigToAddress(addr.ToBig()), common.BigToHash(cid.ToBig()))) + if err { + return nil, errors.New("balance overflow") + } + callContext.stack.push(res) return nil, nil } @@ -725,7 +731,7 @@ func opCallExpert(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) // Pop other call parameters. addr, value, cid, value2, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.Address(addr.Bytes20()) - coinID := common.BigToHash(cid) + coinID := common.BigToHash(cid.ToBig()) // Get the arguments from the memory. args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) @@ -875,8 +881,8 @@ func opSuicide(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([ return nil, nil } -func opEMC(pc *uint64, interpreter *EVMInterpreter, contract *Contract, callContext *callCtx) ([]byte, error) { - return nil, interpreter.evm.StateDB.EnableMultiCoin(contract.Address()) +func opEMC(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { + return nil, interpreter.evm.StateDB.EnableMultiCoin(callContext.contract.Address()) } // following functions are used by the instruction jump table diff --git a/coreth.go b/coreth.go index 1fe2613..4d0c2ee 100644 --- a/coreth.go +++ b/coreth.go @@ -2,7 +2,7 @@ package coreth import ( "crypto/ecdsa" - "fmt" + //"fmt" "io" "os" @@ -17,7 +17,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/trie" + //"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/mattn/go-isatty" ) @@ -52,18 +53,18 @@ func NewETHChain(config *eth.Config, nodecfg *node.Config, etherBase *common.Add if nodecfg == nil { nodecfg = &node.Config{} } - mux := new(event.TypeMux) - ctx, ep, err := node.NewServiceContext(nodecfg, mux) + //mux := new(event.TypeMux) + node, err := node.New(nodecfg) if err != nil { panic(err) } - if ep != "" { - log.Info(fmt.Sprintf("temporary keystore = %s", ep)) - } + //if ep != "" { + // log.Info(fmt.Sprintf("temporary keystore = %s", ep)) + //} cb := new(dummy.ConsensusCallbacks) mcb := new(miner.MinerCallbacks) bcb := new(eth.BackendCallbacks) - backend, _ := eth.New(&ctx, config, cb, mcb, bcb, 
chainDB) + backend, _ := eth.New(node, config, cb, mcb, bcb, chainDB) chain := ÐChain{backend: backend, cb: cb, mcb: mcb, bcb: bcb} if etherBase == nil { etherBase = &BlackholeAddr @@ -85,7 +86,7 @@ func (self *ETHChain) GenBlock() { } func (self *ETHChain) VerifyBlock(block *types.Block) bool { - txnHash := types.DeriveSha(block.Transactions()) + txnHash := types.DeriveSha(block.Transactions(), new(trie.Trie)) uncleHash := types.CalcUncleHash(block.Uncles()) ethHeader := block.Header() if txnHash != ethHeader.TxHash || uncleHash != ethHeader.UncleHash { diff --git a/eth/api.go b/eth/api.go index 92ac928..ada2a97 100644 --- a/eth/api.go +++ b/eth/api.go @@ -371,7 +371,7 @@ func (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, sta var block *types.Block if number == rpc.LatestBlockNumber { block = api.eth.blockchain.CurrentBlock() - } else if blockNr == rpc.AcceptedBlockNumber { + } else if number == rpc.AcceptedBlockNumber { block = api.eth.AcceptedBlock() } else { block = api.eth.blockchain.GetBlockByNumber(uint64(number)) diff --git a/eth/backend.go b/eth/backend.go index 9222181..b1c29a5 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -41,7 +41,6 @@ import ( "github.com/ava-labs/coreth/node" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/bloombits" @@ -50,6 +49,7 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rlp" ) @@ -127,7 +127,7 @@ func New(stack *node.Node, config *Config, var err error if chainDb == nil { // Assemble the Ethereum object - chainDb, err = ctx.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/") + chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/") if err != nil { return nil, err } @@ -223,7 +223,7 @@ func New(stack *node.Node, config *Config, } eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams) - eth.dialCandidates, err = eth.setupDiscovery(&stack.Config().P2P) + //eth.dialCandidates, err = eth.setupDiscovery(&stack.Config().P2P) if err != nil { return nil, err } @@ -233,8 +233,8 @@ func New(stack *node.Node, config *Config, // Register the backend on the node stack.RegisterAPIs(eth.APIs()) - stack.RegisterProtocols(eth.Protocols()) - stack.RegisterLifecycle(eth) + //stack.RegisterProtocols(eth.Protocols()) + //stack.RegisterLifecycle(eth) return eth, nil } diff --git a/eth/protocol.go b/eth/protocol.go index 94260c2..ef5dcde 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -22,6 +22,7 @@ import ( "math/big" "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/forkid" "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 8753c05..9915ad4 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -30,7 +30,6 @@ import ( "github.com/ava-labs/coreth/accounts/scwallet" "github.com/ava-labs/coreth/consensus/ethash" "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" 
"github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index ae4196f..cee6b57 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -22,6 +22,7 @@ import ( "math/big" "github.com/ava-labs/coreth/accounts" + "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" diff --git a/miner/miner.go b/miner/miner.go index 57938e1..e8e59a4 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -30,7 +30,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" ) // Backend wraps all methods required for mining. @@ -60,7 +59,7 @@ type Miner struct { func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(block *types.Block) bool, mcb *MinerCallbacks) *Miner { return &Miner{ - worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, mcb), + worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true, mcb), } } diff --git a/node/api.go b/node/api.go index e74d10b..4589d25 100644 --- a/node/api.go +++ b/node/api.go @@ -21,6 +21,7 @@ import ( "fmt" //"strings" + "github.com/ava-labs/coreth/internal/debug" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" diff --git a/node/node.go b/node/node.go index c6e4181..3ed89ed 100644 --- a/node/node.go +++ b/node/node.go @@ -18,14 +18,13 @@ package node import ( "errors" + "os" "path/filepath" - "reflect" "strings" "sync" "github.com/ava-labs/coreth/accounts" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/internal/debug" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" @@ -48,7 +47,6 @@ type Node struct { state int // Tracks state of node lifecycle lock sync.Mutex - lifecycles []Lifecycle // All registered backends, services, and auxiliary services that have a lifecycle rpcAPIs []rpc.API // List of APIs currently provided by the node inprocHandler *rpc.Server // In-process RPC request handler to process the API requests @@ -61,6 +59,25 @@ const ( closedState ) +func (n *Node) openDataDir() error { + if n.config.DataDir == "" { + return nil // ephemeral + } + + instdir := filepath.Join(n.config.DataDir, n.config.name()) + if err := os.MkdirAll(instdir, 0700); err != nil { + return err + } + // Lock the instance directory to prevent concurrent use by another instance as well as + // accidental use of the instance directory as a database. + release, _, err := fileutil.Flock(filepath.Join(instdir, "LOCK")) + if err != nil { + return convertFileLockError(err) + } + n.dirLock = release + return nil +} + // New creates a new P2P node, ready for protocol registration. func New(conf *Config) (*Node, error) { // Copy config and resolve the datadir so future changes to the current @@ -266,3 +283,14 @@ func (n *Node) closeDatabases() (errors []error) { } return errors } + +// RegisterAPIs registers the APIs a service provides on the node. +func (n *Node) RegisterAPIs(apis []rpc.API) { + n.lock.Lock() + defer n.lock.Unlock() + + if n.state != initializingState { + panic("can't register APIs on running/stopped node") + } + n.rpcAPIs = append(n.rpcAPIs, apis...) 
+} diff --git a/rpc/metrics.go b/rpc/metrics.go new file mode 100644 index 0000000..7fb6fc0 --- /dev/null +++ b/rpc/metrics.go @@ -0,0 +1,39 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rpc + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/metrics" +) + +var ( + rpcRequestGauge = metrics.NewRegisteredGauge("rpc/requests", nil) + successfulRequestGauge = metrics.NewRegisteredGauge("rpc/success", nil) + failedReqeustGauge = metrics.NewRegisteredGauge("rpc/failure", nil) + rpcServingTimer = metrics.NewRegisteredTimer("rpc/duration/all", nil) +) + +func newRPCServingTimer(method string, valid bool) metrics.Timer { + flag := "success" + if !valid { + flag = "failure" + } + m := fmt.Sprintf("rpc/duration/%s/%s", method, flag) + return metrics.GetOrRegisterTimer(m, nil) +} -- cgit v1.2.3-70-g09d2 From ed839907e592ad25e6119e145e7e05ca78b00fcd Mon Sep 17 00:00:00 2001 From: Determinant Date: Wed, 16 Sep 2020 22:54:37 -0400 Subject: ... --- accounts/external/backend.go | 2 +- consensus/dummy/consensus.go | 4 ++-- core/blockchain.go | 17 ++++++++-------- core/state/database.go | 2 +- core/state/dump.go | 43 ++++++++++++++++++++++------------------- core/state/snapshot/account.go | 20 ++++++++++--------- core/state/snapshot/generate.go | 11 ++++++----- core/state/statedb.go | 5 +++-- core/types/block.go | 32 +++++++++++++++++++----------- core/vm/evm.go | 12 +++++++----- eth/api_backend.go | 2 +- eth/backend.go | 2 +- eth/filters/filter.go | 2 +- examples/chain/main.go | 4 +++- examples/counter/counter.sol | 2 +- examples/multicoin/main.go | 4 ++-- internal/ethapi/backend.go | 2 +- plugin/evm/vm.go | 2 +- 18 files changed, 95 insertions(+), 73 deletions(-) (limited to 'consensus') diff --git a/accounts/external/backend.go b/accounts/external/backend.go index f3744cf..e0fc91d 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -23,12 +23,12 @@ import ( "github.com/ava-labs/coreth/accounts" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/signer/core" ) diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go index da63673..42e224d 100644 --- a/consensus/dummy/consensus.go +++ b/consensus/dummy/consensus.go @@ -262,10 +262,10 @@ func (self *DummyEngine) Finalize( func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { - var extdata []byte + var extdata *[]byte 
if self.cb.OnFinalizeAndAssemble != nil { ret, err := self.cb.OnFinalizeAndAssemble(state, txs) - extdata = ret + extdata = &ret if err != nil { return nil, err } diff --git a/core/blockchain.go b/core/blockchain.go index 82e3b6c..b3a7ffa 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2492,12 +2492,13 @@ func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscr } func (bc *BlockChain) ManualHead(hash common.Hash) error { - block := bc.GetBlockByHash(hash) - if block == nil { - return errors.New("block not found") - } - bc.chainmu.Lock() - defer bc.chainmu.Unlock() - bc.writeHeadBlock(block) - return nil + return bc.FastSyncCommitHead(hash) + //block := bc.GetBlockByHash(hash) + //if block == nil { + // return errors.New("block not found") + //} + //bc.chainmu.Lock() + //defer bc.chainmu.Unlock() + //bc.writeHeadBlock(block) + //return nil } diff --git a/core/state/database.go b/core/state/database.go index a9342f5..385c25d 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -21,8 +21,8 @@ import ( "fmt" "github.com/VictoriaMetrics/fastcache" + "github.com/ava-labs/coreth/core/rawdb" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" lru "github.com/hashicorp/golang-lru" diff --git a/core/state/dump.go b/core/state/dump.go index 9bb946d..6f09398 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -37,14 +37,15 @@ type DumpCollector interface { // DumpAccount represents an account in the state. type DumpAccount struct { - Balance string `json:"balance"` - Nonce uint64 `json:"nonce"` - Root string `json:"root"` - CodeHash string `json:"codeHash"` - Code string `json:"code,omitempty"` - Storage map[common.Hash]string `json:"storage,omitempty"` - Address *common.Address `json:"address,omitempty"` // Address only present in iterative (line-by-line) mode - SecureKey hexutil.Bytes `json:"key,omitempty"` // If we don't have address, we can output the key + Balance string `json:"balance"` + Nonce uint64 `json:"nonce"` + Root string `json:"root"` + CodeHash string `json:"codeHash"` + IsMultiCoin bool `json:"isMultiCoin"` + Code string `json:"code,omitempty"` + Storage map[common.Hash]string `json:"storage,omitempty"` + Address *common.Address `json:"address,omitempty"` // Address only present in iterative (line-by-line) mode + SecureKey hexutil.Bytes `json:"key,omitempty"` // If we don't have address, we can output the key } @@ -89,14 +90,15 @@ type iterativeDump struct { // OnAccount implements DumpCollector interface func (d iterativeDump) OnAccount(addr common.Address, account DumpAccount) { dumpAccount := &DumpAccount{ - Balance: account.Balance, - Nonce: account.Nonce, - Root: account.Root, - CodeHash: account.CodeHash, - Code: account.Code, - Storage: account.Storage, - SecureKey: account.SecureKey, - Address: nil, + Balance: account.Balance, + Nonce: account.Nonce, + Root: account.Root, + CodeHash: account.CodeHash, + IsMultiCoin: account.IsMultiCoin, + Code: account.Code, + Storage: account.Storage, + SecureKey: account.SecureKey, + Address: nil, } if addr != (common.Address{}) { dumpAccount.Address = &addr @@ -123,10 +125,11 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage, panic(err) } account := DumpAccount{ - Balance: data.Balance.String(), - Nonce: data.Nonce, - Root: common.Bytes2Hex(data.Root[:]), - CodeHash: common.Bytes2Hex(data.CodeHash), + Balance: data.Balance.String(), + 
Nonce:       data.Nonce,
+			Root:        common.Bytes2Hex(data.Root[:]),
+			CodeHash:    common.Bytes2Hex(data.CodeHash),
+			IsMultiCoin: data.IsMultiCoin,
 		}
 		addrBytes := s.trie.GetKey(it.Key)
 		if addrBytes == nil {
diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go
index b92e942..303c2fc 100644
--- a/core/state/snapshot/account.go
+++ b/core/state/snapshot/account.go
@@ -29,17 +29,19 @@ import (
 // or slim-snapshot format which replaces the empty root and code hash as nil
 // byte slice.
 type Account struct {
-	Nonce    uint64
-	Balance  *big.Int
-	Root     []byte
-	CodeHash []byte
+	Nonce       uint64
+	Balance     *big.Int
+	Root        []byte
+	CodeHash    []byte
+	IsMultiCoin bool
 }
 
 // SlimAccount converts a state.Account content into a slim snapshot account
-func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) Account {
+func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte, isMultiCoin bool) Account {
 	slim := Account{
-		Nonce:   nonce,
-		Balance: balance,
+		Nonce:       nonce,
+		Balance:     balance,
+		IsMultiCoin: isMultiCoin,
 	}
 	if root != emptyRoot {
 		slim.Root = root[:]
@@ -52,8 +54,8 @@ func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []by
 
 // SlimAccountRLP converts a state.Account content into a slim snapshot
 // version RLP encoded.
-func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte {
-	data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash))
+func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte, isMultiCoin bool) []byte {
+	data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash, isMultiCoin))
 	if err != nil {
 		panic(err)
 	}
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index dac782f..27f8dfc 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -161,15 +161,16 @@ func (dl *diskLayer) generate(stats *generatorStats) {
 		accountHash := common.BytesToHash(accIt.Key)
 
 		var acc struct {
-			Nonce    uint64
-			Balance  *big.Int
-			Root     common.Hash
-			CodeHash []byte
+			Nonce       uint64
+			Balance     *big.Int
+			Root        common.Hash
+			CodeHash    []byte
+			IsMultiCoin bool
 		}
 		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
 			log.Crit("Invalid account encountered during snapshot creation", "err", err)
 		}
-		data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
+		data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash, acc.IsMultiCoin)
 
 		// If the account is not yet in-progress, write it out
 		if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 805c607..b472bd7 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -270,11 +270,12 @@ func (self *StateDB) GetBalanceMultiCoin(addr common.Address, coinID common.Hash
 func (self *StateDB) EnableMultiCoin(addr common.Address) error {
 	stateObject := self.GetOrNewStateObject(addr)
 	if stateObject.data.Root != emptyRoot && stateObject.data.Root != zeroRoot {
-		return errors.New("not a fresh account")
+		return fmt.Errorf("not a fresh account: %s", stateObject.data.Root.Hex())
 	}
 	if !stateObject.EnableMultiCoin() {
 		return errors.New("multi-coin mode already enabled")
 	}
+	log.Debug(fmt.Sprintf("enabled MC for %s", addr.Hex()))
 	return nil
 }
 
@@ -527,7 +528,7 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
 	// enough to track account updates at commit time, deletions need tracking
 	// at transaction boundary level to
 	if s.snap != nil {
-		s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+		s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash, obj.data.IsMultiCoin)
 	}
 }
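A note on the hunks above: RLP encodes struct fields positionally, so the new IsMultiCoin flag must be appended consistently in SlimAccount, SlimAccountRLP, and every ad-hoc decode struct such as the one in generate.go. A minimal, self-contained sketch (illustrative only, not part of this patch) of that round-trip:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/rlp"
)

// slimAccount mirrors the five-field layout the patch gives to
// snapshot.Account; the bool is encoded positionally after CodeHash.
type slimAccount struct {
	Nonce       uint64
	Balance     *big.Int
	Root        []byte
	CodeHash    []byte
	IsMultiCoin bool
}

func main() {
	enc, err := rlp.EncodeToBytes(slimAccount{Nonce: 1, Balance: big.NewInt(10), IsMultiCoin: true})
	if err != nil {
		panic(err)
	}
	var dec slimAccount
	// Decoding with a four-field struct here would fail: encoder and
	// decoder must agree on the trailing IsMultiCoin field.
	if err := rlp.DecodeBytes(enc, &dec); err != nil {
		panic(err)
	}
	fmt.Println(dec.IsMultiCoin) // true
}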
diff --git a/core/types/block.go b/core/types/block.go
index 37f3464..99d6cc8 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -166,7 +166,7 @@ type Body struct {
 	Transactions []*Transaction
 	Uncles       []*Header
 	Version      uint32
-	ExtData      []byte `rlp:"nil"`
+	ExtData      *[]byte `rlp:"nil"`
 }
 
 // Block represents an entire block in the Ethereum blockchain.
@@ -175,7 +175,7 @@ type Block struct {
 	uncles       []*Header
 	transactions Transactions
 	version      uint32
-	extdata      []byte
+	extdata      *[]byte
 
 	// caches
 	hash atomic.Value
@@ -216,7 +216,7 @@ type myextblock struct {
 	Txs     []*Transaction
 	Uncles  []*Header
 	Version uint32
-	ExtData []byte `rlp:"nil"`
+	ExtData *[]byte `rlp:"nil"`
 }
 
 // [deprecated by eth/63]
@@ -235,7 +235,7 @@ type storageblock struct {
 // The values of TxHash, UncleHash, ReceiptHash and Bloom in header
 // are ignored and set to values derived from the given txs, uncles
 // and receipts.
-func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher Hasher, extdata []byte) *Block {
+func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher Hasher, extdata *[]byte) *Block {
 	b := &Block{header: CopyHeader(header), td: new(big.Int)}
 
 	// TODO: panic if len(txs) != len(receipts)
@@ -264,8 +264,11 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
 		}
 	}
 
-	b.extdata = make([]byte, len(extdata))
-	copy(b.extdata, extdata)
+	if extdata != nil {
+		data := make([]byte, len(*extdata))
+		b.extdata = &data
+		copy(*b.extdata, *extdata)
+	}
 
 	return b
 }
@@ -321,13 +324,13 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error {
 }
 
 func (b *Block) SetExtraData(data []byte) {
-	b.extdata = data
+	b.extdata = &data
 	b.header.ExtDataHash = rlpHash(data)
 	b.hash = atomic.Value{}
 }
 
 func (b *Block) ExtraData() []byte {
-	return b.extdata
+	return *b.extdata
 }
 
 func (b *Block) SetVersion(ver uint32) {
@@ -460,16 +463,23 @@ func (b *Block) WithSeal(header *Header) *Block {
 }
 
 // WithBody returns a new block with the given transaction and uncle contents.
-func (b *Block) WithBody(transactions []*Transaction, uncles []*Header, version uint32, extdata []byte) *Block {
+func (b *Block) WithBody(transactions []*Transaction, uncles []*Header, version uint32, extdata *[]byte) *Block {
+	var data *[]byte
+	if extdata != nil {
+		_data := make([]byte, len(*extdata))
+		data = &_data
+	}
 	block := &Block{
 		header:       CopyHeader(b.header),
 		transactions: make([]*Transaction, len(transactions)),
 		uncles:       make([]*Header, len(uncles)),
-		extdata:      make([]byte, len(extdata)),
+		extdata:      data,
 		version:      version,
 	}
 	copy(block.transactions, transactions)
-	copy(block.extdata, extdata)
+	if data != nil {
+		copy(*block.extdata, *extdata)
+	}
 	for i := range uncles {
 		block.uncles[i] = CopyHeader(uncles[i])
 	}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 85b7ba7..e895211 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -275,17 +275,19 @@ func (evm *EVM) CallExpert(caller ContractRef, addr common.Address, input []byte
 		return nil, gas, ErrDepth
 	}
 
-	mcerr := evm.Context.CanTransferMC(evm.StateDB, caller.Address(), addr, coinID, value2)
+	// Fail if we're trying to transfer more than the available balance
+	if value.Sign() != 0 && !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
+		return nil, gas, ErrInsufficientBalance
+	}
+
+	var to = AccountRef(addr)
+	mcerr := evm.Context.CanTransferMC(evm.StateDB, caller.Address(), to.Address(), coinID, value2)
 	if mcerr == 1 {
 		return nil, gas, ErrInsufficientBalance
 	} else if mcerr != 0 {
 		return nil, gas, ErrIncompatibleAccount
 	}
 
-	// Fail if we're trying to transfer more than the available balance
-	if value.Sign() != 0 && !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
-		return nil, gas, ErrInsufficientBalance
-	}
 	snapshot := evm.StateDB.Snapshot()
 	p, isPrecompile := evm.precompile(addr)
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 65c3be4..bbc8691 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -24,6 +24,7 @@ import (
 	"github.com/ava-labs/coreth/accounts"
 	"github.com/ava-labs/coreth/consensus"
 	"github.com/ava-labs/coreth/core"
+	"github.com/ava-labs/coreth/core/bloombits"
 	"github.com/ava-labs/coreth/core/rawdb"
 	"github.com/ava-labs/coreth/core/state"
 	"github.com/ava-labs/coreth/core/types"
@@ -33,7 +34,6 @@ import (
 	"github.com/ava-labs/coreth/params"
 	"github.com/ava-labs/coreth/rpc"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/bloombits"
 	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
diff --git a/eth/backend.go b/eth/backend.go
index b1c29a5..728ec4d 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -31,6 +31,7 @@ import (
 	"github.com/ava-labs/coreth/consensus/dummy"
 	"github.com/ava-labs/coreth/consensus/ethash"
 	"github.com/ava-labs/coreth/core"
+	"github.com/ava-labs/coreth/core/bloombits"
 	"github.com/ava-labs/coreth/core/rawdb"
 	"github.com/ava-labs/coreth/core/types"
 	"github.com/ava-labs/coreth/core/vm"
@@ -43,7 +44,6 @@ import (
 	"github.com/ava-labs/coreth/rpc"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
-	"github.com/ethereum/go-ethereum/core/bloombits"
 	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
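The block.go changes above switch the optional ExtData payload from []byte to *[]byte, so "absent" and "empty" become distinguishable under the `rlp:"nil"` tag. A minimal sketch (illustrative, not the coreth code itself) of the copy-on-construct pattern NewBlock and WithBody adopt:

// copyExtData deep-copies an optional payload: nil stays nil, while a
// non-nil pointer is cloned so callers cannot mutate the block's copy later.
func copyExtData(extdata *[]byte) *[]byte {
	if extdata == nil {
		return nil
	}
	data := make([]byte, len(*extdata))
	copy(data, *extdata)
	return &data
}

The first commit allocates before copying via copy(*b.extdata, *extdata), which is equivalent; the follow-up commit further below tightens SetExtraData along the same lines.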
"github.com/ava-labs/coreth/core/bloombits" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" ) diff --git a/examples/chain/main.go b/examples/chain/main.go index 17003b5..94c5d6b 100644 --- a/examples/chain/main.go +++ b/examples/chain/main.go @@ -138,6 +138,8 @@ func run(config *eth.Config, a1, a2, b1, b2 int) { bob := NewTestChain("bob", config, aliceBlk, bobBlk, aliceAck, bobAck) alice.Start() bob.Start() + log.Info("alice genesis", "block", alice.chain.GetGenesisBlock().Hash().Hex()) + log.Info("bob genesis", "block", bob.chain.GetGenesisBlock().Hash().Hex()) alice.GenRandomTree(a1, a2) log.Info("alice finished generating the tree") //time.Sleep(1 * time.Second) @@ -205,7 +207,7 @@ func main() { ByzantiumBlock: big.NewInt(0), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), - IstanbulBlock: nil, + IstanbulBlock: big.NewInt(0), Ethash: nil, } diff --git a/examples/counter/counter.sol b/examples/counter/counter.sol index b21c3bf..952b925 100644 --- a/examples/counter/counter.sol +++ b/examples/counter/counter.sol @@ -1,4 +1,4 @@ -pragma solidity ^0.6.0; +pragma solidity >=0.6.0; contract Counter { uint256 x; diff --git a/examples/multicoin/main.go b/examples/multicoin/main.go index 1f96647..fc379d4 100644 --- a/examples/multicoin/main.go +++ b/examples/multicoin/main.go @@ -47,12 +47,12 @@ func main() { ByzantiumBlock: big.NewInt(0), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), - IstanbulBlock: nil, + IstanbulBlock: big.NewInt(0), Ethash: nil, } // configure the genesis block - genesisJSON := `{"config":{"chainId":1,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"751a0b96e1042bee789452ecb20253fba40dbe85":{"balance":"0x1000000000000000", "mcbalance": {"0x0000000000000000000000000000000000000000000000000000000000000000": 1000000000000000000}}, "0100000000000000000000000000000000000000": {"code": "0x730000000000000000000000000000000000000000301460806040526004361061004b5760003560e01c80631e01043914610050578063abb24ba014610092578063b6510bb3146100a9575b600080fd5b61007c6004803603602081101561006657600080fd5b8101908080359060200190929190505050610118565b6040518082815260200191505060405180910390f35b81801561009e57600080fd5b506100a761013b565b005b8180156100b557600080fd5b50610116600480360360808110156100cc57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190803590602001909291908035906020019092919050505061013e565b005b60003073ffffffffffffffffffffffffffffffffffffffff1682905d9050919050565b5c565b8373ffffffffffffffffffffffffffffffffffffffff1681836108fc8690811502906040516000604051808303818888878c8af69550505050505015801561018a573d6000803e3d6000fd5b505050505056fea2646970667358221220ed2100d6623a884d196eceefabe5e03da4309a2562bb25262f3874f1acb31cd764736f6c634300060a0033", "balance": "0x0", "mcbalance": 
{}}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` + genesisJSON := `{"config":{"chainId":1,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"751a0b96e1042bee789452ecb20253fba40dbe85":{"balance":"0x1000000000000000", "mcbalance": {"0x0000000000000000000000000000000000000000000000000000000000000000": 1000000000000000000}}, "0100000000000000000000000000000000000000": {"code": "0x730000000000000000000000000000000000000000301460806040526004361061004b5760003560e01c80631e01043914610050578063abb24ba014610092578063b6510bb3146100a9575b600080fd5b61007c6004803603602081101561006657600080fd5b8101908080359060200190929190505050610118565b6040518082815260200191505060405180910390f35b81801561009e57600080fd5b506100a761013b565b005b8180156100b557600080fd5b50610116600480360360808110156100cc57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190803590602001909291908035906020019092919050505061013e565b005b60003073ffffffffffffffffffffffffffffffffffffffff168290cd9050919050565bce565b8373ffffffffffffffffffffffffffffffffffffffff1681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801561018a573d6000803e3d6000fd5b505050505056fea26469706673582212205c4d96aa2a6488c426daa9567616a383dd6156eb3fe84f4905239179e553fc0f64736f6c634300060a0033", "balance": "0x0", "mcbalance": {}}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` mcAbiJSON := `[{"inputs":[],"name":"enableMultiCoin","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"coinid","type":"uint256"}],"name":"getBalance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address payable","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"uint256","name":"coinid","type":"uint256"},{"internalType":"uint256","name":"amount2","type":"uint256"}],"name":"transfer","outputs":[],"stateMutability":"nonpayable","type":"function"}]` genesisKey := "0xabd71b35d559563fea757f0f5edbde286fb8c043105b15abb7cd57189306d7d1" diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index cee6b57..f89a3aa 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -24,13 +24,13 @@ import ( "github.com/ava-labs/coreth/accounts" "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/bloombits" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 5c5a5b5..993727e 100644 --- 
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 5c5a5b5..993727e 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -22,9 +22,9 @@ import (
 
 	"github.com/ava-labs/coreth/node"
 	"github.com/ava-labs/coreth/params"
+	"github.com/ava-labs/coreth/rpc"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/ethereum/go-ethereum/rpc"
 
 	ethcrypto "github.com/ethereum/go-ethereum/crypto"
-- 
cgit v1.2.3-70-g09d2
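One hazard worth flagging before the follow-up commit: after the patch above, Block.ExtraData dereferences b.extdata unconditionally, so a block whose payload was never set would panic. The second commit below deep-copies on the way in but keeps the bare dereference; a guard along these lines (illustrative, not present in either commit) would make the accessor total:

// ExtraData with a nil guard: returns nil when no payload was attached
// instead of dereferencing a nil *[]byte.
func (b *Block) ExtraData() []byte {
	if b.extdata == nil {
		return nil
	}
	return *b.extdata
}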
From 3ea6a7940e40677b629270dfc7a1466bca295bd3 Mon Sep 17 00:00:00 2001
From: Determinant
Date: Fri, 18 Sep 2020 13:09:22 -0400
Subject: clean up the code

---
 consensus/dummy/consensus.go |  4 ++--
 core/types/block.go          | 26 +++++++++++++++-----------
 examples/block/main.go       |  2 +-
 examples/counter/main.go     |  2 +-
 examples/payments/main.go    |  2 +-
 plugin/evm/database.go       | 10 ----------
 plugin/evm/vm.go             | 22 ++++++++++------------
 rpc/types.go                 |  2 --
 8 files changed, 30 insertions(+), 40 deletions(-)

(limited to 'consensus')

diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go
index 42e224d..da63673 100644
--- a/consensus/dummy/consensus.go
+++ b/consensus/dummy/consensus.go
@@ -262,10 +262,10 @@ func (self *DummyEngine) Finalize(
 func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header,
 	state *state.StateDB, txs []*types.Transaction,
 	uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
-	var extdata *[]byte
+	var extdata []byte
 	if self.cb.OnFinalizeAndAssemble != nil {
 		ret, err := self.cb.OnFinalizeAndAssemble(state, txs)
-		extdata = &ret
+		extdata = ret
 		if err != nil {
 			return nil, err
 		}
diff --git a/core/types/block.go b/core/types/block.go
index 0a93601..8e23488 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -235,7 +235,7 @@ type storageblock struct {
 // The values of TxHash, UncleHash, ReceiptHash and Bloom in header
 // are ignored and set to values derived from the given txs, uncles
 // and receipts.
-func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher Hasher, extdata *[]byte) *Block {
+func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher Hasher, extdata []byte) *Block {
 	b := &Block{header: CopyHeader(header), td: new(big.Int)}
 
 	// TODO: panic if len(txs) != len(receipts)
@@ -265,9 +265,9 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
 	}
 
 	if extdata != nil {
-		data := make([]byte, len(*extdata))
-		b.extdata = &data
-		copy(*b.extdata, *extdata)
+		_data := make([]byte, len(extdata))
+		b.extdata = &_data
+		copy(*b.extdata, extdata)
 	}
 
 	return b
@@ -324,7 +324,13 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error {
 }
 
 func (b *Block) SetExtraData(data []byte) {
-	b.extdata = &data
+	if data != nil {
+		_data := make([]byte, len(data))
+		b.extdata = &_data
+		copy(*b.extdata, data)
+	} else {
+		b.extdata = nil
+	}
 	b.header.ExtDataHash = rlpHash(data)
 	b.hash = atomic.Value{}
 }
@@ -467,22 +473,20 @@ func (b *Block) WithSeal(header *Header) *Block {
 }
 
 // WithBody returns a new block with the given transaction and uncle contents.
 func (b *Block) WithBody(transactions []*Transaction, uncles []*Header, version uint32, extdata *[]byte) *Block {
-	var data *[]byte
+	var extdataCopied *[]byte
 	if extdata != nil {
 		_data := make([]byte, len(*extdata))
-		data = &_data
+		extdataCopied = &_data
+		copy(*extdataCopied, *extdata)
 	}
 	block := &Block{
 		header:       CopyHeader(b.header),
 		transactions: make([]*Transaction, len(transactions)),
 		uncles:       make([]*Header, len(uncles)),
-		extdata:      data,
+		extdata:      extdataCopied,
 		version:      version,
 	}
 	copy(block.transactions, transactions)
-	if data != nil {
-		copy(*block.extdata, *extdata)
-	}
 	for i := range uncles {
 		block.uncles[i] = CopyHeader(uncles[i])
 	}
diff --git a/examples/block/main.go b/examples/block/main.go
index 89540c6..55fedfe 100644
--- a/examples/block/main.go
+++ b/examples/block/main.go
@@ -37,7 +37,7 @@ func main() {
 		ByzantiumBlock:      big.NewInt(0),
 		ConstantinopleBlock: big.NewInt(0),
 		PetersburgBlock:     big.NewInt(0),
-		IstanbulBlock:       nil,
+		IstanbulBlock:       big.NewInt(0),
 		Ethash:              nil,
 	}
 
diff --git a/examples/counter/main.go b/examples/counter/main.go
index 9e18cd8..f78f28f 100644
--- a/examples/counter/main.go
+++ b/examples/counter/main.go
@@ -45,7 +45,7 @@ func main() {
 		ByzantiumBlock:      big.NewInt(0),
 		ConstantinopleBlock: big.NewInt(0),
 		PetersburgBlock:     big.NewInt(0),
-		IstanbulBlock:       nil,
+		IstanbulBlock:       big.NewInt(0),
 		Ethash:              nil,
 	}
 
diff --git a/examples/payments/main.go b/examples/payments/main.go
index fee2060..9023b3a 100644
--- a/examples/payments/main.go
+++ b/examples/payments/main.go
@@ -36,7 +36,7 @@ func main() {
 		ByzantiumBlock:      big.NewInt(0),
 		ConstantinopleBlock: big.NewInt(0),
 		PetersburgBlock:     big.NewInt(0),
-		IstanbulBlock:       nil,
+		IstanbulBlock:       big.NewInt(0),
 		Ethash:              nil,
 	}
 
diff --git a/plugin/evm/database.go b/plugin/evm/database.go
index 81d1c1d..18890fa 100644
--- a/plugin/evm/database.go
+++ b/plugin/evm/database.go
@@ -46,16 +46,6 @@ func (db Database) Sync() error { return errOpNotSupported }
 // NewBatch implements ethdb.Database
 func (db Database) NewBatch() ethdb.Batch { return Batch{db.Database.NewBatch()} }
 
-//// NewIterator implements ethdb.Database
-//func (db Database) NewIterator() ethdb.Iterator {
-//	return db.Database.NewIterator()
-//}
-
-//// NewIteratorWithPrefix implements ethdb.Database
-//func (db Database) NewIteratorWithPrefix(prefix []byte) ethdb.Iterator {
-//	return db.NewIteratorWithPrefix(prefix)
-//}
-
 // NewIterator implements ethdb.Database
 func (db Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
 	return db.NewIteratorWithStartAndPrefix(start, prefix)
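The vm.go hunks below rename newTxPoolHeadChan to newMinedBlockSub, matching what the field actually holds: an event.TypeMuxSubscription delivering core.NewMinedBlockEvent values rather than tx-pool head events. A minimal sketch of draining such a subscription (assuming coreth's ETHChain.SubscribeNewMinedBlockEvent as used below; drainMinedBlocks and onBlock are hypothetical names):

import (
	"github.com/ava-labs/coreth"
	"github.com/ava-labs/coreth/core"
	"github.com/ava-labs/coreth/core/types"
)

func drainMinedBlocks(chain *coreth.ETHChain, onBlock func(*types.Block)) {
	sub := chain.SubscribeNewMinedBlockEvent()
	for ev := range sub.Chan() {
		// Each event's Data is an interface{}; filter for mined-block events.
		if mined, ok := ev.Data.(core.NewMinedBlockEvent); ok {
			onBlock(mined.Block)
		}
	}
}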
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index c02b835..200a08d 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -154,14 +154,14 @@ func init() {
 type VM struct {
 	ctx *snow.Context
 
-	chainID           *big.Int
-	networkID         uint64
-	genesisHash       common.Hash
-	chain             *coreth.ETHChain
-	chaindb           Database
-	newBlockChan      chan *Block
-	networkChan       chan<- commonEng.Message
-	newTxPoolHeadChan *event.TypeMuxSubscription
+	chainID          *big.Int
+	networkID        uint64
+	genesisHash      common.Hash
+	chain            *coreth.ETHChain
+	chaindb          Database
+	newBlockChan     chan *Block
+	networkChan      chan<- commonEng.Message
+	newMinedBlockSub *event.TypeMuxSubscription
 
 	acceptedDB database.Database
 
@@ -349,15 +349,13 @@ func (vm *VM) Initialize(
 	vm.bdTimerState = bdTimerStateLong
 	vm.bdGenWaitFlag = true
 
-	//vm.newTxPoolHeadChan = make(chan core.NewTxPoolHeadEvent, 1)
 	vm.txPoolStabilizedOk = make(chan struct{}, 1)
 	vm.txPoolStabilizedShutdownChan = make(chan struct{}, 1) // Signal goroutine to shutdown
 	// TODO: read size from options
 	vm.pendingAtomicTxs = make(chan *Tx, 1024)
 	vm.atomicTxSubmitChan = make(chan struct{}, 1)
 	vm.shutdownSubmitChan = make(chan struct{}, 1)
-	//chain.GetTxPool().SubscribeNewHeadEvent(vm.newTxPoolHeadChan)
-	vm.newTxPoolHeadChan = vm.chain.SubscribeNewMinedBlockEvent()
+	vm.newMinedBlockSub = vm.chain.SubscribeNewMinedBlockEvent()
 	vm.shutdownWg.Add(1)
 	go ctx.Log.RecoverAndPanic(vm.awaitTxPoolStabilized)
 	chain.Start()
@@ -733,7 +731,7 @@ func (vm *VM) awaitTxPoolStabilized() {
 	defer vm.shutdownWg.Done()
 	for {
 		select {
-		case e := <-vm.newTxPoolHeadChan.Chan():
+		case e := <-vm.newMinedBlockSub.Chan():
 			switch h := e.Data.(type) {
 			case core.NewMinedBlockEvent:
 				vm.txPoolStabilizedLock.Lock()
diff --git a/rpc/types.go b/rpc/types.go
index 3ee46f3..99e29fc 100644
--- a/rpc/types.go
+++ b/rpc/types.go
@@ -92,7 +92,6 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error {
 		*bn = EarliestBlockNumber
 		return nil
 	case "latest":
-		//*bn = LatestBlockNumber
 		*bn = AcceptedBlockNumber
 		return nil
 	case "pending":
@@ -148,7 +147,6 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error {
 		bnh.BlockNumber = &bn
 		return nil
 	case "latest":
-		//*bn = LatestBlockNumber
 		bn := AcceptedBlockNumber
 		bnh.BlockNumber = &bn
 		return nil
-- 
cgit v1.2.3-70-g09d2
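Finally, on the rpc/types.go hunks just above: coreth repoints the standard "latest" tag at its accepted block rather than the canonical head. A minimal test-style sketch of that mapping (assuming the exported AcceptedBlockNumber used in the diff):

package rpc_test

import (
	"testing"

	"github.com/ava-labs/coreth/rpc"
)

func TestLatestResolvesToAccepted(t *testing.T) {
	var bn rpc.BlockNumber
	// The JSON tag "latest" now decodes to the accepted block number.
	if err := bn.UnmarshalJSON([]byte(`"latest"`)); err != nil {
		t.Fatal(err)
	}
	if bn != rpc.AcceptedBlockNumber {
		t.Fatalf("expected AcceptedBlockNumber, got %d", bn)
	}
}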