Diffstat (limited to 'core')
-rw-r--r--  core/events.go                     7
-rw-r--r--  core/evm.go                       38
-rw-r--r--  core/genesis.go                   78
-rw-r--r--  core/rawdb/accessors_chain.go    238
-rw-r--r--  core/state/journal.go             10
-rw-r--r--  core/state/state_object.go       161
-rw-r--r--  core/state/statedb.go            682
-rw-r--r--  core/state_processor.go           22
8 files changed, 826 insertions(+), 410 deletions(-)
diff --git a/core/events.go b/core/events.go
index f05e69b..28fbc44 100644
--- a/core/events.go
+++ b/core/events.go
@@ -18,7 +18,7 @@ package core
import (
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common"
)
// NewTxsEvent is posted when a batch of transactions enter the transaction pool.
@@ -27,11 +27,6 @@ type NewTxsEvent struct{ Txs []*types.Transaction }
// NewTxPoolHeadEvent is posted when the pool head is updated.
type NewTxPoolHeadEvent struct{ Block *types.Block }
-// PendingLogsEvent is posted pre mining and notifies of pending logs.
-type PendingLogsEvent struct {
- Logs []*types.Log
-}
-
// NewMinedBlockEvent is posted when a block has been imported.
type NewMinedBlockEvent struct{ Block *types.Block }
diff --git a/core/evm.go b/core/evm.go
index 796b312..74891d7 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -22,8 +22,8 @@ import (
"github.com/ava-labs/coreth/consensus"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/core/vm"
- "github.com/ava-labs/go-ethereum/common"
- "github.com/ava-labs/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
)
// ChainContext supports retrieving headers and consensus parameters from the
@@ -63,24 +63,32 @@ func NewEVMContext(msg Message, header *types.Header, chain ChainContext, author
// GetHashFn returns a GetHashFunc which retrieves header hashes by number
func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash {
- var cache map[uint64]common.Hash
+ // Cache will initially contain [refHash.parent],
+ // Then fill up with [refHash.p, refHash.pp, refHash.ppp, ...]
+ var cache []common.Hash
return func(n uint64) common.Hash {
// If there's no hash cache yet, make one
- if cache == nil {
- cache = map[uint64]common.Hash{
- ref.Number.Uint64() - 1: ref.ParentHash,
- }
+ if len(cache) == 0 {
+ cache = append(cache, ref.ParentHash)
}
- // Try to fulfill the request from the cache
- if hash, ok := cache[n]; ok {
- return hash
+ if idx := ref.Number.Uint64() - n - 1; idx < uint64(len(cache)) {
+ return cache[idx]
}
- // Not cached, iterate the blocks and cache the hashes
- for header := chain.GetHeader(ref.ParentHash, ref.Number.Uint64()-1); header != nil; header = chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) {
- cache[header.Number.Uint64()-1] = header.ParentHash
- if n == header.Number.Uint64()-1 {
- return header.ParentHash
+ // No luck in the cache, but we can start iterating from the last element we already know
+ lastKnownHash := cache[len(cache)-1]
+ lastKnownNumber := ref.Number.Uint64() - uint64(len(cache))
+
+ for {
+ header := chain.GetHeader(lastKnownHash, lastKnownNumber)
+ if header == nil {
+ break
+ }
+ cache = append(cache, header.ParentHash)
+ lastKnownHash = header.ParentHash
+ lastKnownNumber = header.Number.Uint64() - 1
+ if n == lastKnownNumber {
+ return lastKnownHash
}
}
return common.Hash{}
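
The per-call map cache in GetHashFn above is replaced by a slice ordered by distance from the reference header, so a cache hit is a single index computation instead of a map lookup. A minimal sketch of that index arithmetic (cacheIndex is an illustrative helper, not part of the change):

package main

import "fmt"

// cacheIndex mirrors the lookup above: for a reference header at height refNum,
// cache[i] holds the hash of block refNum-1-i, so block n lives at index refNum-n-1.
func cacheIndex(refNum, n uint64) uint64 {
    return refNum - n - 1
}

func main() {
    fmt.Println(cacheIndex(100, 99)) // parent of block 100 -> index 0
    fmt.Println(cacheIndex(100, 98)) // grandparent         -> index 1
    fmt.Println(cacheIndex(100, 90)) // ten blocks back     -> index 9
}
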
diff --git a/core/genesis.go b/core/genesis.go
index 7d21d00..e48f411 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -29,13 +29,14 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/go-ethereum/common"
- "github.com/ava-labs/go-ethereum/common/hexutil"
- "github.com/ava-labs/go-ethereum/common/math"
- "github.com/ava-labs/go-ethereum/crypto"
- "github.com/ava-labs/go-ethereum/ethdb"
- "github.com/ava-labs/go-ethereum/log"
- "github.com/ava-labs/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
)
//go:generate gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go
@@ -155,10 +156,6 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlockWithOverride(db, genesis, nil)
-}
-
-func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideIstanbul *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@@ -181,7 +178,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
// We have the genesis block in database(perhaps in ancient database)
// but the corresponding state is missing.
header := rawdb.ReadHeader(db, stored, 0)
- if _, err := state.New(header.Root, state.NewDatabaseWithCache(db, 0)); err != nil {
+ if _, err := state.New(header.Root, state.NewDatabaseWithCache(db, 0, ""), nil); err != nil {
if genesis == nil {
genesis = DefaultGenesisBlock()
}
@@ -207,8 +204,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
// Get the existing chain configuration.
newcfg := configOrDefault(genesis, stored)
- if overrideIstanbul != nil {
- newcfg.IstanbulBlock = overrideIstanbul
+ if err := newcfg.CheckConfigForkOrder(); err != nil {
+ return newcfg, common.Hash{}, err
}
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg == nil {
@@ -243,8 +240,14 @@ func configOrDefault(g *Genesis, ghash common.Hash) *params.ChainConfig {
return g.Config
case ghash == params.MainnetGenesisHash:
return params.MainnetChainConfig
- case ghash == params.TestnetGenesisHash:
- return params.TestnetChainConfig
+ case ghash == params.RopstenGenesisHash:
+ return params.RopstenChainConfig
+ case ghash == params.RinkebyGenesisHash:
+ return params.RinkebyChainConfig
+ case ghash == params.GoerliGenesisHash:
+ return params.GoerliChainConfig
+ case ghash == params.YoloV1GenesisHash:
+ return params.YoloV1ChainConfig
default:
return params.AllEthashProtocolChanges
}
@@ -256,7 +259,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
if db == nil {
db = rawdb.NewMemoryDatabase()
}
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(db), nil)
for addr, account := range g.Alloc {
statedb.AddBalance(addr, account.Balance)
statedb.SetCode(addr, account.Code)
@@ -292,9 +295,9 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
head.Difficulty = params.GenesisDifficulty
}
statedb.Commit(false)
- statedb.Database().TrieDB().Commit(root, true)
+ statedb.Database().TrieDB().Commit(root, true, nil)
- return types.NewBlock(head, nil, nil, nil, nil)
+ return types.NewBlock(head, nil, nil, nil, new(trie.Trie), nil)
}
// Commit writes the block and state of a genesis specification to the database.
@@ -304,6 +307,13 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if block.Number().Sign() != 0 {
return nil, fmt.Errorf("can't commit genesis block with number > 0")
}
+ config := g.Config
+ if config == nil {
+ config = params.AllEthashProtocolChanges
+ }
+ if err := config.CheckConfigForkOrder(); err != nil {
+ return nil, err
+ }
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty)
rawdb.WriteBlock(db, block)
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
@@ -311,11 +321,6 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
rawdb.WriteHeadBlockHash(db, block.Hash())
rawdb.WriteHeadFastBlockHash(db, block.Hash())
rawdb.WriteHeadHeaderHash(db, block.Hash())
-
- config := g.Config
- if config == nil {
- config = params.AllEthashProtocolChanges
- }
rawdb.WriteChainConfig(db, block.Hash(), config)
return block, nil
}
@@ -348,15 +353,15 @@ func DefaultGenesisBlock() *Genesis {
}
}
-// DefaultTestnetGenesisBlock returns the Ropsten network genesis block.
-func DefaultTestnetGenesisBlock() *Genesis {
+// DefaultRopstenGenesisBlock returns the Ropsten network genesis block.
+func DefaultRopstenGenesisBlock() *Genesis {
return &Genesis{
- Config: params.TestnetChainConfig,
+ Config: params.RopstenChainConfig,
Nonce: 66,
ExtraData: hexutil.MustDecode("0x3535353535353535353535353535353535353535353535353535353535353535"),
GasLimit: 16777216,
Difficulty: big.NewInt(1048576),
- Alloc: decodePrealloc(testnetAllocData),
+ Alloc: decodePrealloc(ropstenAllocData),
}
}
@@ -384,8 +389,18 @@ func DefaultGoerliGenesisBlock() *Genesis {
}
}
-// DeveloperGenesisBlock returns the 'geth --dev' genesis block. Note, this must
-// be seeded with the
+func DefaultYoloV1GenesisBlock() *Genesis {
+ return &Genesis{
+ Config: params.YoloV1ChainConfig,
+ Timestamp: 0x5ed754f1,
+ ExtraData: hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000008a37866fd3627c9205a37c8685666f32ec07bb1b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
+ GasLimit: 0x47b760,
+ Difficulty: big.NewInt(1),
+ Alloc: decodePrealloc(yoloV1AllocData),
+ }
+}
+
+// DeveloperGenesisBlock returns the 'geth --dev' genesis block.
func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
// Override the default period to the user requested one
config := *params.AllCliqueProtocolChanges
@@ -395,7 +410,7 @@ func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
return &Genesis{
Config: &config,
ExtraData: append(append(make([]byte, 32), faucet[:]...), make([]byte, crypto.SignatureLength)...),
- GasLimit: 6283185,
+ GasLimit: 11500000,
Difficulty: big.NewInt(1),
Alloc: map[common.Address]GenesisAccount{
common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover
@@ -406,6 +421,7 @@ func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
common.BytesToAddress([]byte{6}): {Balance: big.NewInt(1)}, // ECAdd
common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul
common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing
+ common.BytesToAddress([]byte{9}): {Balance: big.NewInt(1)}, // BLAKE2b
faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))},
},
}
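
With the override variant folded back into SetupGenesisBlock and CheckConfigForkOrder run before anything is written, a mis-ordered fork schedule now surfaces as an error at setup time. A small usage sketch, assuming the coreth import paths shown below and an in-memory database:

package main

import (
    "fmt"

    "github.com/ava-labs/coreth/core"
    "github.com/ava-labs/coreth/core/rawdb"
)

func main() {
    // Commit the default genesis spec to a fresh in-memory database; a config
    // whose fork blocks are out of order would be rejected here instead of
    // being written to the database.
    db := rawdb.NewMemoryDatabase()
    config, hash, err := core.SetupGenesisBlock(db, core.DefaultGenesisBlock())
    if err != nil {
        panic(err)
    }
    fmt.Println("chain:", config.ChainID, "genesis:", hash.Hex())
}
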
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index fdfd6ec..cd48885 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -23,10 +23,11 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/go-ethereum/common"
- "github.com/ava-labs/go-ethereum/ethdb"
- "github.com/ava-labs/go-ethereum/log"
- "github.com/ava-labs/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
)
// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
@@ -68,7 +69,7 @@ func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
prefix := headerKeyPrefix(number)
hashes := make([]common.Hash, 0, 1)
- it := db.NewIteratorWithPrefix(prefix)
+ it := db.NewIterator(prefix, nil)
defer it.Release()
for it.Next() {
@@ -79,6 +80,39 @@ func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
return hashes
}
+// ReadAllCanonicalHashes retrieves all canonical number and hash mappings in the
+// given chain range. If the number of accumulated entries reaches the given
+// threshold, abort the iteration and return the partial result.
+func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
+ // Short circuit if the limit is 0.
+ if limit == 0 {
+ return nil, nil
+ }
+ var (
+ numbers []uint64
+ hashes []common.Hash
+ )
+ // Construct the key prefix of start point.
+ start, end := headerHashKey(from), headerHashKey(to)
+ it := db.NewIterator(nil, start)
+ defer it.Release()
+
+ for it.Next() {
+ if bytes.Compare(it.Key(), end) >= 0 {
+ break
+ }
+ if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
+ numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
+ hashes = append(hashes, common.BytesToHash(it.Value()))
+ // If the accumulated entries reach the limit threshold, return.
+ if len(numbers) >= limit {
+ break
+ }
+ }
+ }
+ return numbers, hashes
+}
+
// ReadHeaderNumber returns the header number assigned to a hash.
func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
data, _ := db.Get(headerNumberKey(hash))
@@ -153,6 +187,32 @@ func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
}
}
+// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
+// is fully synced, the last pivot will always be nil.
+func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(lastPivotKey)
+ if len(data) == 0 {
+ return nil
+ }
+ var pivot uint64
+ if err := rlp.DecodeBytes(data, &pivot); err != nil {
+ log.Error("Invalid pivot block number in database", "err", err)
+ return nil
+ }
+ return &pivot
+}
+
+// WriteLastPivotNumber stores the number of the last pivot block.
+func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
+ enc, err := rlp.EncodeToBytes(pivot)
+ if err != nil {
+ log.Crit("Failed to encode pivot block number", "err", err)
+ }
+ if err := db.Put(lastPivotKey, enc); err != nil {
+ log.Crit("Failed to store pivot block number", "err", err)
+ }
+}
+
// ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow
// reporting correct numbers across restarts.
func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
@@ -171,20 +231,66 @@ func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
}
}
+// ReadTxIndexTail retrieves the number of the oldest indexed block
+// whose transaction indices have been indexed. If the corresponding entry
+// is non-existent in the database, it means the indexing has been finished.
+func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(txIndexTailKey)
+ if len(data) != 8 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteTxIndexTail stores the number of oldest indexed block
+// into database.
+func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
+ log.Crit("Failed to store the transaction index tail", "err", err)
+ }
+}
+
+// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
+func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(fastTxLookupLimitKey)
+ if len(data) != 8 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteFastTxLookupLimit stores the txlookup limit used in fast sync into database.
+func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
+ log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
+ }
+}
+
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ // First try to look up the data in ancient database. Extra hash
+ // comparison is necessary since ancient database only maintains
+ // the canonical data.
data, _ := db.Ancient(freezerHeaderTable, number)
- if len(data) == 0 {
- data, _ = db.Get(headerKey(number, hash))
- // In the background freezer is moving data from leveldb to flatten files.
- // So during the first check for ancient db, the data is not yet in there,
- // but when we reach into leveldb, the data was already moved. That would
- // result in a not found error.
- if len(data) == 0 {
- data, _ = db.Ancient(freezerHeaderTable, number)
- }
+ if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
+ return data
}
- return data
+ // Then try to look up the data in leveldb.
+ data, _ = db.Get(headerKey(number, hash))
+ if len(data) > 0 {
+ return data
+ }
+ // In the background, the freezer is moving data from leveldb to flat files.
+ // So during the first check of the ancient db, the data was not yet there,
+ // but by the time we reached into leveldb, it had already been moved over.
+ // That would result in a not-found error.
+ data, _ = db.Ancient(freezerHeaderTable, number)
+ if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
+ return data
+ }
+ return nil // Can't find the data anywhere.
}
// HasHeader verifies the existence of a block header corresponding to the hash.
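
ReadHeaderRLP above, and the body, total-difficulty and receipt readers further down, now share the same three-step lookup. A sketch of that pattern with stand-in callbacks (readAncient/readKV are illustrative, not the real ethdb API):

package main

import "fmt"

// readWithFreezerFallback sketches the lookup order used above: freezer first
// (with a canonical-hash check, since the freezer only stores canonical data),
// then the key-value store, then the freezer once more in case the background
// freezer migrated the entry between the first two reads.
func readWithFreezerFallback(readAncient func() ([]byte, bool), readKV func() []byte) []byte {
    if data, canonical := readAncient(); len(data) > 0 && canonical {
        return data
    }
    if data := readKV(); len(data) > 0 {
        return data
    }
    if data, canonical := readAncient(); len(data) > 0 && canonical {
        return data
    }
    return nil // can't find the data anywhere
}

func main() {
    // Entry still lives in the key-value store, nothing frozen yet.
    data := readWithFreezerFallback(
        func() ([]byte, bool) { return nil, false },
        func() []byte { return []byte("header rlp") },
    )
    fmt.Println(string(data))
}
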
@@ -251,9 +357,43 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ // First try to look up the data in ancient database. Extra hash
+ // comparison is necessary since ancient database only maintains
+ // the canonical data.
+ data, _ := db.Ancient(freezerBodiesTable, number)
+ if len(data) > 0 {
+ h, _ := db.Ancient(freezerHashTable, number)
+ if common.BytesToHash(h) == hash {
+ return data
+ }
+ }
+ // Then try to look up the data in leveldb.
+ data, _ = db.Get(blockBodyKey(number, hash))
+ if len(data) > 0 {
+ return data
+ }
+ // In the background, the freezer is moving data from leveldb to flat files.
+ // So during the first check of the ancient db, the data was not yet there,
+ // but by the time we reached into leveldb, it had already been moved over.
+ // That would result in a not-found error.
+ data, _ = db.Ancient(freezerBodiesTable, number)
+ if len(data) > 0 {
+ h, _ := db.Ancient(freezerHashTable, number)
+ if common.BytesToHash(h) == hash {
+ return data
+ }
+ }
+ return nil // Can't find the data anywhere.
+}
+
+// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
+// block at number, in RLP encoding.
+func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
+ // If it's an ancient one, we don't need the canonical hash
data, _ := db.Ancient(freezerBodiesTable, number)
if len(data) == 0 {
- data, _ = db.Get(blockBodyKey(number, hash))
+ // Need to get the hash
+ data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
@@ -315,18 +455,33 @@ func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ // First try to look up the data in ancient database. Extra hash
+ // comparison is necessary since ancient database only maintains
+ // the canonical data.
data, _ := db.Ancient(freezerDifficultyTable, number)
- if len(data) == 0 {
- data, _ = db.Get(headerTDKey(number, hash))
- // In the background freezer is moving data from leveldb to flatten files.
- // So during the first check for ancient db, the data is not yet in there,
- // but when we reach into leveldb, the data was already moved. That would
- // result in a not found error.
- if len(data) == 0 {
- data, _ = db.Ancient(freezerDifficultyTable, number)
+ if len(data) > 0 {
+ h, _ := db.Ancient(freezerHashTable, number)
+ if common.BytesToHash(h) == hash {
+ return data
}
}
- return data
+ // Then try to look up the data in leveldb.
+ data, _ = db.Get(headerTDKey(number, hash))
+ if len(data) > 0 {
+ return data
+ }
+ // In the background, the freezer is moving data from leveldb to flat files.
+ // So during the first check of the ancient db, the data was not yet there,
+ // but by the time we reached into leveldb, it had already been moved over.
+ // That would result in a not-found error.
+ data, _ = db.Ancient(freezerDifficultyTable, number)
+ if len(data) > 0 {
+ h, _ := db.Ancient(freezerHashTable, number)
+ if common.BytesToHash(h) == hash {
+ return data
+ }
+ }
+ return nil // Can't find the data anywhere.
}
// ReadTd retrieves a block's total difficulty corresponding to the hash.
@@ -375,18 +530,33 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ // First try to look up the data in ancient database. Extra hash
+ // comparison is necessary since ancient database only maintains
+ // the canonical data.
data, _ := db.Ancient(freezerReceiptTable, number)
- if len(data) == 0 {
- data, _ = db.Get(blockReceiptsKey(number, hash))
- // In the background freezer is moving data from leveldb to flatten files.
- // So during the first check for ancient db, the data is not yet in there,
- // but when we reach into leveldb, the data was already moved. That would
- // result in a not found error.
- if len(data) == 0 {
- data, _ = db.Ancient(freezerReceiptTable, number)
+ if len(data) > 0 {
+ h, _ := db.Ancient(freezerHashTable, number)
+ if common.BytesToHash(h) == hash {
+ return data
}
}
- return data
+ // Then try to look up the data in leveldb.
+ data, _ = db.Get(blockReceiptsKey(number, hash))
+ if len(data) > 0 {
+ return data
+ }
+ // In the background, the freezer is moving data from leveldb to flat files.
+ // So during the first check of the ancient db, the data was not yet there,
+ // but by the time we reached into leveldb, it had already been moved over.
+ // That would result in a not-found error.
+ data, _ = db.Ancient(freezerReceiptTable, number)
+ if len(data) > 0 {
+ h, _ := db.Ancient(freezerHashTable, number)
+ if common.BytesToHash(h) == hash {
+ return data
+ }
+ }
+ return nil // Can't find the data anywhere.
}
// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
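
A short usage sketch for the new ReadAllCanonicalHashes accessor above, assuming the coreth packages shown and that committing the genesis block writes the canonical hash for block 0:

package main

import (
    "fmt"

    "github.com/ava-labs/coreth/core"
    "github.com/ava-labs/coreth/core/rawdb"
)

func main() {
    db := rawdb.NewMemoryDatabase()
    if _, err := core.DefaultGenesisBlock().Commit(db); err != nil {
        panic(err)
    }
    // List canonical number->hash mappings in [0, 10), capped at 16 entries.
    numbers, hashes := rawdb.ReadAllCanonicalHashes(db, 0, 10, 16)
    for i, n := range numbers {
        fmt.Println(n, hashes[i].Hex())
    }
}
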
diff --git a/core/state/journal.go b/core/state/journal.go
index 6e85173..cfa1a4a 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -90,7 +90,8 @@ type (
account *common.Address
}
resetObjectChange struct {
- prev *stateObject
+ prev *stateObject
+ prevdestruct bool
}
suicideChange struct {
account *common.Address
@@ -130,9 +131,7 @@ type (
hash common.Hash
}
touchChange struct {
- account *common.Address
- prev bool
- prevDirty bool
+ account *common.Address
}
)
@@ -147,6 +146,9 @@ func (ch createObjectChange) dirtied() *common.Address {
func (ch resetObjectChange) revert(s *StateDB) {
s.setStateObject(ch.prev)
+ if !ch.prevdestruct && s.snap != nil {
+ delete(s.snapDestructs, ch.prev.addrHash)
+ }
}
func (ch resetObjectChange) dirtied() *common.Address {
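
These journal entries are what Snapshot/RevertToSnapshot unwind; resetObjectChange.revert above now also clears the snapDestructs marker it may have set. A minimal sketch of the revert flow through the public StateDB API, assuming the coreth state package and no snapshot tree (nil), so the snapDestructs path stays inactive:

package main

import (
    "fmt"
    "math/big"

    "github.com/ava-labs/coreth/core/rawdb"
    "github.com/ava-labs/coreth/core/state"
    "github.com/ethereum/go-ethereum/common"
)

func main() {
    statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)

    addr := common.HexToAddress("0x01")
    statedb.AddBalance(addr, big.NewInt(10))

    id := statedb.Snapshot() // remember the current journal length
    statedb.AddBalance(addr, big.NewInt(5))
    fmt.Println(statedb.GetBalance(addr)) // 15

    statedb.RevertToSnapshot(id) // replay journal entries backwards
    fmt.Println(statedb.GetBalance(addr)) // 10
}
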
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 9c47dc4..2893f80 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -23,10 +23,10 @@ import (
"math/big"
"time"
- "github.com/ava-labs/go-ethereum/common"
- "github.com/ava-labs/go-ethereum/crypto"
- "github.com/ava-labs/go-ethereum/metrics"
- "github.com/ava-labs/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/rlp"
)
var emptyCodeHash = crypto.Keccak256(nil)
@@ -79,9 +79,10 @@ type stateObject struct {
trie Trie // storage trie, which becomes non-nil on first access
code Code // contract bytecode, which gets set when code is loaded
- originStorage Storage // Storage cache of original entries to dedup rewrites
- dirtyStorage Storage // Storage entries that need to be flushed to disk
- fakeStorage Storage // Fake storage which constructed by caller for debugging purpose.
+ originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction
+ pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block
+ dirtyStorage Storage // Storage entries that have been modified in the current transaction execution
+ fakeStorage Storage // Fake storage which is constructed by the caller for debugging purposes.
// Cache flags.
// When an object is marked suicided it will be delete from the trie
@@ -114,13 +115,17 @@ func newObject(db *StateDB, address common.Address, data Account) *stateObject {
if data.CodeHash == nil {
data.CodeHash = emptyCodeHash
}
+ if data.Root == (common.Hash{}) {
+ data.Root = emptyRoot
+ }
return &stateObject{
- db: db,
- address: address,
- addrHash: crypto.Keccak256Hash(address[:]),
- data: data,
- originStorage: make(Storage),
- dirtyStorage: make(Storage),
+ db: db,
+ address: address,
+ addrHash: crypto.Keccak256Hash(address[:]),
+ data: data,
+ originStorage: make(Storage),
+ pendingStorage: make(Storage),
+ dirtyStorage: make(Storage),
}
}
@@ -184,21 +189,44 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
if s.fakeStorage != nil {
return s.fakeStorage[key]
}
- // If we have the original value cached, return that
- value, cached := s.originStorage[key]
- if cached {
+ // If we have a pending write or clean cached, return that
+ if value, pending := s.pendingStorage[key]; pending {
return value
}
- // Track the amount of time wasted on reading the storage trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.db.StorageReads += time.Since(start) }(time.Now())
+ if value, cached := s.originStorage[key]; cached {
+ return value
}
- // Otherwise load the value from the database
- enc, err := s.getTrie(db).TryGet(key[:])
- if err != nil {
- s.setError(err)
- return common.Hash{}
+ // If no live objects are available, attempt to use snapshots
+ var (
+ enc []byte
+ err error
+ )
+ if s.db.snap != nil {
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.db.SnapshotStorageReads += time.Since(start) }(time.Now())
+ }
+ // If the object was destructed in *this* block (and potentially resurrected),
+ // the storage has been cleared out, and we should *not* consult the previous
+ // snapshot about any storage values. The only possible alternatives are:
+ // 1) resurrect happened, and new slot values were set -- those should
+ // have been handled via pendingStorage above.
+ // 2) we don't have new values, and can deliver empty response back
+ if _, destructed := s.db.snapDestructs[s.addrHash]; destructed {
+ return common.Hash{}
+ }
+ enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
+ }
+ // If snapshot unavailable or reading from it failed, load from the database
+ if s.db.snap == nil || err != nil {
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.db.StorageReads += time.Since(start) }(time.Now())
+ }
+ if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
+ s.setError(err)
+ return common.Hash{}
+ }
}
+ var value common.Hash
if len(enc) > 0 {
_, content, _, err := rlp.Split(enc)
if err != nil {
@@ -253,38 +281,73 @@ func (s *stateObject) setState(key, value common.Hash) {
s.dirtyStorage[key] = value
}
+// finalise moves all dirty storage slots into the pending area to be hashed or
+// committed later. It is invoked at the end of every transaction.
+func (s *stateObject) finalise() {
+ for key, value := range s.dirtyStorage {
+ s.pendingStorage[key] = value
+ }
+ if len(s.dirtyStorage) > 0 {
+ s.dirtyStorage = make(Storage)
+ }
+}
+
// updateTrie writes cached storage modifications into the object's storage trie.
+// It will return nil if the trie has not been loaded and no changes have been made
func (s *stateObject) updateTrie(db Database) Trie {
+ // Make sure all dirty slots are finalized into the pending storage area
+ s.finalise()
+ if len(s.pendingStorage) == 0 {
+ return s.trie
+ }
// Track the amount of time wasted on updating the storge trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
}
- // Update all the dirty slots in the trie
+ // Retrieve the snapshot storage map for the object
+ var storage map[common.Hash][]byte
+ if s.db.snap != nil {
+ // Retrieve the old storage map, if available, create a new one otherwise
+ storage = s.db.snapStorage[s.addrHash]
+ if storage == nil {
+ storage = make(map[common.Hash][]byte)
+ s.db.snapStorage[s.addrHash] = storage
+ }
+ }
+ // Insert all the pending updates into the trie
tr := s.getTrie(db)
- for key, value := range s.dirtyStorage {
- delete(s.dirtyStorage, key)
-
+ for key, value := range s.pendingStorage {
// Skip noop changes, persist actual changes
if value == s.originStorage[key] {
continue
}
s.originStorage[key] = value
+ var v []byte
if (value == common.Hash{}) {
s.setError(tr.TryDelete(key[:]))
- continue
+ } else {
+ // Encoding []byte cannot fail, ok to ignore the error.
+ v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
+ s.setError(tr.TryUpdate(key[:], v))
+ }
+ // If state snapshotting is active, cache the data til commit
+ if storage != nil {
+ storage[crypto.Keccak256Hash(key[:])] = v // v will be nil if value is 0x00
}
- // Encoding []byte cannot fail, ok to ignore the error.
- v, _ := rlp.EncodeToBytes(bytes.TrimLeft(value[:], "\x00"))
- s.setError(tr.TryUpdate(key[:], v))
+ }
+ if len(s.pendingStorage) > 0 {
+ s.pendingStorage = make(Storage)
}
return tr
}
// UpdateRoot sets the trie root to the current root hash of
func (s *stateObject) updateRoot(db Database) {
- s.updateTrie(db)
-
+ // If nothing changed, don't bother with hashing anything
+ if s.updateTrie(db) == nil {
+ return
+ }
// Track the amount of time wasted on hashing the storge trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now())
@@ -295,7 +358,10 @@ func (s *stateObject) updateRoot(db Database) {
// CommitTrie the storage trie of the object to db.
// This updates the trie root.
func (s *stateObject) CommitTrie(db Database) error {
- s.updateTrie(db)
+ // If nothing changed, don't bother with hashing anything
+ if s.updateTrie(db) == nil {
+ return nil
+ }
if s.dbErr != nil {
return s.dbErr
}
@@ -310,22 +376,21 @@ func (s *stateObject) CommitTrie(db Database) error {
return err
}
-// AddBalance removes amount from c's balance.
+// AddBalance adds amount to s's balance.
// It is used to add funds to the destination account of a transfer.
func (s *stateObject) AddBalance(amount *big.Int) {
- // EIP158: We must check emptiness for the objects such that the account
+ // EIP161: We must check emptiness for the objects such that the account
// clearing (0,0,0 objects) can take effect.
if amount.Sign() == 0 {
if s.empty() {
s.touch()
}
-
return
}
s.SetBalance(new(big.Int).Add(s.Balance(), amount))
}
-// SubBalance removes amount from c's balance.
+// SubBalance removes amount from s's balance.
// It is used to remove funds from the origin account of a transfer.
func (s *stateObject) SubBalance(amount *big.Int) {
if amount.Sign() == 0 {
@@ -388,6 +453,7 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
stateObject.code = s.code
stateObject.dirtyStorage = s.dirtyStorage.Copy()
stateObject.originStorage = s.originStorage.Copy()
+ stateObject.pendingStorage = s.pendingStorage.Copy()
stateObject.suicided = s.suicided
stateObject.dirtyCode = s.dirtyCode
stateObject.deleted = s.deleted
@@ -419,6 +485,23 @@ func (s *stateObject) Code(db Database) []byte {
return code
}
+// CodeSize returns the size of the contract code associated with this object,
+// or zero if none. This method is almost a mirror of Code, but uses a cache
+// inside the database to avoid loading codes seen recently.
+func (s *stateObject) CodeSize(db Database) int {
+ if s.code != nil {
+ return len(s.code)
+ }
+ if bytes.Equal(s.CodeHash(), emptyCodeHash) {
+ return 0
+ }
+ size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash()))
+ if err != nil {
+ s.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err))
+ }
+ return size
+}
+
func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
prevcode := s.Code(s.db.db)
s.db.journal.append(codeChange{
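
The storage write path is now dirtyStorage (per transaction) -> pendingStorage (per block, via finalise) -> storage trie (on hashing/commit). A small end-to-end sketch through the public StateDB API, assuming the coreth state package and the usual Finalise/IntermediateRoot entry points; no snapshot tree is wired in:

package main

import (
    "fmt"

    "github.com/ava-labs/coreth/core/rawdb"
    "github.com/ava-labs/coreth/core/state"
    "github.com/ethereum/go-ethereum/common"
)

func main() {
    statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)

    addr := common.HexToAddress("0x01")
    key := common.HexToHash("0x02")
    statedb.SetNonce(addr, 1) // keep the account non-empty so it survives empty-object deletion
    statedb.SetState(addr, key, common.HexToHash("0x03")) // lands in dirtyStorage

    statedb.Finalise(true)                 // tx boundary: dirty -> pending
    root := statedb.IntermediateRoot(true) // block boundary: pending -> storage trie

    fmt.Println("value:", statedb.GetState(addr, key).Hex())
    fmt.Println("root: ", root.Hex())
}
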
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 9c7535b..805c607 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -24,13 +24,15 @@ import (
"sort"
"time"
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/go-ethereum/common"
- "github.com/ava-labs/go-ethereum/crypto"
- "github.com/ava-labs/go-ethereum/log"
- "github.com/ava-labs/go-ethereum/metrics"
- "github.com/ava-labs/go-ethereum/rlp"
- "github.com/ava-labs/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
)
type revision struct {
@@ -42,9 +44,6 @@ var (
// emptyRoot is the known root hash of an empty trie.
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
zeroRoot = common.HexToHash("0000000000000000000000000000000000000000000000000000000000000000")
-
- // emptyCode is the known hash of the empty EVM bytecode.
- emptyCode = crypto.Keccak256Hash(nil)
)
type proofList [][]byte
@@ -58,7 +57,7 @@ func (n *proofList) Delete(key []byte) error {
panic("not supported")
}
-// StateDBs within the ethereum protocol are used to store anything
+// StateDB structs within the ethereum protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve:
// * Contracts
@@ -67,9 +66,16 @@ type StateDB struct {
db Database
trie Trie
+ snaps *snapshot.Tree
+ snap snapshot.Snapshot
+ snapDestructs map[common.Hash]struct{}
+ snapAccounts map[common.Hash][]byte
+ snapStorage map[common.Hash]map[common.Hash][]byte
+
// This map holds 'live' objects, which will get modified while processing a state transition.
- stateObjects map[common.Address]*stateObject
- stateObjectsDirty map[common.Address]struct{}
+ stateObjects map[common.Address]*stateObject
+ stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
+ stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
// DB error.
// State objects are used by the consensus core and VM which are
@@ -95,134 +101,157 @@ type StateDB struct {
nextRevisionId int
// Measurements gathered during execution for debugging purposes
- AccountReads time.Duration
- AccountHashes time.Duration
- AccountUpdates time.Duration
- AccountCommits time.Duration
- StorageReads time.Duration
- StorageHashes time.Duration
- StorageUpdates time.Duration
- StorageCommits time.Duration
-}
-
-// Create a new state from a given trie.
-func New(root common.Hash, db Database) (*StateDB, error) {
+ AccountReads time.Duration
+ AccountHashes time.Duration
+ AccountUpdates time.Duration
+ AccountCommits time.Duration
+ StorageReads time.Duration
+ StorageHashes time.Duration
+ StorageUpdates time.Duration
+ StorageCommits time.Duration
+ SnapshotAccountReads time.Duration
+ SnapshotStorageReads time.Duration
+ SnapshotCommits time.Duration
+}
+
+// New creates a new state from a given trie.
+func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
tr, err := db.OpenTrie(root)
if err != nil {
return nil, err
}
- return &StateDB{
- db: db,
- trie: tr,
- stateObjects: make(map[common.Address]*stateObject),
- stateObjectsDirty: make(map[common.Address]struct{}),
- logs: make(map[common.Hash][]*types.Log),
- preimages: make(map[common.Hash][]byte),
- journal: newJournal(),
- }, nil
+ sdb := &StateDB{
+ db: db,
+ trie: tr,
+ snaps: snaps,
+ stateObjects: make(map[common.Address]*stateObject),
+ stateObjectsPending: make(map[common.Address]struct{}),
+ stateObjectsDirty: make(map[common.Address]struct{}),
+ logs: make(map[common.Hash][]*types.Log),
+ preimages: make(map[common.Hash][]byte),
+ journal: newJournal(),
+ }
+ if sdb.snaps != nil {
+ if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
+ sdb.snapDestructs = make(map[common.Hash]struct{})
+ sdb.snapAccounts = make(map[common.Hash][]byte)
+ sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
+ }
+ }
+ return sdb, nil
}
// setError remembers the first non-nil error it is called with.
-func (self *StateDB) setError(err error) {
- if self.dbErr == nil {
- self.dbErr = err
+func (s *StateDB) setError(err error) {
+ if s.dbErr == nil {
+ s.dbErr = err
}
}
-func (self *StateDB) Error() error {
- return self.dbErr
+func (s *StateDB) Error() error {
+ return s.dbErr
}
// Reset clears out all ephemeral state objects from the state db, but keeps
// the underlying state trie to avoid reloading data for the next operations.
-func (self *StateDB) Reset(root common.Hash) error {
- tr, err := self.db.OpenTrie(root)
+func (s *StateDB) Reset(root common.Hash) error {
+ tr, err := s.db.OpenTrie(root)
if err != nil {
return err
}
- self.trie = tr
- self.stateObjects = make(map[common.Address]*stateObject)
- self.stateObjectsDirty = make(map[common.Address]struct{})
- self.thash = common.Hash{}
- self.bhash = common.Hash{}
- self.txIndex = 0
- self.logs = make(map[common.Hash][]*types.Log)
- self.logSize = 0
- self.preimages = make(map[common.Hash][]byte)
- self.clearJournalAndRefund()
+ s.trie = tr
+ s.stateObjects = make(map[common.Address]*stateObject)
+ s.stateObjectsPending = make(map[common.Address]struct{})
+ s.stateObjectsDirty = make(map[common.Address]struct{})
+ s.thash = common.Hash{}
+ s.bhash = common.Hash{}
+ s.txIndex = 0
+ s.logs = make(map[common.Hash][]*types.Log)
+ s.logSize = 0
+ s.preimages = make(map[common.Hash][]byte)
+ s.clearJournalAndRefund()
+
+ if s.snaps != nil {
+ s.snapAccounts, s.snapDestructs, s.snapStorage = nil, nil, nil
+ if s.snap = s.snaps.Snapshot(root); s.snap != nil {
+ s.snapDestructs = make(map[common.Hash]struct{})
+ s.snapAccounts = make(map[common.Hash][]byte)
+ s.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
+ }
+ }
return nil
}
-func (self *StateDB) AddLog(log *types.Log) {
- self.journal.append(addLogChange{txhash: self.thash})
+func (s *StateDB) AddLog(log *types.Log) {
+ s.journal.append(addLogChange{txhash: s.thash})
- log.TxHash = self.thash
- log.BlockHash = self.bhash
- log.TxIndex = uint(self.txIndex)
- log.Index = self.logSize
- self.logs[self.thash] = append(self.logs[self.thash], log)
- self.logSize++
+ log.TxHash = s.thash
+ log.BlockHash = s.bhash
+ log.TxIndex = uint(s.txIndex)
+ log.Index = s.logSize
+ s.logs[s.thash] = append(s.logs[s.thash], log)
+ s.logSize++
}
-func (self *StateDB) GetLogs(hash common.Hash) []*types.Log {
- return self.logs[hash]
+func (s *StateDB) GetLogs(hash common.Hash) []*types.Log {
+ return s.logs[hash]
}
-func (self *StateDB) Logs() []*types.Log {
+func (s *StateDB) Logs() []*types.Log {
var logs []*types.Log
- for _, lgs := range self.logs {
+ for _, lgs := range s.logs {
logs = append(logs, lgs...)
}
return logs
}
// AddPreimage records a SHA3 preimage seen by the VM.
-func (self *StateDB) AddPreimage(hash common.Hash, preimage []byte) {
- if _, ok := self.preimages[hash]; !ok {
- self.journal.append(addPreimageChange{hash: hash})
+func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) {
+ if _, ok := s.preimages[hash]; !ok {
+ s.journal.append(addPreimageChange{hash: hash})
pi := make([]byte, len(preimage))
copy(pi, preimage)
- self.preimages[hash] = pi
+ s.preimages[hash] = pi
}
}
// Preimages returns a list of SHA3 preimages that have been submitted.
-func (self *StateDB) Preimages() map[common.Hash][]byte {
- return self.preimages
+func (s *StateDB) Preimages() map[common.Hash][]byte {
+ return s.preimages
}
// AddRefund adds gas to the refund counter
-func (self *StateDB) AddRefund(gas uint64) {
- self.journal.append(refundChange{prev: self.refund})
- self.refund += gas
+func (s *StateDB) AddRefund(gas uint64) {
+ s.journal.append(refundChange{prev: s.refund})
+ s.refund += gas
}
// SubRefund removes gas from the refund counter.
// This method will panic if the refund counter goes below zero
-func (self *StateDB) SubRefund(gas uint64) {
- self.journal.append(refundChange{prev: self.refund})
- if gas > self.refund {
- panic("Refund counter below zero")
+func (s *StateDB) SubRefund(gas uint64) {
+ s.journal.append(refundChange{prev: s.refund})
+ if gas > s.refund {
+ panic(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund))
}
- self.refund -= gas
+ s.refund -= gas
}
// Exist reports whether the given account address exists in the state.
// Notably this also returns true for suicided accounts.
-func (self *StateDB) Exist(addr common.Address) bool {
- return self.getStateObject(addr) != nil
+func (s *StateDB) Exist(addr common.Address) bool {
+ return s.getStateObject(addr) != nil
}
// Empty returns whether the state object is either non-existent
// or empty according to the EIP161 specification (balance = nonce = code = 0)
-func (self *StateDB) Empty(addr common.Address) bool {
- so := self.getStateObject(addr)
+func (s *StateDB) Empty(addr common.Address) bool {
+ so := s.getStateObject(addr)
return so == nil || so.empty()
}
-// Retrieve the balance from the given address or 0 if object not found
-func (self *StateDB) GetBalance(addr common.Address) *big.Int {
- stateObject := self.getStateObject(addr)
+// GetBalance retrieves the balance from the given address or 0 if object not found
+func (s *StateDB) GetBalance(addr common.Address) *big.Int {
+ stateObject := s.getStateObject(addr)
if stateObject != nil {
return stateObject.Balance()
}
@@ -262,8 +291,8 @@ func (self *StateDB) IsMultiCoin(addr common.Address) bool {
return false
}
-func (self *StateDB) GetNonce(addr common.Address) uint64 {
- stateObject := self.getStateObject(addr)
+func (s *StateDB) GetNonce(addr common.Address) uint64 {
+ stateObject := s.getStateObject(addr)
if stateObject != nil {
return stateObject.Nonce()
}
@@ -272,40 +301,33 @@ func (self *StateDB) GetNonce(addr common.Address) uint64 {
}
// TxIndex returns the current transaction index set by Prepare.
-func (self *StateDB) TxIndex() int {
- return self.txIndex
+func (s *StateDB) TxIndex() int {
+ return s.txIndex
}
// BlockHash returns the current block hash set by Prepare.
-func (self *StateDB) BlockHash() common.Hash {
- return self.bhash
+func (s *StateDB) BlockHash() common.Hash {
+ return s.bhash
}
-func (self *StateDB) GetCode(addr common.Address) []byte {
- stateObject := self.getStateObject(addr)
+func (s *StateDB) GetCode(addr common.Address) []byte {
+ stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.Code(self.db)
+ return stateObject.Code(s.db)
}
return nil
}
-func (self *StateDB) GetCodeSize(addr common.Address) int {
- stateObject := self.getStateObject(addr)
- if stateObject == nil {
- return 0
- }
- if stateObject.code != nil {
- return len(stateObject.code)
- }
- size, err := self.db.ContractCodeSize(stateObject.addrHash, common.BytesToHash(stateObject.CodeHash()))
- if err != nil {
- self.setError(err)
+func (s *StateDB) GetCodeSize(addr common.Address) int {
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ return stateObject.CodeSize(s.db)
}
- return size
+ return 0
}
-func (self *StateDB) GetCodeHash(addr common.Address) common.Hash {
- stateObject := self.getStateObject(addr)
+func (s *StateDB) GetCodeHash(addr common.Address) common.Hash {
+ stateObject := s.getStateObject(addr)
if stateObject == nil {
return common.Hash{}
}
@@ -313,25 +335,25 @@ func (self *StateDB) GetCodeHash(addr common.Address) common.Hash {
}
// GetState retrieves a value from the given account's storage trie.
-func (self *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
- stateObject := self.getStateObject(addr)
+func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
+ stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.GetState(self.db, hash)
+ return stateObject.GetState(s.db, hash)
}
return common.Hash{}
}
// GetProof returns the MerkleProof for a given Account
-func (self *StateDB) GetProof(a common.Address) ([][]byte, error) {
+func (s *StateDB) GetProof(a common.Address) ([][]byte, error) {
var proof proofList
- err := self.trie.Prove(crypto.Keccak256(a.Bytes()), 0, &proof)
+ err := s.trie.Prove(crypto.Keccak256(a.Bytes()), 0, &proof)
return [][]byte(proof), err
}
-// GetProof returns the StorageProof for given key
-func (self *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
+// GetStorageProof returns the StorageProof for given key
+func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
var proof proofList
- trie := self.StorageTrie(a)
+ trie := s.StorageTrie(a)
if trie == nil {
return proof, errors.New("storage trie for requested address does not exist")
}
@@ -340,32 +362,33 @@ func (self *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byt
}
// GetCommittedState retrieves a value from the given account's committed storage trie.
-func (self *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
- stateObject := self.getStateObject(addr)
+func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
+ stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.GetCommittedState(self.db, hash)
+ return stateObject.GetCommittedState(s.db, hash)
}
return common.Hash{}
}
// Database retrieves the low level database supporting the lower level trie ops.
-func (self *StateDB) Database() Database {
- return self.db
+func (s *StateDB) Database() Database {
+ return s.db
}
// StorageTrie returns the storage trie of an account.
// The return value is a copy and is nil for non-existent accounts.
-func (self *StateDB) StorageTrie(addr common.Address) Trie {
- stateObject := self.getStateObject(addr)
+func (s *StateDB) StorageTrie(addr common.Address) Trie {
+ stateObject := s.getStateObject(addr)
if stateObject == nil {
return nil
}
- cpy := stateObject.deepCopy(self)
- return cpy.updateTrie(self.db)
+ cpy := stateObject.deepCopy(s)
+ cpy.updateTrie(s.db)
+ return cpy.getTrie(s.db)
}
-func (self *StateDB) HasSuicided(addr common.Address) bool {
- stateObject := self.getStateObject(addr)
+func (s *StateDB) HasSuicided(addr common.Address) bool {
+ stateObject := s.getStateObject(addr)
if stateObject != nil {
return stateObject.suicided
}
@@ -377,81 +400,81 @@ func (self *StateDB) HasSuicided(addr common.Address) bool {
*/
// AddBalance adds amount to the account associated with addr.
-func (self *StateDB) AddBalance(addr common.Address, amount *big.Int) {
- stateObject := self.GetOrNewStateObject(addr)
+func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) {
+ stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.AddBalance(amount)
}
}
// SubBalance subtracts amount from the account associated with addr.
-func (self *StateDB) SubBalance(addr common.Address, amount *big.Int) {
- stateObject := self.GetOrNewStateObject(addr)
+func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) {
+ stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.SubBalance(amount)
}
}
-func (self *StateDB) SetBalance(addr common.Address, amount *big.Int) {
- stateObject := self.GetOrNewStateObject(addr)
+func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) {
+ stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetBalance(amount)
}
}
// AddBalance adds amount to the account associated with addr.
-func (self *StateDB) AddBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) {
- stateObject := self.GetOrNewStateObject(addr)
+func (s *StateDB) AddBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) {
+ stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
- stateObject.AddBalanceMultiCoin(coinID, amount, self.db)
+ stateObject.AddBalanceMultiCoin(coinID, amount, s.db)
}
}
// SubBalance subtracts amount from the account associated with addr.
-func (self *StateDB) SubBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) {
- stateObject := self.GetOrNewStateObject(addr)
+func (s *StateDB) SubBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) {
+ stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
- stateObject.SubBalanceMultiCoin(coinID, amount, self.db)
+ stateObject.SubBalanceMultiCoin(coinID, amount, s.db)
}
}
-func (self *StateDB) SetBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) {
- stateObject := self.GetOrNewStateObject(addr)
+func (s *StateDB) SetBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) {
+ stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
- stateObject.SetBalanceMultiCoin(coinID, amount, self.db)
+ stateObject.SetBalanceMultiCoin(coinID, amount, s.db)
}
}
-func (self *StateDB) SetNonce(addr common.Address, nonce uint64) {
- stateObject := self.GetOrNewStateObject(addr)
+func (s *StateDB) SetNonce(addr common.Address, nonce uint64) {
+ stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetNonce(nonce)
}
}
-func (self *StateDB) SetCode(addr common.Address, code []byte) {
- stateObject := self.GetOrNewStateObject(addr)
+func (s *StateDB) SetCode(addr common.Address, code []byte) {
+ stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetCode(crypto.Keccak256Hash(code), code)
}
}
-func (self *StateDB) SetState(addr common.Address, key, value common.Hash) (res error) {
+func (s *StateDB) SetState(addr common.Address, key, value common.Hash) (res error) {
res = nil
- stateObject := self.GetOrNewStateObject(addr)
+ stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
if stateObject.data.IsMultiCoin {
NormalizeStateKey(&key)
}
- stateObject.SetState(self.db, key, value)
+ stateObject.SetState(s.db, key, value)
}
return
}
// SetStorage replaces the entire storage for the specified account with given
// storage. This function should only be used for debugging.
-func (self *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
- stateObject := self.GetOrNewStateObject(addr)
+func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
+ stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetStorage(storage)
}
@@ -462,12 +485,12 @@ func (self *StateDB) SetStorage(addr common.Address, storage map[common.Hash]com
//
// The account's state object is still available until the state is committed,
// getStateObject will return a non-nil account after Suicide.
-func (self *StateDB) Suicide(addr common.Address) bool {
- stateObject := self.getStateObject(addr)
+func (s *StateDB) Suicide(addr common.Address) bool {
+ stateObject := s.getStateObject(addr)
if stateObject == nil {
return false
}
- self.journal.append(suicideChange{
+ s.journal.append(suicideChange{
account: &addr,
prev: stateObject.suicided,
prevbalance: new(big.Int).Set(stateObject.Balance()),
@@ -483,90 +506,153 @@ func (self *StateDB) Suicide(addr common.Address) bool {
//
// updateStateObject writes the given object to the trie.
-func (s *StateDB) updateStateObject(stateObject *stateObject) {
+func (s *StateDB) updateStateObject(obj *stateObject) {
// Track the amount of time wasted on updating the account from the trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
}
// Encode the account and update the account trie
- addr := stateObject.Address()
+ addr := obj.Address()
- data, err := rlp.EncodeToBytes(stateObject)
+ data, err := rlp.EncodeToBytes(obj)
if err != nil {
panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
}
- s.setError(s.trie.TryUpdate(addr[:], data))
+ if err = s.trie.TryUpdate(addr[:], data); err != nil {
+ s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
+ }
+
+ // If state snapshotting is active, cache the data til commit. Note, this
+ // update mechanism is not symmetric to the deletion, because whereas it is
+ // enough to track account updates at commit time, deletions need tracking
+ // at transaction boundary level to ensure we capture state clearing.
+ if s.snap != nil {
+ s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+ }
}
// deleteStateObject removes the given object from the state trie.
-func (s *StateDB) deleteStateObject(stateObject *stateObject) {
+func (s *StateDB) deleteStateObject(obj *stateObject) {
// Track the amount of time wasted on deleting the account from the trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
}
// Delete the account from the trie
- stateObject.deleted = true
+ addr := obj.Address()
+ if err := s.trie.TryDelete(addr[:]); err != nil {
+ s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
+ }
+}
- addr := stateObject.Address()
- s.setError(s.trie.TryDelete(addr[:]))
+// getStateObject retrieves a state object given by the address, returning nil if
+// the object is not found or was deleted in this execution context. If you need
+// to differentiate between non-existent/just-deleted, use getDeletedStateObject.
+func (s *StateDB) getStateObject(addr common.Address) *stateObject {
+ if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
+ return obj
+ }
+ return nil
}
-// Retrieve a state object given by the address. Returns nil if not found.
-func (s *StateDB) getStateObject(addr common.Address) (stateObject *stateObject) {
- // Prefer live objects
+// getDeletedStateObject is similar to getStateObject, but instead of returning
+// nil for a deleted state object, it returns the actual object with the deleted
+// flag set. This is needed by the state journal to revert to the correct self-
+// destructed object instead of wiping all knowledge about the state object.
+func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
+ // Prefer live objects if any is available
if obj := s.stateObjects[addr]; obj != nil {
- if obj.deleted {
- return nil
- }
return obj
}
- // Track the amount of time wasted on loading the object from the database
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
- }
- // Load the object from the database
- enc, err := s.trie.TryGet(addr[:])
- if len(enc) == 0 {
- s.setError(err)
- return nil
+ // If no live objects are available, attempt to use snapshots
+ var (
+ data *Account
+ err error
+ )
+ if s.snap != nil {
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
+ }
+ var acc *snapshot.Account
+ if acc, err = s.snap.Account(crypto.Keccak256Hash(addr.Bytes())); err == nil {
+ if acc == nil {
+ return nil
+ }
+ data = &Account{
+ Nonce: acc.Nonce,
+ Balance: acc.Balance,
+ CodeHash: acc.CodeHash,
+ Root: common.BytesToHash(acc.Root),
+ }
+ if len(data.CodeHash) == 0 {
+ data.CodeHash = emptyCodeHash
+ }
+ if data.Root == (common.Hash{}) {
+ data.Root = emptyRoot
+ }
+ }
}
- var data Account
- if err := rlp.DecodeBytes(enc, &data); err != nil {
- log.Error("Failed to decode state object", "addr", addr, "err", err)
- return nil
+ // If snapshot unavailable or reading from it failed, load from the database
+ if s.snap == nil || err != nil {
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
+ }
+ enc, err := s.trie.TryGet(addr.Bytes())
+ if err != nil {
+ s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
+ return nil
+ }
+ if len(enc) == 0 {
+ return nil
+ }
+ data = new(Account)
+ if err := rlp.DecodeBytes(enc, data); err != nil {
+ log.Error("Failed to decode state object", "addr", addr, "err", err)
+ return nil
+ }
}
// Insert into the live set
- obj := newObject(s, addr, data)
+ obj := newObject(s, addr, *data)
s.setStateObject(obj)
return obj
}
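The practical effect of the split is visible through the exported accessors: once an account is self-destructed and the dirty set is finalised, getStateObject (and Exist, which builds on it) reports the account as gone, while internal callers such as the journal still reach the flagged object via getDeletedStateObject. A minimal hedged sketch, assuming the usual exported StateDB accessors (Exist, Suicide, AddBalance, Finalise) and the coreth import paths below:

import (
	"fmt"
	"math/big"

	"github.com/ava-labs/coreth/core/state"
	"github.com/ethereum/go-ethereum/common"
)

func demoDeletedLookup(statedb *state.StateDB) {
	addr := common.HexToAddress("0x01")
	statedb.AddBalance(addr, big.NewInt(1)) // account now exists and is journalled dirty
	statedb.Suicide(addr)                   // mark it self-destructed
	statedb.Finalise(true)                  // flags the object deleted but keeps it cached

	// getStateObject filters deleted objects, so the address reads as absent here,
	// while getDeletedStateObject would still return the flagged object internally.
	fmt.Println(statedb.Exist(addr)) // false
}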
-func (self *StateDB) setStateObject(object *stateObject) {
- self.stateObjects[object.Address()] = object
+func (s *StateDB) setStateObject(object *stateObject) {
+ s.stateObjects[object.Address()] = object
}
-// Retrieve a state object or create a new state object if nil.
-func (self *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
- stateObject := self.getStateObject(addr)
- if stateObject == nil || stateObject.deleted {
- stateObject, _ = self.createObject(addr)
+// GetOrNewStateObject retrieves a state object or creates a new state object if nil.
+func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
+ stateObject := s.getStateObject(addr)
+ if stateObject == nil {
+ stateObject, _ = s.createObject(addr)
}
return stateObject
}
// createObject creates a new state object. If there is an existing account with
// the given address, it is overwritten and returned as the second return value.
-func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
- prev = self.getStateObject(addr)
- newobj = newObject(self, addr, Account{})
+func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
+ prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
+
+ var prevdestruct bool
+ if s.snap != nil && prev != nil {
+ _, prevdestruct = s.snapDestructs[prev.addrHash]
+ if !prevdestruct {
+ s.snapDestructs[prev.addrHash] = struct{}{}
+ }
+ }
+ newobj = newObject(s, addr, Account{})
newobj.setNonce(0) // sets the object to dirty
if prev == nil {
- self.journal.append(createObjectChange{account: &addr})
+ s.journal.append(createObjectChange{account: &addr})
} else {
- self.journal.append(resetObjectChange{prev: prev})
+ s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
+ }
+ s.setStateObject(newobj)
+ if prev != nil && !prev.deleted {
+ return newobj, prev
}
- self.setStateObject(newobj)
- return newobj, prev
+ return newobj, nil
}
// CreateAccount explicitly creates a state object. If a state object with the address
@@ -579,8 +665,8 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObjec
// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
//
// Carrying over the balance ensures that Ether doesn't disappear.
-func (self *StateDB) CreateAccount(addr common.Address) {
- newObj, prev := self.createObject(addr)
+func (s *StateDB) CreateAccount(addr common.Address) {
+ newObj, prev := s.createObject(addr)
if prev != nil {
newObj.setBalance(prev.data.Balance)
}
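The balance carry-over is observable from outside the package: re-creating an address that already holds funds resets its nonce and storage but keeps the balance. A hedged sketch; state.New with a nil snapshot tree, state.NewDatabase and rawdb.NewMemoryDatabase are assumptions about the surrounding coreth packages (imports as in the earlier sketch, plus an assumed github.com/ava-labs/coreth/core/rawdb):

func demoCreateAccount() {
	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)

	addr := common.HexToAddress("0x02")
	statedb.AddBalance(addr, big.NewInt(42)) // pre-existing, funded account

	statedb.CreateAccount(addr)                   // overwrite the existing account
	fmt.Println(statedb.GetBalance(addr).Int64()) // 42: balance carried over from prev
	fmt.Println(statedb.GetNonce(addr))           // 0:  nonce and storage start fresh
}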
@@ -617,40 +703,52 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common
// Copy creates a deep, independent copy of the state.
// Snapshots of the copied state cannot be applied to the copy.
-func (self *StateDB) Copy() *StateDB {
+func (s *StateDB) Copy() *StateDB {
// Copy all the basic fields, initialize the memory ones
state := &StateDB{
- db: self.db,
- trie: self.db.CopyTrie(self.trie),
- stateObjects: make(map[common.Address]*stateObject, len(self.journal.dirties)),
- stateObjectsDirty: make(map[common.Address]struct{}, len(self.journal.dirties)),
- refund: self.refund,
- logs: make(map[common.Hash][]*types.Log, len(self.logs)),
- logSize: self.logSize,
- preimages: make(map[common.Hash][]byte, len(self.preimages)),
- journal: newJournal(),
+ db: s.db,
+ trie: s.db.CopyTrie(s.trie),
+ stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)),
+ stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
+ stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)),
+ refund: s.refund,
+ logs: make(map[common.Hash][]*types.Log, len(s.logs)),
+ logSize: s.logSize,
+ preimages: make(map[common.Hash][]byte, len(s.preimages)),
+ journal: newJournal(),
}
// Copy the dirty states, logs, and preimages
- for addr := range self.journal.dirties {
+ for addr := range s.journal.dirties {
// As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527),
// and in the Finalise-method, there is a case where an object is in the journal but not
// in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for
// nil
- if object, exist := self.stateObjects[addr]; exist {
+ if object, exist := s.stateObjects[addr]; exist {
+ // Even though the original object is dirty, we are not copying the journal,
+			// so we need to make sure that any side effect the journal would have caused
+ // during a commit (or similar op) is already applied to the copy.
state.stateObjects[addr] = object.deepCopy(state)
- state.stateObjectsDirty[addr] = struct{}{}
+
+ state.stateObjectsDirty[addr] = struct{}{} // Mark the copy dirty to force internal (code/state) commits
+ state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits
}
}
// Above, we don't copy the actual journal. This means that if the copy is copied, the
// loop above will be a no-op, since the copy's journal is empty.
// Thus, here we iterate over stateObjects, to enable copies of copies
- for addr := range self.stateObjectsDirty {
+ for addr := range s.stateObjectsPending {
+ if _, exist := state.stateObjects[addr]; !exist {
+ state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state)
+ }
+ state.stateObjectsPending[addr] = struct{}{}
+ }
+ for addr := range s.stateObjectsDirty {
if _, exist := state.stateObjects[addr]; !exist {
- state.stateObjects[addr] = self.stateObjects[addr].deepCopy(state)
- state.stateObjectsDirty[addr] = struct{}{}
+ state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state)
}
+ state.stateObjectsDirty[addr] = struct{}{}
}
- for hash, logs := range self.logs {
+ for hash, logs := range s.logs {
cpy := make([]*types.Log, len(logs))
for i, l := range logs {
cpy[i] = new(types.Log)
@@ -658,46 +756,47 @@ func (self *StateDB) Copy() *StateDB {
}
state.logs[hash] = cpy
}
- for hash, preimage := range self.preimages {
+ for hash, preimage := range s.preimages {
state.preimages[hash] = preimage
}
return state
}
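Since the copy carries its own trie handle, object set and (empty) journal, later mutations on either side stay invisible to the other. A brief hedged sketch, same assumptions as the earlier sketches:

func demoCopy(statedb *state.StateDB) {
	addr := common.HexToAddress("0x03")
	statedb.AddBalance(addr, big.NewInt(10))

	cpy := statedb.Copy()
	statedb.AddBalance(addr, big.NewInt(5)) // only the original sees this

	fmt.Println(statedb.GetBalance(addr)) // 15
	fmt.Println(cpy.GetBalance(addr))     // 10
}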
// Snapshot returns an identifier for the current revision of the state.
-func (self *StateDB) Snapshot() int {
- id := self.nextRevisionId
- self.nextRevisionId++
- self.validRevisions = append(self.validRevisions, revision{id, self.journal.length()})
+func (s *StateDB) Snapshot() int {
+ id := s.nextRevisionId
+ s.nextRevisionId++
+ s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()})
return id
}
// RevertToSnapshot reverts all state changes made since the given revision.
-func (self *StateDB) RevertToSnapshot(revid int) {
+func (s *StateDB) RevertToSnapshot(revid int) {
// Find the snapshot in the stack of valid snapshots.
- idx := sort.Search(len(self.validRevisions), func(i int) bool {
- return self.validRevisions[i].id >= revid
+ idx := sort.Search(len(s.validRevisions), func(i int) bool {
+ return s.validRevisions[i].id >= revid
})
- if idx == len(self.validRevisions) || self.validRevisions[idx].id != revid {
+ if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid {
panic(fmt.Errorf("revision id %v cannot be reverted", revid))
}
- snapshot := self.validRevisions[idx].journalIndex
+ snapshot := s.validRevisions[idx].journalIndex
// Replay the journal to undo changes and remove invalidated snapshots
- self.journal.revert(self, snapshot)
- self.validRevisions = self.validRevisions[:idx]
+ s.journal.revert(s, snapshot)
+ s.validRevisions = s.validRevisions[:idx]
}
// GetRefund returns the current value of the refund counter.
-func (self *StateDB) GetRefund() uint64 {
- return self.refund
+func (s *StateDB) GetRefund() uint64 {
+ return s.refund
}
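Snapshot and RevertToSnapshot form a simple revision API: Snapshot hands out an id bound to the current journal length, RevertToSnapshot replays the journal back to that point and drops the invalidated revisions (an unknown id panics). A hedged usage sketch, same assumptions as above:

func demoRevert(statedb *state.StateDB) {
	addr := common.HexToAddress("0x04")
	statedb.SetNonce(addr, 1)

	rev := statedb.Snapshot()
	statedb.SetNonce(addr, 7)
	statedb.RevertToSnapshot(rev) // journal replayed backwards to the marker

	fmt.Println(statedb.GetNonce(addr)) // 1
}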
-// Finalise finalises the state by removing the self destructed objects
-// and clears the journal as well as the refunds.
+// Finalise finalises the state by removing the self destructed objects and clears
+// the journal as well as the refunds. Finalise, however, will not push any updates
+// into the tries just yet. Only IntermediateRoot or Commit will do that.
func (s *StateDB) Finalise(deleteEmptyObjects bool) {
for addr := range s.journal.dirties {
- stateObject, exist := s.stateObjects[addr]
+ obj, exist := s.stateObjects[addr]
if !exist {
// ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2
// That tx goes out of gas, and although the notion of 'touched' does not exist there, the
@@ -707,13 +806,22 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// Thus, we can safely ignore it here
continue
}
-
- if stateObject.suicided || (deleteEmptyObjects && stateObject.empty()) {
- s.deleteStateObject(stateObject)
+ if obj.suicided || (deleteEmptyObjects && obj.empty()) {
+ obj.deleted = true
+
+ // If state snapshotting is active, also mark the destruction there.
+ // Note, we can't do this only at the end of a block because multiple
+ // transactions within the same block might self destruct and then
+			// resurrect an account; but the snapshotter needs both events.
+ if s.snap != nil {
+ s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely)
+				delete(s.snapAccounts, obj.addrHash)       // Clear out any previously updated account data (may be recreated via a resurrect)
+				delete(s.snapStorage, obj.addrHash)        // Clear out any previously updated storage data (may be recreated via a resurrect)
+ }
} else {
- stateObject.updateRoot(s.db)
- s.updateStateObject(stateObject)
+ obj.finalise()
}
+ s.stateObjectsPending[addr] = struct{}{}
s.stateObjectsDirty[addr] = struct{}{}
}
// Invalidate journal because reverting across transactions is not allowed.
@@ -724,8 +832,21 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// It is called in between transactions to get the root hash that
// goes into transaction receipts.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
+ // Finalise all the dirty storage states and write them into the tries
s.Finalise(deleteEmptyObjects)
+ for addr := range s.stateObjectsPending {
+ obj := s.stateObjects[addr]
+ if obj.deleted {
+ s.deleteStateObject(obj)
+ } else {
+ obj.updateRoot(s.db)
+ s.updateStateObject(obj)
+ }
+ }
+ if len(s.stateObjectsPending) > 0 {
+ s.stateObjectsPending = make(map[common.Address]struct{})
+ }
// Track the amount of time wasted on hashing the account trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
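This division of labour is the heart of the refactor: Finalise only flags objects and moves them into the pending set, while IntermediateRoot (and Commit, which calls it) is where deletions and root updates actually reach the tries. A hedged sketch of the observable difference, same assumptions as above:

func demoFinaliseVsRoot(statedb *state.StateDB) {
	addr := common.HexToAddress("0x05")
	before := statedb.IntermediateRoot(true)

	statedb.AddBalance(addr, big.NewInt(1))
	statedb.Finalise(true) // journal cleared, object pending, tries untouched so far

	after := statedb.IntermediateRoot(true) // pending objects flushed into the account trie
	fmt.Println(before != after)            // true: the root only moves here
}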
@@ -735,65 +856,86 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// Prepare sets the current transaction hash and index and block hash which is
// used when the EVM emits new state logs.
-func (self *StateDB) Prepare(thash, bhash common.Hash, ti int) {
- self.thash = thash
- self.bhash = bhash
- self.txIndex = ti
+func (s *StateDB) Prepare(thash, bhash common.Hash, ti int) {
+ s.thash = thash
+ s.bhash = bhash
+ s.txIndex = ti
}
func (s *StateDB) clearJournalAndRefund() {
- s.journal = newJournal()
- s.validRevisions = s.validRevisions[:0]
- s.refund = 0
+ if len(s.journal.entries) > 0 {
+ s.journal = newJournal()
+ s.refund = 0
+ }
+	s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
}
// Commit writes the state to the underlying in-memory trie database.
-func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error) {
- defer s.clearJournalAndRefund()
-
- for addr := range s.journal.dirties {
- s.stateObjectsDirty[addr] = struct{}{}
+func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
+ if s.dbErr != nil {
+ return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
}
+ // Finalize any pending changes and merge everything into the tries
+ s.IntermediateRoot(deleteEmptyObjects)
+
// Commit objects to the trie, measuring the elapsed time
- for addr, stateObject := range s.stateObjects {
- _, isDirty := s.stateObjectsDirty[addr]
- switch {
- case stateObject.suicided || (isDirty && deleteEmptyObjects && stateObject.empty()):
- // If the object has been removed, don't bother syncing it
- // and just mark it for deletion in the trie.
- s.deleteStateObject(stateObject)
- case isDirty:
+ codeWriter := s.db.TrieDB().DiskDB().NewBatch()
+ for addr := range s.stateObjectsDirty {
+ if obj := s.stateObjects[addr]; !obj.deleted {
// Write any contract code associated with the state object
- if stateObject.code != nil && stateObject.dirtyCode {
- s.db.TrieDB().InsertBlob(common.BytesToHash(stateObject.CodeHash()), stateObject.code)
- stateObject.dirtyCode = false
+ if obj.code != nil && obj.dirtyCode {
+ rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
+ obj.dirtyCode = false
}
- // Write any storage changes in the state object to its storage trie.
- if err := stateObject.CommitTrie(s.db); err != nil {
+ // Write any storage changes in the state object to its storage trie
+ if err := obj.CommitTrie(s.db); err != nil {
return common.Hash{}, err
}
- // Update the object in the main account trie.
- s.updateStateObject(stateObject)
}
- delete(s.stateObjectsDirty, addr)
+ }
+ if len(s.stateObjectsDirty) > 0 {
+ s.stateObjectsDirty = make(map[common.Address]struct{})
+ }
+ if codeWriter.ValueSize() > 0 {
+ if err := codeWriter.Write(); err != nil {
+ log.Crit("Failed to commit dirty codes", "error", err)
+ }
}
	// Write the account trie changes, measuring the amount of wasted time
+ var start time.Time
if metrics.EnabledExpensive {
- defer func(start time.Time) { s.AccountCommits += time.Since(start) }(time.Now())
+ start = time.Now()
}
- root, err = s.trie.Commit(func(leaf []byte, parent common.Hash) error {
- var account Account
+ // The onleaf func is called _serially_, so we can reuse the same account
+ // for unmarshalling every time.
+ var account Account
+ root, err := s.trie.Commit(func(path []byte, leaf []byte, parent common.Hash) error {
if err := rlp.DecodeBytes(leaf, &account); err != nil {
return nil
}
if account.Root != emptyRoot {
s.db.TrieDB().Reference(account.Root, parent)
}
- code := common.BytesToHash(account.CodeHash)
- if code != emptyCode {
- s.db.TrieDB().Reference(code, parent)
- }
return nil
})
+ if metrics.EnabledExpensive {
+ s.AccountCommits += time.Since(start)
+ }
+ // If snapshotting is enabled, update the snapshot tree with this new version
+ if s.snap != nil {
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
+ }
+ // Only update if there's a state transition (skip empty Clique blocks)
+ if parent := s.snap.Root(); parent != root {
+ if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
+ log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
+ }
+ if err := s.snaps.Cap(root, 127); err != nil { // Persistent layer is 128th, the last available trie
+ log.Warn("Failed to cap snapshot tree", "root", root, "layers", 127, "err", err)
+ }
+ }
+ s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
+ }
return root, err
}
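Taken together, the intended lifecycle is: mutate state per transaction, seal each transaction with Finalise/IntermediateRoot, then issue a single Commit per block, which now runs IntermediateRoot itself, batches dirty contract code through rawdb and, when snapshotting is enabled, feeds the diff into the snapshot tree. A hedged end-to-end sketch; the constructors are the same assumptions as in the earlier sketches:

func demoLifecycle() (common.Hash, error) {
	statedb, err := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	if err != nil {
		return common.Hash{}, err
	}
	addr := common.HexToAddress("0x06")

	// Per-transaction work: mutate, then take the intermediate root
	// (this is what would feed a pre-Byzantium style receipt root).
	statedb.AddBalance(addr, big.NewInt(100))
	statedb.IntermediateRoot(true)

	// Per-block work: commit everything into the in-memory trie database.
	return statedb.Commit(true)
}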
diff --git a/core/state_processor.go b/core/state_processor.go
index ab8759a..46ecd48 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -23,8 +23,8 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/core/vm"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/go-ethereum/common"
- "github.com/ava-labs/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
)
// StateProcessor is a basic Processor, which takes care of transitioning
@@ -68,7 +68,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
statedb.Prepare(tx.Hash(), block.Hash(), i)
- receipt, _, err := ApplyTransaction(p.config, p.bc, nil, gp, statedb, header, tx, usedGas, cfg)
+ receipt, err := ApplyTransaction(p.config, p.bc, nil, gp, statedb, header, tx, usedGas, cfg)
if err != nil {
return nil, nil, 0, err
}
@@ -88,10 +88,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
// and uses the input parameters for its environment. It returns the receipt
// for the transaction, gas used and an error if the transaction failed,
// indicating the block was invalid.
-func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, uint64, error) {
+func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) {
msg, err := tx.AsMessage(types.MakeSigner(config, header.Number))
if err != nil {
- return nil, 0, err
+ return nil, err
}
// Create a new context to be used in the EVM environment
context := NewEVMContext(msg, header, bc, author)
@@ -99,9 +99,9 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(context, statedb, config, cfg)
// Apply the transaction to the current state (included in the env)
- _, gas, failed, err := ApplyMessage(vmenv, msg, gp)
+ result, err := ApplyMessage(vmenv, msg, gp)
if err != nil {
- return nil, 0, err
+ return nil, err
}
// Update the state with pending changes
var root []byte
@@ -110,13 +110,13 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
} else {
root = statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes()
}
- *usedGas += gas
+ *usedGas += result.UsedGas
// Create a new receipt for the transaction, storing the intermediate root and gas used by the tx
// based on the eip phase, we're passing whether the root touch-delete accounts.
- receipt := types.NewReceipt(root, failed, *usedGas)
+ receipt := types.NewReceipt(root, result.Failed(), *usedGas)
receipt.TxHash = tx.Hash()
- receipt.GasUsed = gas
+ receipt.GasUsed = result.UsedGas
// if the transaction created a contract, store the creation address in the receipt.
if msg.To() == nil {
receipt.ContractAddress = crypto.CreateAddress(vmenv.Context.Origin, tx.Nonce())
@@ -128,5 +128,5 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
receipt.BlockNumber = header.Number
receipt.TransactionIndex = uint(statedb.TxIndex())
- return receipt, gas, err
+ return receipt, err
}
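For code outside this package the ApplyTransaction change is mechanical: the separate gas return value disappears and the per-transaction gas is read from the receipt instead. A hedged sketch of a thin wrapper that restores the old three-value shape for such callers, built only from the types visible in the signature above (coreth's core, core/state, core/types, core/vm and params packages are assumed imports; the wrapper itself is hypothetical):

// Old call shape: receipt, gas, err := core.ApplyTransaction(config, bc, nil, gp, statedb, header, tx, usedGas, cfg)
func applyOne(config *params.ChainConfig, bc core.ChainContext, gp *core.GasPool,
	statedb *state.StateDB, header *types.Header, tx *types.Transaction,
	usedGas *uint64, cfg vm.Config) (*types.Receipt, uint64, error) {

	receipt, err := core.ApplyTransaction(config, bc, nil, gp, statedb, header, tx, usedGas, cfg)
	if err != nil {
		return nil, 0, err
	}
	// The gas that used to be returned separately now lives on the receipt.
	return receipt, receipt.GasUsed, nil
}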