path: root/core/rawdb
author	Determinant <[email protected]>	2020-07-30 14:18:44 -0400
committer	Determinant <[email protected]>	2020-07-30 14:18:44 -0400
commit	0444e66f640999c15496066637841efcc0433934 (patch)
tree	c19aec2dced2e9129c880c19c52ca0f87b3d62f6 /core/rawdb
parent	cffa0954bbdb43821d1b71d00f99fb705cecd25b (diff)
parent	1f49826de2bb8bb4f5f99f69fd2beb039b1172d9 (diff)
Merge branch 'multi-coin'
Diffstat (limited to 'core/rawdb')
-rw-r--r--	core/rawdb/accessors_chain.go	560
-rw-r--r--	core/rawdb/accessors_indexes.go	131
-rw-r--r--	core/rawdb/accessors_metadata.go	98
-rw-r--r--	core/rawdb/database.go	355
-rw-r--r--	core/rawdb/freezer.go	393
-rw-r--r--	core/rawdb/freezer_reinit.go	127
-rw-r--r--	core/rawdb/freezer_table.go	637
-rw-r--r--	core/rawdb/schema.go	166
-rw-r--r--	core/rawdb/table.go	204
9 files changed, 2671 insertions, 0 deletions
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
new file mode 100644
index 0000000..7620eac
--- /dev/null
+++ b/core/rawdb/accessors_chain.go
@@ -0,0 +1,560 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "math/big"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/params"
+ "github.com/ava-labs/go-ethereum/common"
+ "github.com/ava-labs/go-ethereum/ethdb"
+ "github.com/ava-labs/go-ethereum/log"
+ "github.com/ava-labs/go-ethereum/rlp"
+)
+
+// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
+func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
+ data, _ := db.Ancient(freezerHashTable, number)
+ if len(data) == 0 {
+ data, _ = db.Get(headerHashKey(number))
+ // In the background, the freezer is moving data from leveldb into flat files.
+ // So during the first check of the ancient db the data may not be there yet,
+ // but by the time we reach into leveldb, it may already have been moved,
+ // which would result in a not-found error. Hence the second ancient lookup.
+ if len(data) == 0 {
+ data, _ = db.Ancient(freezerHashTable, number)
+ }
+ }
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// WriteCanonicalHash stores the hash assigned to a canonical block number.
+func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
+ log.Crit("Failed to store number to hash mapping", "err", err)
+ }
+}
+
+// DeleteCanonicalHash removes the number to hash canonical mapping.
+func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Delete(headerHashKey(number)); err != nil {
+ log.Crit("Failed to delete number to hash mapping", "err", err)
+ }
+}
+
+// ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
+// both canonical and reorged forks included.
+func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
+ prefix := headerKeyPrefix(number)
+
+ hashes := make([]common.Hash, 0, 1)
+ it := db.NewIteratorWithPrefix(prefix)
+ defer it.Release()
+
+ for it.Next() {
+ if key := it.Key(); len(key) == len(prefix)+32 {
+ hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
+ }
+ }
+ return hashes
+}
+
+// ReadHeaderNumber returns the header number assigned to a hash.
+func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
+ data, _ := db.Get(headerNumberKey(hash))
+ if len(data) != 8 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteHeaderNumber stores the hash->number mapping.
+func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ key := headerNumberKey(hash)
+ enc := encodeBlockNumber(number)
+ if err := db.Put(key, enc); err != nil {
+ log.Crit("Failed to store hash to number mapping", "err", err)
+ }
+}
+
+// DeleteHeaderNumber removes hash->number mapping.
+func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(headerNumberKey(hash)); err != nil {
+ log.Crit("Failed to delete hash to number mapping", "err", err)
+ }
+}
+
+// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
+func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
+ data, _ := db.Get(headHeaderKey)
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// WriteHeadHeaderHash stores the hash of the current canonical head header.
+func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
+ log.Crit("Failed to store last header's hash", "err", err)
+ }
+}
+
+// ReadHeadBlockHash retrieves the hash of the current canonical head block.
+func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
+ data, _ := db.Get(headBlockKey)
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// WriteHeadBlockHash stores the head block's hash.
+func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
+ log.Crit("Failed to store last block's hash", "err", err)
+ }
+}
+
+// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
+func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
+ data, _ := db.Get(headFastBlockKey)
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
+func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
+ log.Crit("Failed to store last fast block's hash", "err", err)
+ }
+}
+
+// ReadFastTrieProgress retrieves the number of trie nodes fast synced to allow
+// reporting correct numbers across restarts.
+func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
+ data, _ := db.Get(fastTrieProgressKey)
+ if len(data) == 0 {
+ return 0
+ }
+ return new(big.Int).SetBytes(data).Uint64()
+}
+
+// WriteFastTrieProgress stores the fast sync trie progress counter to support
+// retrieving it across restarts.
+func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
+ if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
+ log.Crit("Failed to store fast sync trie progress", "err", err)
+ }
+}
+
+// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
+func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ data, _ := db.Ancient(freezerHeaderTable, number)
+ if len(data) == 0 {
+ data, _ = db.Get(headerKey(number, hash))
+ // In the background, the freezer is moving data from leveldb into flat files.
+ // So during the first check of the ancient db the data may not be there yet,
+ // but by the time we reach into leveldb, it may already have been moved,
+ // which would result in a not-found error. Hence the second ancient lookup.
+ if len(data) == 0 {
+ data, _ = db.Ancient(freezerHeaderTable, number)
+ }
+ }
+ return data
+}
+
+// HasHeader verifies the existence of a block header corresponding to the hash.
+func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
+ if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
+ return true
+ }
+ if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
+ return false
+ }
+ return true
+}
+
+// ReadHeader retrieves the block header corresponding to the hash.
+func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
+ data := ReadHeaderRLP(db, hash, number)
+ if len(data) == 0 {
+ return nil
+ }
+ header := new(types.Header)
+ if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
+ log.Error("Invalid block header RLP", "hash", hash, "err", err)
+ return nil
+ }
+ return header
+}
+
+// WriteHeader stores a block header into the database and also stores the hash-
+// to-number mapping.
+func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
+ var (
+ hash = header.Hash()
+ number = header.Number.Uint64()
+ )
+ // Write the hash -> number mapping
+ WriteHeaderNumber(db, hash, number)
+
+ // Write the encoded header
+ data, err := rlp.EncodeToBytes(header)
+ if err != nil {
+ log.Crit("Failed to RLP encode header", "err", err)
+ }
+ key := headerKey(number, hash)
+ if err := db.Put(key, data); err != nil {
+ log.Crit("Failed to store header", "err", err)
+ }
+}
+
+// DeleteHeader removes all block header data associated with a hash.
+func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ deleteHeaderWithoutNumber(db, hash, number)
+ if err := db.Delete(headerNumberKey(hash)); err != nil {
+ log.Crit("Failed to delete hash to number mapping", "err", err)
+ }
+}
+
+// deleteHeaderWithoutNumber removes only the block header but does not remove
+// the hash to number mapping.
+func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ if err := db.Delete(headerKey(number, hash)); err != nil {
+ log.Crit("Failed to delete header", "err", err)
+ }
+}
+
+// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
+func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ data, _ := db.Ancient(freezerBodiesTable, number)
+ if len(data) == 0 {
+ data, _ = db.Get(blockBodyKey(number, hash))
+ // In the background, the freezer is moving data from leveldb into flat files.
+ // So during the first check of the ancient db the data may not be there yet,
+ // but by the time we reach into leveldb, it may already have been moved,
+ // which would result in a not-found error. Hence the second ancient lookup.
+ if len(data) == 0 {
+ data, _ = db.Ancient(freezerBodiesTable, number)
+ }
+ }
+ return data
+}
+
+// WriteBodyRLP stores an RLP encoded block body into the database.
+func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
+ if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
+ log.Crit("Failed to store block body", "err", err)
+ }
+}
+
+// HasBody verifies the existence of a block body corresponding to the hash.
+func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
+ if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
+ return true
+ }
+ if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
+ return false
+ }
+ return true
+}
+
+// ReadBody retrieves the block body corresponding to the hash.
+func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
+ data := ReadBodyRLP(db, hash, number)
+ if len(data) == 0 {
+ return nil
+ }
+ body := new(types.Body)
+ if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
+ log.Error("Invalid block body RLP", "hash", hash, "err", err)
+ return nil
+ }
+ return body
+}
+
+// WriteBody stores a block body into the database.
+func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
+ data, err := rlp.EncodeToBytes(body)
+ if err != nil {
+ log.Crit("Failed to RLP encode body", "err", err)
+ }
+ WriteBodyRLP(db, hash, number, data)
+}
+
+// DeleteBody removes all block body data associated with a hash.
+func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ if err := db.Delete(blockBodyKey(number, hash)); err != nil {
+ log.Crit("Failed to delete block body", "err", err)
+ }
+}
+
+// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
+func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ data, _ := db.Ancient(freezerDifficultyTable, number)
+ if len(data) == 0 {
+ data, _ = db.Get(headerTDKey(number, hash))
+ // In the background, the freezer is moving data from leveldb into flat files.
+ // So during the first check of the ancient db the data may not be there yet,
+ // but by the time we reach into leveldb, it may already have been moved,
+ // which would result in a not-found error. Hence the second ancient lookup.
+ if len(data) == 0 {
+ data, _ = db.Ancient(freezerDifficultyTable, number)
+ }
+ }
+ return data
+}
+
+// ReadTd retrieves a block's total difficulty corresponding to the hash.
+func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
+ data := ReadTdRLP(db, hash, number)
+ if len(data) == 0 {
+ return nil
+ }
+ td := new(big.Int)
+ if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
+ log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
+ return nil
+ }
+ return td
+}
+
+// WriteTd stores the total difficulty of a block into the database.
+func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
+ data, err := rlp.EncodeToBytes(td)
+ if err != nil {
+ log.Crit("Failed to RLP encode block total difficulty", "err", err)
+ }
+ if err := db.Put(headerTDKey(number, hash), data); err != nil {
+ log.Crit("Failed to store block total difficulty", "err", err)
+ }
+}
+
+// DeleteTd removes all block total difficulty data associated with a hash.
+func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ if err := db.Delete(headerTDKey(number, hash)); err != nil {
+ log.Crit("Failed to delete block total difficulty", "err", err)
+ }
+}
+
+// HasReceipts verifies the existence of all the transaction receipts belonging
+// to a block.
+func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
+ if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
+ return true
+ }
+ if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
+ return false
+ }
+ return true
+}
+
+// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
+func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ data, _ := db.Ancient(freezerReceiptTable, number)
+ if len(data) == 0 {
+ data, _ = db.Get(blockReceiptsKey(number, hash))
+ // In the background, the freezer is moving data from leveldb into flat files.
+ // So during the first check of the ancient db the data may not be there yet,
+ // but by the time we reach into leveldb, it may already have been moved,
+ // which would result in a not-found error. Hence the second ancient lookup.
+ if len(data) == 0 {
+ data, _ = db.Ancient(freezerReceiptTable, number)
+ }
+ }
+ return data
+}
+
+// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
+// The receipt metadata fields are not guaranteed to be populated, so they
+// should not be used. Use ReadReceipts instead if the metadata is needed.
+func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
+ // Retrieve the flattened receipt slice
+ data := ReadReceiptsRLP(db, hash, number)
+ if len(data) == 0 {
+ return nil
+ }
+ // Convert the receipts from their storage form to their internal representation
+ storageReceipts := []*types.ReceiptForStorage{}
+ if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
+ log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
+ return nil
+ }
+ receipts := make(types.Receipts, len(storageReceipts))
+ for i, storageReceipt := range storageReceipts {
+ receipts[i] = (*types.Receipt)(storageReceipt)
+ }
+ return receipts
+}
+
+// ReadReceipts retrieves all the transaction receipts belonging to a block, including
+// its corresponding metadata fields. If it is unable to populate these metadata
+// fields then nil is returned.
+//
+// The current implementation populates these metadata fields by reading the receipts'
+// corresponding block body, so if the block body is not found it will return nil even
+// if the receipt itself is stored.
+func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
+ // We're deriving many fields from the block body, so retrieve it alongside the receipts
+ receipts := ReadRawReceipts(db, hash, number)
+ if receipts == nil {
+ return nil
+ }
+ body := ReadBody(db, hash, number)
+ if body == nil {
+ log.Error("Missing body but have receipt", "hash", hash, "number", number)
+ return nil
+ }
+ if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
+ log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
+ return nil
+ }
+ return receipts
+}
+
+// WriteReceipts stores all the transaction receipts belonging to a block.
+func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
+ // Convert the receipts into their storage form and serialize them
+ storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
+ for i, receipt := range receipts {
+ storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
+ }
+ bytes, err := rlp.EncodeToBytes(storageReceipts)
+ if err != nil {
+ log.Crit("Failed to encode block receipts", "err", err)
+ }
+ // Store the flattened receipt slice
+ if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
+ log.Crit("Failed to store block receipts", "err", err)
+ }
+}
+
+// DeleteReceipts removes all receipt data associated with a block hash.
+func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
+ log.Crit("Failed to delete block receipts", "err", err)
+ }
+}
+
+// ReadBlock retrieves an entire block corresponding to the hash, assembling it
+// back from the stored header and body. If either the header or body could not
+// be retrieved nil is returned.
+//
+// Note: due to the concurrent download of the header and block body, the header
+// (and thus the canonical hash) can be stored in the database while the body data is not (yet).
+func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
+ header := ReadHeader(db, hash, number)
+ if header == nil {
+ return nil
+ }
+ body := ReadBody(db, hash, number)
+ if body == nil {
+ return nil
+ }
+ return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
+}
+
+// WriteBlock serializes a block into the database, header and body separately.
+func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
+ WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
+ WriteHeader(db, block.Header())
+}
+
+// WriteAncientBlock writes entire block data into ancient store and returns the total written size.
+func WriteAncientBlock(db ethdb.AncientWriter, block *types.Block, receipts types.Receipts, td *big.Int) int {
+ // Encode all block components to RLP format.
+ headerBlob, err := rlp.EncodeToBytes(block.Header())
+ if err != nil {
+ log.Crit("Failed to RLP encode block header", "err", err)
+ }
+ bodyBlob, err := rlp.EncodeToBytes(block.Body())
+ if err != nil {
+ log.Crit("Failed to RLP encode body", "err", err)
+ }
+ storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
+ for i, receipt := range receipts {
+ storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
+ }
+ receiptBlob, err := rlp.EncodeToBytes(storageReceipts)
+ if err != nil {
+ log.Crit("Failed to RLP encode block receipts", "err", err)
+ }
+ tdBlob, err := rlp.EncodeToBytes(td)
+ if err != nil {
+ log.Crit("Failed to RLP encode block total difficulty", "err", err)
+ }
+ // Write all blobs to the flat files.
+ err = db.AppendAncient(block.NumberU64(), block.Hash().Bytes(), headerBlob, bodyBlob, receiptBlob, tdBlob)
+ if err != nil {
+ log.Crit("Failed to write block data to ancient store", "err", err)
+ }
+ return len(headerBlob) + len(bodyBlob) + len(receiptBlob) + len(tdBlob) + common.HashLength
+}
+
+// DeleteBlock removes all block data associated with a hash.
+func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ DeleteReceipts(db, hash, number)
+ DeleteHeader(db, hash, number)
+ DeleteBody(db, hash, number)
+ DeleteTd(db, hash, number)
+}
+
+// DeleteBlockWithoutNumber removes all block data associated with a hash, except
+// the hash to number mapping.
+func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ DeleteReceipts(db, hash, number)
+ deleteHeaderWithoutNumber(db, hash, number)
+ DeleteBody(db, hash, number)
+ DeleteTd(db, hash, number)
+}
+
+// FindCommonAncestor returns the last common ancestor of two block headers.
+func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
+ for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
+ a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
+ if a == nil {
+ return nil
+ }
+ }
+ for an := a.Number.Uint64(); an < b.Number.Uint64(); {
+ b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
+ if b == nil {
+ return nil
+ }
+ }
+ for a.Hash() != b.Hash() {
+ a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
+ if a == nil {
+ return nil
+ }
+ b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
+ if b == nil {
+ return nil
+ }
+ }
+ return a
+}
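
Taken together, the accessors above split a block across independently keyed entries (header, body, receipts, total difficulty) plus the canonical and head markers. A minimal usage sketch of how they compose, assuming the in-memory database constructor from this package; the trivial genesis-like header is illustrative only:

package main

import (
	"fmt"
	"math/big"

	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/coreth/core/types"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// Store a trivial block plus the canonical and head markers for it
	// (the header field set here is a placeholder, not a real block).
	block := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(0)})
	rawdb.WriteBlock(db, block)
	rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
	rawdb.WriteHeadBlockHash(db, block.Hash())

	// Read it back through the head hash -> number -> block accessor chain.
	hash := rawdb.ReadHeadBlockHash(db)
	if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
		fmt.Println("head block:", rawdb.ReadBlock(db, hash, *number).NumberU64())
	}
}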
diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go
new file mode 100644
index 0000000..1dd478a
--- /dev/null
+++ b/core/rawdb/accessors_indexes.go
@@ -0,0 +1,131 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "math/big"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/params"
+ "github.com/ava-labs/go-ethereum/common"
+ "github.com/ava-labs/go-ethereum/ethdb"
+ "github.com/ava-labs/go-ethereum/log"
+ "github.com/ava-labs/go-ethereum/rlp"
+)
+
+// ReadTxLookupEntry retrieves the positional metadata associated with a transaction
+// hash to allow retrieving the transaction or receipt by hash.
+func ReadTxLookupEntry(db ethdb.Reader, hash common.Hash) *uint64 {
+ data, _ := db.Get(txLookupKey(hash))
+ if len(data) == 0 {
+ return nil
+ }
+ // Database v6 tx lookup just stores the block number
+ if len(data) < common.HashLength {
+ number := new(big.Int).SetBytes(data).Uint64()
+ return &number
+ }
+ // Database v4-v5 tx lookup format just stores the hash
+ if len(data) == common.HashLength {
+ return ReadHeaderNumber(db, common.BytesToHash(data))
+ }
+ // Finally try database v3 tx lookup format
+ var entry LegacyTxLookupEntry
+ if err := rlp.DecodeBytes(data, &entry); err != nil {
+ log.Error("Invalid transaction lookup entry RLP", "hash", hash, "blob", data, "err", err)
+ return nil
+ }
+ return &entry.BlockIndex
+}
+
+// WriteTxLookupEntries stores positional metadata for every transaction from
+// a block, enabling hash based transaction and receipt lookups.
+func WriteTxLookupEntries(db ethdb.KeyValueWriter, block *types.Block) {
+ number := block.Number().Bytes()
+ for _, tx := range block.Transactions() {
+ if err := db.Put(txLookupKey(tx.Hash()), number); err != nil {
+ log.Crit("Failed to store transaction lookup entry", "err", err)
+ }
+ }
+}
+
+// DeleteTxLookupEntry removes all transaction data associated with a hash.
+func DeleteTxLookupEntry(db ethdb.KeyValueWriter, hash common.Hash) {
+ db.Delete(txLookupKey(hash))
+}
+
+// ReadTransaction retrieves a specific transaction from the database, along with
+// its added positional metadata.
+func ReadTransaction(db ethdb.Reader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
+ blockNumber := ReadTxLookupEntry(db, hash)
+ if blockNumber == nil {
+ return nil, common.Hash{}, 0, 0
+ }
+ blockHash := ReadCanonicalHash(db, *blockNumber)
+ if blockHash == (common.Hash{}) {
+ return nil, common.Hash{}, 0, 0
+ }
+ body := ReadBody(db, blockHash, *blockNumber)
+ if body == nil {
+ log.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash)
+ return nil, common.Hash{}, 0, 0
+ }
+ for txIndex, tx := range body.Transactions {
+ if tx.Hash() == hash {
+ return tx, blockHash, *blockNumber, uint64(txIndex)
+ }
+ }
+ log.Error("Transaction not found", "number", blockNumber, "hash", blockHash, "txhash", hash)
+ return nil, common.Hash{}, 0, 0
+}
+
+// ReadReceipt retrieves a specific transaction receipt from the database, along with
+// its added positional metadata.
+func ReadReceipt(db ethdb.Reader, hash common.Hash, config *params.ChainConfig) (*types.Receipt, common.Hash, uint64, uint64) {
+ // Retrieve the context of the receipt based on the transaction hash
+ blockNumber := ReadTxLookupEntry(db, hash)
+ if blockNumber == nil {
+ return nil, common.Hash{}, 0, 0
+ }
+ blockHash := ReadCanonicalHash(db, *blockNumber)
+ if blockHash == (common.Hash{}) {
+ return nil, common.Hash{}, 0, 0
+ }
+ // Read all the receipts from the block and return the one with the matching hash
+ receipts := ReadReceipts(db, blockHash, *blockNumber, config)
+ for receiptIndex, receipt := range receipts {
+ if receipt.TxHash == hash {
+ return receipt, blockHash, *blockNumber, uint64(receiptIndex)
+ }
+ }
+ log.Error("Receipt not found", "number", blockNumber, "hash", blockHash, "txhash", hash)
+ return nil, common.Hash{}, 0, 0
+}
+
+// ReadBloomBits retrieves the compressed bloom bit vector belonging to the given
+// section and bit index from the database.
+func ReadBloomBits(db ethdb.KeyValueReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
+ return db.Get(bloomBitsKey(bit, section, head))
+}
+
+// WriteBloomBits stores the compressed bloom bits vector belonging to the given
+// section and bit index.
+func WriteBloomBits(db ethdb.KeyValueWriter, bit uint, section uint64, head common.Hash, bits []byte) {
+ if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil {
+ log.Crit("Failed to store bloom bits", "err", err)
+ }
+}
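
ReadTransaction resolves a hash in three hops: lookup entry -> canonical hash -> block body scan. A hedged sketch of calling it; printTxLocation is a hypothetical helper, not part of this package:

package example

import (
	"fmt"

	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/go-ethereum/common"
	"github.com/ava-labs/go-ethereum/ethdb"
)

// printTxLocation resolves a transaction hash to its block position via the
// lookup-entry accessors; the position values are only valid when tx is non-nil.
func printTxLocation(db ethdb.Database, txHash common.Hash) {
	tx, blockHash, blockNumber, index := rawdb.ReadTransaction(db, txHash)
	if tx == nil {
		fmt.Println("transaction not indexed:", txHash.Hex())
		return
	}
	fmt.Printf("tx %s: block %s (#%d), index %d\n", txHash.Hex(), blockHash.Hex(), blockNumber, index)
}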
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
new file mode 100644
index 0000000..7a17123
--- /dev/null
+++ b/core/rawdb/accessors_metadata.go
@@ -0,0 +1,98 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "encoding/json"
+
+ "github.com/ava-labs/coreth/params"
+ "github.com/ava-labs/go-ethereum/common"
+ "github.com/ava-labs/go-ethereum/ethdb"
+ "github.com/ava-labs/go-ethereum/log"
+ "github.com/ava-labs/go-ethereum/rlp"
+)
+
+// ReadDatabaseVersion retrieves the version number of the database.
+func ReadDatabaseVersion(db ethdb.KeyValueReader) *uint64 {
+ var version uint64
+
+ enc, _ := db.Get(databaseVerisionKey)
+ if len(enc) == 0 {
+ return nil
+ }
+ if err := rlp.DecodeBytes(enc, &version); err != nil {
+ return nil
+ }
+
+ return &version
+}
+
+// WriteDatabaseVersion stores the version number of the database
+func WriteDatabaseVersion(db ethdb.KeyValueWriter, version uint64) {
+ enc, err := rlp.EncodeToBytes(version)
+ if err != nil {
+ log.Crit("Failed to encode database version", "err", err)
+ }
+ if err = db.Put(databaseVerisionKey, enc); err != nil {
+ log.Crit("Failed to store the database version", "err", err)
+ }
+}
+
+// ReadChainConfig retrieves the consensus settings based on the given genesis hash.
+func ReadChainConfig(db ethdb.KeyValueReader, hash common.Hash) *params.ChainConfig {
+ data, _ := db.Get(configKey(hash))
+ if len(data) == 0 {
+ return nil
+ }
+ var config params.ChainConfig
+ if err := json.Unmarshal(data, &config); err != nil {
+ log.Error("Invalid chain config JSON", "hash", hash, "err", err)
+ return nil
+ }
+ return &config
+}
+
+// WriteChainConfig writes the chain config settings to the database.
+func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.ChainConfig) {
+ if cfg == nil {
+ return
+ }
+ data, err := json.Marshal(cfg)
+ if err != nil {
+ log.Crit("Failed to JSON encode chain config", "err", err)
+ }
+ if err := db.Put(configKey(hash), data); err != nil {
+ log.Crit("Failed to store chain config", "err", err)
+ }
+}
+
+// ReadPreimage retrieves a single preimage of the provided hash.
+func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(preimageKey(hash))
+ return data
+}
+
+// WritePreimages writes the provided set of preimages to the database.
+func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
+ for hash, preimage := range preimages {
+ if err := db.Put(preimageKey(hash), preimage); err != nil {
+ log.Crit("Failed to store trie preimage", "err", err)
+ }
+ }
+ preimageCounter.Inc(int64(len(preimages)))
+ preimageHitCounter.Inc(int64(len(preimages)))
+}
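
Chain configs are keyed by genesis hash, so both directions of the round trip need that hash. A small sketch under that assumption; storeAndLoadConfig is a hypothetical helper:

package example

import (
	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/coreth/params"
	"github.com/ava-labs/go-ethereum/common"
	"github.com/ava-labs/go-ethereum/ethdb"
)

// storeAndLoadConfig persists a chain config under its genesis hash and reads
// it straight back; ReadChainConfig returns nil if nothing is stored there.
func storeAndLoadConfig(db ethdb.Database, genesis common.Hash, cfg *params.ChainConfig) *params.ChainConfig {
	rawdb.WriteChainConfig(db, genesis, cfg) // silently ignores a nil cfg
	return rawdb.ReadChainConfig(db, genesis)
}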
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
new file mode 100644
index 0000000..f04c34f
--- /dev/null
+++ b/core/rawdb/database.go
@@ -0,0 +1,355 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/ava-labs/go-ethereum/common"
+ "github.com/ava-labs/go-ethereum/ethdb"
+ "github.com/ava-labs/go-ethereum/ethdb/leveldb"
+ "github.com/ava-labs/go-ethereum/ethdb/memorydb"
+ "github.com/ava-labs/go-ethereum/log"
+ "github.com/olekukonko/tablewriter"
+)
+
+// freezerdb is a database wrapper that enables freezer data retrievals.
+type freezerdb struct {
+ ethdb.KeyValueStore
+ ethdb.AncientStore
+}
+
+// Close implements io.Closer, closing both the fast key-value store as well as
+// the slow ancient tables.
+func (frdb *freezerdb) Close() error {
+ var errs []error
+ if err := frdb.KeyValueStore.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := frdb.AncientStore.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if len(errs) != 0 {
+ return fmt.Errorf("%v", errs)
+ }
+ return nil
+}
+
+// nofreezedb is a database wrapper that disables freezer data retrievals.
+type nofreezedb struct {
+ ethdb.KeyValueStore
+}
+
+// HasAncient returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
+ return false, errNotSupported
+}
+
+// Ancient returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
+ return nil, errNotSupported
+}
+
+// Ancients returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Ancients() (uint64, error) {
+ return 0, errNotSupported
+}
+
+// AncientSize returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
+ return 0, errNotSupported
+}
+
+// AppendAncient returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
+ return errNotSupported
+}
+
+// TruncateAncients returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) TruncateAncients(items uint64) error {
+ return errNotSupported
+}
+
+// Sync returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Sync() error {
+ return errNotSupported
+}
+
+// NewDatabase creates a high level database on top of a given key-value data
+// store without a freezer moving immutable chain segments into cold storage.
+func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
+ return &nofreezedb{
+ KeyValueStore: db,
+ }
+}
+
+// NewDatabaseWithFreezer creates a high level database on top of a given key-
+// value data store with a freezer moving immutable chain segments into cold
+// storage.
+func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string) (ethdb.Database, error) {
+ // Create the idle freezer instance
+ frdb, err := newFreezer(freezer, namespace)
+ if err != nil {
+ return nil, err
+ }
+ // Since the freezer can be stored separately from the user's key-value database,
+ // there's a fairly high probability that the user requests invalid combinations
+ // of the freezer and database. Ensure that we don't shoot ourselves in the foot
+ // by serving up conflicting data, leading to both datastores getting corrupted.
+ //
+ // - If both the freezer and key-value store are empty (no genesis), we just
+ // initialized a new empty freezer, so everything's fine.
+ // - If the key-value store is empty, but the freezer is not, we need to make
+ // sure the user's genesis matches the freezer. That will be checked in the
+ // blockchain, since we don't have the genesis block here (nor should we at
+ // this point care, the key-value/freezer combo is valid).
+ // - If neither the key-value store nor the freezer is empty, cross validate
+ // the genesis hashes to make sure they are compatible. If they are, also
+ // ensure that there's no gap between the freezer and the subsequent leveldb.
+ // - If the key-value store is not empty, but the freezer is, we might just be
+ // upgrading to the freezer release, or we might have had a small chain and
+ // not frozen anything yet. Ensure that no blocks are missing yet from the
+ // key-value store, since that would mean we already had an old freezer.
+
+ // If the genesis hash is empty, we have a new key-value store, so nothing to
+ // validate in this method. If, however, the genesis hash is not nil, compare
+ // it to the freezer content.
+ if kvgenesis, _ := db.Get(headerHashKey(0)); len(kvgenesis) > 0 {
+ if frozen, _ := frdb.Ancients(); frozen > 0 {
+ // If the freezer already contains something, ensure that the genesis blocks
+ // match, otherwise we might mix up freezers across chains and destroy both
+ // the freezer and the key-value store.
+ if frgenesis, _ := frdb.Ancient(freezerHashTable, 0); !bytes.Equal(kvgenesis, frgenesis) {
+ return nil, fmt.Errorf("genesis mismatch: %#x (leveldb) != %#x (ancients)", kvgenesis, frgenesis)
+ }
+ // Key-value store and freezer belong to the same network. Ensure that they
+ // are contiguous, otherwise we might end up with a non-functional freezer.
+ if kvhash, _ := db.Get(headerHashKey(frozen)); len(kvhash) == 0 {
+ // Subsequent header after the freezer limit is missing from the database.
+ // Reject startup if the database has a more recent head.
+ if *ReadHeaderNumber(db, ReadHeadHeaderHash(db)) > frozen-1 {
+ return nil, fmt.Errorf("gap (#%d) in the chain between ancients and leveldb", frozen)
+ }
+ // Database contains only older data than the freezer, this happens if the
+ // state was wiped and reinited from an existing freezer.
+ } else {
+ // Key-value store continues where the freezer left off, all is fine. We might
+ // have duplicate blocks (crash after freezer write but before key-value store
+ // deletion, but that's fine).
+ }
+ } else {
+ // If the freezer is empty, ensure nothing was moved yet from the key-value
+ // store, otherwise we'll end up missing data. We check block #1 to decide
+ // if we froze anything previously or not, but do take care of databases with
+ // only the genesis block.
+ if ReadHeadHeaderHash(db) != common.BytesToHash(kvgenesis) {
+ // Key-value store contains more data than the genesis block, make sure we
+ // didn't freeze anything yet.
+ if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 {
+ return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
+ }
+ // Block #1 is still in the database, we're allowed to init a new freezer
+ } else {
+ // The head header is still the genesis, we're allowed to init a new freezer
+ }
+ }
+ }
+ // Freezer is consistent with the key-value database, permit combining the two
+ go frdb.freeze(db)
+
+ return &freezerdb{
+ KeyValueStore: db,
+ AncientStore: frdb,
+ }, nil
+}
+
+// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
+// freezer moving immutable chain segments into cold storage.
+func NewMemoryDatabase() ethdb.Database {
+ return NewDatabase(memorydb.New())
+}
+
+// NewMemoryDatabaseWithCap creates an ephemeral in-memory key-value database
+// with an initial starting capacity, but without a freezer moving immutable
+// chain segments into cold storage.
+func NewMemoryDatabaseWithCap(size int) ethdb.Database {
+ return NewDatabase(memorydb.NewWithCap(size))
+}
+
+// NewLevelDBDatabase creates a persistent key-value database without a freezer
+// moving immutable chain segments into cold storage.
+func NewLevelDBDatabase(file string, cache int, handles int, namespace string) (ethdb.Database, error) {
+ db, err := leveldb.New(file, cache, handles, namespace)
+ if err != nil {
+ return nil, err
+ }
+ return NewDatabase(db), nil
+}
+
+// NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a
+// freezer moving immutable chain segments into cold storage.
+func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string) (ethdb.Database, error) {
+ kvdb, err := leveldb.New(file, cache, handles, namespace)
+ if err != nil {
+ return nil, err
+ }
+ frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace)
+ if err != nil {
+ kvdb.Close()
+ return nil, err
+ }
+ return frdb, nil
+}
+
+// InspectDatabase traverses the entire database and checks the size
+// of all different categories of data.
+func InspectDatabase(db ethdb.Database) error {
+ it := db.NewIterator()
+ defer it.Release()
+
+ var (
+ count int64
+ start = time.Now()
+ logged = time.Now()
+
+ // Key-value store statistics
+ total common.StorageSize
+ headerSize common.StorageSize
+ bodySize common.StorageSize
+ receiptSize common.StorageSize
+ tdSize common.StorageSize
+ numHashPairing common.StorageSize
+ hashNumPairing common.StorageSize
+ trieSize common.StorageSize
+ txlookupSize common.StorageSize
+ preimageSize common.StorageSize
+ bloomBitsSize common.StorageSize
+ cliqueSnapsSize common.StorageSize
+
+ // Ancient store statistics
+ ancientHeaders common.StorageSize
+ ancientBodies common.StorageSize
+ ancientReceipts common.StorageSize
+ ancientHashes common.StorageSize
+ ancientTds common.StorageSize
+
+ // Les statistics
+ chtTrieNodes common.StorageSize
+ bloomTrieNodes common.StorageSize
+
+ // Meta- and unaccounted data
+ metadata common.StorageSize
+ unaccounted common.StorageSize
+ )
+ // Inspect key-value database first.
+ for it.Next() {
+ var (
+ key = it.Key()
+ size = common.StorageSize(len(key) + len(it.Value()))
+ )
+ total += size
+ switch {
+ case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
+ tdSize += size
+ case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
+ numHashPairing += size
+ case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
+ headerSize += size
+ case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
+ hashNumPairing += size
+ case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
+ bodySize += size
+ case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
+ receiptSize += size
+ case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
+ txlookupSize += size
+ case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength):
+ preimageSize += size
+ case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
+ bloomBitsSize += size
+ case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength:
+ cliqueSnapsSize += size
+ case bytes.HasPrefix(key, []byte("cht-")) && len(key) == 4+common.HashLength:
+ chtTrieNodes += size
+ case bytes.HasPrefix(key, []byte("blt-")) && len(key) == 4+common.HashLength:
+ bloomTrieNodes += size
+ case len(key) == common.HashLength:
+ trieSize += size
+ default:
+ var accounted bool
+ for _, meta := range [][]byte{databaseVerisionKey, headHeaderKey, headBlockKey, headFastBlockKey, fastTrieProgressKey} {
+ if bytes.Equal(key, meta) {
+ metadata += size
+ accounted = true
+ break
+ }
+ }
+ if !accounted {
+ unaccounted += size
+ }
+ }
+ count += 1
+ if count%1000 == 0 && time.Since(logged) > 8*time.Second {
+ log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ // Then inspect the append-only file store.
+ ancients := []*common.StorageSize{&ancientHeaders, &ancientBodies, &ancientReceipts, &ancientHashes, &ancientTds}
+ for i, category := range []string{freezerHeaderTable, freezerBodiesTable, freezerReceiptTable, freezerHashTable, freezerDifficultyTable} {
+ if size, err := db.AncientSize(category); err == nil {
+ *ancients[i] += common.StorageSize(size)
+ total += common.StorageSize(size)
+ }
+ }
+ // Display the database statistics.
+ stats := [][]string{
+ {"Key-Value store", "Headers", headerSize.String()},
+ {"Key-Value store", "Bodies", bodySize.String()},
+ {"Key-Value store", "Receipts", receiptSize.String()},
+ {"Key-Value store", "Difficulties", tdSize.String()},
+ {"Key-Value store", "Block number->hash", numHashPairing.String()},
+ {"Key-Value store", "Block hash->number", hashNumPairing.String()},
+ {"Key-Value store", "Transaction index", txlookupSize.String()},
+ {"Key-Value store", "Bloombit index", bloomBitsSize.String()},
+ {"Key-Value store", "Trie nodes", trieSize.String()},
+ {"Key-Value store", "Trie preimages", preimageSize.String()},
+ {"Key-Value store", "Clique snapshots", cliqueSnapsSize.String()},
+ {"Key-Value store", "Singleton metadata", metadata.String()},
+ {"Ancient store", "Headers", ancientHeaders.String()},
+ {"Ancient store", "Bodies", ancientBodies.String()},
+ {"Ancient store", "Receipts", ancientReceipts.String()},
+ {"Ancient store", "Difficulties", ancientTds.String()},
+ {"Ancient store", "Block number->hash", ancientHashes.String()},
+ {"Light client", "CHT trie nodes", chtTrieNodes.String()},
+ {"Light client", "Bloom trie nodes", bloomTrieNodes.String()},
+ }
+ table := tablewriter.NewWriter(os.Stdout)
+ table.SetHeader([]string{"Database", "Category", "Size"})
+ table.SetFooter([]string{"", "Total", total.String()})
+ table.AppendBulk(stats)
+ table.Render()
+
+ if unaccounted > 0 {
+ log.Error("Database contains unaccounted data", "size", unaccounted)
+ }
+ return nil
+}
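
The constructors above come in freezer-less and freezer-backed flavours. A sketch of opening the persistent, freezer-backed variant; the paths, cache size, handle count and metrics namespace are placeholder assumptions, not recommendations:

package example

import (
	"path/filepath"

	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/go-ethereum/ethdb"
)

// openChainDB opens a LevelDB-backed database with its ancient store in a
// sibling directory, wiring the two store flavours together.
func openChainDB(datadir string) (ethdb.Database, error) {
	return rawdb.NewLevelDBDatabaseWithFreezer(
		filepath.Join(datadir, "chaindata"), // key-value store path (placeholder layout)
		16,                                  // cache size in MB, illustrative only
		16,                                  // file handles, illustrative only
		filepath.Join(datadir, "chaindata", "ancient"), // freezer path (placeholder layout)
		"eth/db/chaindata/", // metrics namespace, assumed
	)
}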
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
new file mode 100644
index 0000000..ce2e879
--- /dev/null
+++ b/core/rawdb/freezer.go
@@ -0,0 +1,393 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "path/filepath"
+ "sync/atomic"
+ "time"
+
+ "github.com/ava-labs/coreth/params"
+ "github.com/ava-labs/go-ethereum/common"
+ "github.com/ava-labs/go-ethereum/ethdb"
+ "github.com/ava-labs/go-ethereum/log"
+ "github.com/ava-labs/go-ethereum/metrics"
+ "github.com/prometheus/tsdb/fileutil"
+)
+
+var (
+ // errUnknownTable is returned if the user attempts to read from a table that is
+ // not tracked by the freezer.
+ errUnknownTable = errors.New("unknown table")
+
+ // errOutOrderInsertion is returned if the user attempts to inject out-of-order
+ // binary blobs into the freezer.
+ errOutOrderInsertion = errors.New("the append operation is out of order")
+
+ // errSymlinkDatadir is returned if the ancient directory specified by user
+ // is a symbolic link.
+ errSymlinkDatadir = errors.New("symbolic link datadir is not supported")
+)
+
+const (
+ // freezerRecheckInterval is the frequency to check the key-value database for
+ // chain progression that might permit new blocks to be frozen into immutable
+ // storage.
+ freezerRecheckInterval = time.Minute
+
+ // freezerBatchLimit is the maximum number of blocks to freeze in one batch
+ // before doing an fsync and deleting it from the key-value store.
+ freezerBatchLimit = 30000
+)
+
+// freezer is a memory-mapped append-only database to store immutable chain data
+// into flat files:
+//
+// - The append-only nature ensures that disk writes are minimized.
+// - The memory mapping ensures we can max out system memory for caching without
+// reserving it for go-ethereum. This would also reduce the memory requirements
+// of Geth, and thus also GC overhead.
+type freezer struct {
+ // WARNING: The `frozen` field is accessed atomically. On 32 bit platforms, only
+ // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
+ // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
+ frozen uint64 // Number of blocks already frozen
+
+ tables map[string]*freezerTable // Data tables for storing everything
+ instanceLock fileutil.Releaser // File-system lock to prevent double opens
+}
+
+// newFreezer creates a chain freezer that moves ancient chain data into
+// append-only flat file containers.
+func newFreezer(datadir string, namespace string) (*freezer, error) {
+ // Create the initial freezer object
+ var (
+ readMeter = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
+ writeMeter = metrics.NewRegisteredMeter(namespace+"ancient/write", nil)
+ sizeCounter = metrics.NewRegisteredCounter(namespace+"ancient/size", nil)
+ )
+ // Ensure the datadir is not a symbolic link if it exists.
+ if info, err := os.Lstat(datadir); !os.IsNotExist(err) {
+ if info.Mode()&os.ModeSymlink != 0 {
+ log.Warn("Symbolic link ancient database is not supported", "path", datadir)
+ return nil, errSymlinkDatadir
+ }
+ }
+ // Leveldb uses LOCK as the filelock filename. To prevent a name collision,
+ // we use FLOCK as the lock name.
+ lock, _, err := fileutil.Flock(filepath.Join(datadir, "FLOCK"))
+ if err != nil {
+ return nil, err
+ }
+ // Open all the supported data tables
+ freezer := &freezer{
+ tables: make(map[string]*freezerTable),
+ instanceLock: lock,
+ }
+ for name, disableSnappy := range freezerNoSnappy {
+ table, err := newTable(datadir, name, readMeter, writeMeter, sizeCounter, disableSnappy)
+ if err != nil {
+ for _, table := range freezer.tables {
+ table.Close()
+ }
+ lock.Release()
+ return nil, err
+ }
+ freezer.tables[name] = table
+ }
+ if err := freezer.repair(); err != nil {
+ for _, table := range freezer.tables {
+ table.Close()
+ }
+ lock.Release()
+ return nil, err
+ }
+ log.Info("Opened ancient database", "database", datadir)
+ return freezer, nil
+}
+
+// Close terminates the chain freezer, unmapping all the data files.
+func (f *freezer) Close() error {
+ var errs []error
+ for _, table := range f.tables {
+ if err := table.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if err := f.instanceLock.Release(); err != nil {
+ errs = append(errs, err)
+ }
+ if errs != nil {
+ return fmt.Errorf("%v", errs)
+ }
+ return nil
+}
+
+// HasAncient returns an indicator whether the specified ancient data exists
+// in the freezer.
+func (f *freezer) HasAncient(kind string, number uint64) (bool, error) {
+ if table := f.tables[kind]; table != nil {
+ return table.has(number), nil
+ }
+ return false, nil
+}
+
+// Ancient retrieves an ancient binary blob from the append-only immutable files.
+func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
+ if table := f.tables[kind]; table != nil {
+ return table.Retrieve(number)
+ }
+ return nil, errUnknownTable
+}
+
+// Ancients returns the number of frozen items.
+func (f *freezer) Ancients() (uint64, error) {
+ return atomic.LoadUint64(&f.frozen), nil
+}
+
+// AncientSize returns the ancient size of the specified category.
+func (f *freezer) AncientSize(kind string) (uint64, error) {
+ if table := f.tables[kind]; table != nil {
+ return table.size()
+ }
+ return 0, errUnknownTable
+}
+
+// AppendAncient injects all binary blobs belonging to a block at the end of the
+// append-only immutable table files.
+//
+// Notably, this function is lock-free but only somewhat thread-safe: all
+// out-of-order injections are rejected, but if two injections with the same
+// number happen at the same time, we can get into trouble.
+func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, td []byte) (err error) {
+ // Ensure the binary blobs we are appending are continuous with the freezer.
+ if atomic.LoadUint64(&f.frozen) != number {
+ return errOutOrderInsertion
+ }
+ // Roll back all inserted data if any insertion below fails, to ensure
+ // the tables won't get out of sync.
+ defer func() {
+ if err != nil {
+ rerr := f.repair()
+ if rerr != nil {
+ log.Crit("Failed to repair freezer", "err", rerr)
+ }
+ log.Info("Append ancient failed", "number", number, "err", err)
+ }
+ }()
+ // Inject all the components into the relevant data tables
+ if err := f.tables[freezerHashTable].Append(f.frozen, hash[:]); err != nil {
+ log.Error("Failed to append ancient hash", "number", f.frozen, "hash", hash, "err", err)
+ return err
+ }
+ if err := f.tables[freezerHeaderTable].Append(f.frozen, header); err != nil {
+ log.Error("Failed to append ancient header", "number", f.frozen, "hash", hash, "err", err)
+ return err
+ }
+ if err := f.tables[freezerBodiesTable].Append(f.frozen, body); err != nil {
+ log.Error("Failed to append ancient body", "number", f.frozen, "hash", hash, "err", err)
+ return err
+ }
+ if err := f.tables[freezerReceiptTable].Append(f.frozen, receipts); err != nil {
+ log.Error("Failed to append ancient receipts", "number", f.frozen, "hash", hash, "err", err)
+ return err
+ }
+ if err := f.tables[freezerDifficultyTable].Append(f.frozen, td); err != nil {
+ log.Error("Failed to append ancient difficulty", "number", f.frozen, "hash", hash, "err", err)
+ return err
+ }
+ atomic.AddUint64(&f.frozen, 1) // Only modify atomically
+ return nil
+}
+
+// TruncateAncients discards any recent data above the provided threshold number.
+func (f *freezer) TruncateAncients(items uint64) error {
+ if atomic.LoadUint64(&f.frozen) <= items {
+ return nil
+ }
+ for _, table := range f.tables {
+ if err := table.truncate(items); err != nil {
+ return err
+ }
+ }
+ atomic.StoreUint64(&f.frozen, items)
+ return nil
+}
+
+// Sync flushes all data tables to disk.
+func (f *freezer) Sync() error {
+ var errs []error
+ for _, table := range f.tables {
+ if err := table.Sync(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if errs != nil {
+ return fmt.Errorf("%v", errs)
+ }
+ return nil
+}
+
+// freeze is a background thread that periodically checks the blockchain for any
+// import progress and moves ancient data from the fast database into the freezer.
+//
+// This functionality is deliberately broken off from block importing to avoid
+// incurring additional data shuffling delays on block propagation.
+func (f *freezer) freeze(db ethdb.KeyValueStore) {
+ nfdb := &nofreezedb{KeyValueStore: db}
+
+ for {
+ // Retrieve the freezing threshold.
+ hash := ReadHeadBlockHash(nfdb)
+ if hash == (common.Hash{}) {
+ log.Debug("Current full block hash unavailable") // new chain, empty database
+ time.Sleep(freezerRecheckInterval)
+ continue
+ }
+ number := ReadHeaderNumber(nfdb, hash)
+ switch {
+ case number == nil:
+ log.Error("Current full block number unavailable", "hash", hash)
+ time.Sleep(freezerRecheckInterval)
+ continue
+
+ case *number < params.ImmutabilityThreshold:
+ log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", params.ImmutabilityThreshold)
+ time.Sleep(freezerRecheckInterval)
+ continue
+
+ case *number-params.ImmutabilityThreshold <= f.frozen:
+ log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen)
+ time.Sleep(freezerRecheckInterval)
+ continue
+ }
+ head := ReadHeader(nfdb, hash, *number)
+ if head == nil {
+ log.Error("Current full block unavailable", "number", *number, "hash", hash)
+ time.Sleep(freezerRecheckInterval)
+ continue
+ }
+ // Seems we have data ready to be frozen, process in usable batches
+ limit := *number - params.ImmutabilityThreshold
+ if limit-f.frozen > freezerBatchLimit {
+ limit = f.frozen + freezerBatchLimit
+ }
+ var (
+ start = time.Now()
+ first = f.frozen
+ ancients = make([]common.Hash, 0, limit)
+ )
+ for f.frozen < limit {
+ // Retrieves all the components of the canonical block
+ hash := ReadCanonicalHash(nfdb, f.frozen)
+ if hash == (common.Hash{}) {
+ log.Error("Canonical hash missing, can't freeze", "number", f.frozen)
+ break
+ }
+ header := ReadHeaderRLP(nfdb, hash, f.frozen)
+ if len(header) == 0 {
+ log.Error("Block header missing, can't freeze", "number", f.frozen, "hash", hash)
+ break
+ }
+ body := ReadBodyRLP(nfdb, hash, f.frozen)
+ if len(body) == 0 {
+ log.Error("Block body missing, can't freeze", "number", f.frozen, "hash", hash)
+ break
+ }
+ receipts := ReadReceiptsRLP(nfdb, hash, f.frozen)
+ if len(receipts) == 0 {
+ log.Error("Block receipts missing, can't freeze", "number", f.frozen, "hash", hash)
+ break
+ }
+ td := ReadTdRLP(nfdb, hash, f.frozen)
+ if len(td) == 0 {
+ log.Error("Total difficulty missing, can't freeze", "number", f.frozen, "hash", hash)
+ break
+ }
+ log.Trace("Deep froze ancient block", "number", f.frozen, "hash", hash)
+ // Inject all the components into the relevant data tables
+ if err := f.AppendAncient(f.frozen, hash[:], header, body, receipts, td); err != nil {
+ break
+ }
+ ancients = append(ancients, hash)
+ }
+ // A batch of blocks has been frozen, flush them before wiping from leveldb
+ if err := f.Sync(); err != nil {
+ log.Crit("Failed to flush frozen tables", "err", err)
+ }
+ // Wipe out all data from the active database
+ batch := db.NewBatch()
+ for i := 0; i < len(ancients); i++ {
+ // Always keep the genesis block in active database
+ if first+uint64(i) != 0 {
+ DeleteBlockWithoutNumber(batch, ancients[i], first+uint64(i))
+ DeleteCanonicalHash(batch, first+uint64(i))
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to delete frozen canonical blocks", "err", err)
+ }
+ batch.Reset()
+ // Wipe out side chain also.
+ for number := first; number < f.frozen; number++ {
+ // Always keep the genesis block in active database
+ if number != 0 {
+ for _, hash := range ReadAllHashes(db, number) {
+ DeleteBlock(batch, hash, number)
+ }
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to delete frozen side blocks", "err", err)
+ }
+ // Log something friendly for the user
+ context := []interface{}{
+ "blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1,
+ }
+ if n := len(ancients); n > 0 {
+ context = append(context, []interface{}{"hash", ancients[n-1]}...)
+ }
+ log.Info("Deep froze chain segment", context...)
+
+ // Avoid database thrashing with tiny writes
+ if f.frozen-first < freezerBatchLimit {
+ time.Sleep(freezerRecheckInterval)
+ }
+ }
+}
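+
+// To illustrate the freezing window above (numbers are illustrative only; the
+// actual threshold is defined by params.ImmutabilityThreshold): with a
+// threshold of 90000 and the full head at block 1000000, blocks 0..909999 are
+// eligible for freezing, and each pass moves at most freezerBatchLimit of them:
+//
+//	limit := *number - params.ImmutabilityThreshold // 910000
+//	if limit-f.frozen > freezerBatchLimit {
+//		limit = f.frozen + freezerBatchLimit
+//	}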
+
+// repair truncates all data tables to the same length.
+func (f *freezer) repair() error {
+ min := uint64(math.MaxUint64)
+ for _, table := range f.tables {
+ items := atomic.LoadUint64(&table.items)
+ if min > items {
+ min = items
+ }
+ }
+ for _, table := range f.tables {
+ if err := table.truncate(min); err != nil {
+ return err
+ }
+ }
+ atomic.StoreUint64(&f.frozen, min)
+ return nil
+}
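+
+// For example, if after a crash the headers table reports 1000 items but the
+// bodies table only 998, repair truncates every table back to 998 so that all
+// tables stay aligned on the same item count. (Illustrative numbers.)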
diff --git a/core/rawdb/freezer_reinit.go b/core/rawdb/freezer_reinit.go
new file mode 100644
index 0000000..6b9fb79
--- /dev/null
+++ b/core/rawdb/freezer_reinit.go
@@ -0,0 +1,127 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "errors"
+ "runtime"
+ "sync/atomic"
+ "time"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/go-ethereum/common"
+ "github.com/ava-labs/go-ethereum/common/prque"
+ "github.com/ava-labs/go-ethereum/ethdb"
+ "github.com/ava-labs/go-ethereum/log"
+)
+
+// InitDatabaseFromFreezer reinitializes an empty database from a previous batch
+// of frozen ancient blocks. The method iterates over all the frozen blocks and
+// injects into the database the block hash->number mappings and the transaction
+// lookup entries.
+func InitDatabaseFromFreezer(db ethdb.Database) error {
+ // If we can't access the freezer or it's empty, abort
+ frozen, err := db.Ancients()
+ if err != nil || frozen == 0 {
+ return err
+ }
+ // Blocks were previously frozen; iterate over them and hash them concurrently
+ var (
+ number = ^uint64(0) // -1
+ results = make(chan *types.Block, 4*runtime.NumCPU())
+ )
+ abort := make(chan struct{})
+ defer close(abort)
+
+ for i := 0; i < runtime.NumCPU(); i++ {
+ go func() {
+ for {
+ // Fetch the next task number, terminating if everything's done
+ n := atomic.AddUint64(&number, 1)
+ if n >= frozen {
+ return
+ }
+ // Retrieve the block from the freezer (no need for the hash, we pull by
+ // number from the freezer). If successful, pre-cache the block hash and
+ // the individual transaction hashes for storing into the database.
+ block := ReadBlock(db, common.Hash{}, n)
+ if block != nil {
+ block.Hash()
+ for _, tx := range block.Transactions() {
+ tx.Hash()
+ }
+ }
+ // Feed the block to the aggregator, or abort on interrupt
+ select {
+ case results <- block:
+ case <-abort:
+ return
+ }
+ }
+ }()
+ }
+ // Reassemble the blocks into a contiguous stream and push them out to disk
+ var (
+ queue = prque.New(nil)
+ next = int64(0)
+
+ batch = db.NewBatch()
+ start = time.Now()
+ logged time.Time
+ )
+ for i := uint64(0); i < frozen; i++ {
+ // Retrieve the next result and bail if it's nil
+ block := <-results
+ if block == nil {
+ return errors.New("broken ancient database")
+ }
+ // Push the block into the import queue and process contiguous ranges
+ queue.Push(block, -int64(block.NumberU64()))
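+ // prque pops the highest priority first, so pushing the negated block
+ // number makes the lowest outstanding block number surface first.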
+ for !queue.Empty() {
+ // If the next available item is gapped, break and wait for more results
+ if _, priority := queue.Peek(); -priority != next {
+ break
+ }
+ // Next block available, pop it off and index it
+ block = queue.PopItem().(*types.Block)
+ next++
+
+ // Inject hash<->number mapping and txlookup indexes
+ WriteHeaderNumber(batch, block.Hash(), block.NumberU64())
+ WriteTxLookupEntries(batch, block)
+
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ if batch.ValueSize() > ethdb.IdealBatchSize || uint64(next) == frozen {
+ if err := batch.Write(); err != nil {
+ return err
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Initializing chain from ancient data", "number", block.Number(), "hash", block.Hash(), "total", frozen-1, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ }
+ hash := ReadCanonicalHash(db, frozen-1)
+ WriteHeadHeaderHash(db, hash)
+ WriteHeadFastBlockHash(db, hash)
+
+ log.Info("Initialized chain from ancient data", "number", frozen-1, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
+ return nil
+}
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
new file mode 100644
index 0000000..fc72669
--- /dev/null
+++ b/core/rawdb/freezer_table.go
@@ -0,0 +1,637 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+
+ "github.com/ava-labs/go-ethereum/common"
+ "github.com/ava-labs/go-ethereum/log"
+ "github.com/ava-labs/go-ethereum/metrics"
+ "github.com/golang/snappy"
+)
+
+var (
+ // errClosed is returned if an operation attempts to read from or write to the
+ // freezer table after it has already been closed.
+ errClosed = errors.New("closed")
+
+ // errOutOfBounds is returned if the item requested is not contained within the
+ // freezer table.
+ errOutOfBounds = errors.New("out of bounds")
+
+ // errNotSupported is returned if the database doesn't support the required operation.
+ errNotSupported = errors.New("this operation is not supported")
+)
+
+// indexEntry contains the number/id of the file that the data resides in, as well
+// as the offset within the file to the end of the data.
+// In serialized form, the filenum is stored as uint16.
+type indexEntry struct {
+ filenum uint32 // stored as uint16 ( 2 bytes)
+ offset uint32 // stored as uint32 ( 4 bytes)
+}
+
+const indexEntrySize = 6
+
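+// For illustration, the entry {filenum: 3, offset: 1024} marshals to the six
+// bytes [0x00 0x03 0x00 0x00 0x04 0x00] (big-endian uint16 followed by
+// big-endian uint32):
+//
+//	e := indexEntry{filenum: 3, offset: 1024}
+//	b := e.marshallBinary() // len(b) == indexEntrySize == 6
+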
+// unmarshalBinary deserializes binary b into the indexEntry.
+func (i *indexEntry) unmarshalBinary(b []byte) error {
+ i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
+ i.offset = binary.BigEndian.Uint32(b[2:6])
+ return nil
+}
+
+// marshallBinary serializes the indexEntry into binary.
+func (i *indexEntry) marshallBinary() []byte {
+ b := make([]byte, indexEntrySize)
+ binary.BigEndian.PutUint16(b[:2], uint16(i.filenum))
+ binary.BigEndian.PutUint32(b[2:6], i.offset)
+ return b
+}
+
+// freezerTable represents a single chained data table within the freezer (e.g. blocks).
+// It consists of a data file (snappy encoded arbitrary data blobs) and an indexEntry
+// file (uncompressed 6-byte index entries pointing into the data file).
+type freezerTable struct {
+ // WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
+ // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
+ // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
+ items uint64 // Number of items stored in the table (including items removed from tail)
+
+ noCompression bool // if true, disables snappy compression. Note: does not work retroactively
+ maxFileSize uint32 // Max file size for data-files
+ name string
+ path string
+
+ head *os.File // File descriptor for the data head of the table
+ files map[uint32]*os.File // open files
+ headId uint32 // number of the currently active head file
+ tailId uint32 // number of the earliest file
+ index *os.File // File descriptor for the indexEntry file of the table
+
+ // In the case that old items are deleted (from the tail), we use itemOffset
+ // to count how many historic items have gone missing.
+ itemOffset uint32 // Offset (number of discarded items)
+
+ headBytes uint32 // Number of bytes written to the head file
+ readMeter metrics.Meter // Meter for measuring the effective amount of data read
+ writeMeter metrics.Meter // Meter for measuring the effective amount of data written
+ sizeCounter metrics.Counter // Counter for tracking the combined size of all freezer tables
+
+ logger log.Logger // Logger with database path and table name embedded
+ lock sync.RWMutex // Mutex protecting the data file descriptors
+}
+
+// newTable opens a freezer table with default settings - 2G files
+func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeCounter metrics.Counter, disableSnappy bool) (*freezerTable, error) {
+ return newCustomTable(path, name, readMeter, writeMeter, sizeCounter, 2*1000*1000*1000, disableSnappy)
+}
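+
+// A hypothetical caller would open a table roughly as follows (the path, name
+// and metrics values are illustrative):
+//
+//	t, err := newTable("/data/ancient", "headers",
+//		metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter(), false)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer t.Close()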
+
+// openFreezerFileForAppend opens a freezer table file and seeks to the end
+func openFreezerFileForAppend(filename string) (*os.File, error) {
+ // Open the file without the O_APPEND flag
+ // because it has differing behaviour during Truncate operations
+ // on different OS's
+ file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, err
+ }
+ // Seek to end for append
+ if _, err = file.Seek(0, io.SeekEnd); err != nil {
+ return nil, err
+ }
+ return file, nil
+}
+
+// openFreezerFileForReadOnly opens a freezer table file for read only access
+func openFreezerFileForReadOnly(filename string) (*os.File, error) {
+ return os.OpenFile(filename, os.O_RDONLY, 0644)
+}
+
+// openFreezerFileTruncated opens a freezer table making sure it is truncated
+func openFreezerFileTruncated(filename string) (*os.File, error) {
+ return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+}
+
+// truncateFreezerFile resizes a freezer table file and seeks to the end
+func truncateFreezerFile(file *os.File, size int64) error {
+ if err := file.Truncate(size); err != nil {
+ return err
+ }
+ // Seek to end for append
+ if _, err := file.Seek(0, io.SeekEnd); err != nil {
+ return err
+ }
+ return nil
+}
+
+// newCustomTable opens a freezer table, creating the data and index files if they
+// are non-existent. Both files are truncated to the shortest common length to
+// ensure they don't go out of sync.
+func newCustomTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeCounter metrics.Counter, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
+ // Ensure the containing directory exists and open the indexEntry file
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return nil, err
+ }
+ var idxName string
+ if noCompression {
+ // Raw idx
+ idxName = fmt.Sprintf("%s.ridx", name)
+ } else {
+ // Compressed idx
+ idxName = fmt.Sprintf("%s.cidx", name)
+ }
+ offsets, err := openFreezerFileForAppend(filepath.Join(path, idxName))
+ if err != nil {
+ return nil, err
+ }
+ // Create the table and repair any past inconsistency
+ tab := &freezerTable{
+ index: offsets,
+ files: make(map[uint32]*os.File),
+ readMeter: readMeter,
+ writeMeter: writeMeter,
+ sizeCounter: sizeCounter,
+ name: name,
+ path: path,
+ logger: log.New("database", path, "table", name),
+ noCompression: noCompression,
+ maxFileSize: maxFilesize,
+ }
+ if err := tab.repair(); err != nil {
+ tab.Close()
+ return nil, err
+ }
+ // Initialize the starting size counter
+ size, err := tab.sizeNolock()
+ if err != nil {
+ tab.Close()
+ return nil, err
+ }
+ tab.sizeCounter.Inc(int64(size))
+
+ return tab, nil
+}
+
+// repair cross-checks the head and the index file and truncates them to
+// be in sync with each other after a potential crash / data loss.
+func (t *freezerTable) repair() error {
+ // Create a temporary offset buffer to init files with and read indexEntry into
+ buffer := make([]byte, indexEntrySize)
+
+ // If we've just created the files, initialize the index with the 0 indexEntry
+ stat, err := t.index.Stat()
+ if err != nil {
+ return err
+ }
+ if stat.Size() == 0 {
+ if _, err := t.index.Write(buffer); err != nil {
+ return err
+ }
+ }
+ // Ensure the index is a multiple of indexEntrySize bytes
+ if overflow := stat.Size() % indexEntrySize; overflow != 0 {
+ truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
+ }
+ // Retrieve the file sizes and prepare for truncation
+ if stat, err = t.index.Stat(); err != nil {
+ return err
+ }
+ offsetsSize := stat.Size()
+
+ // Open the head file
+ var (
+ firstIndex indexEntry
+ lastIndex indexEntry
+ contentSize int64
+ contentExp int64
+ )
+ // Read index zero, determine what file is the earliest
+ // and what item offset to use
+ t.index.ReadAt(buffer, 0)
+ firstIndex.unmarshalBinary(buffer)
+
+ t.tailId = firstIndex.offset
+ t.itemOffset = firstIndex.filenum
+
+ t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+ lastIndex.unmarshalBinary(buffer)
+ t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
+ if err != nil {
+ return err
+ }
+ if stat, err = t.head.Stat(); err != nil {
+ return err
+ }
+ contentSize = stat.Size()
+
+ // Keep truncating both files until they come in sync
+ contentExp = int64(lastIndex.offset)
+
+ for contentExp != contentSize {
+ // Truncate the head file to the last offset pointer
+ if contentExp < contentSize {
+ t.logger.Warn("Truncating dangling head", "indexed", common.StorageSize(contentExp), "stored", common.StorageSize(contentSize))
+ if err := truncateFreezerFile(t.head, contentExp); err != nil {
+ return err
+ }
+ contentSize = contentExp
+ }
+ // Truncate the index to point within the head file
+ if contentExp > contentSize {
+ t.logger.Warn("Truncating dangling indexes", "indexed", common.StorageSize(contentExp), "stored", common.StorageSize(contentSize))
+ if err := truncateFreezerFile(t.index, offsetsSize-indexEntrySize); err != nil {
+ return err
+ }
+ offsetsSize -= indexEntrySize
+ t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+ var newLastIndex indexEntry
+ newLastIndex.unmarshalBinary(buffer)
+ // We might have slipped back into an earlier head-file here
+ if newLastIndex.filenum != lastIndex.filenum {
+ // Release earlier opened file
+ t.releaseFile(lastIndex.filenum)
+ if t.head, err = t.openFile(newLastIndex.filenum, openFreezerFileForAppend); err != nil {
+ return err
+ }
+ if stat, err = t.head.Stat(); err != nil {
+ // TODO, anything more we can do here?
+ // A data file has gone missing...
+ return err
+ }
+ contentSize = stat.Size()
+ }
+ lastIndex = newLastIndex
+ contentExp = int64(lastIndex.offset)
+ }
+ }
+ // Ensure all repair changes have been written to disk
+ if err := t.index.Sync(); err != nil {
+ return err
+ }
+ if err := t.head.Sync(); err != nil {
+ return err
+ }
+ // Update the item and byte counters and return
+ t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
+ t.headBytes = uint32(contentSize)
+ t.headId = lastIndex.filenum
+
+ // Close opened files and preopen all files
+ if err := t.preopen(); err != nil {
+ return err
+ }
+ t.logger.Debug("Chain freezer table opened", "items", t.items, "size", common.StorageSize(t.headBytes))
+ return nil
+}
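+
+// As a concrete example of the truncation loop above: if the last indexEntry
+// points to offset 100 but the data file holds 120 bytes (a torn write), the
+// head file is truncated back to 100; conversely, if the data file holds only
+// 80 bytes, index entries are dropped until the last one points back inside
+// the file.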
+
+// preopen opens all files that the freezer will need. This method should be called
+// from an init-context, since it assumes that no locking is required.
+// The rationale for preopening is to avoid opening files from within Retrieve,
+// so that Retrieve never needs to obtain a write-lock.
+func (t *freezerTable) preopen() (err error) {
+ // The repair might have already opened (some) files
+ t.releaseFilesAfter(0, false)
+ // Open all except head in RDONLY
+ for i := t.tailId; i < t.headId; i++ {
+ if _, err = t.openFile(i, openFreezerFileForReadOnly); err != nil {
+ return err
+ }
+ }
+ // Open head in read/write
+ t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
+ return err
+}
+
+// truncate discards any recent data above the provided threshold number.
+func (t *freezerTable) truncate(items uint64) error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ // If our item count is correct, don't do anything
+ if atomic.LoadUint64(&t.items) <= items {
+ return nil
+ }
+ // We need to truncate, save the old size for metrics tracking
+ oldSize, err := t.sizeNolock()
+ if err != nil {
+ return err
+ }
+ // Something's out of sync, truncate the table's offset index
+ t.logger.Warn("Truncating freezer table", "items", t.items, "limit", items)
+ if err := truncateFreezerFile(t.index, int64(items+1)*indexEntrySize); err != nil {
+ return err
+ }
+ // Calculate the new expected size of the data file and truncate it
+ buffer := make([]byte, indexEntrySize)
+ if _, err := t.index.ReadAt(buffer, int64(items*indexEntrySize)); err != nil {
+ return err
+ }
+ var expected indexEntry
+ expected.unmarshalBinary(buffer)
+
+ // We might need to truncate back to older files
+ if expected.filenum != t.headId {
+ // If already open for reading, force-reopen for writing
+ t.releaseFile(expected.filenum)
+ newHead, err := t.openFile(expected.filenum, openFreezerFileForAppend)
+ if err != nil {
+ return err
+ }
+ // Release any files _after the current head -- both the previous head
+ // and any files which may have been opened for reading
+ t.releaseFilesAfter(expected.filenum, true)
+ // Set back the historic head
+ t.head = newHead
+ atomic.StoreUint32(&t.headId, expected.filenum)
+ }
+ if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
+ return err
+ }
+ // All data files truncated, set internal counters and return
+ atomic.StoreUint64(&t.items, items)
+ atomic.StoreUint32(&t.headBytes, expected.offset)
+
+ // Retrieve the new size and update the total size counter
+ newSize, err := t.sizeNolock()
+ if err != nil {
+ return err
+ }
+ t.sizeCounter.Dec(int64(oldSize - newSize))
+
+ return nil
+}
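+
+// To illustrate: truncate(100) keeps items 0..99. The index is cut back to 101
+// entries and entry 100 (the end marker of item 99) determines the new head
+// offset; if that entry points into an older data file, all newer files are
+// released and removed, and the older file becomes the head again.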
+
+// Close closes all opened files.
+func (t *freezerTable) Close() error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ var errs []error
+ if err := t.index.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ t.index = nil
+
+ for _, f := range t.files {
+ if err := f.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ t.head = nil
+
+ if errs != nil {
+ return fmt.Errorf("%v", errs)
+ }
+ return nil
+}
+
+// openFile assumes that the write-lock is held by the caller
+func (t *freezerTable) openFile(num uint32, opener func(string) (*os.File, error)) (f *os.File, err error) {
+ var exist bool
+ if f, exist = t.files[num]; !exist {
+ var name string
+ if t.noCompression {
+ name = fmt.Sprintf("%s.%04d.rdat", t.name, num)
+ } else {
+ name = fmt.Sprintf("%s.%04d.cdat", t.name, num)
+ }
+ f, err = opener(filepath.Join(t.path, name))
+ if err != nil {
+ return nil, err
+ }
+ t.files[num] = f
+ }
+ return f, err
+}
+
+// releaseFile closes a file, and removes it from the open file cache.
+// Assumes that the caller holds the write lock
+func (t *freezerTable) releaseFile(num uint32) {
+ if f, exist := t.files[num]; exist {
+ delete(t.files, num)
+ f.Close()
+ }
+}
+
+// releaseFilesAfter closes all open files with a higher number, and optionally also deletes the files
+func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
+ for fnum, f := range t.files {
+ if fnum > num {
+ delete(t.files, fnum)
+ f.Close()
+ if remove {
+ os.Remove(f.Name())
+ }
+ }
+ }
+}
+
+// Append injects a binary blob at the end of the freezer table. The item number
+// is a precautionary parameter to ensure data correctness, but the table will
+// reject already existing data.
+//
+// Note, this method will *not* flush any data to disk so be sure to explicitly
+// fsync before irreversibly deleting data from the database.
+func (t *freezerTable) Append(item uint64, blob []byte) error {
+ // Read lock prevents competition with truncate
+ t.lock.RLock()
+ // Ensure the table is still accessible
+ if t.index == nil || t.head == nil {
+ t.lock.RUnlock()
+ return errClosed
+ }
+ // Ensure only the next item can be written, nothing else
+ if atomic.LoadUint64(&t.items) != item {
+ t.lock.RUnlock()
+ return fmt.Errorf("appending unexpected item: want %d, have %d", t.items, item)
+ }
+ // Encode the blob and write it into the data file
+ if !t.noCompression {
+ blob = snappy.Encode(nil, blob)
+ }
+ bLen := uint32(len(blob))
+ if t.headBytes+bLen < bLen ||
+ t.headBytes+bLen > t.maxFileSize {
+ // we need a new file, writing would overflow
+ t.lock.RUnlock()
+ t.lock.Lock()
+ nextID := atomic.LoadUint32(&t.headId) + 1
+ // We open the next file in truncated mode -- if this file already
+ // exists, we need to start over from scratch on it
+ newHead, err := t.openFile(nextID, openFreezerFileTruncated)
+ if err != nil {
+ t.lock.Unlock()
+ return err
+ }
+ // Close old file, and reopen in RDONLY mode
+ t.releaseFile(t.headId)
+ t.openFile(t.headId, openFreezerFileForReadOnly)
+
+ // Swap out the current head
+ t.head = newHead
+ atomic.StoreUint32(&t.headBytes, 0)
+ atomic.StoreUint32(&t.headId, nextID)
+ t.lock.Unlock()
+ t.lock.RLock()
+ }
+
+ defer t.lock.RUnlock()
+ if _, err := t.head.Write(blob); err != nil {
+ return err
+ }
+ newOffset := atomic.AddUint32(&t.headBytes, bLen)
+ idx := indexEntry{
+ filenum: atomic.LoadUint32(&t.headId),
+ offset: newOffset,
+ }
+ // Write indexEntry
+ t.index.Write(idx.marshallBinary())
+
+ t.writeMeter.Mark(int64(bLen + indexEntrySize))
+ t.sizeCounter.Inc(int64(bLen + indexEntrySize))
+
+ atomic.AddUint64(&t.items, 1)
+ return nil
+}
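+
+// For illustration, appends must be strictly sequential:
+//
+//	// assuming a fresh table, i.e. t.items == 0
+//	_ = t.Append(0, []byte("first")) // accepted, items becomes 1
+//	err := t.Append(2, []byte("gap")) // rejected: want 1, have 2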
+
+// getBounds returns the start offset, end offset and file number of the data
+// belonging to the given item.
+func (t *freezerTable) getBounds(item uint64) (uint32, uint32, uint32, error) {
+ var startIdx, endIdx indexEntry
+ buffer := make([]byte, indexEntrySize)
+ if _, err := t.index.ReadAt(buffer, int64(item*indexEntrySize)); err != nil {
+ return 0, 0, 0, err
+ }
+ startIdx.unmarshalBinary(buffer)
+ if _, err := t.index.ReadAt(buffer, int64((item+1)*indexEntrySize)); err != nil {
+ return 0, 0, 0, err
+ }
+ endIdx.unmarshalBinary(buffer)
+ if startIdx.filenum != endIdx.filenum {
+ // If a piece of data 'crosses' a data-file,
+ // it's actually in one piece on the second data-file.
+ // We return a zero-indexEntry for the second file as start
+ return 0, endIdx.offset, endIdx.filenum, nil
+ }
+ return startIdx.offset, endIdx.offset, endIdx.filenum, nil
+}
+
+// Retrieve looks up the data offset of an item with the given number and retrieves
+// the raw binary blob from the data file.
+func (t *freezerTable) Retrieve(item uint64) ([]byte, error) {
+ // Ensure the table and the item is accessible
+ if t.index == nil || t.head == nil {
+ return nil, errClosed
+ }
+ if atomic.LoadUint64(&t.items) <= item {
+ return nil, errOutOfBounds
+ }
+ // Ensure the item was not deleted from the tail either
+ offset := atomic.LoadUint32(&t.itemOffset)
+ if uint64(offset) > item {
+ return nil, errOutOfBounds
+ }
+ t.lock.RLock()
+ startOffset, endOffset, filenum, err := t.getBounds(item - uint64(offset))
+ if err != nil {
+ t.lock.RUnlock()
+ return nil, err
+ }
+ dataFile, exist := t.files[filenum]
+ if !exist {
+ t.lock.RUnlock()
+ return nil, fmt.Errorf("missing data file %d", filenum)
+ }
+ // Retrieve the data itself, decompress and return
+ blob := make([]byte, endOffset-startOffset)
+ if _, err := dataFile.ReadAt(blob, int64(startOffset)); err != nil {
+ t.lock.RUnlock()
+ return nil, err
+ }
+ t.lock.RUnlock()
+ t.readMeter.Mark(int64(len(blob) + 2*indexEntrySize))
+
+ if t.noCompression {
+ return blob, nil
+ }
+ return snappy.Decode(nil, blob)
+}
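+
+// A minimal usage sketch (the blob contents depend on what was appended):
+//
+//	if t.has(5) {
+//		blob, err := t.Retrieve(5) // decompressed raw bytes of item 5
+//		_, _ = blob, err
+//	}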
+
+// has returns true if an item with the given number exists in the freezer table.
+func (t *freezerTable) has(number uint64) bool {
+ return atomic.LoadUint64(&t.items) > number
+}
+
+// size returns the total data size in the freezer table.
+func (t *freezerTable) size() (uint64, error) {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ return t.sizeNolock()
+}
+
+// sizeNolock returns the total data size in the freezer table without obtaining
+// the mutex first.
+func (t *freezerTable) sizeNolock() (uint64, error) {
+ stat, err := t.index.Stat()
+ if err != nil {
+ return 0, err
+ }
+ total := uint64(t.maxFileSize)*uint64(t.headId-t.tailId) + uint64(t.headBytes) + uint64(stat.Size())
+ return total, nil
+}
+
+// Sync pushes any pending data from memory out to disk. This is an expensive
+// operation, so use it with care.
+func (t *freezerTable) Sync() error {
+ if err := t.index.Sync(); err != nil {
+ return err
+ }
+ return t.head.Sync()
+}
+
+// printIndex is a debug print utility function for testing
+func (t *freezerTable) printIndex() {
+ buf := make([]byte, indexEntrySize)
+
+ fmt.Printf("|-----------------|\n")
+ fmt.Printf("| fileno | offset |\n")
+ fmt.Printf("|--------+--------|\n")
+
+ for i := uint64(0); ; i++ {
+ if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
+ break
+ }
+ var entry indexEntry
+ entry.unmarshalBinary(buf)
+ fmt.Printf("| %03d | %03d | \n", entry.filenum, entry.offset)
+ if i > 100 {
+ fmt.Printf(" ... \n")
+ break
+ }
+ }
+ fmt.Printf("|-----------------|\n")
+}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
new file mode 100644
index 0000000..51d1f66
--- /dev/null
+++ b/core/rawdb/schema.go
@@ -0,0 +1,166 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package rawdb contains a collection of low level database accessors.
+package rawdb
+
+import (
+ "encoding/binary"
+
+ "github.com/ava-labs/go-ethereum/common"
+ "github.com/ava-labs/go-ethereum/metrics"
+)
+
+// The fields below define the low level database schema prefixing.
+var (
+ // databaseVerisionKey tracks the current database version.
+ databaseVerisionKey = []byte("DatabaseVersion")
+
+ // headHeaderKey tracks the latest known header's hash.
+ headHeaderKey = []byte("LastHeader")
+
+ // headBlockKey tracks the latest known full block's hash.
+ headBlockKey = []byte("LastBlock")
+
+ // headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
+ headFastBlockKey = []byte("LastFast")
+
+ // fastTrieProgressKey tracks the number of trie entries imported during fast sync.
+ fastTrieProgressKey = []byte("TrieSync")
+
+ // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
+ headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
+ headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
+ headerHashSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + headerHashSuffix -> hash
+ headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian)
+
+ blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body
+ blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
+
+ txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
+ bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
+
+ preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage
+ configPrefix = []byte("ethereum-config-") // config prefix for the db
+
+ // Chain index prefixes (use `i` + single byte to avoid mixing data types).
+ BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
+
+ preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
+ preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
+)
+
+const (
+ // freezerHeaderTable indicates the name of the freezer header table.
+ freezerHeaderTable = "headers"
+
+ // freezerHashTable indicates the name of the freezer canonical hash table.
+ freezerHashTable = "hashes"
+
+ // freezerBodiesTable indicates the name of the freezer block body table.
+ freezerBodiesTable = "bodies"
+
+ // freezerReceiptTable indicates the name of the freezer receipts table.
+ freezerReceiptTable = "receipts"
+
+ // freezerDifficultyTable indicates the name of the freezer total difficulty table.
+ freezerDifficultyTable = "diffs"
+)
+
+// freezerNoSnappy configures whether compression is disabled for the ancient-tables.
+// Hashes and difficulties don't compress well.
+var freezerNoSnappy = map[string]bool{
+ freezerHeaderTable: false,
+ freezerHashTable: true,
+ freezerBodiesTable: false,
+ freezerReceiptTable: false,
+ freezerDifficultyTable: true,
+}
+
+// LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary
+// fields.
+type LegacyTxLookupEntry struct {
+ BlockHash common.Hash
+ BlockIndex uint64
+ Index uint64
+}
+
+// encodeBlockNumber encodes a block number as big endian uint64
+func encodeBlockNumber(number uint64) []byte {
+ enc := make([]byte, 8)
+ binary.BigEndian.PutUint64(enc, number)
+ return enc
+}
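+
+// For example, block number 1 encodes as the 8 bytes
+// [0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x01].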
+
+// headerKeyPrefix = headerPrefix + num (uint64 big endian)
+func headerKeyPrefix(number uint64) []byte {
+ return append(headerPrefix, encodeBlockNumber(number)...)
+}
+
+// headerKey = headerPrefix + num (uint64 big endian) + hash
+func headerKey(number uint64, hash common.Hash) []byte {
+ return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
+
+// headerTDKey = headerPrefix + num (uint64 big endian) + hash + headerTDSuffix
+func headerTDKey(number uint64, hash common.Hash) []byte {
+ return append(headerKey(number, hash), headerTDSuffix...)
+}
+
+// headerHashKey = headerPrefix + num (uint64 big endian) + headerHashSuffix
+func headerHashKey(number uint64) []byte {
+ return append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)
+}
+
+// headerNumberKey = headerNumberPrefix + hash
+func headerNumberKey(hash common.Hash) []byte {
+ return append(headerNumberPrefix, hash.Bytes()...)
+}
+
+// blockBodyKey = blockBodyPrefix + num (uint64 big endian) + hash
+func blockBodyKey(number uint64, hash common.Hash) []byte {
+ return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
+
+// blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash
+func blockReceiptsKey(number uint64, hash common.Hash) []byte {
+ return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
+
+// txLookupKey = txLookupPrefix + hash
+func txLookupKey(hash common.Hash) []byte {
+ return append(txLookupPrefix, hash.Bytes()...)
+}
+
+// bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash
+func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
+ key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...)
+
+ binary.BigEndian.PutUint16(key[1:], uint16(bit))
+ binary.BigEndian.PutUint64(key[3:], section)
+
+ return key
+}
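+
+// For illustration, bloomBitsKey(1, 2, hash) produces a 43-byte key:
+// 'B' + 0x0001 (bit) + 0x0000000000000002 (section) + the 32-byte hash.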
+
+// preimageKey = preimagePrefix + hash
+func preimageKey(hash common.Hash) []byte {
+ return append(preimagePrefix, hash.Bytes()...)
+}
+
+// configKey = configPrefix + hash
+func configKey(hash common.Hash) []byte {
+ return append(configPrefix, hash.Bytes()...)
+}
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
new file mode 100644
index 0000000..f9078e8
--- /dev/null
+++ b/core/rawdb/table.go
@@ -0,0 +1,204 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "github.com/ava-labs/go-ethereum/ethdb"
+)
+
+// table is a wrapper around a database that prefixes each key access with a pre-
+// configured string.
+type table struct {
+ db ethdb.Database
+ prefix string
+}
+
+// NewTable returns a database object that prefixes all keys with a given string.
+func NewTable(db ethdb.Database, prefix string) ethdb.Database {
+ return &table{
+ db: db,
+ prefix: prefix,
+ }
+}
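+
+// A minimal usage sketch (the prefix is illustrative):
+//
+//	tdb := rawdb.NewTable(db, "mytable-")
+//	_ = tdb.Put([]byte("key"), []byte("value")) // stored under "mytable-key"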
+
+// Close is a noop to implement the Database interface.
+func (t *table) Close() error {
+ return nil
+}
+
+// Has retrieves if a prefixed version of a key is present in the database.
+func (t *table) Has(key []byte) (bool, error) {
+ return t.db.Has(append([]byte(t.prefix), key...))
+}
+
+// Get retrieves the given prefixed key if it's present in the database.
+func (t *table) Get(key []byte) ([]byte, error) {
+ return t.db.Get(append([]byte(t.prefix), key...))
+}
+
+// HasAncient is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) HasAncient(kind string, number uint64) (bool, error) {
+ return t.db.HasAncient(kind, number)
+}
+
+// Ancient is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) Ancient(kind string, number uint64) ([]byte, error) {
+ return t.db.Ancient(kind, number)
+}
+
+// Ancients is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) Ancients() (uint64, error) {
+ return t.db.Ancients()
+}
+
+// AncientSize is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) AncientSize(kind string) (uint64, error) {
+ return t.db.AncientSize(kind)
+}
+
+// AppendAncient is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
+ return t.db.AppendAncient(number, hash, header, body, receipts, td)
+}
+
+// TruncateAncients is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) TruncateAncients(items uint64) error {
+ return t.db.TruncateAncients(items)
+}
+
+// Sync is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) Sync() error {
+ return t.db.Sync()
+}
+
+// Put inserts the given value into the database at a prefixed version of the
+// provided key.
+func (t *table) Put(key []byte, value []byte) error {
+ return t.db.Put(append([]byte(t.prefix), key...), value)
+}
+
+// Delete removes the given prefixed key from the database.
+func (t *table) Delete(key []byte) error {
+ return t.db.Delete(append([]byte(t.prefix), key...))
+}
+
+// NewIterator creates a binary-alphabetical iterator over the entire keyspace
+// contained within the database.
+func (t *table) NewIterator() ethdb.Iterator {
+ return t.NewIteratorWithPrefix(nil)
+}
+
+// NewIteratorWithStart creates a binary-alphabetical iterator over a subset of
+// database content starting at a particular initial key (or after, if it does
+// not exist).
+func (t *table) NewIteratorWithStart(start []byte) ethdb.Iterator {
+ return t.db.NewIteratorWithStart(start)
+}
+
+// NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
+// of database content with a particular key prefix.
+func (t *table) NewIteratorWithPrefix(prefix []byte) ethdb.Iterator {
+ return t.db.NewIteratorWithPrefix(append([]byte(t.prefix), prefix...))
+}
+
+// Stat returns a particular internal stat of the database.
+func (t *table) Stat(property string) (string, error) {
+ return t.db.Stat(property)
+}
+
+// Compact flattens the underlying data store for the given key range. In essence,
+// deleted and overwritten versions are discarded, and the data is rearranged to
+// reduce the cost of operations needed to access them.
+//
+// A nil start is treated as a key before all keys in the data store; a nil limit
+// is treated as a key after all keys in the data store. If both are nil, the
+// entire data store will be compacted.
+func (t *table) Compact(start []byte, limit []byte) error {
+ // If no start was specified, use the table prefix as the first value
+ if start == nil {
+ start = []byte(t.prefix)
+ }
+ // If no limit was specified, use the first element not matching the prefix
+ // as the limit
+ if limit == nil {
+ limit = []byte(t.prefix)
+ for i := len(limit) - 1; i >= 0; i-- {
+ // Bump the current character, stopping if it doesn't overflow
+ limit[i]++
+ if limit[i] > 0 {
+ break
+ }
+ // Character overflown, proceed to the next or nil if the last
+ if i == 0 {
+ limit = nil
+ }
+ }
+ }
+ // Range correctly calculated based on table prefix, delegate down
+ return t.db.Compact(start, limit)
+}
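+
+// For example, with prefix "abc" the computed limit is "abd", so the compacted
+// range covers exactly the keys beginning with "abc". A prefix consisting only
+// of 0xff bytes overflows to a nil limit, i.e. compaction runs to the end of
+// the keyspace.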
+
+// NewBatch creates a write-only database that buffers changes to its host db
+// until a final write is called, each operation prefixing all keys with the
+// pre-configured string.
+func (t *table) NewBatch() ethdb.Batch {
+ return &tableBatch{t.db.NewBatch(), t.prefix}
+}
+
+// tableBatch is a wrapper around a database batch that prefixes each key access
+// with a pre-configured string.
+type tableBatch struct {
+ batch ethdb.Batch
+ prefix string
+}
+
+// Put inserts the given value into the batch for later committing.
+func (b *tableBatch) Put(key, value []byte) error {
+ return b.batch.Put(append([]byte(b.prefix), key...), value)
+}
+
+// Delete inserts a key removal into the batch for later committing.
+func (b *tableBatch) Delete(key []byte) error {
+ return b.batch.Delete(append([]byte(b.prefix), key...))
+}
+
+// ValueSize retrieves the amount of data queued up for writing.
+func (b *tableBatch) ValueSize() int {
+ return b.batch.ValueSize()
+}
+
+// Write flushes any accumulated data to disk.
+func (b *tableBatch) Write() error {
+ return b.batch.Write()
+}
+
+// Reset resets the batch for reuse.
+func (b *tableBatch) Reset() {
+ b.batch.Reset()
+}
+
+// Replay replays the batch contents.
+func (b *tableBatch) Replay(w ethdb.KeyValueWriter) error {
+ return b.batch.Replay(w)
+}