From d235e2c6a5788ec4a6cff15a16f56b38a3876a0d Mon Sep 17 00:00:00 2001 From: Determinant Date: Sun, 28 Jun 2020 14:47:41 -0400 Subject: ... --- consensus/clique/api.go | 119 +++++ consensus/clique/clique.go | 738 ++++++++++++++++++++++++++ consensus/clique/snapshot.go | 326 ++++++++++++ consensus/consensus.go | 125 +++++ consensus/dummy/consensus.go | 15 +- consensus/errors.go | 37 ++ consensus/ethash/algorithm.go | 1148 +++++++++++++++++++++++++++++++++++++++++ consensus/ethash/api.go | 118 +++++ consensus/ethash/consensus.go | 637 +++++++++++++++++++++++ consensus/ethash/ethash.go | 717 +++++++++++++++++++++++++ consensus/ethash/sealer.go | 371 +++++++++++++ consensus/misc/dao.go | 85 +++ consensus/misc/forks.go | 43 ++ 13 files changed, 4471 insertions(+), 8 deletions(-) create mode 100644 consensus/clique/api.go create mode 100644 consensus/clique/clique.go create mode 100644 consensus/clique/snapshot.go create mode 100644 consensus/consensus.go create mode 100644 consensus/errors.go create mode 100644 consensus/ethash/algorithm.go create mode 100644 consensus/ethash/api.go create mode 100644 consensus/ethash/consensus.go create mode 100644 consensus/ethash/ethash.go create mode 100644 consensus/ethash/sealer.go create mode 100644 consensus/misc/dao.go create mode 100644 consensus/misc/forks.go (limited to 'consensus') diff --git a/consensus/clique/api.go b/consensus/clique/api.go new file mode 100644 index 0000000..04e74eb --- /dev/null +++ b/consensus/clique/api.go @@ -0,0 +1,119 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package clique + +import ( + "github.com/ava-labs/coreth/consensus" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/rpc" + "github.com/ava-labs/go-ethereum/common" +) + +// API is a user facing RPC API to allow controlling the signer and voting +// mechanisms of the proof-of-authority scheme. +type API struct { + chain consensus.ChainReader + clique *Clique +} + +// GetSnapshot retrieves the state snapshot at a given block. +func (api *API) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) { + // Retrieve the requested block number (or current if none requested) + var header *types.Header + if number == nil || *number == rpc.LatestBlockNumber { + header = api.chain.CurrentHeader() + } else { + header = api.chain.GetHeaderByNumber(uint64(number.Int64())) + } + // Ensure we have an actually valid block and return its snapshot + if header == nil { + return nil, errUnknownBlock + } + return api.clique.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil) +} + +// GetSnapshotAtHash retrieves the state snapshot at a given block. 
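// Editor's note: an illustrative sketch, not part of this patch. It shows how a signer node
// could drive the voting methods defined further down in this file (Propose, Proposals,
// Discard) from Go; over RPC the same calls are exposed under the "clique" namespace that
// clique.go registers via APIs(). The function name is hypothetical.
func proposeAndWithdraw(api *API, candidate common.Address) {
	api.Propose(candidate, true) // blocks sealed by this node will now vote to authorize candidate
	_ = api.Proposals()          // map[candidate:true] — the proposals this node currently upholds
	api.Discard(candidate)       // stop voting; votes already sealed into blocks are not retracted
}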
+func (api *API) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) { + header := api.chain.GetHeaderByHash(hash) + if header == nil { + return nil, errUnknownBlock + } + return api.clique.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil) +} + +// GetSigners retrieves the list of authorized signers at the specified block. +func (api *API) GetSigners(number *rpc.BlockNumber) ([]common.Address, error) { + // Retrieve the requested block number (or current if none requested) + var header *types.Header + if number == nil || *number == rpc.LatestBlockNumber { + header = api.chain.CurrentHeader() + } else { + header = api.chain.GetHeaderByNumber(uint64(number.Int64())) + } + // Ensure we have an actually valid block and return the signers from its snapshot + if header == nil { + return nil, errUnknownBlock + } + snap, err := api.clique.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil) + if err != nil { + return nil, err + } + return snap.signers(), nil +} + +// GetSignersAtHash retrieves the list of authorized signers at the specified block. +func (api *API) GetSignersAtHash(hash common.Hash) ([]common.Address, error) { + header := api.chain.GetHeaderByHash(hash) + if header == nil { + return nil, errUnknownBlock + } + snap, err := api.clique.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil) + if err != nil { + return nil, err + } + return snap.signers(), nil +} + +// Proposals returns the current proposals the node tries to uphold and vote on. +func (api *API) Proposals() map[common.Address]bool { + api.clique.lock.RLock() + defer api.clique.lock.RUnlock() + + proposals := make(map[common.Address]bool) + for address, auth := range api.clique.proposals { + proposals[address] = auth + } + return proposals +} + +// Propose injects a new authorization proposal that the signer will attempt to +// push through. +func (api *API) Propose(address common.Address, auth bool) { + api.clique.lock.Lock() + defer api.clique.lock.Unlock() + + api.clique.proposals[address] = auth +} + +// Discard drops a currently running proposal, stopping the signer from casting +// further votes (either for or against). +func (api *API) Discard(address common.Address) { + api.clique.lock.Lock() + defer api.clique.lock.Unlock() + + delete(api.clique.proposals, address) +} diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go new file mode 100644 index 0000000..c97e198 --- /dev/null +++ b/consensus/clique/clique.go @@ -0,0 +1,738 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package clique implements the proof-of-authority consensus engine. 
+package clique + +import ( + "bytes" + "errors" + "io" + "math/big" + "math/rand" + "sync" + "time" + + "github.com/ava-labs/coreth/accounts" + "github.com/ava-labs/coreth/consensus" + "github.com/ava-labs/coreth/consensus/misc" + "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/rpc" + "github.com/ava-labs/go-ethereum/common" + "github.com/ava-labs/go-ethereum/common/hexutil" + "github.com/ava-labs/go-ethereum/crypto" + "github.com/ava-labs/go-ethereum/ethdb" + "github.com/ava-labs/go-ethereum/log" + "github.com/ava-labs/go-ethereum/rlp" + lru "github.com/hashicorp/golang-lru" + "golang.org/x/crypto/sha3" +) + +const ( + checkpointInterval = 1024 // Number of blocks after which to save the vote snapshot to the database + inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory + inmemorySignatures = 4096 // Number of recent block signatures to keep in memory + + wiggleTime = 500 * time.Millisecond // Random delay (per signer) to allow concurrent signers +) + +// Clique proof-of-authority protocol constants. +var ( + epochLength = uint64(30000) // Default number of blocks after which to checkpoint and reset the pending votes + + extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity + extraSeal = crypto.SignatureLength // Fixed number of extra-data suffix bytes reserved for signer seal + + nonceAuthVote = hexutil.MustDecode("0xffffffffffffffff") // Magic nonce number to vote on adding a new signer + nonceDropVote = hexutil.MustDecode("0x0000000000000000") // Magic nonce number to vote on removing a signer. + + uncleHash = types.CalcUncleHash(nil) // Always Keccak256(RLP([])) as uncles are meaningless outside of PoW. + + diffInTurn = big.NewInt(2) // Block difficulty for in-turn signatures + diffNoTurn = big.NewInt(1) // Block difficulty for out-of-turn signatures +) + +// Various error messages to mark blocks invalid. These should be private to +// prevent engine specific errors from being referenced in the remainder of the +// codebase, inherently breaking if the engine is swapped out. Please put common +// error types into the consensus package. +var ( + // errUnknownBlock is returned when the list of signers is requested for a block + // that is not part of the local blockchain. + errUnknownBlock = errors.New("unknown block") + + // errInvalidCheckpointBeneficiary is returned if a checkpoint/epoch transition + // block has a beneficiary set to non-zeroes. + errInvalidCheckpointBeneficiary = errors.New("beneficiary in checkpoint block non-zero") + + // errInvalidVote is returned if a nonce value is something else that the two + // allowed constants of 0x00..0 or 0xff..f. + errInvalidVote = errors.New("vote nonce not 0x00..0 or 0xff..f") + + // errInvalidCheckpointVote is returned if a checkpoint/epoch transition block + // has a vote nonce set to non-zeroes. + errInvalidCheckpointVote = errors.New("vote nonce in checkpoint block non-zero") + + // errMissingVanity is returned if a block's extra-data section is shorter than + // 32 bytes, which is required to store the signer vanity. + errMissingVanity = errors.New("extra-data 32 byte vanity prefix missing") + + // errMissingSignature is returned if a block's extra-data section doesn't seem + // to contain a 65 byte secp256k1 signature. 
+ errMissingSignature = errors.New("extra-data 65 byte signature suffix missing") + + // errExtraSigners is returned if non-checkpoint block contain signer data in + // their extra-data fields. + errExtraSigners = errors.New("non-checkpoint block contains extra signer list") + + // errInvalidCheckpointSigners is returned if a checkpoint block contains an + // invalid list of signers (i.e. non divisible by 20 bytes). + errInvalidCheckpointSigners = errors.New("invalid signer list on checkpoint block") + + // errMismatchingCheckpointSigners is returned if a checkpoint block contains a + // list of signers different than the one the local node calculated. + errMismatchingCheckpointSigners = errors.New("mismatching signer list on checkpoint block") + + // errInvalidMixDigest is returned if a block's mix digest is non-zero. + errInvalidMixDigest = errors.New("non-zero mix digest") + + // errInvalidUncleHash is returned if a block contains an non-empty uncle list. + errInvalidUncleHash = errors.New("non empty uncle hash") + + // errInvalidDifficulty is returned if the difficulty of a block neither 1 or 2. + errInvalidDifficulty = errors.New("invalid difficulty") + + // errWrongDifficulty is returned if the difficulty of a block doesn't match the + // turn of the signer. + errWrongDifficulty = errors.New("wrong difficulty") + + // ErrInvalidTimestamp is returned if the timestamp of a block is lower than + // the previous block's timestamp + the minimum block period. + ErrInvalidTimestamp = errors.New("invalid timestamp") + + // errInvalidVotingChain is returned if an authorization list is attempted to + // be modified via out-of-range or non-contiguous headers. + errInvalidVotingChain = errors.New("invalid voting chain") + + // errUnauthorizedSigner is returned if a header is signed by a non-authorized entity. + errUnauthorizedSigner = errors.New("unauthorized signer") + + // errRecentlySigned is returned if a header is signed by an authorized entity + // that already signed a header recently, thus is temporarily not allowed to. + errRecentlySigned = errors.New("recently signed") +) + +// SignerFn is a signer callback function to request a header to be signed by a +// backing account. +type SignerFn func(accounts.Account, string, []byte) ([]byte, error) + +// ecrecover extracts the Ethereum account address from a signed header. +func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, error) { + // If the signature's already cached, return that + hash := header.Hash() + if address, known := sigcache.Get(hash); known { + return address.(common.Address), nil + } + // Retrieve the signature from the header extra-data + if len(header.Extra) < extraSeal { + return common.Address{}, errMissingSignature + } + signature := header.Extra[len(header.Extra)-extraSeal:] + + // Recover the public key and the Ethereum address + pubkey, err := crypto.Ecrecover(SealHash(header).Bytes(), signature) + if err != nil { + return common.Address{}, err + } + var signer common.Address + copy(signer[:], crypto.Keccak256(pubkey[1:])[12:]) + + sigcache.Add(hash, signer) + return signer, nil +} + +// Clique is the proof-of-authority consensus engine proposed to support the +// Ethereum testnet following the Ropsten attacks. 
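// Editor's note: an illustrative sketch, not part of this patch. It makes explicit the
// extra-data layout that ecrecover (above) and verifyHeader rely on:
//
//	extra = 32-byte vanity || N*20-byte signer list (checkpoint blocks only) || 65-byte seal
//
// The helper name splitExtra is hypothetical; the constants and errors are the ones declared above.
func splitExtra(extra []byte) (vanity, signers, seal []byte, err error) {
	if len(extra) < extraVanity+extraSeal {
		return nil, nil, nil, errMissingSignature
	}
	vanity = extra[:extraVanity]                        // arbitrary signer vanity prefix
	seal = extra[len(extra)-extraSeal:]                 // secp256k1 signature over SealHash(header)
	signers = extra[extraVanity : len(extra)-extraSeal] // empty except on epoch checkpoints
	if len(signers)%common.AddressLength != 0 {
		return nil, nil, nil, errInvalidCheckpointSigners
	}
	return vanity, signers, seal, nil
}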
+type Clique struct { + config *params.CliqueConfig // Consensus engine configuration parameters + db ethdb.Database // Database to store and retrieve snapshot checkpoints + + recents *lru.ARCCache // Snapshots for recent block to speed up reorgs + signatures *lru.ARCCache // Signatures of recent blocks to speed up mining + + proposals map[common.Address]bool // Current list of proposals we are pushing + + signer common.Address // Ethereum address of the signing key + signFn SignerFn // Signer function to authorize hashes with + lock sync.RWMutex // Protects the signer fields + + // The fields below are for testing only + fakeDiff bool // Skip difficulty verifications +} + +// New creates a Clique proof-of-authority consensus engine with the initial +// signers set to the ones provided by the user. +func New(config *params.CliqueConfig, db ethdb.Database) *Clique { + // Set any missing consensus parameters to their defaults + conf := *config + if conf.Epoch == 0 { + conf.Epoch = epochLength + } + // Allocate the snapshot caches and create the engine + recents, _ := lru.NewARC(inmemorySnapshots) + signatures, _ := lru.NewARC(inmemorySignatures) + + return &Clique{ + config: &conf, + db: db, + recents: recents, + signatures: signatures, + proposals: make(map[common.Address]bool), + } +} + +// Author implements consensus.Engine, returning the Ethereum address recovered +// from the signature in the header's extra-data section. +func (c *Clique) Author(header *types.Header) (common.Address, error) { + return ecrecover(header, c.signatures) +} + +// VerifyHeader checks whether a header conforms to the consensus rules. +func (c *Clique) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error { + return c.verifyHeader(chain, header, nil) +} + +// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The +// method returns a quit channel to abort the operations and a results channel to +// retrieve the async verifications (the order is that of the input slice). +func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { + abort := make(chan struct{}) + results := make(chan error, len(headers)) + + go func() { + for i, header := range headers { + err := c.verifyHeader(chain, header, headers[:i]) + + select { + case <-abort: + return + case results <- err: + } + } + }() + return abort, results +} + +// verifyHeader checks whether a header conforms to the consensus rules.The +// caller may optionally pass in a batch of parents (ascending order) to avoid +// looking those up from the database. This is useful for concurrently verifying +// a batch of new headers. 
+func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error { + if header.Number == nil { + return errUnknownBlock + } + number := header.Number.Uint64() + + // Don't waste time checking blocks from the future + if header.Time > uint64(time.Now().Unix()) { + return consensus.ErrFutureBlock + } + // Checkpoint blocks need to enforce zero beneficiary + checkpoint := (number % c.config.Epoch) == 0 + if checkpoint && header.Coinbase != (common.Address{}) { + return errInvalidCheckpointBeneficiary + } + // Nonces must be 0x00..0 or 0xff..f, zeroes enforced on checkpoints + if !bytes.Equal(header.Nonce[:], nonceAuthVote) && !bytes.Equal(header.Nonce[:], nonceDropVote) { + return errInvalidVote + } + if checkpoint && !bytes.Equal(header.Nonce[:], nonceDropVote) { + return errInvalidCheckpointVote + } + // Check that the extra-data contains both the vanity and signature + if len(header.Extra) < extraVanity { + return errMissingVanity + } + if len(header.Extra) < extraVanity+extraSeal { + return errMissingSignature + } + // Ensure that the extra-data contains a signer list on checkpoint, but none otherwise + signersBytes := len(header.Extra) - extraVanity - extraSeal + if !checkpoint && signersBytes != 0 { + return errExtraSigners + } + if checkpoint && signersBytes%common.AddressLength != 0 { + return errInvalidCheckpointSigners + } + // Ensure that the mix digest is zero as we don't have fork protection currently + if header.MixDigest != (common.Hash{}) { + return errInvalidMixDigest + } + // Ensure that the block doesn't contain any uncles which are meaningless in PoA + if header.UncleHash != uncleHash { + return errInvalidUncleHash + } + // Ensure that the block's difficulty is meaningful (may not be correct at this point) + if number > 0 { + if header.Difficulty == nil || (header.Difficulty.Cmp(diffInTurn) != 0 && header.Difficulty.Cmp(diffNoTurn) != 0) { + return errInvalidDifficulty + } + } + // If all checks passed, validate any special fields for hard forks + if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil { + return err + } + // All basic checks passed, verify cascading fields + return c.verifyCascadingFields(chain, header, parents) +} + +// verifyCascadingFields verifies all the header fields that are not standalone, +// rather depend on a batch of previous headers. The caller may optionally pass +// in a batch of parents (ascending order) to avoid looking those up from the +// database. This is useful for concurrently verifying a batch of new headers. 
+func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error { + // The genesis block is the always valid dead-end + number := header.Number.Uint64() + if number == 0 { + return nil + } + // Ensure that the block's timestamp isn't too close to it's parent + var parent *types.Header + if len(parents) > 0 { + parent = parents[len(parents)-1] + } else { + parent = chain.GetHeader(header.ParentHash, number-1) + } + if parent == nil || parent.Number.Uint64() != number-1 || parent.Hash() != header.ParentHash { + return consensus.ErrUnknownAncestor + } + if parent.Time+c.config.Period > header.Time { + return ErrInvalidTimestamp + } + // Retrieve the snapshot needed to verify this header and cache it + snap, err := c.snapshot(chain, number-1, header.ParentHash, parents) + if err != nil { + return err + } + // If the block is a checkpoint block, verify the signer list + if number%c.config.Epoch == 0 { + signers := make([]byte, len(snap.Signers)*common.AddressLength) + for i, signer := range snap.signers() { + copy(signers[i*common.AddressLength:], signer[:]) + } + extraSuffix := len(header.Extra) - extraSeal + if !bytes.Equal(header.Extra[extraVanity:extraSuffix], signers) { + return errMismatchingCheckpointSigners + } + } + // All basic checks passed, verify the seal and return + return c.verifySeal(chain, header, parents) +} + +// snapshot retrieves the authorization snapshot at a given point in time. +func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) { + // Search for a snapshot in memory or on disk for checkpoints + var ( + headers []*types.Header + snap *Snapshot + ) + for snap == nil { + // If an in-memory snapshot was found, use that + if s, ok := c.recents.Get(hash); ok { + snap = s.(*Snapshot) + break + } + // If an on-disk checkpoint snapshot can be found, use that + if number%checkpointInterval == 0 { + if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil { + log.Trace("Loaded voting snapshot from disk", "number", number, "hash", hash) + snap = s + break + } + } + // If we're at the genesis, snapshot the initial state. Alternatively if we're + // at a checkpoint block without a parent (light client CHT), or we have piled + // up more headers than allowed to be reorged (chain reinit from a freezer), + // consider the checkpoint trusted and snapshot it. 
+ if number == 0 || (number%c.config.Epoch == 0 && (len(headers) > params.ImmutabilityThreshold || chain.GetHeaderByNumber(number-1) == nil)) { + checkpoint := chain.GetHeaderByNumber(number) + if checkpoint != nil { + hash := checkpoint.Hash() + + signers := make([]common.Address, (len(checkpoint.Extra)-extraVanity-extraSeal)/common.AddressLength) + for i := 0; i < len(signers); i++ { + copy(signers[i][:], checkpoint.Extra[extraVanity+i*common.AddressLength:]) + } + snap = newSnapshot(c.config, c.signatures, number, hash, signers) + if err := snap.store(c.db); err != nil { + return nil, err + } + log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash) + break + } + } + // No snapshot for this header, gather the header and move backward + var header *types.Header + if len(parents) > 0 { + // If we have explicit parents, pick from there (enforced) + header = parents[len(parents)-1] + if header.Hash() != hash || header.Number.Uint64() != number { + return nil, consensus.ErrUnknownAncestor + } + parents = parents[:len(parents)-1] + } else { + // No explicit parents (or no more left), reach out to the database + header = chain.GetHeader(hash, number) + if header == nil { + return nil, consensus.ErrUnknownAncestor + } + } + headers = append(headers, header) + number, hash = number-1, header.ParentHash + } + // Previous snapshot found, apply any pending headers on top of it + for i := 0; i < len(headers)/2; i++ { + headers[i], headers[len(headers)-1-i] = headers[len(headers)-1-i], headers[i] + } + snap, err := snap.apply(headers) + if err != nil { + return nil, err + } + c.recents.Add(snap.Hash, snap) + + // If we've generated a new checkpoint snapshot, save to disk + if snap.Number%checkpointInterval == 0 && len(headers) > 0 { + if err = snap.store(c.db); err != nil { + return nil, err + } + log.Trace("Stored voting snapshot to disk", "number", snap.Number, "hash", snap.Hash) + } + return snap, err +} + +// VerifyUncles implements consensus.Engine, always returning an error for any +// uncles as this consensus mechanism doesn't permit uncles. +func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) error { + if len(block.Uncles()) > 0 { + return errors.New("uncles not allowed") + } + return nil +} + +// VerifySeal implements consensus.Engine, checking whether the signature contained +// in the header satisfies the consensus protocol requirements. +func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) error { + return c.verifySeal(chain, header, nil) +} + +// verifySeal checks whether the signature contained in the header satisfies the +// consensus protocol requirements. The method accepts an optional list of parent +// headers that aren't yet part of the local blockchain to generate the snapshots +// from. 
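// Editor's note: an illustrative sketch, not part of this patch. It works through the
// "recently signed" rule enforced by verifySeal (below) and Seal: with
// limit = len(Signers)/2 + 1, each signer may seal at most one block in any window of
// `limit` consecutive blocks. The function name and numbers are only an example.
func exampleRecentlySigned() {
	signerCount := 5
	limit := uint64(signerCount/2 + 1)    // 3: one block out of every three per signer
	number := uint64(100)                 // block currently being verified
	lastSealed := uint64(98)              // height recorded for this signer in snap.Recents
	rejected := lastSealed > number-limit // 98 > 97, so block 100 by the same signer fails
	nextAllowed := lastSealed + limit     // 101 is the first height it may seal again
	_, _ = rejected, nextAllowed
}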
+func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error { + // Verifying the genesis block is not supported + number := header.Number.Uint64() + if number == 0 { + return errUnknownBlock + } + // Retrieve the snapshot needed to verify this header and cache it + snap, err := c.snapshot(chain, number-1, header.ParentHash, parents) + if err != nil { + return err + } + + // Resolve the authorization key and check against signers + signer, err := ecrecover(header, c.signatures) + if err != nil { + return err + } + if _, ok := snap.Signers[signer]; !ok { + return errUnauthorizedSigner + } + for seen, recent := range snap.Recents { + if recent == signer { + // Signer is among recents, only fail if the current block doesn't shift it out + if limit := uint64(len(snap.Signers)/2 + 1); seen > number-limit { + return errRecentlySigned + } + } + } + // Ensure that the difficulty corresponds to the turn-ness of the signer + if !c.fakeDiff { + inturn := snap.inturn(header.Number.Uint64(), signer) + if inturn && header.Difficulty.Cmp(diffInTurn) != 0 { + return errWrongDifficulty + } + if !inturn && header.Difficulty.Cmp(diffNoTurn) != 0 { + return errWrongDifficulty + } + } + return nil +} + +// Prepare implements consensus.Engine, preparing all the consensus fields of the +// header for running the transactions on top. +func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) error { + // If the block isn't a checkpoint, cast a random vote (good enough for now) + header.Coinbase = common.Address{} + header.Nonce = types.BlockNonce{} + + number := header.Number.Uint64() + // Assemble the voting snapshot to check which votes make sense + snap, err := c.snapshot(chain, number-1, header.ParentHash, nil) + if err != nil { + return err + } + if number%c.config.Epoch != 0 { + c.lock.RLock() + + // Gather all the proposals that make sense voting on + addresses := make([]common.Address, 0, len(c.proposals)) + for address, authorize := range c.proposals { + if snap.validVote(address, authorize) { + addresses = append(addresses, address) + } + } + // If there's pending proposals, cast a vote on them + if len(addresses) > 0 { + header.Coinbase = addresses[rand.Intn(len(addresses))] + if c.proposals[header.Coinbase] { + copy(header.Nonce[:], nonceAuthVote) + } else { + copy(header.Nonce[:], nonceDropVote) + } + } + c.lock.RUnlock() + } + // Set the correct difficulty + header.Difficulty = CalcDifficulty(snap, c.signer) + + // Ensure the extra data has all it's components + if len(header.Extra) < extraVanity { + header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...) + } + header.Extra = header.Extra[:extraVanity] + + if number%c.config.Epoch == 0 { + for _, signer := range snap.signers() { + header.Extra = append(header.Extra, signer[:]...) + } + } + header.Extra = append(header.Extra, make([]byte, extraSeal)...) + + // Mix digest is reserved for now, set to empty + header.MixDigest = common.Hash{} + + // Ensure the timestamp has the correct delay + parent := chain.GetHeader(header.ParentHash, number-1) + if parent == nil { + return consensus.ErrUnknownAncestor + } + header.Time = parent.Time + c.config.Period + if header.Time < uint64(time.Now().Unix()) { + header.Time = uint64(time.Now().Unix()) + } + return nil +} + +// Finalize implements consensus.Engine, ensuring no uncles are set, nor block +// rewards given. 
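// Editor's note: an illustrative sketch, not part of this patch. It spells out the in-turn
// schedule behind CalcDifficulty and Snapshot.inturn: block N "belongs" to
// signers[N % len(signers)] of the ascending signer list; that signer seals at difficulty 2
// (diffInTurn) right away, the others at difficulty 1 (diffNoTurn) after a random delay of up
// to (len(signers)/2+1) * wiggleTime. The function name and addresses are only an example.
func exampleInTurn() {
	signers := []common.Address{{0x01}, {0x02}, {0x03}} // already sorted ascending
	number := uint64(7)
	inTurn := signers[number%uint64(len(signers))]            // 7 % 3 == 1 → signers[1] is in-turn
	maxWiggle := time.Duration(len(signers)/2+1) * wiggleTime // out-of-turn signers wait up to ~1s
	_, _ = inTurn, maxWiggle
}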
+func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { + // No block rewards in PoA, so the state remains as is and uncles are dropped + header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) + header.UncleHash = types.CalcUncleHash(nil) +} + +// FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set, +// nor block rewards given, and returns the final block. +func (c *Clique) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { + // No block rewards in PoA, so the state remains as is and uncles are dropped + header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) + header.UncleHash = types.CalcUncleHash(nil) + + // Assemble and return the final block for sealing + return types.NewBlock(header, txs, nil, receipts), nil +} + +// Authorize injects a private key into the consensus engine to mint new blocks +// with. +func (c *Clique) Authorize(signer common.Address, signFn SignerFn) { + c.lock.Lock() + defer c.lock.Unlock() + + c.signer = signer + c.signFn = signFn +} + +// Seal implements consensus.Engine, attempting to create a sealed block using +// the local signing credentials. +func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { + header := block.Header() + + // Sealing the genesis block is not supported + number := header.Number.Uint64() + if number == 0 { + return errUnknownBlock + } + // For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing) + if c.config.Period == 0 && len(block.Transactions()) == 0 { + log.Info("Sealing paused, waiting for transactions") + return nil + } + // Don't hold the signer fields for the entire sealing procedure + c.lock.RLock() + signer, signFn := c.signer, c.signFn + c.lock.RUnlock() + + // Bail out if we're unauthorized to sign a block + snap, err := c.snapshot(chain, number-1, header.ParentHash, nil) + if err != nil { + return err + } + if _, authorized := snap.Signers[signer]; !authorized { + return errUnauthorizedSigner + } + // If we're amongst the recent signers, wait for the next block + for seen, recent := range snap.Recents { + if recent == signer { + // Signer is among recents, only wait if the current block doesn't shift it out + if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit { + log.Info("Signed recently, must wait for others") + return nil + } + } + } + // Sweet, the protocol permits us to sign the block, wait for our time + delay := time.Unix(int64(header.Time), 0).Sub(time.Now()) // nolint: gosimple + if header.Difficulty.Cmp(diffNoTurn) == 0 { + // It's not our turn explicitly to sign, delay it a bit + wiggle := time.Duration(len(snap.Signers)/2+1) * wiggleTime + delay += time.Duration(rand.Int63n(int64(wiggle))) + + log.Trace("Out-of-turn signing requested", "wiggle", common.PrettyDuration(wiggle)) + } + // Sign all the things! + sighash, err := signFn(accounts.Account{Address: signer}, accounts.MimetypeClique, CliqueRLP(header)) + if err != nil { + return err + } + copy(header.Extra[len(header.Extra)-extraSeal:], sighash) + // Wait until sealing is terminated or delay timeout. 
+ log.Trace("Waiting for slot to sign and propagate", "delay", common.PrettyDuration(delay)) + go func() { + select { + case <-stop: + return + case <-time.After(delay): + } + + select { + case results <- block.WithSeal(header): + default: + log.Warn("Sealing result is not read by miner", "sealhash", SealHash(header)) + } + }() + + return nil +} + +// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty +// that a new block should have based on the previous blocks in the chain and the +// current signer. +func (c *Clique) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int { + snap, err := c.snapshot(chain, parent.Number.Uint64(), parent.Hash(), nil) + if err != nil { + return nil + } + return CalcDifficulty(snap, c.signer) +} + +// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty +// that a new block should have based on the previous blocks in the chain and the +// current signer. +func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int { + if snap.inturn(snap.Number+1, signer) { + return new(big.Int).Set(diffInTurn) + } + return new(big.Int).Set(diffNoTurn) +} + +// SealHash returns the hash of a block prior to it being sealed. +func (c *Clique) SealHash(header *types.Header) common.Hash { + return SealHash(header) +} + +// Close implements consensus.Engine. It's a noop for clique as there are no background threads. +func (c *Clique) Close() error { + return nil +} + +// APIs implements consensus.Engine, returning the user facing RPC API to allow +// controlling the signer voting. +func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API { + return []rpc.API{{ + Namespace: "clique", + Version: "1.0", + Service: &API{chain: chain, clique: c}, + Public: false, + }} +} + +// SealHash returns the hash of a block prior to it being sealed. +func SealHash(header *types.Header) (hash common.Hash) { + hasher := sha3.NewLegacyKeccak256() + encodeSigHeader(hasher, header) + hasher.Sum(hash[:0]) + return hash +} + +// CliqueRLP returns the rlp bytes which needs to be signed for the proof-of-authority +// sealing. The RLP to sign consists of the entire header apart from the 65 byte signature +// contained at the end of the extra data. +// +// Note, the method requires the extra data to be at least 65 bytes, otherwise it +// panics. This is done to avoid accidentally using both forms (signature present +// or not), which could be abused to produce different hashes for the same header. +func CliqueRLP(header *types.Header) []byte { + b := new(bytes.Buffer) + encodeSigHeader(b, header) + return b.Bytes() +} + +func encodeSigHeader(w io.Writer, header *types.Header) { + err := rlp.Encode(w, []interface{}{ + header.ParentHash, + header.UncleHash, + header.Coinbase, + header.Root, + header.TxHash, + header.ReceiptHash, + header.Bloom, + header.Difficulty, + header.Number, + header.GasLimit, + header.GasUsed, + header.Time, + header.Extra[:len(header.Extra)-crypto.SignatureLength], // Yes, this will panic if extra is too short + header.MixDigest, + header.Nonce, + }) + if err != nil { + panic("can't encode: " + err.Error()) + } +} diff --git a/consensus/clique/snapshot.go b/consensus/clique/snapshot.go new file mode 100644 index 0000000..6660c0f --- /dev/null +++ b/consensus/clique/snapshot.go @@ -0,0 +1,326 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package clique + +import ( + "bytes" + "encoding/json" + "sort" + "time" + + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/go-ethereum/common" + "github.com/ava-labs/go-ethereum/ethdb" + "github.com/ava-labs/go-ethereum/log" + lru "github.com/hashicorp/golang-lru" +) + +// Vote represents a single vote that an authorized signer made to modify the +// list of authorizations. +type Vote struct { + Signer common.Address `json:"signer"` // Authorized signer that cast this vote + Block uint64 `json:"block"` // Block number the vote was cast in (expire old votes) + Address common.Address `json:"address"` // Account being voted on to change its authorization + Authorize bool `json:"authorize"` // Whether to authorize or deauthorize the voted account +} + +// Tally is a simple vote tally to keep the current score of votes. Votes that +// go against the proposal aren't counted since it's equivalent to not voting. +type Tally struct { + Authorize bool `json:"authorize"` // Whether the vote is about authorizing or kicking someone + Votes int `json:"votes"` // Number of votes until now wanting to pass the proposal +} + +// Snapshot is the state of the authorization voting at a given point in time. +type Snapshot struct { + config *params.CliqueConfig // Consensus engine parameters to fine tune behavior + sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover + + Number uint64 `json:"number"` // Block number where the snapshot was created + Hash common.Hash `json:"hash"` // Block hash where the snapshot was created + Signers map[common.Address]struct{} `json:"signers"` // Set of authorized signers at this moment + Recents map[uint64]common.Address `json:"recents"` // Set of recent signers for spam protections + Votes []*Vote `json:"votes"` // List of votes cast in chronological order + Tally map[common.Address]Tally `json:"tally"` // Current vote tally to avoid recalculating +} + +// signersAscending implements the sort interface to allow sorting a list of addresses +type signersAscending []common.Address + +func (s signersAscending) Len() int { return len(s) } +func (s signersAscending) Less(i, j int) bool { return bytes.Compare(s[i][:], s[j][:]) < 0 } +func (s signersAscending) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// newSnapshot creates a new snapshot with the specified startup parameters. This +// method does not initialize the set of recent signers, so only ever use if for +// the genesis block. 
+func newSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, number uint64, hash common.Hash, signers []common.Address) *Snapshot { + snap := &Snapshot{ + config: config, + sigcache: sigcache, + Number: number, + Hash: hash, + Signers: make(map[common.Address]struct{}), + Recents: make(map[uint64]common.Address), + Tally: make(map[common.Address]Tally), + } + for _, signer := range signers { + snap.Signers[signer] = struct{}{} + } + return snap +} + +// loadSnapshot loads an existing snapshot from the database. +func loadSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) { + blob, err := db.Get(append([]byte("clique-"), hash[:]...)) + if err != nil { + return nil, err + } + snap := new(Snapshot) + if err := json.Unmarshal(blob, snap); err != nil { + return nil, err + } + snap.config = config + snap.sigcache = sigcache + + return snap, nil +} + +// store inserts the snapshot into the database. +func (s *Snapshot) store(db ethdb.Database) error { + blob, err := json.Marshal(s) + if err != nil { + return err + } + return db.Put(append([]byte("clique-"), s.Hash[:]...), blob) +} + +// copy creates a deep copy of the snapshot, though not the individual votes. +func (s *Snapshot) copy() *Snapshot { + cpy := &Snapshot{ + config: s.config, + sigcache: s.sigcache, + Number: s.Number, + Hash: s.Hash, + Signers: make(map[common.Address]struct{}), + Recents: make(map[uint64]common.Address), + Votes: make([]*Vote, len(s.Votes)), + Tally: make(map[common.Address]Tally), + } + for signer := range s.Signers { + cpy.Signers[signer] = struct{}{} + } + for block, signer := range s.Recents { + cpy.Recents[block] = signer + } + for address, tally := range s.Tally { + cpy.Tally[address] = tally + } + copy(cpy.Votes, s.Votes) + + return cpy +} + +// validVote returns whether it makes sense to cast the specified vote in the +// given snapshot context (e.g. don't try to add an already authorized signer). +func (s *Snapshot) validVote(address common.Address, authorize bool) bool { + _, signer := s.Signers[address] + return (signer && !authorize) || (!signer && authorize) +} + +// cast adds a new vote into the tally. +func (s *Snapshot) cast(address common.Address, authorize bool) bool { + // Ensure the vote is meaningful + if !s.validVote(address, authorize) { + return false + } + // Cast the vote into an existing or new tally + if old, ok := s.Tally[address]; ok { + old.Votes++ + s.Tally[address] = old + } else { + s.Tally[address] = Tally{Authorize: authorize, Votes: 1} + } + return true +} + +// uncast removes a previously cast vote from the tally. +func (s *Snapshot) uncast(address common.Address, authorize bool) bool { + // If there's no tally, it's a dangling vote, just drop + tally, ok := s.Tally[address] + if !ok { + return false + } + // Ensure we only revert counted votes + if tally.Authorize != authorize { + return false + } + // Otherwise revert the vote + if tally.Votes > 1 { + tally.Votes-- + s.Tally[address] = tally + } else { + delete(s.Tally, address) + } + return true +} + +// apply creates a new authorization snapshot by applying the given headers to +// the original one. 
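// Editor's note: an illustrative sketch, not part of this patch. It demonstrates the majority
// rule that apply (below) evaluates after cast: a proposal passes once tally.Votes exceeds
// len(Signers)/2, i.e. a strict majority of the current signer set. With four signers the
// third distinct authorize vote flips the set. The function name and addresses are only an example.
func exampleVoteThreshold() {
	signers := []common.Address{{0x0a}, {0x0b}, {0x0c}, {0x0d}}
	snap := newSnapshot(&params.CliqueConfig{Epoch: epochLength}, nil, 0, common.Hash{}, signers)

	candidate := common.Address{0x0e}
	for i := 0; i < 3; i++ {
		snap.cast(candidate, true) // apply additionally ensures each signer has at most one live vote
	}
	passed := snap.Tally[candidate].Votes > len(snap.Signers)/2 // 3 > 2 → the candidate is added
	_ = passed
}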
+func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { + // Allow passing in no headers for cleaner code + if len(headers) == 0 { + return s, nil + } + // Sanity check that the headers can be applied + for i := 0; i < len(headers)-1; i++ { + if headers[i+1].Number.Uint64() != headers[i].Number.Uint64()+1 { + return nil, errInvalidVotingChain + } + } + if headers[0].Number.Uint64() != s.Number+1 { + return nil, errInvalidVotingChain + } + // Iterate through the headers and create a new snapshot + snap := s.copy() + + var ( + start = time.Now() + logged = time.Now() + ) + for i, header := range headers { + // Remove any votes on checkpoint blocks + number := header.Number.Uint64() + if number%s.config.Epoch == 0 { + snap.Votes = nil + snap.Tally = make(map[common.Address]Tally) + } + // Delete the oldest signer from the recent list to allow it signing again + if limit := uint64(len(snap.Signers)/2 + 1); number >= limit { + delete(snap.Recents, number-limit) + } + // Resolve the authorization key and check against signers + signer, err := ecrecover(header, s.sigcache) + if err != nil { + return nil, err + } + if _, ok := snap.Signers[signer]; !ok { + return nil, errUnauthorizedSigner + } + for _, recent := range snap.Recents { + if recent == signer { + return nil, errRecentlySigned + } + } + snap.Recents[number] = signer + + // Header authorized, discard any previous votes from the signer + for i, vote := range snap.Votes { + if vote.Signer == signer && vote.Address == header.Coinbase { + // Uncast the vote from the cached tally + snap.uncast(vote.Address, vote.Authorize) + + // Uncast the vote from the chronological list + snap.Votes = append(snap.Votes[:i], snap.Votes[i+1:]...) + break // only one vote allowed + } + } + // Tally up the new vote from the signer + var authorize bool + switch { + case bytes.Equal(header.Nonce[:], nonceAuthVote): + authorize = true + case bytes.Equal(header.Nonce[:], nonceDropVote): + authorize = false + default: + return nil, errInvalidVote + } + if snap.cast(header.Coinbase, authorize) { + snap.Votes = append(snap.Votes, &Vote{ + Signer: signer, + Block: number, + Address: header.Coinbase, + Authorize: authorize, + }) + } + // If the vote passed, update the list of signers + if tally := snap.Tally[header.Coinbase]; tally.Votes > len(snap.Signers)/2 { + if tally.Authorize { + snap.Signers[header.Coinbase] = struct{}{} + } else { + delete(snap.Signers, header.Coinbase) + + // Signer list shrunk, delete any leftover recent caches + if limit := uint64(len(snap.Signers)/2 + 1); number >= limit { + delete(snap.Recents, number-limit) + } + // Discard any previous votes the deauthorized signer cast + for i := 0; i < len(snap.Votes); i++ { + if snap.Votes[i].Signer == header.Coinbase { + // Uncast the vote from the cached tally + snap.uncast(snap.Votes[i].Address, snap.Votes[i].Authorize) + + // Uncast the vote from the chronological list + snap.Votes = append(snap.Votes[:i], snap.Votes[i+1:]...) + + i-- + } + } + } + // Discard any previous votes around the just changed account + for i := 0; i < len(snap.Votes); i++ { + if snap.Votes[i].Address == header.Coinbase { + snap.Votes = append(snap.Votes[:i], snap.Votes[i+1:]...) 
+ i-- + } + } + delete(snap.Tally, header.Coinbase) + } + // If we're taking too much time (ecrecover), notify the user once a while + if time.Since(logged) > 8*time.Second { + log.Info("Reconstructing voting history", "processed", i, "total", len(headers), "elapsed", common.PrettyDuration(time.Since(start))) + logged = time.Now() + } + } + if time.Since(start) > 8*time.Second { + log.Info("Reconstructed voting history", "processed", len(headers), "elapsed", common.PrettyDuration(time.Since(start))) + } + snap.Number += uint64(len(headers)) + snap.Hash = headers[len(headers)-1].Hash() + + return snap, nil +} + +// signers retrieves the list of authorized signers in ascending order. +func (s *Snapshot) signers() []common.Address { + sigs := make([]common.Address, 0, len(s.Signers)) + for sig := range s.Signers { + sigs = append(sigs, sig) + } + sort.Sort(signersAscending(sigs)) + return sigs +} + +// inturn returns if a signer at a given block height is in-turn or not. +func (s *Snapshot) inturn(number uint64, signer common.Address) bool { + signers, offset := s.signers(), 0 + for offset < len(signers) && signers[offset] != signer { + offset++ + } + return (number % uint64(len(signers))) == uint64(offset) +} diff --git a/consensus/consensus.go b/consensus/consensus.go new file mode 100644 index 0000000..93fc9dc --- /dev/null +++ b/consensus/consensus.go @@ -0,0 +1,125 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package consensus implements different Ethereum consensus engines. +package consensus + +import ( + "math/big" + + "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/rpc" + "github.com/ava-labs/go-ethereum/common" +) + +// ChainReader defines a small collection of methods needed to access the local +// blockchain during header and/or uncle verification. +type ChainReader interface { + // Config retrieves the blockchain's chain configuration. + Config() *params.ChainConfig + + // CurrentHeader retrieves the current header from the local chain. + CurrentHeader() *types.Header + + // GetHeader retrieves a block header from the database by hash and number. + GetHeader(hash common.Hash, number uint64) *types.Header + + // GetHeaderByNumber retrieves a block header from the database by number. + GetHeaderByNumber(number uint64) *types.Header + + // GetHeaderByHash retrieves a block header from the database by its hash. + GetHeaderByHash(hash common.Hash) *types.Header + + // GetBlock retrieves a block from the database by hash and number. + GetBlock(hash common.Hash, number uint64) *types.Block +} + +// Engine is an algorithm agnostic consensus engine. 
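// Editor's note: an illustrative sketch, not part of this patch. It shows how a caller is
// expected to consume the VerifyHeaders contract of the Engine interface defined below: read
// exactly one error per input header from the results channel (in input order) and close the
// abort channel to cancel outstanding work. The function name is hypothetical.
func verifyAll(engine Engine, chain ChainReader, headers []*types.Header) error {
	seals := make([]bool, len(headers))
	for i := range seals {
		seals[i] = true // also verify every seal
	}
	abort, results := engine.VerifyHeaders(chain, headers, seals)
	defer close(abort) // tells the engine to stop if we bail out early

	for range headers {
		if err := <-results; err != nil {
			return err
		}
	}
	return nil
}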
+type Engine interface { + // Author retrieves the Ethereum address of the account that minted the given + // block, which may be different from the header's coinbase if a consensus + // engine is based on signatures. + Author(header *types.Header) (common.Address, error) + + // VerifyHeader checks whether a header conforms to the consensus rules of a + // given engine. Verifying the seal may be done optionally here, or explicitly + // via the VerifySeal method. + VerifyHeader(chain ChainReader, header *types.Header, seal bool) error + + // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers + // concurrently. The method returns a quit channel to abort the operations and + // a results channel to retrieve the async verifications (the order is that of + // the input slice). + VerifyHeaders(chain ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) + + // VerifyUncles verifies that the given block's uncles conform to the consensus + // rules of a given engine. + VerifyUncles(chain ChainReader, block *types.Block) error + + // VerifySeal checks whether the crypto seal on a header is valid according to + // the consensus rules of the given engine. + VerifySeal(chain ChainReader, header *types.Header) error + + // Prepare initializes the consensus fields of a block header according to the + // rules of a particular engine. The changes are executed inline. + Prepare(chain ChainReader, header *types.Header) error + + // Finalize runs any post-transaction state modifications (e.g. block rewards) + // but does not assemble the block. + // + // Note: The block header and state database might be updated to reflect any + // consensus rules that happen at finalization (e.g. block rewards). + Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, + uncles []*types.Header) + + // FinalizeAndAssemble runs any post-transaction state modifications (e.g. block + // rewards) and assembles the final block. + // + // Note: The block header and state database might be updated to reflect any + // consensus rules that happen at finalization (e.g. block rewards). + FinalizeAndAssemble(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, + uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) + + // Seal generates a new sealing request for the given input block and pushes + // the result into the given channel. + // + // Note, the method returns immediately and will send the result async. More + // than one result may also be returned depending on the consensus algorithm. + Seal(chain ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error + + // SealHash returns the hash of a block prior to it being sealed. + SealHash(header *types.Header) common.Hash + + // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty + // that a new block should have. + CalcDifficulty(chain ChainReader, time uint64, parent *types.Header) *big.Int + + // APIs returns the RPC APIs this consensus engine provides. + APIs(chain ChainReader) []rpc.API + + // Close terminates any background threads maintained by the consensus engine. + Close() error +} + +// PoW is a consensus engine based on proof-of-work. +type PoW interface { + Engine + + // Hashrate returns the current mining hashrate of a PoW consensus engine. 
+ Hashrate() float64 +} diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go index 92bd261..adc85fe 100644 --- a/consensus/dummy/consensus.go +++ b/consensus/dummy/consensus.go @@ -8,14 +8,13 @@ import ( "runtime" "time" - myparams "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/consensus" + "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/rpc" "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/consensus" - "github.com/ava-labs/go-ethereum/core/state" - "github.com/ava-labs/go-ethereum/core/types" - "github.com/ava-labs/go-ethereum/params" "github.com/ava-labs/go-ethereum/rlp" - "github.com/ava-labs/go-ethereum/rpc" mapset "github.com/deckarep/golang-set" ) @@ -56,8 +55,8 @@ var ( // modified from consensus.go func (self *DummyEngine) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error { // Ensure that the header's extra-data section is of a reasonable size - if uint64(len(header.Extra)) > myparams.MaximumExtraDataSize { - return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), myparams.MaximumExtraDataSize) + if uint64(len(header.Extra)) > params.MaximumExtraDataSize { + return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize) } // Verify the header's timestamp if !uncle { diff --git a/consensus/errors.go b/consensus/errors.go new file mode 100644 index 0000000..a005c5f --- /dev/null +++ b/consensus/errors.go @@ -0,0 +1,37 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package consensus + +import "errors" + +var ( + // ErrUnknownAncestor is returned when validating a block requires an ancestor + // that is unknown. + ErrUnknownAncestor = errors.New("unknown ancestor") + + // ErrPrunedAncestor is returned when validating a block requires an ancestor + // that is known, but the state of which is not available. + ErrPrunedAncestor = errors.New("pruned ancestor") + + // ErrFutureBlock is returned when a block's timestamp is in the future according + // to the current node. + ErrFutureBlock = errors.New("block in the future") + + // ErrInvalidNumber is returned if a block's number doesn't equal it's parent's + // plus one. + ErrInvalidNumber = errors.New("invalid block number") +) diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go new file mode 100644 index 0000000..2b66c33 --- /dev/null +++ b/consensus/ethash/algorithm.go @@ -0,0 +1,1148 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethash + +import ( + "encoding/binary" + "hash" + "math/big" + "reflect" + "runtime" + "sync" + "sync/atomic" + "time" + "unsafe" + + "github.com/ava-labs/go-ethereum/common" + "github.com/ava-labs/go-ethereum/common/bitutil" + "github.com/ava-labs/go-ethereum/crypto" + "github.com/ava-labs/go-ethereum/log" + "golang.org/x/crypto/sha3" +) + +const ( + datasetInitBytes = 1 << 30 // Bytes in dataset at genesis + datasetGrowthBytes = 1 << 23 // Dataset growth per epoch + cacheInitBytes = 1 << 24 // Bytes in cache at genesis + cacheGrowthBytes = 1 << 17 // Cache growth per epoch + epochLength = 30000 // Blocks per epoch + mixBytes = 128 // Width of mix + hashBytes = 64 // Hash length in bytes + hashWords = 16 // Number of 32 bit ints in a hash + datasetParents = 256 // Number of parents of each dataset element + cacheRounds = 3 // Number of rounds in cache production + loopAccesses = 64 // Number of accesses in hashimoto loop +) + +// cacheSize returns the size of the ethash verification cache that belongs to a certain +// block number. +func cacheSize(block uint64) uint64 { + epoch := int(block / epochLength) + if epoch < maxEpoch { + return cacheSizes[epoch] + } + return calcCacheSize(epoch) +} + +// calcCacheSize calculates the cache size for epoch. The cache size grows linearly, +// however, we always take the highest prime below the linearly growing threshold in order +// to reduce the risk of accidental regularities leading to cyclic behavior. +func calcCacheSize(epoch int) uint64 { + size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes + for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64 + size -= 2 * hashBytes + } + return size +} + +// datasetSize returns the size of the ethash mining dataset that belongs to a certain +// block number. +func datasetSize(block uint64) uint64 { + epoch := int(block / epochLength) + if epoch < maxEpoch { + return datasetSizes[epoch] + } + return calcDatasetSize(epoch) +} + +// calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly, +// however, we always take the highest prime below the linearly growing threshold in order +// to reduce the risk of accidental regularities leading to cyclic behavior. +func calcDatasetSize(epoch int) uint64 { + size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes + for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64 + size -= 2 * mixBytes + } + return size +} + +// hasher is a repetitive hasher allowing the same hash data structures to be +// reused between hash runs instead of requiring new ones to be created. 
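// Editor's note: an illustrative sketch, not part of this patch. It ties the constants above to
// the size functions: a block maps to epoch block/epochLength, and calcCacheSize /
// calcDatasetSize start from the linearly growing threshold and step down by 2*hashBytes
// (resp. 2*mixBytes) until size/hashBytes (resp. size/mixBytes) is prime, reducing the risk of
// cyclic access patterns. The function name is hypothetical.
func exampleEpochSizes() {
	block := uint64(12345678)
	epoch := block / epochLength // 12345678 / 30000 == 411
	_ = epoch

	cache0 := calcCacheSize(0)     // 16776896 bytes — the value the cacheSizes table holds for epoch 0
	dataset0 := calcDatasetSize(0) // 1073739904 bytes — matches datasetSizes[0]
	_, _ = cache0, dataset0
}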
+type hasher func(dest []byte, data []byte) + +// makeHasher creates a repetitive hasher, allowing the same hash data structures to +// be reused between hash runs instead of requiring new ones to be created. The returned +// function is not thread safe! +func makeHasher(h hash.Hash) hasher { + // sha3.state supports Read to get the sum, use it to avoid the overhead of Sum. + // Read alters the state but we reset the hash before every operation. + type readerHash interface { + hash.Hash + Read([]byte) (int, error) + } + rh, ok := h.(readerHash) + if !ok { + panic("can't find Read method on hash") + } + outputLen := rh.Size() + return func(dest []byte, data []byte) { + rh.Reset() + rh.Write(data) + rh.Read(dest[:outputLen]) + } +} + +// seedHash is the seed to use for generating a verification cache and the mining +// dataset. +func seedHash(block uint64) []byte { + seed := make([]byte, 32) + if block < epochLength { + return seed + } + keccak256 := makeHasher(sha3.NewLegacyKeccak256()) + for i := 0; i < int(block/epochLength); i++ { + keccak256(seed, seed) + } + return seed +} + +// generateCache creates a verification cache of a given size for an input seed. +// The cache production process involves first sequentially filling up 32 MB of +// memory, then performing two passes of Sergio Demian Lerner's RandMemoHash +// algorithm from Strict Memory Hard Hashing Functions (2014). The output is a +// set of 524288 64-byte values. +// This method places the result into dest in machine byte order. +func generateCache(dest []uint32, epoch uint64, seed []byte) { + // Print some debug logs to allow analysis on low end devices + logger := log.New("epoch", epoch) + + start := time.Now() + defer func() { + elapsed := time.Since(start) + + logFn := logger.Debug + if elapsed > 3*time.Second { + logFn = logger.Info + } + logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed)) + }() + // Convert our destination slice to a byte buffer + header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest)) + header.Len *= 4 + header.Cap *= 4 + cache := *(*[]byte)(unsafe.Pointer(&header)) + + // Calculate the number of theoretical rows (we'll store in one buffer nonetheless) + size := uint64(len(cache)) + rows := int(size) / hashBytes + + // Start a monitoring goroutine to report progress on low end devices + var progress uint32 + + done := make(chan struct{}) + defer close(done) + + go func() { + for { + select { + case <-done: + return + case <-time.After(3 * time.Second): + logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start))) + } + } + }() + // Create a hasher to reuse between invocations + keccak512 := makeHasher(sha3.NewLegacyKeccak512()) + + // Sequentially produce the initial dataset + keccak512(cache, seed) + for offset := uint64(hashBytes); offset < size; offset += hashBytes { + keccak512(cache[offset:], cache[offset-hashBytes:offset]) + atomic.AddUint32(&progress, 1) + } + // Use a low-round version of randmemohash + temp := make([]byte, hashBytes) + + for i := 0; i < cacheRounds; i++ { + for j := 0; j < rows; j++ { + var ( + srcOff = ((j - 1 + rows) % rows) * hashBytes + dstOff = j * hashBytes + xorOff = (binary.LittleEndian.Uint32(cache[dstOff:]) % uint32(rows)) * hashBytes + ) + bitutil.XORBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes]) + keccak512(cache[dstOff:], temp) + + atomic.AddUint32(&progress, 1) + } + } + // Swap the byte 
order on big endian systems and return + if !isLittleEndian() { + swap(cache) + } +} + +// swap changes the byte order of the buffer assuming a uint32 representation. +func swap(buffer []byte) { + for i := 0; i < len(buffer); i += 4 { + binary.BigEndian.PutUint32(buffer[i:], binary.LittleEndian.Uint32(buffer[i:])) + } +} + +// fnv is an algorithm inspired by the FNV hash, which in some cases is used as +// a non-associative substitute for XOR. Note that we multiply the prime with +// the full 32-bit input, in contrast with the FNV-1 spec which multiplies the +// prime with one byte (octet) in turn. +func fnv(a, b uint32) uint32 { + return a*0x01000193 ^ b +} + +// fnvHash mixes in data into mix using the ethash fnv method. +func fnvHash(mix []uint32, data []uint32) { + for i := 0; i < len(mix); i++ { + mix[i] = mix[i]*0x01000193 ^ data[i] + } +} + +// generateDatasetItem combines data from 256 pseudorandomly selected cache nodes, +// and hashes that to compute a single dataset node. +func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte { + // Calculate the number of theoretical rows (we use one buffer nonetheless) + rows := uint32(len(cache) / hashWords) + + // Initialize the mix + mix := make([]byte, hashBytes) + + binary.LittleEndian.PutUint32(mix, cache[(index%rows)*hashWords]^index) + for i := 1; i < hashWords; i++ { + binary.LittleEndian.PutUint32(mix[i*4:], cache[(index%rows)*hashWords+uint32(i)]) + } + keccak512(mix, mix) + + // Convert the mix to uint32s to avoid constant bit shifting + intMix := make([]uint32, hashWords) + for i := 0; i < len(intMix); i++ { + intMix[i] = binary.LittleEndian.Uint32(mix[i*4:]) + } + // fnv it with a lot of random cache nodes based on index + for i := uint32(0); i < datasetParents; i++ { + parent := fnv(index^i, intMix[i%16]) % rows + fnvHash(intMix, cache[parent*hashWords:]) + } + // Flatten the uint32 mix into a binary one and return + for i, val := range intMix { + binary.LittleEndian.PutUint32(mix[i*4:], val) + } + keccak512(mix, mix) + return mix +} + +// generateDataset generates the entire ethash dataset for mining. +// This method places the result into dest in machine byte order. +func generateDataset(dest []uint32, epoch uint64, cache []uint32) { + // Print some debug logs to allow analysis on low end devices + logger := log.New("epoch", epoch) + + start := time.Now() + defer func() { + elapsed := time.Since(start) + + logFn := logger.Debug + if elapsed > 3*time.Second { + logFn = logger.Info + } + logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed)) + }() + + // Figure out whether the bytes need to be swapped for the machine + swapped := !isLittleEndian() + + // Convert our destination slice to a byte buffer + header := *(*reflect.Slic