From 78745551c077bf54151202138c2629f288769561 Mon Sep 17 00:00:00 2001 From: Determinant Date: Tue, 15 Sep 2020 23:55:34 -0400 Subject: WIP: geth-tavum --- consensus/ethash/algorithm.go | 8 +- consensus/ethash/api.go | 30 ++-- consensus/ethash/consensus.go | 44 +++--- consensus/ethash/ethash.go | 175 +++++++++------------ consensus/ethash/sealer.go | 357 +++++++++++++++++++++++++----------------- 5 files changed, 325 insertions(+), 289 deletions(-) (limited to 'consensus/ethash') diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go index 2b66c33..d6c8710 100644 --- a/consensus/ethash/algorithm.go +++ b/consensus/ethash/algorithm.go @@ -27,10 +27,10 @@ import ( "time" "unsafe" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/bitutil" - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/bitutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "golang.org/x/crypto/sha3" ) diff --git a/consensus/ethash/api.go b/consensus/ethash/api.go index 34ed386..c983504 100644 --- a/consensus/ethash/api.go +++ b/consensus/ethash/api.go @@ -20,15 +20,15 @@ import ( "errors" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" ) var errEthashStopped = errors.New("ethash stopped") // API exposes ethash related methods for the RPC interface. type API struct { - ethash *Ethash // Make sure the mode of ethash is normal. + ethash *Ethash } // GetWork returns a work package for external miner. @@ -39,7 +39,7 @@ type API struct { // result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty // result[3] - hex encoded block number func (api *API) GetWork() ([4]string, error) { - if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest { + if api.ethash.remote == nil { return [4]string{}, errors.New("not supported") } @@ -47,13 +47,11 @@ func (api *API) GetWork() ([4]string, error) { workCh = make(chan [4]string, 1) errc = make(chan error, 1) ) - select { - case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}: - case <-api.ethash.exitCh: + case api.ethash.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}: + case <-api.ethash.remote.exitCh: return [4]string{}, errEthashStopped } - select { case work := <-workCh: return work, nil @@ -66,23 +64,21 @@ func (api *API) GetWork() ([4]string, error) { // It returns an indication if the work was accepted. // Note either an invalid solution, a stale work a non-existent work will return false. func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool { - if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest { + if api.ethash.remote == nil { return false } var errc = make(chan error, 1) - select { - case api.ethash.submitWorkCh <- &mineResult{ + case api.ethash.remote.submitWorkCh <- &mineResult{ nonce: nonce, mixDigest: digest, hash: hash, errc: errc, }: - case <-api.ethash.exitCh: + case <-api.ethash.remote.exitCh: return false } - err := <-errc return err == nil } @@ -94,21 +90,19 @@ func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) boo // It accepts the miner hash rate and an identifier which must be unique // between nodes. 
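// Illustrative sketch, not part of this patch: how an external miner could
// drive GetWork/SubmitWork over JSON-RPC. The endpoint URL, the dummy nonce
// and mix digest, and the eth_getWork / eth_submitWork method names (the
// standard eth-namespace names these APIs are normally registered under) are
// assumptions; adjust them for the actual node configuration.
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // assumed local RPC endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Fetch the current work package: [pow-hash, seed hash, target, block number].
	var work [4]string
	if err := client.Call(&work, "eth_getWork"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("pow-hash:", work[0], "target:", work[2], "number:", work[3])

	// After solving the PoW externally, hand the solution back. The three
	// arguments mirror API.SubmitWork: nonce, header pow-hash, mix digest.
	// The nonce and digest below are dummy placeholder values.
	nonce := "0x0000000000000001"
	mixDigest := "0x" + strings.Repeat("0", 64)
	var accepted bool
	if err := client.Call(&accepted, "eth_submitWork", nonce, work[0], mixDigest); err != nil {
		log.Fatal(err)
	}
	fmt.Println("accepted:", accepted)
}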
func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool { - if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest { + if api.ethash.remote == nil { return false } var done = make(chan struct{}, 1) - select { - case api.ethash.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}: - case <-api.ethash.exitCh: + case api.ethash.remote.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}: + case <-api.ethash.remote.exitCh: return false } // Block until hash rate submitted successfully. <-done - return true } diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index dc88a79..151761c 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -29,10 +29,11 @@ import ( "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/math" - "github.com/ava-labs/go-ethereum/rlp" mapset "github.com/deckarep/golang-set" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" "golang.org/x/crypto/sha3" ) @@ -44,6 +45,11 @@ var ( maxUncles = 2 // Maximum number of uncles allowed in a single block allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks + // calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384. + // It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks. + // Specification EIP-2384: https://eips.ethereum.org/EIPS/eip-2384 + calcDifficultyEip2384 = makeDifficultyCalculator(big.NewInt(9000000)) + // calcDifficultyConstantinople is the difficulty adjustment algorithm for Constantinople. // It returns the difficulty that a new block should have when created at time given the // parent block's time and difficulty. The calculation uses the Byzantium rules, but with @@ -63,7 +69,7 @@ var ( // codebase, inherently breaking if the engine is swapped out. Please put common // error types into the consensus package. var ( - errZeroBlockTime = errors.New("timestamp equals parent's") + errOlderBlockTime = errors.New("timestamp older than parent") errTooManyUncles = errors.New("too many uncles") errDuplicateUncle = errors.New("duplicate uncle") errUncleIsAncestor = errors.New("uncle is ancestor") @@ -81,12 +87,12 @@ func (ethash *Ethash) Author(header *types.Header) (common.Address, error) { // VerifyHeader checks whether a header conforms to the consensus rules of the // stock Ethereum ethash engine. -func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error { +func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { // If we're running a full engine faking, accept any input as valid if ethash.config.PowMode == ModeFullFake { return nil } - // Short circuit if the header is known, or it's parent not + // Short circuit if the header is known, or its parent not number := header.Number.Uint64() if chain.GetHeader(header.Hash(), number) != nil { return nil @@ -102,7 +108,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers // concurrently. 
The method returns a quit channel to abort the operations and // a results channel to retrieve the async verifications. -func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { +func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { // If we're running a full engine faking, accept any input as valid if ethash.config.PowMode == ModeFullFake || len(headers) == 0 { abort, results := make(chan struct{}), make(chan error, len(headers)) @@ -164,7 +170,7 @@ func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*type return abort, errorsOut } -func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainReader, headers []*types.Header, seals []bool, index int) error { +func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int) error { var parent *types.Header if index == 0 { parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1) @@ -238,7 +244,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo // verifyHeader checks whether a header conforms to the consensus rules of the // stock Ethereum ethash engine. // See YP section 4.3.4. "Block Header Validity" -func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error { +func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool) error { // Ensure that the header's extra-data section is of a reasonable size if uint64(len(header.Extra)) > params.MaximumExtraDataSize { return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize) @@ -250,9 +256,9 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent * } } if header.Time <= parent.Time { - return errZeroBlockTime + return errOlderBlockTime } - // Verify the block's difficulty based in it's timestamp and parent's difficulty + // Verify the block's difficulty based on its timestamp and parent's difficulty expected := ethash.CalcDifficulty(chain, header.Time, parent) if expected.Cmp(header.Difficulty) != 0 { @@ -301,7 +307,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent * // CalcDifficulty is the difficulty adjustment algorithm. It returns // the difficulty that a new block should have when created at time // given the parent block's time and difficulty. 
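// Illustrative sketch, not from this patch: consuming the abort/results pair
// returned by VerifyHeaders. One error (or nil) arrives per header, in
// submission order, and closing the abort channel cancels any verification
// still in flight. The ChainHeaderReader value is assumed to be supplied by
// the caller, as elsewhere in this commit.
package example

import (
	"github.com/ava-labs/coreth/consensus"
	"github.com/ava-labs/coreth/consensus/ethash"
	"github.com/ava-labs/coreth/core/types"
	"github.com/ethereum/go-ethereum/log"
)

func verifyBatch(engine *ethash.Ethash, chain consensus.ChainHeaderReader, headers []*types.Header) bool {
	seals := make([]bool, len(headers))
	for i := range seals {
		seals[i] = true // also check the PoW seal of every header
	}
	abort, results := engine.VerifyHeaders(chain, headers, seals)
	defer close(abort) // stop the verification workers when we return

	for i := range headers {
		if err := <-results; err != nil {
			log.Warn("Header verification failed", "index", i, "err", err)
			return false
		}
	}
	return true
}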
-func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int { +func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { return CalcDifficulty(chain.Config(), time, parent) } @@ -311,6 +317,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, p func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int { next := new(big.Int).Add(parent.Number, big1) switch { + case config.IsMuirGlacier(next): + return calcDifficultyEip2384(time, parent) case config.IsConstantinople(next): return calcDifficultyConstantinople(time, parent) case config.IsByzantium(next): @@ -479,14 +487,14 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int { // VerifySeal implements consensus.Engine, checking whether the given block satisfies // the PoW difficulty requirements. -func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error { +func (ethash *Ethash) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { return ethash.verifySeal(chain, header, false) } // verifySeal checks whether a block satisfies the PoW difficulty requirements, // either using the usual ethash cache for it, or alternatively using a full DAG // to make remote mining fast. -func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Header, fulldag bool) error { +func (ethash *Ethash) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, fulldag bool) error { // If we're running a fake PoW, accept any seal as valid if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake { time.Sleep(ethash.fakeDelay) @@ -551,7 +559,7 @@ func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Head // Prepare implements consensus.Engine, initializing the difficulty field of a // header to conform to the ethash protocol. The changes are done inline. -func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header) error { +func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) if parent == nil { return consensus.ErrUnknownAncestor @@ -562,7 +570,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header) // Finalize implements consensus.Engine, accumulating the block and uncle rewards, // setting the final state on the header -func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { +func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { // Accumulate any block and uncle rewards and commit the final state root accumulateRewards(chain.Config(), state, header, uncles) header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) @@ -570,13 +578,13 @@ func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header // FinalizeAndAssemble implements consensus.Engine, accumulating the block and // uncle rewards, setting the final state and assembling the block. 
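// A minimal sketch of the term EIP-2384 changes, not the patch's own
// implementation: the difficulty-bomb component is computed against a block
// number pushed back by 9,000,000, keeping the usual 100,000-block
// exponential period. The parent-time/difficulty adjustment is unchanged and
// omitted here.
package example

import "math/big"

func bombTermEIP2384(blockNumber uint64) *big.Int {
	const bombDelay = 9_000_000 // offset introduced by EIP-2384 (Muir Glacier)
	const period = 100_000      // exponential difficulty period

	var fake uint64
	if blockNumber > bombDelay {
		fake = blockNumber - bombDelay // "fake" block number used for the ice age
	}
	periodCount := fake / period
	if periodCount <= 1 {
		return big.NewInt(0) // bomb contributes nothing yet
	}
	// 2^(periodCount - 2) is added on top of the parent-based adjustment.
	return new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(periodCount-2)), nil)
}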
-func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { +func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { // Accumulate any block and uncle rewards and commit the final state root accumulateRewards(chain.Config(), state, header, uncles) header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts, nil), nil + return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil } // SealHash returns the hash of a block prior to it being sealed. diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index 53420d0..4a3912d 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -34,12 +34,10 @@ import ( "unsafe" "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/rpc" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/log" - "github.com/ava-labs/go-ethereum/metrics" mmap "github.com/edsrzf/mmap-go" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/hashicorp/golang-lru/simplelru" ) @@ -50,7 +48,7 @@ var ( two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) // sharedEthash is a full instance that can be shared between multiple users. - sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil, false) + sharedEthash = New(Config{"", 3, 0, false, "", 1, 0, false, ModeNormal, nil}, nil, false) // algorithmRevision is the data structure version used for file naming. algorithmRevision = 23 @@ -67,7 +65,7 @@ func isLittleEndian() bool { } // memoryMap tries to memory map a file of uint32s for read only access. -func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) { +func memoryMap(path string, lock bool) (*os.File, mmap.MMap, []uint32, error) { file, err := os.OpenFile(path, os.O_RDONLY, 0644) if err != nil { return nil, nil, nil, err @@ -84,6 +82,13 @@ func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) { return nil, nil, nil, ErrInvalidDumpMagic } } + if lock { + if err := mem.Lock(); err != nil { + mem.Unmap() + file.Close() + return nil, nil, nil, err + } + } return file, mem, buffer[len(dumpMagic):], err } @@ -109,7 +114,7 @@ func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) { // memoryMapAndGenerate tries to memory map a temporary file of uint32s for write // access, fill it with the data from a generator and then move it into the final // path requested. 
-func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) { +func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) { // Ensure the data folder exists if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { return nil, nil, nil, err @@ -144,7 +149,7 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint if err := os.Rename(temp, path); err != nil { return nil, nil, nil, err } - return memoryMap(path) + return memoryMap(path, lock) } // lru tracks caches or datasets by their last use time, keeping at most N of them. @@ -215,7 +220,7 @@ func newCache(epoch uint64) interface{} { } // generate ensures that the cache content is generated before use. -func (c *cache) generate(dir string, limit int, test bool) { +func (c *cache) generate(dir string, limit int, lock bool, test bool) { c.once.Do(func() { size := cacheSize(c.epoch*epochLength + 1) seed := seedHash(c.epoch*epochLength + 1) @@ -242,7 +247,7 @@ func (c *cache) generate(dir string, limit int, test bool) { // Try to load the file from disk and memory map it var err error - c.dump, c.mmap, c.cache, err = memoryMap(path) + c.dump, c.mmap, c.cache, err = memoryMap(path, lock) if err == nil { logger.Debug("Loaded old ethash cache from disk") return @@ -250,7 +255,7 @@ func (c *cache) generate(dir string, limit int, test bool) { logger.Debug("Failed to load old ethash cache", "err", err) // No previous cache available, create a new cache file to fill - c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) }) + c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) }) if err != nil { logger.Error("Failed to generate mapped ethash cache", "err", err) @@ -292,7 +297,7 @@ func newDataset(epoch uint64) interface{} { } // generate ensures that the dataset content is generated before use. -func (d *dataset) generate(dir string, limit int, test bool) { +func (d *dataset) generate(dir string, limit int, lock bool, test bool) { d.once.Do(func() { // Mark the dataset generated after we're done. This is needed for remote defer atomic.StoreUint32(&d.done, 1) @@ -328,7 +333,7 @@ func (d *dataset) generate(dir string, limit int, test bool) { // Try to load the file from disk and memory map it var err error - d.dump, d.mmap, d.dataset, err = memoryMap(path) + d.dump, d.mmap, d.dataset, err = memoryMap(path, lock) if err == nil { logger.Debug("Loaded old ethash dataset from disk") return @@ -339,7 +344,7 @@ func (d *dataset) generate(dir string, limit int, test bool) { cache := make([]uint32, csize/4) generateCache(cache, d.epoch, seed) - d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) }) + d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) }) if err != nil { logger.Error("Failed to generate mapped ethash dataset", "err", err) @@ -374,13 +379,13 @@ func (d *dataset) finalizer() { // MakeCache generates a new ethash cache and optionally stores it to disk. 
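// Illustrative sketch, not from this patch, of what the new lock flag does:
// after memory-mapping a cache/DAG file, MMap.Lock (mlock) pins its pages in
// RAM so the OS cannot swap them out mid-mining. The file name below is a
// hypothetical example of the cache naming scheme.
package main

import (
	"log"
	"os"

	mmap "github.com/edsrzf/mmap-go"
)

func main() {
	file, err := os.OpenFile("cache-R23-0000000000000000", os.O_RDONLY, 0644) // hypothetical cache file
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	mem, err := mmap.Map(file, mmap.RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer mem.Unmap()

	// Equivalent of CachesLockMmap/DatasetsLockMmap: pin the mapping in memory.
	if err := mem.Lock(); err != nil {
		log.Fatal(err) // may fail if RLIMIT_MEMLOCK is too low
	}
	defer mem.Unlock()
	// Read the ethash data through the locked mapping here.
}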
func MakeCache(block uint64, dir string) { c := cache{epoch: block / epochLength} - c.generate(dir, math.MaxInt32, false) + c.generate(dir, math.MaxInt32, false, false) } // MakeDataset generates a new ethash dataset and optionally stores it to disk. func MakeDataset(block uint64, dir string) { d := dataset{epoch: block / epochLength} - d.generate(dir, math.MaxInt32, false) + d.generate(dir, math.MaxInt32, false, false) } // Mode defines the type and amount of PoW verification an ethash engine makes. @@ -396,43 +401,17 @@ const ( // Config are the configuration parameters of the ethash. type Config struct { - CacheDir string - CachesInMem int - CachesOnDisk int - DatasetDir string - DatasetsInMem int - DatasetsOnDisk int - PowMode Mode -} + CacheDir string + CachesInMem int + CachesOnDisk int + CachesLockMmap bool + DatasetDir string + DatasetsInMem int + DatasetsOnDisk int + DatasetsLockMmap bool + PowMode Mode -// sealTask wraps a seal block with relative result channel for remote sealer thread. -type sealTask struct { - block *types.Block - results chan<- *types.Block -} - -// mineResult wraps the pow solution parameters for the specified block. -type mineResult struct { - nonce types.BlockNonce - mixDigest common.Hash - hash common.Hash - - errc chan error -} - -// hashrate wraps the hash rate submitted by the remote sealer. -type hashrate struct { - id common.Hash - ping time.Time - rate uint64 - - done chan struct{} -} - -// sealWork wraps a seal work package for remote sealer. -type sealWork struct { - errc chan error - res chan [4]string + Log log.Logger `toml:"-"` } // Ethash is a consensus engine based on proof-of-work implementing the ethash @@ -448,52 +427,42 @@ type Ethash struct { threads int // Number of threads to mine on if mining update chan struct{} // Notification channel to update mining parameters hashrate metrics.Meter // Meter tracking the average hashrate - - // Remote sealer related fields - workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer - fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work - submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result - fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer. - submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate + remote *remoteSealer // The fields below are hooks for testing shared *Ethash // Shared PoW verifier to avoid cache regeneration fakeFail uint64 // Block number which fails PoW check even in fake mode fakeDelay time.Duration // Time delay to sleep for before returning from verify - lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields - closeOnce sync.Once // Ensures exit channel will not be closed twice. - exitCh chan chan error // Notification channel to exiting backend threads + lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields + closeOnce sync.Once // Ensures exit channel will not be closed twice. } // New creates a full sized ethash PoW scheme and starts a background thread for // remote mining, also optionally notifying a batch of remote services of new work // packages. 
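// Illustrative sketch, not from this patch: constructing the extended Config,
// including the new CachesLockMmap/DatasetsLockMmap knobs and the Log field,
// and starting an engine with New. The directory paths are placeholders.
package main

import (
	"github.com/ava-labs/coreth/consensus/ethash"
	"github.com/ethereum/go-ethereum/log"
)

func main() {
	cfg := ethash.Config{
		CacheDir:         "/var/lib/ethash/caches", // placeholder path
		CachesInMem:      2,
		CachesOnDisk:     3,
		CachesLockMmap:   true, // mlock the cache mmaps
		DatasetDir:       "/var/lib/ethash/dags", // placeholder path
		DatasetsInMem:    1,
		DatasetsOnDisk:   2,
		DatasetsLockMmap: false,
		PowMode:          ethash.ModeNormal,
		Log:              log.Root(), // New defaults this when left nil
	}
	engine := ethash.New(cfg, nil, false) // no remote notifications, verify submitted seals
	defer engine.Close()
}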
func New(config Config, notify []string, noverify bool) *Ethash { + if config.Log == nil { + config.Log = log.Root() + } if config.CachesInMem <= 0 { - log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) + config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) config.CachesInMem = 1 } if config.CacheDir != "" && config.CachesOnDisk > 0 { - log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk) + config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk) } if config.DatasetDir != "" && config.DatasetsOnDisk > 0 { - log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk) + config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk) } ethash := &Ethash{ - config: config, - caches: newlru("cache", config.CachesInMem, newCache), - datasets: newlru("dataset", config.DatasetsInMem, newDataset), - update: make(chan struct{}), - hashrate: metrics.NewMeterForced(), - workCh: make(chan *sealTask), - fetchWorkCh: make(chan *sealWork), - submitWorkCh: make(chan *mineResult), - fetchRateCh: make(chan chan uint64), - submitRateCh: make(chan *hashrate), - exitCh: make(chan chan error), - } - go ethash.remote(notify, noverify) + config: config, + caches: newlru("cache", config.CachesInMem, newCache), + datasets: newlru("dataset", config.DatasetsInMem, newDataset), + update: make(chan struct{}), + hashrate: metrics.NewMeterForced(), + } + ethash.remote = startRemoteSealer(ethash, notify, noverify) return ethash } @@ -501,19 +470,13 @@ func New(config Config, notify []string, noverify bool) *Ethash { // purposes. func NewTester(notify []string, noverify bool) *Ethash { ethash := &Ethash{ - config: Config{PowMode: ModeTest}, - caches: newlru("cache", 1, newCache), - datasets: newlru("dataset", 1, newDataset), - update: make(chan struct{}), - hashrate: metrics.NewMeterForced(), - workCh: make(chan *sealTask), - fetchWorkCh: make(chan *sealWork), - submitWorkCh: make(chan *mineResult), - fetchRateCh: make(chan chan uint64), - submitRateCh: make(chan *hashrate), - exitCh: make(chan chan error), - } - go ethash.remote(notify, noverify) + config: Config{PowMode: ModeTest, Log: log.Root()}, + caches: newlru("cache", 1, newCache), + datasets: newlru("dataset", 1, newDataset), + update: make(chan struct{}), + hashrate: metrics.NewMeterForced(), + } + ethash.remote = startRemoteSealer(ethash, notify, noverify) return ethash } @@ -524,6 +487,7 @@ func NewFaker() *Ethash { return &Ethash{ config: Config{ PowMode: ModeFake, + Log: log.Root(), }, } } @@ -535,6 +499,7 @@ func NewFakeFailer(fail uint64) *Ethash { return &Ethash{ config: Config{ PowMode: ModeFake, + Log: log.Root(), }, fakeFail: fail, } @@ -547,6 +512,7 @@ func NewFakeDelayer(delay time.Duration) *Ethash { return &Ethash{ config: Config{ PowMode: ModeFake, + Log: log.Root(), }, fakeDelay: delay, } @@ -558,6 +524,7 @@ func NewFullFaker() *Ethash { return &Ethash{ config: Config{ PowMode: ModeFullFake, + Log: log.Root(), }, } } @@ -573,13 +540,11 @@ func (ethash *Ethash) Close() error { var err error ethash.closeOnce.Do(func() { // Short circuit if the exit channel is not allocated. 
- if ethash.exitCh == nil { + if ethash.remote == nil { return } - errc := make(chan error) - ethash.exitCh <- errc - err = <-errc - close(ethash.exitCh) + close(ethash.remote.requestExit) + <-ethash.remote.exitCh }) return err } @@ -593,12 +558,12 @@ func (ethash *Ethash) cache(block uint64) *cache { current := currentI.(*cache) // Wait for generation finish. - current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest) + current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest) // If we need a new future cache, now's a good time to regenerate it. if futureI != nil { future := futureI.(*cache) - go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest) + go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest) } return current } @@ -618,20 +583,20 @@ func (ethash *Ethash) dataset(block uint64, async bool) *dataset { // If async is specified, generate everything in a background thread if async && !current.generated() { go func() { - current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) if futureI != nil { future := futureI.(*dataset) - future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) } }() } else { // Either blocking generation was requested, or already done - current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) if futureI != nil { future := futureI.(*dataset) - go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) } } return current @@ -680,8 +645,8 @@ func (ethash *Ethash) Hashrate() float64 { var res = make(chan uint64, 1) select { - case ethash.fetchRateCh <- res: - case <-ethash.exitCh: + case ethash.remote.fetchRateCh <- res: + case <-ethash.remote.exitCh: // Return local hashrate only if ethash is stopped. return ethash.hashrate.Rate1() } @@ -691,7 +656,7 @@ func (ethash *Ethash) Hashrate() float64 { } // APIs implements consensus.Engine, returning the user facing RPC APIs. -func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API { +func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API { // In order to ensure backward compatibility, we exposes ethash RPC APIs // to both eth and ethash namespaces. 
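// Sketch of the shutdown handshake the reworked Close relies on, shown as a
// generic worker rather than the patch code itself: the owner closes
// requestExit, the background loop runs its deferred cleanup and closes
// exitCh, and Close returns only once the loop has fully stopped.
package example

type worker struct {
	requestExit chan struct{} // closed by the owner to ask the loop to stop
	exitCh      chan struct{} // closed by the loop once its cleanup is done
}

func newWorker() *worker {
	w := &worker{
		requestExit: make(chan struct{}),
		exitCh:      make(chan struct{}),
	}
	go w.loop()
	return w
}

func (w *worker) loop() {
	defer close(w.exitCh) // signal completion after all cleanup has run
	for {
		select {
		// ... handle work channels here ...
		case <-w.requestExit:
			return
		}
	}
}

func (w *worker) Close() {
	close(w.requestExit) // ask the loop to exit
	<-w.exitCh           // block until it actually has
}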
return []rpc.API{ diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go index 799be05..cb8eed7 100644 --- a/consensus/ethash/sealer.go +++ b/consensus/ethash/sealer.go @@ -18,6 +18,7 @@ package ethash import ( "bytes" + "context" crand "crypto/rand" "encoding/json" "errors" @@ -31,9 +32,8 @@ import ( "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" - "github.com/ava-labs/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" ) const ( @@ -48,7 +48,7 @@ var ( // Seal implements consensus.Engine, attempting to find a nonce that satisfies // the block's difficulty requirements. -func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { +func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { // If we're running a fake PoW, simply return a 0 nonce immediately if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake { header := block.Header() @@ -56,7 +56,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu select { case results <- block.WithSeal(header): default: - log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header())) + ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header())) } return nil } @@ -85,8 +85,8 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu threads = 0 // Allows disabling local mining without extra logic around local/remote } // Push new work to remote sealer - if ethash.workCh != nil { - ethash.workCh <- &sealTask{block: block, results: results} + if ethash.remote != nil { + ethash.remote.workCh <- &sealTask{block: block, results: results} } var ( pend sync.WaitGroup @@ -111,14 +111,14 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu select { case results <- result: default: - log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header())) + ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header())) } close(abort) case <-ethash.update: // Thread count was changed on user request, restart close(abort) if err := ethash.Seal(chain, block, results, stop); err != nil { - log.Error("Failed to restart sealing after update", "err", err) + ethash.config.Log.Error("Failed to restart sealing after update", "err", err) } } // Wait for all miners to terminate and return the block @@ -143,7 +143,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s attempts = int64(0) nonce = seed ) - logger := log.New("miner", id) + logger := ethash.config.Log.New("miner", id) logger.Trace("Started ethash search for new nonces", "seed", seed) search: for { @@ -186,160 +186,128 @@ search: runtime.KeepAlive(dataset) } -// remote is a standalone goroutine to handle remote mining related stuff. -func (ethash *Ethash) remote(notify []string, noverify bool) { - var ( - works = make(map[common.Hash]*types.Block) - rates = make(map[common.Hash]hashrate) +// This is the timeout for HTTP requests to notify external miners. 
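// A sketch, under stated assumptions rather than the patch's mine loop, of
// the boundary check the sealer and the remote work package share: the target
// is 2^256 / difficulty, and a candidate digest is a valid seal when, read as
// a big integer, it does not exceed that target. hashimotoDigest below is a
// plain Keccak-256 stand-in for the real dataset-backed ethash hash.
package example

import (
	"encoding/binary"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

var two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

// hashimotoDigest is a placeholder for the dataset-backed ethash computation.
func hashimotoDigest(sealhash common.Hash, nonce uint64) []byte {
	seed := make([]byte, 40)
	copy(seed, sealhash[:])
	binary.LittleEndian.PutUint64(seed[32:], nonce)
	return crypto.Keccak256(seed) // NOT real ethash; illustration only
}

// searchNonce scans nonces from seed until one meets the difficulty target
// or the abort channel is closed.
func searchNonce(sealhash common.Hash, difficulty *big.Int, seed uint64, abort <-chan struct{}) (uint64, bool) {
	target := new(big.Int).Div(two256, difficulty)
	for nonce := seed; ; nonce++ {
		select {
		case <-abort:
			return 0, false // sealing was cancelled or someone else won
		default:
		}
		digest := hashimotoDigest(sealhash, nonce)
		if new(big.Int).SetBytes(digest).Cmp(target) <= 0 {
			return nonce, true // found a nonce satisfying the difficulty
		}
	}
}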
+const remoteSealerTimeout = 1 * time.Second - results chan<- *types.Block - currentBlock *types.Block - currentWork [4]string +type remoteSealer struct { + works map[common.Hash]*types.Block + rates map[common.Hash]hashrate + currentBlock *types.Block + currentWork [4]string + notifyCtx context.Context + cancelNotify context.CancelFunc // cancels all notification requests + reqWG sync.WaitGroup // tracks notification request goroutines - notifyTransport = &http.Transport{} - notifyClient = &http.Client{ - Transport: notifyTransport, - Timeout: time.Second, - } - notifyReqs = make([]*http.Request, len(notify)) - ) - // notifyWork notifies all the specified mining endpoints of the availability of - // new work to be processed. - notifyWork := func() { - work := currentWork - blob, _ := json.Marshal(work) - - for i, url := range notify { - // Terminate any previously pending request and create the new work - if notifyReqs[i] != nil { - notifyTransport.CancelRequest(notifyReqs[i]) - } - notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob)) - notifyReqs[i].Header.Set("Content-Type", "application/json") - - // Push the new work concurrently to all the remote nodes - go func(req *http.Request, url string) { - res, err := notifyClient.Do(req) - if err != nil { - log.Warn("Failed to notify remote miner", "err", err) - } else { - log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2]) - res.Body.Close() - } - }(notifyReqs[i], url) - } - } - // makeWork creates a work package for external miner. - // - // The work package consists of 3 strings: - // result[0], 32 bytes hex encoded current block header pow-hash - // result[1], 32 bytes hex encoded seed hash used for DAG - // result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty - // result[3], hex encoded block number - makeWork := func(block *types.Block) { - hash := ethash.SealHash(block.Header()) - - currentWork[0] = hash.Hex() - currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex() - currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex() - currentWork[3] = hexutil.EncodeBig(block.Number()) - - // Trace the seal work fetched by remote sealer. - currentBlock = block - works[hash] = block - } - // submitWork verifies the submitted pow solution, returning - // whether the solution was accepted or not (not can be both a bad pow as well as - // any other error, like no pending work or stale mining result). - submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool { - if currentBlock == nil { - log.Error("Pending work without block", "sealhash", sealhash) - return false - } - // Make sure the work submitted is present - block := works[sealhash] - if block == nil { - log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", currentBlock.NumberU64()) - return false - } - // Verify the correctness of submitted result. - header := block.Header() - header.Nonce = nonce - header.MixDigest = mixDigest - - start := time.Now() - if !noverify { - if err := ethash.verifySeal(nil, header, true); err != nil { - log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err) - return false - } - } - // Make sure the result channel is assigned. 
- if results == nil { - log.Warn("Ethash result channel is empty, submitted mining result is rejected") - return false - } - log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start))) + ethash *Ethash + noverify bool + notifyURLs []string + results chan<- *types.Block + workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer + fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work + submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result + fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer. + submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate + requestExit chan struct{} + exitCh chan struct{} +} - // Solutions seems to be valid, return to the miner and notify acceptance. - solution := block.WithSeal(header) +// sealTask wraps a seal block with relative result channel for remote sealer thread. +type sealTask struct { + block *types.Block + results chan<- *types.Block +} - // The submitted solution is within the scope of acceptance. - if solution.NumberU64()+staleThreshold > currentBlock.NumberU64() { - select { - case results <- solution: - log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash()) - return true - default: - log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash) - return false - } - } - // The submitted block is too old to accept, drop it. - log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash()) - return false +// mineResult wraps the pow solution parameters for the specified block. +type mineResult struct { + nonce types.BlockNonce + mixDigest common.Hash + hash common.Hash + + errc chan error +} + +// hashrate wraps the hash rate submitted by the remote sealer. +type hashrate struct { + id common.Hash + ping time.Time + rate uint64 + + done chan struct{} +} + +// sealWork wraps a seal work package for remote sealer. +type sealWork struct { + errc chan error + res chan [4]string +} + +func startRemoteSealer(ethash *Ethash, urls []string, noverify bool) *remoteSealer { + ctx, cancel := context.WithCancel(context.Background()) + s := &remoteSealer{ + ethash: ethash, + noverify: noverify, + notifyURLs: urls, + notifyCtx: ctx, + cancelNotify: cancel, + works: make(map[common.Hash]*types.Block), + rates: make(map[common.Hash]hashrate), + workCh: make(chan *sealTask), + fetchWorkCh: make(chan *sealWork), + submitWorkCh: make(chan *mineResult), + fetchRateCh: make(chan chan uint64), + submitRateCh: make(chan *hashrate), + requestExit: make(chan struct{}), + exitCh: make(chan struct{}), } + go s.loop() + return s +} + +func (s *remoteSealer) loop() { + defer func() { + s.ethash.config.Log.Trace("Ethash remote sealer is exiting") + s.cancelNotify() + s.reqWG.Wait() + close(s.exitCh) + }() ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() for { select { - case work := <-ethash.workCh: + case work := <-s.workCh: // Update current work with new received block. // Note same work can be past twice, happens when changing CPU threads. 
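// Sketch, not the patch code, of the cancel-and-wait pattern the remote
// sealer now uses for miner notifications: each request goroutine is tracked
// by a WaitGroup and carries a context that the exit path cancels, so
// shutdown blocks until every in-flight HTTP call has been torn down.
package example

import (
	"context"
	"net/http"
	"sync"
)

type notifier struct {
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

func newNotifier() *notifier {
	ctx, cancel := context.WithCancel(context.Background())
	return &notifier{ctx: ctx, cancel: cancel}
}

func (n *notifier) notify(urls []string) {
	n.wg.Add(len(urls))
	for _, url := range urls {
		go func(url string) {
			defer n.wg.Done()
			req, err := http.NewRequest("GET", url, nil)
			if err != nil {
				return
			}
			resp, err := http.DefaultClient.Do(req.WithContext(n.ctx)) // cancelled on shutdown
			if err == nil {
				resp.Body.Close()
			}
		}(url)
	}
}

func (n *notifier) shutdown() {
	n.cancel()  // abort any request still running
	n.wg.Wait() // and wait for the goroutines to finish
}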
- results = work.results + s.results = work.results + s.makeWork(work.block) + s.notifyWork() - makeWork(work.block) - - // Notify and requested URLs of the new work availability - notifyWork() - - case work := <-ethash.fetchWorkCh: + case work := <-s.fetchWorkCh: // Return current mining work to remote miner. - if currentBlock == nil { + if s.currentBlock == nil { work.errc <- errNoMiningWork } else { - work.res <- currentWork + work.res <- s.currentWork } - case result := <-ethash.submitWorkCh: + case result := <-s.submitWorkCh: // Verify submitted PoW solution based on maintained mining blocks. - if submitWork(result.nonce, result.mixDigest, result.hash) { + if s.submitWork(result.nonce, result.mixDigest, result.hash) { result.errc <- nil } else { result.errc <- errInvalidSealResult } - case result := <-ethash.submitRateCh: + case result := <-s.submitRateCh: // Trace remote sealer's hash rate by submitted value. - rates[result.id] = hashrate{rate: result.rate, ping: time.Now()} + s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()} close(result.done) - case req := <-ethash.fetchRateCh: + case req := <-s.fetchRateCh: // Gather all hash rate submitted by remote sealer. var total uint64 - for _, rate := range rates { + for _, rate := range s.rates { // this could overflow total += rate.rate } @@ -347,25 +315,126 @@ func (ethash *Ethash) remote(notify []string, noverify bool) { case <-ticker.C: // Clear stale submitted hash rate. - for id, rate := range rates { + for id, rate := range s.rates { if time.Since(rate.ping) > 10*time.Second { - delete(rates, id) + delete(s.rates, id) } } // Clear stale pending blocks - if currentBlock != nil { - for hash, block := range works { - if block.NumberU64()+staleThreshold <= currentBlock.NumberU64() { - delete(works, hash) + if s.currentBlock != nil { + for hash, block := range s.works { + if block.NumberU64()+staleThreshold <= s.currentBlock.NumberU64() { + delete(s.works, hash) } } } - case errc := <-ethash.exitCh: - // Exit remote loop if ethash is closed and return relevant error. - errc <- nil - log.Trace("Ethash remote sealer is exiting") + case <-s.requestExit: return } } } + +// makeWork creates a work package for external miner. +// +// The work package consists of 3 strings: +// result[0], 32 bytes hex encoded current block header pow-hash +// result[1], 32 bytes hex encoded seed hash used for DAG +// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty +// result[3], hex encoded block number +func (s *remoteSealer) makeWork(block *types.Block) { + hash := s.ethash.SealHash(block.Header()) + s.currentWork[0] = hash.Hex() + s.currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex() + s.currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex() + s.currentWork[3] = hexutil.EncodeBig(block.Number()) + + // Trace the seal work fetched by remote sealer. + s.currentBlock = block + s.works[hash] = block +} + +// notifyWork notifies all the specified mining endpoints of the availability of +// new work to be processed. 
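// Illustrative sketch, not part of this patch: a minimal notify endpoint of
// the kind passed to New/startRemoteSealer. It receives the JSON-encoded
// [4]string work package that notifyWork POSTs on every new work update. The
// listen address is a placeholder.
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		var work [4]string // [pow-hash, seed hash, target, block number]
		if err := json.NewDecoder(r.Body).Decode(&work); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("new work: hash=%s target=%s number=%s", work[0], work[2], work[3])
		w.WriteHeader(http.StatusOK) // the sealer only logs the outcome; the body is ignored
	})
	log.Fatal(http.ListenAndServe(":9090", nil)) // placeholder notify URL port
}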
+func (s *remoteSealer) notifyWork() { + work := s.currentWork + blob, _ := json.Marshal(work) + s.reqWG.Add(len(s.notifyURLs)) + for _, url := range s.notifyURLs { + go s.sendNotification(s.notifyCtx, url, blob, work) + } +} + +func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) { + defer s.reqWG.Done() + + req, err := http.NewRequest("POST", url, bytes.NewReader(json)) + if err != nil { + s.ethash.config.Log.Warn("Can't create remote miner notification", "err", err) + return + } + ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout) + defer cancel() + req = req.WithContext(ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + s.ethash.config.Log.Warn("Failed to notify remote miner", "err", err) + } else { + s.ethash.config.Log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2]) + resp.Body.Close() + } +} + +// submitWork verifies the submitted pow solution, returning +// whether the solution was accepted or not (not can be both a bad pow as well as +// any other error, like no pending work or stale mining result). +func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool { + if s.currentBlock == nil { + s.ethash.config.Log.Error("Pending work without block", "sealhash", sealhash) + return false + } + // Make sure the work submitted is present + block := s.works[sealhash] + if block == nil { + s.ethash.config.Log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentBlock.NumberU64()) + return false + } + // Verify the correctness of submitted result. + header := block.Header() + header.Nonce = nonce + header.MixDigest = mixDigest + + start := time.Now() + if !s.noverify { + if err := s.ethash.verifySeal(nil, header, true); err != nil { + s.ethash.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err) + return false + } + } + // Make sure the result channel is assigned. + if s.results == nil { + s.ethash.config.Log.Warn("Ethash result channel is empty, submitted mining result is rejected") + return false + } + s.ethash.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start))) + + // Solutions seems to be valid, return to the miner and notify acceptance. + solution := block.WithSeal(header) + + // The submitted solution is within the scope of acceptance. + if solution.NumberU64()+staleThreshold > s.currentBlock.NumberU64() { + select { + case s.results <- solution: + s.ethash.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash()) + return true + default: + s.ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash) + return false + } + } + // The submitted block is too old to accept, drop it. + s.ethash.config.Log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash()) + return false +} -- cgit v1.2.3-70-g09d2