From ad886faec521f1edcb90f6f8eb4555608d085312 Mon Sep 17 00:00:00 2001 From: Determinant Date: Tue, 13 Aug 2019 14:05:49 -0400 Subject: add an option to call geth entry; add vendor --- cmd/geth/chaincmd.go | 559 +++++++++++++ cmd/geth/config.go | 211 +++++ cmd/geth/consolecmd.go | 220 +++++ cmd/geth/main.go | 398 +++++++++ cmd/geth/misccmd.go | 142 ++++ cmd/geth/retesteth.go | 891 ++++++++++++++++++++ cmd/geth/retesteth_copypaste.go | 148 ++++ cmd/geth/usage.go | 370 +++++++++ cmd/utils/cmd.go | 314 +++++++ cmd/utils/customflags.go | 240 ++++++ cmd/utils/flags.go | 1747 +++++++++++++++++++++++++++++++++++++++ 11 files changed, 5240 insertions(+) create mode 100644 cmd/geth/chaincmd.go create mode 100644 cmd/geth/config.go create mode 100644 cmd/geth/consolecmd.go create mode 100644 cmd/geth/main.go create mode 100644 cmd/geth/misccmd.go create mode 100644 cmd/geth/retesteth.go create mode 100644 cmd/geth/retesteth_copypaste.go create mode 100644 cmd/geth/usage.go create mode 100644 cmd/utils/cmd.go create mode 100644 cmd/utils/customflags.go create mode 100644 cmd/utils/flags.go (limited to 'cmd') diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go new file mode 100644 index 0000000..4b175b6 --- /dev/null +++ b/cmd/geth/chaincmd.go @@ -0,0 +1,559 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package geth + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "strconv" + "sync/atomic" + "time" + + "github.com/Determinant/coreth/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/console" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie" + "gopkg.in/urfave/cli.v1" +) + +var ( + initCommand = cli.Command{ + Action: utils.MigrateFlags(initGenesis), + Name: "init", + Usage: "Bootstrap and initialize a new genesis block", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +The init command initializes a new genesis block and definition for the network. +This is a destructive action and changes the network in which you will be +participating. + +It expects the genesis file as argument.`, + } + importCommand = cli.Command{ + Action: utils.MigrateFlags(importChain), + Name: "import", + Usage: "Import a blockchain file", + ArgsUsage: " ( ... ) ", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + utils.GCModeFlag, + utils.CacheDatabaseFlag, + utils.CacheGCFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +The import command imports blocks from an RLP-encoded form. 
The form can be one file +with several RLP-encoded blocks, or several files can be used. + +If only one file is used, import error will result in failure. If several files are used, +processing will proceed even if an individual RLP-file import failure occurs.`, + } + exportCommand = cli.Command{ + Action: utils.MigrateFlags(exportChain), + Name: "export", + Usage: "Export blockchain into file", + ArgsUsage: " [ ]", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +Requires a first argument of the file to write to. +Optional second and third arguments control the first and +last block to write. In this mode, the file will be appended +if already existing. If the file ends with .gz, the output will +be gzipped.`, + } + importPreimagesCommand = cli.Command{ + Action: utils.MigrateFlags(importPreimages), + Name: "import-preimages", + Usage: "Import the preimage database from an RLP stream", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` + The import-preimages command imports hash preimages from an RLP encoded stream.`, + } + exportPreimagesCommand = cli.Command{ + Action: utils.MigrateFlags(exportPreimages), + Name: "export-preimages", + Usage: "Export the preimage database into an RLP stream", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +The export-preimages command export hash preimages to an RLP encoded stream`, + } + copydbCommand = cli.Command{ + Action: utils.MigrateFlags(copyDb), + Name: "copydb", + Usage: "Create a local chain from a target chaindata folder", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + utils.FakePoWFlag, + utils.TestnetFlag, + utils.RinkebyFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +The first argument must be the directory containing the blockchain to download from`, + } + removedbCommand = cli.Command{ + Action: utils.MigrateFlags(removeDB), + Name: "removedb", + Usage: "Remove blockchain and state databases", + ArgsUsage: " ", + Flags: []cli.Flag{ + utils.DataDirFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +Remove blockchain and state databases`, + } + dumpCommand = cli.Command{ + Action: utils.MigrateFlags(dump), + Name: "dump", + Usage: "Dump a specific block from storage", + ArgsUsage: "[ | ]...", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + utils.IterativeOutputFlag, + utils.ExcludeCodeFlag, + utils.ExcludeStorageFlag, + utils.IncludeIncompletesFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +The arguments are interpreted as block numbers or hashes. +Use "ethereum dump 0" to dump the genesis block.`, + } + inspectCommand = cli.Command{ + Action: utils.MigrateFlags(inspect), + Name: "inspect", + Usage: "Inspect the storage size for each type of data in the database", + ArgsUsage: " ", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.AncientFlag, + utils.CacheFlag, + utils.TestnetFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.SyncModeFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + } +) + +// initGenesis will initialise the given JSON format genesis file and writes it as +// the zero'd block (i.e. genesis) or will fail hard if it can't succeed. 
+func initGenesis(ctx *cli.Context) error { + // Make sure we have a valid genesis JSON + genesisPath := ctx.Args().First() + if len(genesisPath) == 0 { + utils.Fatalf("Must supply path to genesis JSON file") + } + file, err := os.Open(genesisPath) + if err != nil { + utils.Fatalf("Failed to read genesis file: %v", err) + } + defer file.Close() + + genesis := new(core.Genesis) + if err := json.NewDecoder(file).Decode(genesis); err != nil { + utils.Fatalf("invalid genesis file: %v", err) + } + // Open an initialise both full and light databases + stack := makeFullNode(ctx) + defer stack.Close() + + for _, name := range []string{"chaindata", "lightchaindata"} { + chaindb, err := stack.OpenDatabase(name, 0, 0, "") + if err != nil { + utils.Fatalf("Failed to open database: %v", err) + } + _, hash, err := core.SetupGenesisBlock(chaindb, genesis) + if err != nil { + utils.Fatalf("Failed to write genesis block: %v", err) + } + chaindb.Close() + log.Info("Successfully wrote genesis state", "database", name, "hash", hash) + } + return nil +} + +func importChain(ctx *cli.Context) error { + if len(ctx.Args()) < 1 { + utils.Fatalf("This command requires an argument.") + } + stack := makeFullNode(ctx) + defer stack.Close() + + chain, db := utils.MakeChain(ctx, stack) + defer db.Close() + + // Start periodically gathering memory profiles + var peakMemAlloc, peakMemSys uint64 + go func() { + stats := new(runtime.MemStats) + for { + runtime.ReadMemStats(stats) + if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc { + atomic.StoreUint64(&peakMemAlloc, stats.Alloc) + } + if atomic.LoadUint64(&peakMemSys) < stats.Sys { + atomic.StoreUint64(&peakMemSys, stats.Sys) + } + time.Sleep(5 * time.Second) + } + }() + // Import the chain + start := time.Now() + + if len(ctx.Args()) == 1 { + if err := utils.ImportChain(chain, ctx.Args().First()); err != nil { + log.Error("Import error", "err", err) + } + } else { + for _, arg := range ctx.Args() { + if err := utils.ImportChain(chain, arg); err != nil { + log.Error("Import error", "file", arg, "err", err) + } + } + } + chain.Stop() + fmt.Printf("Import done in %v.\n\n", time.Since(start)) + + // Output pre-compaction stats mostly to see the import trashing + stats, err := db.Stat("leveldb.stats") + if err != nil { + utils.Fatalf("Failed to read database stats: %v", err) + } + fmt.Println(stats) + + ioStats, err := db.Stat("leveldb.iostats") + if err != nil { + utils.Fatalf("Failed to read database iostats: %v", err) + } + fmt.Println(ioStats) + + // Print the memory statistics used by the importing + mem := new(runtime.MemStats) + runtime.ReadMemStats(mem) + + fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024) + fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024) + fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000) + fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs)) + + if ctx.GlobalBool(utils.NoCompactionFlag.Name) { + return nil + } + + // Compact the entire database to more accurately measure disk io and print the stats + start = time.Now() + fmt.Println("Compacting entire database...") + if err = db.Compact(nil, nil); err != nil { + utils.Fatalf("Compaction failed: %v", err) + } + fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) + + stats, err = db.Stat("leveldb.stats") + if err != nil { + utils.Fatalf("Failed to read database stats: %v", err) + 
} + fmt.Println(stats) + + ioStats, err = db.Stat("leveldb.iostats") + if err != nil { + utils.Fatalf("Failed to read database iostats: %v", err) + } + fmt.Println(ioStats) + return nil +} + +func exportChain(ctx *cli.Context) error { + if len(ctx.Args()) < 1 { + utils.Fatalf("This command requires an argument.") + } + stack := makeFullNode(ctx) + defer stack.Close() + + chain, _ := utils.MakeChain(ctx, stack) + start := time.Now() + + var err error + fp := ctx.Args().First() + if len(ctx.Args()) < 3 { + err = utils.ExportChain(chain, fp) + } else { + // This can be improved to allow for numbers larger than 9223372036854775807 + first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64) + last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64) + if ferr != nil || lerr != nil { + utils.Fatalf("Export error in parsing parameters: block number not an integer\n") + } + if first < 0 || last < 0 { + utils.Fatalf("Export error: block number must be greater than 0\n") + } + err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last)) + } + + if err != nil { + utils.Fatalf("Export error: %v\n", err) + } + fmt.Printf("Export done in %v\n", time.Since(start)) + return nil +} + +// importPreimages imports preimage data from the specified file. +func importPreimages(ctx *cli.Context) error { + if len(ctx.Args()) < 1 { + utils.Fatalf("This command requires an argument.") + } + stack := makeFullNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack) + start := time.Now() + + if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil { + utils.Fatalf("Import error: %v\n", err) + } + fmt.Printf("Import done in %v\n", time.Since(start)) + return nil +} + +// exportPreimages dumps the preimage data to specified json file in streaming way. 
+func exportPreimages(ctx *cli.Context) error { + if len(ctx.Args()) < 1 { + utils.Fatalf("This command requires an argument.") + } + stack := makeFullNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack) + start := time.Now() + + if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil { + utils.Fatalf("Export error: %v\n", err) + } + fmt.Printf("Export done in %v\n", time.Since(start)) + return nil +} + +func copyDb(ctx *cli.Context) error { + // Ensure we have a source chain directory to copy + if len(ctx.Args()) < 1 { + utils.Fatalf("Source chaindata directory path argument missing") + } + if len(ctx.Args()) < 2 { + utils.Fatalf("Source ancient chain directory path argument missing") + } + // Initialize a new chain for the running node to sync into + stack := makeFullNode(ctx) + defer stack.Close() + + chain, chainDb := utils.MakeChain(ctx, stack) + syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode) + + var syncBloom *trie.SyncBloom + if syncMode == downloader.FastSync { + syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb) + } + dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil) + + // Create a source peer to satisfy downloader requests from + db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "") + if err != nil { + return err + } + hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false }) + if err != nil { + return err + } + peer := downloader.NewFakePeer("local", db, hc, dl) + if err = dl.RegisterPeer("local", 63, peer); err != nil { + return err + } + // Synchronise with the simulated peer + start := time.Now() + + currentHeader := hc.CurrentHeader() + if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil { + return err + } + for dl.Synchronising() { + time.Sleep(10 * time.Millisecond) + } + fmt.Printf("Database copy done in %v\n", time.Since(start)) + + // Compact the entire database to remove any sync overhead + start = time.Now() + fmt.Println("Compacting entire database...") + if err = db.Compact(nil, nil); err != nil { + utils.Fatalf("Compaction failed: %v", err) + } + fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) + return nil +} + +func removeDB(ctx *cli.Context) error { + stack, config := makeConfigNode(ctx) + + // Remove the full node state database + path := stack.ResolvePath("chaindata") + if common.FileExist(path) { + confirmAndRemoveDB(path, "full node state database") + } else { + log.Info("Full node state database missing", "path", path) + } + // Remove the full node ancient database + path = config.Eth.DatabaseFreezer + switch { + case path == "": + path = filepath.Join(stack.ResolvePath("chaindata"), "ancient") + case !filepath.IsAbs(path): + path = config.Node.ResolvePath(path) + } + if common.FileExist(path) { + confirmAndRemoveDB(path, "full node ancient database") + } else { + log.Info("Full node ancient database missing", "path", path) + } + // Remove the light node database + path = stack.ResolvePath("lightchaindata") + if common.FileExist(path) { + confirmAndRemoveDB(path, "light node database") + } else { + log.Info("Light node database missing", "path", path) + } + return nil +} + +// confirmAndRemoveDB prompts the user for a last confirmation and removes the +// folder if accepted. 
+func confirmAndRemoveDB(database string, kind string) { + confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database)) + switch { + case err != nil: + utils.Fatalf("%v", err) + case !confirm: + log.Info("Database deletion skipped", "path", database) + default: + start := time.Now() + filepath.Walk(database, func(path string, info os.FileInfo, err error) error { + // If we're at the top level folder, recurse into + if path == database { + return nil + } + // Delete all the files, but not subfolders + if !info.IsDir() { + os.Remove(path) + return nil + } + return filepath.SkipDir + }) + log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start))) + } +} + +func dump(ctx *cli.Context) error { + stack := makeFullNode(ctx) + defer stack.Close() + + chain, chainDb := utils.MakeChain(ctx, stack) + defer chainDb.Close() + for _, arg := range ctx.Args() { + var block *types.Block + if hashish(arg) { + block = chain.GetBlockByHash(common.HexToHash(arg)) + } else { + num, _ := strconv.Atoi(arg) + block = chain.GetBlockByNumber(uint64(num)) + } + if block == nil { + fmt.Println("{}") + utils.Fatalf("block not found") + } else { + state, err := state.New(block.Root(), state.NewDatabase(chainDb)) + if err != nil { + utils.Fatalf("could not create new state: %v", err) + } + excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name) + excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name) + includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name) + if ctx.Bool(utils.IterativeOutputFlag.Name) { + state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout)) + } else { + if includeMissing { + fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" + + " otherwise the accounts will overwrite each other in the resulting mapping.") + } + fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false)) + } + } + } + return nil +} + +func inspect(ctx *cli.Context) error { + node, _ := makeConfigNode(ctx) + defer node.Close() + + _, chainDb := utils.MakeChain(ctx, node) + defer chainDb.Close() + + return rawdb.InspectDatabase(chainDb) +} + +// hashish returns true for strings that look like hashes. +func hashish(x string) bool { + _, err := strconv.Atoi(x) + return err != nil +} diff --git a/cmd/geth/config.go b/cmd/geth/config.go new file mode 100644 index 0000000..e33b367 --- /dev/null +++ b/cmd/geth/config.go @@ -0,0 +1,211 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package geth + +import ( + "bufio" + "errors" + "fmt" + "os" + "reflect" + "unicode" + + cli "gopkg.in/urfave/cli.v1" + + "github.com/Determinant/coreth/cmd/utils" + "github.com/ethereum/go-ethereum/dashboard" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" + whisper "github.com/ethereum/go-ethereum/whisper/whisperv6" + "github.com/naoina/toml" +) + +var ( + dumpConfigCommand = cli.Command{ + Action: utils.MigrateFlags(dumpConfig), + Name: "dumpconfig", + Usage: "Show configuration values", + ArgsUsage: "", + Flags: append(append(nodeFlags, rpcFlags...), whisperFlags...), + Category: "MISCELLANEOUS COMMANDS", + Description: `The dumpconfig command shows configuration values.`, + } + + configFileFlag = cli.StringFlag{ + Name: "config", + Usage: "TOML configuration file", + } +) + +// These settings ensure that TOML keys use the same names as Go struct fields. +var tomlSettings = toml.Config{ + NormFieldName: func(rt reflect.Type, key string) string { + return key + }, + FieldToKey: func(rt reflect.Type, field string) string { + return field + }, + MissingField: func(rt reflect.Type, field string) error { + link := "" + if unicode.IsUpper(rune(rt.Name()[0])) && rt.PkgPath() != "main" { + link = fmt.Sprintf(", see https://godoc.org/%s#%s for available fields", rt.PkgPath(), rt.Name()) + } + return fmt.Errorf("field '%s' is not defined in %s%s", field, rt.String(), link) + }, +} + +type ethstatsConfig struct { + URL string `toml:",omitempty"` +} + +type gethConfig struct { + Eth eth.Config + Shh whisper.Config + Node node.Config + Ethstats ethstatsConfig + Dashboard dashboard.Config +} + +func loadConfig(file string, cfg *gethConfig) error { + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + + err = tomlSettings.NewDecoder(bufio.NewReader(f)).Decode(cfg) + // Add file name to errors that have a line number. + if _, ok := err.(*toml.LineError); ok { + err = errors.New(file + ", " + err.Error()) + } + return err +} + +func defaultNodeConfig() node.Config { + cfg := node.DefaultConfig + cfg.Name = clientIdentifier + cfg.Version = params.VersionWithCommit(gitCommit, gitDate) + cfg.HTTPModules = append(cfg.HTTPModules, "eth", "shh") + cfg.WSModules = append(cfg.WSModules, "eth", "shh") + cfg.IPCPath = "geth.ipc" + return cfg +} + +func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { + // Load defaults. + cfg := gethConfig{ + Eth: eth.DefaultConfig, + Shh: whisper.DefaultConfig, + Node: defaultNodeConfig(), + Dashboard: dashboard.DefaultConfig, + } + + // Load config file. + if file := ctx.GlobalString(configFileFlag.Name); file != "" { + if err := loadConfig(file, &cfg); err != nil { + utils.Fatalf("%v", err) + } + } + + // Apply flags. + utils.SetNodeConfig(ctx, &cfg.Node) + stack, err := node.New(&cfg.Node) + if err != nil { + utils.Fatalf("Failed to create the protocol stack: %v", err) + } + utils.SetEthConfig(ctx, stack, &cfg.Eth) + if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) { + cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name) + } + utils.SetShhConfig(ctx, stack, &cfg.Shh) + utils.SetDashboardConfig(ctx, &cfg.Dashboard) + + return stack, cfg +} + +// enableWhisper returns true in case one of the whisper flags is set. 
+func enableWhisper(ctx *cli.Context) bool { + for _, flag := range whisperFlags { + if ctx.GlobalIsSet(flag.GetName()) { + return true + } + } + return false +} + +func makeFullNode(ctx *cli.Context) *node.Node { + stack, cfg := makeConfigNode(ctx) + utils.RegisterEthService(stack, &cfg.Eth) + + if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) { + utils.RegisterDashboardService(stack, &cfg.Dashboard, gitCommit) + } + // Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode + shhEnabled := enableWhisper(ctx) + shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DeveloperFlag.Name) + if shhEnabled || shhAutoEnabled { + if ctx.GlobalIsSet(utils.WhisperMaxMessageSizeFlag.Name) { + cfg.Shh.MaxMessageSize = uint32(ctx.Int(utils.WhisperMaxMessageSizeFlag.Name)) + } + if ctx.GlobalIsSet(utils.WhisperMinPOWFlag.Name) { + cfg.Shh.MinimumAcceptedPOW = ctx.Float64(utils.WhisperMinPOWFlag.Name) + } + if ctx.GlobalIsSet(utils.WhisperRestrictConnectionBetweenLightClientsFlag.Name) { + cfg.Shh.RestrictConnectionBetweenLightClients = true + } + utils.RegisterShhService(stack, &cfg.Shh) + } + // Configure GraphQL if requested + if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) { + utils.RegisterGraphQLService(stack, cfg.Node.GraphQLEndpoint(), cfg.Node.GraphQLCors, cfg.Node.GraphQLVirtualHosts, cfg.Node.HTTPTimeouts) + } + // Add the Ethereum Stats daemon if requested. + if cfg.Ethstats.URL != "" { + utils.RegisterEthStatsService(stack, cfg.Ethstats.URL) + } + return stack +} + +// dumpConfig is the dumpconfig command. +func dumpConfig(ctx *cli.Context) error { + _, cfg := makeConfigNode(ctx) + comment := "" + + if cfg.Eth.Genesis != nil { + cfg.Eth.Genesis = nil + comment += "# Note: this config doesn't contain the genesis block.\n\n" + } + + out, err := tomlSettings.Marshal(&cfg) + if err != nil { + return err + } + + dump := os.Stdout + if ctx.NArg() > 0 { + dump, err = os.OpenFile(ctx.Args().Get(0), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer dump.Close() + } + dump.WriteString(comment) + dump.Write(out) + + return nil +} diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go new file mode 100644 index 0000000..0c0881b --- /dev/null +++ b/cmd/geth/consolecmd.go @@ -0,0 +1,220 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package geth + +import ( + "fmt" + "os" + "os/signal" + "path/filepath" + "strings" + "syscall" + + "github.com/Determinant/coreth/cmd/utils" + "github.com/ethereum/go-ethereum/console" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rpc" + "gopkg.in/urfave/cli.v1" +) + +var ( + consoleFlags = []cli.Flag{utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag} + + consoleCommand = cli.Command{ + Action: utils.MigrateFlags(localConsole), + Name: "console", + Usage: "Start an interactive JavaScript environment", + Flags: append(append(append(nodeFlags, rpcFlags...), consoleFlags...), whisperFlags...), + Category: "CONSOLE COMMANDS", + Description: ` +The Geth console is an interactive shell for the JavaScript runtime environment +which exposes a node admin interface as well as the Ðapp JavaScript API. +See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console.`, + } + + attachCommand = cli.Command{ + Action: utils.MigrateFlags(remoteConsole), + Name: "attach", + Usage: "Start an interactive JavaScript environment (connect to node)", + ArgsUsage: "[endpoint]", + Flags: append(consoleFlags, utils.DataDirFlag), + Category: "CONSOLE COMMANDS", + Description: ` +The Geth console is an interactive shell for the JavaScript runtime environment +which exposes a node admin interface as well as the Ðapp JavaScript API. +See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console. +This command allows to open a console on a running geth node.`, + } + + javascriptCommand = cli.Command{ + Action: utils.MigrateFlags(ephemeralConsole), + Name: "js", + Usage: "Execute the specified JavaScript files", + ArgsUsage: " [jsfile...]", + Flags: append(nodeFlags, consoleFlags...), + Category: "CONSOLE COMMANDS", + Description: ` +The JavaScript VM exposes a node admin interface as well as the Ðapp +JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console`, + } +) + +// localConsole starts a new geth node, attaching a JavaScript console to it at the +// same time. +func localConsole(ctx *cli.Context) error { + // Create and start the node based on the CLI flags + node := makeFullNode(ctx) + startNode(ctx, node) + defer node.Close() + + // Attach to the newly started node and start the JavaScript console + client, err := node.Attach() + if err != nil { + utils.Fatalf("Failed to attach to the inproc geth: %v", err) + } + config := console.Config{ + DataDir: utils.MakeDataDir(ctx), + DocRoot: ctx.GlobalString(utils.JSpathFlag.Name), + Client: client, + Preload: utils.MakeConsolePreloads(ctx), + } + + console, err := console.New(config) + if err != nil { + utils.Fatalf("Failed to start the JavaScript console: %v", err) + } + defer console.Stop(false) + + // If only a short execution was requested, evaluate and return + if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { + console.Evaluate(script) + return nil + } + // Otherwise print the welcome screen and enter interactive mode + console.Welcome() + console.Interactive() + + return nil +} + +// remoteConsole will connect to a remote geth instance, attaching a JavaScript +// console to it. 
+func remoteConsole(ctx *cli.Context) error { + // Attach to a remotely running geth instance and start the JavaScript console + endpoint := ctx.Args().First() + if endpoint == "" { + path := node.DefaultDataDir() + if ctx.GlobalIsSet(utils.DataDirFlag.Name) { + path = ctx.GlobalString(utils.DataDirFlag.Name) + } + if path != "" { + if ctx.GlobalBool(utils.TestnetFlag.Name) { + path = filepath.Join(path, "testnet") + } else if ctx.GlobalBool(utils.RinkebyFlag.Name) { + path = filepath.Join(path, "rinkeby") + } + } + endpoint = fmt.Sprintf("%s/geth.ipc", path) + } + client, err := dialRPC(endpoint) + if err != nil { + utils.Fatalf("Unable to attach to remote geth: %v", err) + } + config := console.Config{ + DataDir: utils.MakeDataDir(ctx), + DocRoot: ctx.GlobalString(utils.JSpathFlag.Name), + Client: client, + Preload: utils.MakeConsolePreloads(ctx), + } + + console, err := console.New(config) + if err != nil { + utils.Fatalf("Failed to start the JavaScript console: %v", err) + } + defer console.Stop(false) + + if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { + console.Evaluate(script) + return nil + } + + // Otherwise print the welcome screen and enter interactive mode + console.Welcome() + console.Interactive() + + return nil +} + +// dialRPC returns a RPC client which connects to the given endpoint. +// The check for empty endpoint implements the defaulting logic +// for "geth attach" and "geth monitor" with no argument. +func dialRPC(endpoint string) (*rpc.Client, error) { + if endpoint == "" { + endpoint = node.DefaultIPCEndpoint(clientIdentifier) + } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") { + // Backwards compatibility with geth < 1.5 which required + // these prefixes. + endpoint = endpoint[4:] + } + return rpc.Dial(endpoint) +} + +// ephemeralConsole starts a new geth node, attaches an ephemeral JavaScript +// console to it, executes each of the files specified as arguments and tears +// everything down. +func ephemeralConsole(ctx *cli.Context) error { + // Create and start the node based on the CLI flags + node := makeFullNode(ctx) + startNode(ctx, node) + defer node.Close() + + // Attach to the newly started node and start the JavaScript console + client, err := node.Attach() + if err != nil { + utils.Fatalf("Failed to attach to the inproc geth: %v", err) + } + config := console.Config{ + DataDir: utils.MakeDataDir(ctx), + DocRoot: ctx.GlobalString(utils.JSpathFlag.Name), + Client: client, + Preload: utils.MakeConsolePreloads(ctx), + } + + console, err := console.New(config) + if err != nil { + utils.Fatalf("Failed to start the JavaScript console: %v", err) + } + defer console.Stop(false) + + // Evaluate each of the specified JavaScript files + for _, file := range ctx.Args() { + if err = console.Execute(file); err != nil { + utils.Fatalf("Failed to execute %s: %v", file, err) + } + } + // Wait for pending callbacks, but stop for Ctrl-C. + abort := make(chan os.Signal, 1) + signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM) + + go func() { + <-abort + os.Exit(0) + }() + console.Stop(true) + + return nil +} diff --git a/cmd/geth/main.go b/cmd/geth/main.go new file mode 100644 index 0000000..a8ddd44 --- /dev/null +++ b/cmd/geth/main.go @@ -0,0 +1,398 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of go-ethereum. 
+// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +// geth is the official command-line client for Ethereum. +package geth + +import ( + "fmt" + "math" + "os" + "runtime" + godebug "runtime/debug" + "sort" + "strconv" + "time" + + "github.com/elastic/gosigar" + "github.com/Determinant/coreth/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/console" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/Determinant/coreth/internal/debug" + "github.com/ethereum/go-ethereum/les" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/node" + cli "gopkg.in/urfave/cli.v1" +) + +const ( + clientIdentifier = "geth" // Client identifier to advertise over the network +) + +var ( + // Git SHA1 commit hash of the release (set via linker flags) + gitCommit = "" + gitDate = "" + // The App that holds all commands and flags. + App = utils.NewApp(gitCommit, gitDate, "the go-ethereum command line interface") + // flags that configure the node + nodeFlags = []cli.Flag{ + utils.IdentityFlag, + utils.PasswordFileFlag, + utils.BootnodesFlag, + utils.BootnodesV4Flag, + utils.BootnodesV5Flag, + utils.DataDirFlag, + utils.AncientFlag, + utils.KeyStoreDirFlag, + utils.ExternalSignerFlag, + utils.NoUSBFlag, + utils.SmartCardDaemonPathFlag, + utils.DashboardEnabledFlag, + utils.DashboardAddrFlag, + utils.DashboardPortFlag, + utils.DashboardRefreshFlag, + utils.EthashCacheDirFlag, + utils.EthashCachesInMemoryFlag, + utils.EthashCachesOnDiskFlag, + utils.EthashDatasetDirFlag, + utils.EthashDatasetsInMemoryFlag, + utils.EthashDatasetsOnDiskFlag, + utils.TxPoolLocalsFlag, + utils.TxPoolNoLocalsFlag, + utils.TxPoolJournalFlag, + utils.TxPoolRejournalFlag, + utils.TxPoolPriceLimitFlag, + utils.TxPoolPriceBumpFlag, + utils.TxPoolAccountSlotsFlag, + utils.TxPoolGlobalSlotsFlag, + utils.TxPoolAccountQueueFlag, + utils.TxPoolGlobalQueueFlag, + utils.TxPoolLifetimeFlag, + utils.SyncModeFlag, + utils.ExitWhenSyncedFlag, + utils.GCModeFlag, + utils.LightServeFlag, + utils.LightLegacyServFlag, + utils.LightIngressFlag, + utils.LightEgressFlag, + utils.LightMaxPeersFlag, + utils.LightLegacyPeersFlag, + utils.LightKDFFlag, + utils.UltraLightServersFlag, + utils.UltraLightFractionFlag, + utils.UltraLightOnlyAnnounceFlag, + utils.WhitelistFlag, + utils.CacheFlag, + utils.CacheDatabaseFlag, + utils.CacheTrieFlag, + utils.CacheGCFlag, + utils.CacheNoPrefetchFlag, + utils.ListenPortFlag, + utils.MaxPeersFlag, + utils.MaxPendingPeersFlag, + utils.MiningEnabledFlag, + utils.MinerThreadsFlag, + utils.MinerLegacyThreadsFlag, + utils.MinerNotifyFlag, + utils.MinerGasTargetFlag, + utils.MinerLegacyGasTargetFlag, + utils.MinerGasLimitFlag, + utils.MinerGasPriceFlag, + utils.MinerLegacyGasPriceFlag, + utils.MinerEtherbaseFlag, + utils.MinerLegacyEtherbaseFlag, + 
utils.MinerExtraDataFlag, + utils.MinerLegacyExtraDataFlag, + utils.MinerRecommitIntervalFlag, + utils.MinerNoVerfiyFlag, + utils.NATFlag, + utils.NoDiscoverFlag, + utils.DiscoveryV5Flag, + utils.NetrestrictFlag, + utils.NodeKeyFileFlag, + utils.NodeKeyHexFlag, + utils.DeveloperFlag, + utils.DeveloperPeriodFlag, + utils.TestnetFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.VMEnableDebugFlag, + utils.NetworkIdFlag, + utils.EthStatsURLFlag, + utils.FakePoWFlag, + utils.NoCompactionFlag, + utils.GpoBlocksFlag, + utils.GpoPercentileFlag, + utils.EWASMInterpreterFlag, + utils.EVMInterpreterFlag, + configFileFlag, + } + + rpcFlags = []cli.Flag{ + utils.RPCEnabledFlag, + utils.RPCListenAddrFlag, + utils.RPCPortFlag, + utils.RPCCORSDomainFlag, + utils.RPCVirtualHostsFlag, + utils.GraphQLEnabledFlag, + utils.GraphQLListenAddrFlag, + utils.GraphQLPortFlag, + utils.GraphQLCORSDomainFlag, + utils.GraphQLVirtualHostsFlag, + utils.RPCApiFlag, + utils.WSEnabledFlag, + utils.WSListenAddrFlag, + utils.WSPortFlag, + utils.WSApiFlag, + utils.WSAllowedOriginsFlag, + utils.IPCDisabledFlag, + utils.IPCPathFlag, + utils.InsecureUnlockAllowedFlag, + utils.RPCGlobalGasCap, + } + + whisperFlags = []cli.Flag{ + utils.WhisperEnabledFlag, + utils.WhisperMaxMessageSizeFlag, + utils.WhisperMinPOWFlag, + utils.WhisperRestrictConnectionBetweenLightClientsFlag, + } + + metricsFlags = []cli.Flag{ + utils.MetricsEnabledFlag, + utils.MetricsEnabledExpensiveFlag, + utils.MetricsEnableInfluxDBFlag, + utils.MetricsInfluxDBEndpointFlag, + utils.MetricsInfluxDBDatabaseFlag, + utils.MetricsInfluxDBUsernameFlag, + utils.MetricsInfluxDBPasswordFlag, + utils.MetricsInfluxDBTagsFlag, + } +) + +func init() { + // Initialize the CLI App and start Geth + App.Action = geth + App.HideVersion = true // we have a command to print the version + App.Copyright = "Copyright 2013-2019 The go-ethereum Authors" + App.Commands = []cli.Command{ + // See chaincmd.go: + initCommand, + importCommand, + exportCommand, + importPreimagesCommand, + exportPreimagesCommand, + copydbCommand, + removedbCommand, + dumpCommand, + inspectCommand, + // See accountcmd.go: + // See consolecmd.go: + consoleCommand, + attachCommand, + javascriptCommand, + // See misccmd.go: + makecacheCommand, + makedagCommand, + versionCommand, + licenseCommand, + // See config.go + dumpConfigCommand, + // See retesteth.go + retestethCommand, + } + sort.Sort(cli.CommandsByName(App.Commands)) + + App.Flags = append(App.Flags, nodeFlags...) + App.Flags = append(App.Flags, rpcFlags...) + App.Flags = append(App.Flags, consoleFlags...) + App.Flags = append(App.Flags, debug.Flags...) + App.Flags = append(App.Flags, whisperFlags...) + App.Flags = append(App.Flags, metricsFlags...) + + App.Before = func(ctx *cli.Context) error { + logdir := "" + if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) { + logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs") + } + if err := debug.Setup(ctx, logdir); err != nil { + return err + } + // If we're a full node on mainnet without --cache specified, bump default cache allowance + if ctx.GlobalString(utils.SyncModeFlag.Name) != "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) && !ctx.GlobalIsSet(utils.NetworkIdFlag.Name) { + // Make sure we're not on any supported preconfigured testnet either + if !ctx.GlobalIsSet(utils.TestnetFlag.Name) && !ctx.GlobalIsSet(utils.RinkebyFlag.Name) && !ctx.GlobalIsSet(utils.GoerliFlag.Name) && !ctx.GlobalIsSet(utils.DeveloperFlag.Name) { + // Nope, we're really on mainnet. Bump that cache up! 
+ log.Info("Bumping default cache on mainnet", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 4096) + ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(4096)) + } + } + // If we're running a light client on any network, drop the cache to some meaningfully low amount + if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) { + log.Info("Dropping default light client cache", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 128) + ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(128)) + } + // Cap the cache allowance and tune the garbage collector + var mem gosigar.Mem + // Workaround until OpenBSD support lands into gosigar + // Check https://github.com/elastic/gosigar#supported-platforms + if runtime.GOOS != "openbsd" { + if err := mem.Get(); err == nil { + allowance := int(mem.Total / 1024 / 1024 / 3) + if cache := ctx.GlobalInt(utils.CacheFlag.Name); cache > allowance { + log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance) + ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(allowance)) + } + } + } + // Ensure Go's GC ignores the database cache for trigger percentage + cache := ctx.GlobalInt(utils.CacheFlag.Name) + gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024))) + + log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) + godebug.SetGCPercent(int(gogc)) + + // Start metrics export if enabled + utils.SetupMetrics(ctx) + + // Start system runtime metrics collection + go metrics.CollectProcessMetrics(3 * time.Second) + + return nil + } + + App.After = func(ctx *cli.Context) error { + debug.Exit() + console.Stdin.Close() // Resets terminal mode. + return nil + } +} + +func main() { + if err := App.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +// geth is the main entry point into the system if no special subcommand is ran. +// It creates a default node based on the command line arguments and runs it in +// blocking mode, waiting for it to be shut down. +func geth(ctx *cli.Context) error { + if args := ctx.Args(); len(args) > 0 { + return fmt.Errorf("invalid command: %q", args[0]) + } + node := makeFullNode(ctx) + defer node.Close() + startNode(ctx, node) + node.Wait() + return nil +} + +// startNode boots up the system node and all registered protocols, after which +// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the +// miner. +func startNode(ctx *cli.Context, stack *node.Node) { + debug.Memsize.Add("node", stack) + + // Start up the node itself + utils.StartNode(stack) + + // Unlock any account specifically requested + + // Register wallet event handlers to open and auto-derive wallets + + // Create a client to interact with local geth node. + rpcClient, err := stack.Attach() + if err != nil { + utils.Fatalf("Failed to attach to self: %v", err) + } + ethClient := ethclient.NewClient(rpcClient) + + // Set contract backend for ethereum service if local node + // is serving LES requests. + if ctx.GlobalInt(utils.LightLegacyServFlag.Name) > 0 || ctx.GlobalInt(utils.LightServeFlag.Name) > 0 { + var ethService *eth.Ethereum + if err := stack.Service(ðService); err != nil { + utils.Fatalf("Failed to retrieve ethereum service: %v", err) + } + ethService.SetContractBackend(ethClient) + } + // Set contract backend for les service if local node is + // running as a light client. 
+ if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" { + var lesService *les.LightEthereum + if err := stack.Service(&lesService); err != nil { + utils.Fatalf("Failed to retrieve light ethereum service: %v", err) + } + lesService.SetContractBackend(ethClient) + } + + // Spawn a standalone goroutine for status synchronization monitoring, + // close the node when synchronization is complete if user required. + if ctx.GlobalBool(utils.ExitWhenSyncedFlag.Name) { + go func() { + sub := stack.EventMux().Subscribe(downloader.DoneEvent{}) + defer sub.Unsubscribe() + for { + event := <-sub.Chan() + if event == nil { + continue + } + done, ok := event.Data.(downloader.DoneEvent) + if !ok { + continue + } + if timestamp := time.Unix(int64(done.Latest.Time), 0); time.Since(timestamp) < 10*time.Minute { + log.Info("Synchronisation completed", "latestnum", done.Latest.Number, "latesthash", done.Latest.Hash(), + "age", common.PrettyAge(timestamp)) + stack.Stop() + } + } + }() + } + + // Start auxiliary services if enabled + if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) { + // Mining only makes sense if a full Ethereum node is running + if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" { + utils.Fatalf("Light clients do not support mining") + } + var ethereum *eth.Ethereum + if err := stack.Service(ðereum); err != nil { + utils.Fatalf("Ethereum service not running: %v", err) + } + // Set the gas price to the limits from the CLI and start mining + gasprice := utils.GlobalBig(ctx, utils.MinerLegacyGasPriceFlag.Name) + if ctx.IsSet(utils.MinerGasPriceFlag.Name) { + gasprice = utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) + } + ethereum.TxPool().SetGasPrice(gasprice) + + threads := ctx.GlobalInt(utils.MinerLegacyThreadsFlag.Name) + if ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) { + threads = ctx.GlobalInt(utils.MinerThreadsFlag.Name) + } + if err := ethereum.StartMining(threads); err != nil { + utils.Fatalf("Failed to start mining: %v", err) + } + } +} diff --git a/cmd/geth/misccmd.go b/cmd/geth/misccmd.go new file mode 100644 index 0000000..f778515 --- /dev/null +++ b/cmd/geth/misccmd.go @@ -0,0 +1,142 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package geth + +import ( + "fmt" + "os" + "runtime" + "strconv" + "strings" + + "github.com/Determinant/coreth/cmd/utils" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/params" + "gopkg.in/urfave/cli.v1" +) + +var ( + makecacheCommand = cli.Command{ + Action: utils.MigrateFlags(makecache), + Name: "makecache", + Usage: "Generate ethash verification cache (for testing)", + ArgsUsage: " ", + Category: "MISCELLANEOUS COMMANDS", + Description: ` +The makecache command generates an ethash cache in . + +This command exists to support the system testing project. 
+Regular users do not need to execute it. +`, + } + makedagCommand = cli.Command{ + Action: utils.MigrateFlags(makedag), + Name: "makedag", + Usage: "Generate ethash mining DAG (for testing)", + ArgsUsage: " ", + Category: "MISCELLANEOUS COMMANDS", + Description: ` +The makedag command generates an ethash DAG in . + +This command exists to support the system testing project. +Regular users do not need to execute it. +`, + } + versionCommand = cli.Command{ + Action: utils.MigrateFlags(version), + Name: "version", + Usage: "Print version numbers", + ArgsUsage: " ", + Category: "MISCELLANEOUS COMMANDS", + Description: ` +The output of this command is supposed to be machine-readable. +`, + } + licenseCommand = cli.Command{ + Action: utils.MigrateFlags(license), + Name: "license", + Usage: "Display license information", + ArgsUsage: " ", + Category: "MISCELLANEOUS COMMANDS", + } +) + +// makecache generates an ethash verification cache into the provided folder. +func makecache(ctx *cli.Context) error { + args := ctx.Args() + if len(args) != 2 { + utils.Fatalf(`Usage: geth makecache `) + } + block, err := strconv.ParseUint(args[0], 0, 64) + if err != nil { + utils.Fatalf("Invalid block number: %v", err) + } + ethash.MakeCache(block, args[1]) + + return nil +} + +// makedag generates an ethash mining DAG into the provided folder. +func makedag(ctx *cli.Context) error { + args := ctx.Args() + if len(args) != 2 { + utils.Fatalf(`Usage: geth makedag `) + } + block, err := strconv.ParseUint(args[0], 0, 64) + if err != nil { + utils.Fatalf("Invalid block number: %v", err) + } + ethash.MakeDataset(block, args[1]) + + return nil +} + +func version(ctx *cli.Context) error { + fmt.Println(strings.Title(clientIdentifier)) + fmt.Println("Version:", params.VersionWithMeta) + if gitCommit != "" { + fmt.Println("Git Commit:", gitCommit) + } + if gitDate != "" { + fmt.Println("Git Commit Date:", gitDate) + } + fmt.Println("Architecture:", runtime.GOARCH) + fmt.Println("Protocol Versions:", eth.ProtocolVersions) + fmt.Println("Network Id:", eth.DefaultConfig.NetworkId) + fmt.Println("Go Version:", runtime.Version()) + fmt.Println("Operating System:", runtime.GOOS) + fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH")) + fmt.Printf("GOROOT=%s\n", runtime.GOROOT()) + return nil +} + +func license(_ *cli.Context) error { + fmt.Println(`Geth is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Geth is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with geth. If not, see .`) + return nil +} diff --git a/cmd/geth/retesteth.go b/cmd/geth/retesteth.go new file mode 100644 index 0000000..e3f163b --- /dev/null +++ b/cmd/geth/retesteth.go @@ -0,0 +1,891 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package geth + +import ( + "bytes" + "context" + "fmt" + "math/big" + "os" + "os/signal" + "strings" + "time" + + "github.com/Determinant/coreth/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" + + cli "gopkg.in/urfave/cli.v1" +) + +var ( + rpcPortFlag = cli.IntFlag{ + Name: "rpcport", + Usage: "HTTP-RPC server listening port", + Value: node.DefaultHTTPPort, + } + retestethCommand = cli.Command{ + Action: utils.MigrateFlags(retesteth), + Name: "retesteth", + Usage: "Launches geth in retesteth mode", + ArgsUsage: "", + Flags: []cli.Flag{rpcPortFlag}, + Category: "MISCELLANEOUS COMMANDS", + Description: `Launches geth in retesteth mode (no database, no network, only retesteth RPC interface)`, + } +) + +type RetestethTestAPI interface { + SetChainParams(ctx context.Context, chainParams ChainParams) (bool, error) + MineBlocks(ctx context.Context, number uint64) (bool, error) + ModifyTimestamp(ctx context.Context, interval uint64) (bool, error) + ImportRawBlock(ctx context.Context, rawBlock hexutil.Bytes) (common.Hash, error) + RewindToBlock(ctx context.Context, number uint64) (bool, error) + GetLogHash(ctx context.Context, txHash common.Hash) (common.Hash, error) +} + +type RetestethEthAPI interface { + SendRawTransaction(ctx context.Context, rawTx hexutil.Bytes) (common.Hash, error) + BlockNumber(ctx context.Context) (uint64, error) + GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error) + GetBalance(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (*math.HexOrDecimal256, error) + GetCode(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (hexutil.Bytes, error) + GetTransactionCount(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (uint64, error) +} + +type RetestethDebugAPI interface { + AccountRange(ctx context.Context, + blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, + addressHash *math.HexOrDecimal256, maxResults uint64, + ) (AccountRangeResult, error) + StorageRangeAt(ctx context.Context, + blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, + address common.Address, + begin *math.HexOrDecimal256, maxResults uint64, + ) (StorageRangeResult, error) +} + +type RetestWeb3API interface { + ClientVersion(ctx context.Context) (string, error) +} + +type RetestethAPI struct { + ethDb 
ethdb.Database + db state.Database + chainConfig *params.ChainConfig + author common.Address + extraData []byte + genesisHash common.Hash + engine *NoRewardEngine + blockchain *core.BlockChain + blockNumber uint64 + txMap map[common.Address]map[uint64]*types.Transaction // Sender -> Nonce -> Transaction + txSenders map[common.Address]struct{} // Set of transaction senders + blockInterval uint64 +} + +type ChainParams struct { + SealEngine string `json:"sealEngine"` + Params CParamsParams `json:"params"` + Genesis CParamsGenesis `json:"genesis"` + Accounts map[common.Address]CParamsAccount `json:"accounts"` +} + +type CParamsParams struct { + AccountStartNonce math.HexOrDecimal64 `json:"accountStartNonce"` + HomesteadForkBlock *math.HexOrDecimal64 `json:"homesteadForkBlock"` + EIP150ForkBlock *math.HexOrDecimal64 `json:"EIP150ForkBlock"` + EIP158ForkBlock *math.HexOrDecimal64 `json:"EIP158ForkBlock"` + DaoHardforkBlock *math.HexOrDecimal64 `json:"daoHardforkBlock"` + ByzantiumForkBlock *math.HexOrDecimal64 `json:"byzantiumForkBlock"` + ConstantinopleForkBlock *math.HexOrDecimal64 `json:"constantinopleForkBlock"` + ConstantinopleFixForkBlock *math.HexOrDecimal64 `json:"constantinopleFixForkBlock"` + ChainID *math.HexOrDecimal256 `json:"chainID"` + MaximumExtraDataSize math.HexOrDecimal64 `json:"maximumExtraDataSize"` + TieBreakingGas bool `json:"tieBreakingGas"` + MinGasLimit math.HexOrDecimal64 `json:"minGasLimit"` + MaxGasLimit math.HexOrDecimal64 `json:"maxGasLimit"` + GasLimitBoundDivisor math.HexOrDecimal64 `json:"gasLimitBoundDivisor"` + MinimumDifficulty math.HexOrDecimal256 `json:"minimumDifficulty"` + DifficultyBoundDivisor math.HexOrDecimal256 `json:"difficultyBoundDivisor"` + DurationLimit math.HexOrDecimal256 `json:"durationLimit"` + BlockReward math.HexOrDecimal256 `json:"blockReward"` + NetworkID math.HexOrDecimal256 `json:"networkID"` +} + +type CParamsGenesis struct { + Nonce math.HexOrDecimal64 `json:"nonce"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + MixHash *math.HexOrDecimal256 `json:"mixHash"` + Author common.Address `json:"author"` + Timestamp math.HexOrDecimal64 `json:"timestamp"` + ParentHash common.Hash `json:"parentHash"` + ExtraData hexutil.Bytes `json:"extraData"` + GasLimit math.HexOrDecimal64 `json:"gasLimit"` +} + +type CParamsAccount struct { + Balance *math.HexOrDecimal256 `json:"balance"` + Precompiled *CPAccountPrecompiled `json:"precompiled"` + Code hexutil.Bytes `json:"code"` + Storage map[string]string `json:"storage"` + Nonce *math.HexOrDecimal64 `json:"nonce"` +} + +type CPAccountPrecompiled struct { + Name string `json:"name"` + StartingBlock math.HexOrDecimal64 `json:"startingBlock"` + Linear *CPAPrecompiledLinear `json:"linear"` +} + +type CPAPrecompiledLinear struct { + Base uint64 `json:"base"` + Word uint64 `json:"word"` +} + +type AccountRangeResult struct { + AddressMap map[common.Hash]common.Address `json:"addressMap"` + NextKey common.Hash `json:"nextKey"` +} + +type StorageRangeResult struct { + Complete bool `json:"complete"` + Storage map[common.Hash]SRItem `json:"storage"` +} + +type SRItem struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type NoRewardEngine struct { + inner consensus.Engine + rewardsOn bool +} + +func (e *NoRewardEngine) Author(header *types.Header) (common.Address, error) { + return e.inner.Author(header) +} + +func (e *NoRewardEngine) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error { + return e.inner.VerifyHeader(chain, header, seal) +} + +func (e 
*NoRewardEngine) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { + return e.inner.VerifyHeaders(chain, headers, seals) +} + +func (e *NoRewardEngine) VerifyUncles(chain consensus.ChainReader, block *types.Block) error { + return e.inner.VerifyUncles(chain, block) +} + +func (e *NoRewardEngine) VerifySeal(chain consensus.ChainReader, header *types.Header) error { + return e.inner.VerifySeal(chain, header) +} + +func (e *NoRewardEngine) Prepare(chain consensus.ChainReader, header *types.Header) error { + return e.inner.Prepare(chain, header) +} + +func (e *NoRewardEngine) accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) { + // Simply touch miner and uncle coinbase accounts + reward := big.NewInt(0) + for _, uncle := range uncles { + state.AddBalance(uncle.Coinbase, reward) + } + state.AddBalance(header.Coinbase, reward) +} + +func (e *NoRewardEngine) Finalize(chain consensus.ChainReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction, + uncles []*types.Header) { + if e.rewardsOn { + e.inner.Finalize(chain, header, statedb, txs, uncles) + } else { + e.accumulateRewards(chain.Config(), statedb, header, uncles) + header.Root = statedb.IntermediateRoot(chain.Config().IsEIP158(header.Number)) + } +} + +func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction, + uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { + if e.rewardsOn { + return e.inner.FinalizeAndAssemble(chain, header, statedb, txs, uncles, receipts) + } else { + e.accumulateRewards(chain.Config(), statedb, header, uncles) + header.Root = statedb.IntermediateRoot(chain.Config().IsEIP158(header.Number)) + + // Header seems complete, assemble into a block and return + return types.NewBlock(header, txs, uncles, receipts), nil + } +} + +func (e *NoRewardEngine) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { + return e.inner.Seal(chain, block, results, stop) +} + +func (e *NoRewardEngine) SealHash(header *types.Header) common.Hash { + return e.inner.SealHash(header) +} + +func (e *NoRewardEngine) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int { + return e.inner.CalcDifficulty(chain, time, parent) +} + +func (e *NoRewardEngine) APIs(chain consensus.ChainReader) []rpc.API { + return e.inner.APIs(chain) +} + +func (e *NoRewardEngine) Close() error { + return e.inner.Close() +} + +func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainParams) (bool, error) { + // Clean up + if api.blockchain != nil { + api.blockchain.Stop() + } + if api.engine != nil { + api.engine.Close() + } + if api.ethDb != nil { + api.ethDb.Close() + } + ethDb := rawdb.NewMemoryDatabase() + accounts := make(core.GenesisAlloc) + for address, account := range chainParams.Accounts { + balance := big.NewInt(0) + if account.Balance != nil { + balance.Set((*big.Int)(account.Balance)) + } + var nonce uint64 + if account.Nonce != nil { + nonce = uint64(*account.Nonce) + } + if account.Precompiled == nil || account.Balance != nil { + storage := make(map[common.Hash]common.Hash) + for k, v := range account.Storage { + storage[common.HexToHash(k)] = common.HexToHash(v) + } + accounts[address] = core.GenesisAccount{ + Balance: balance, + Code: account.Code, + Nonce: nonce, + Storage: storage, 
+ } + } + } + chainId := big.NewInt(1) + if chainParams.Params.ChainID != nil { + chainId.Set((*big.Int)(chainParams.Params.ChainID)) + } + var ( + homesteadBlock *big.Int + daoForkBlock *big.Int + eip150Block *big.Int + eip155Block *big.Int + eip158Block *big.Int + byzantiumBlock *big.Int + constantinopleBlock *big.Int + petersburgBlock *big.Int + ) + if chainParams.Params.HomesteadForkBlock != nil { + homesteadBlock = big.NewInt(int64(*chainParams.Params.HomesteadForkBlock)) + } + if chainParams.Params.DaoHardforkBlock != nil { + daoForkBlock = big.NewInt(int64(*chainParams.Params.DaoHardforkBlock)) + } + if chainParams.Params.EIP150ForkBlock != nil { + eip150Block = big.NewInt(int64(*chainParams.Params.EIP150ForkBlock)) + } + if chainParams.Params.EIP158ForkBlock != nil { + eip158Block = big.NewInt(int64(*chainParams.Params.EIP158ForkBlock)) + eip155Block = eip158Block + } + if chainParams.Params.ByzantiumForkBlock != nil { + byzantiumBlock = big.NewInt(int64(*chainParams.Params.ByzantiumForkBlock)) + } + if chainParams.Params.ConstantinopleForkBlock != nil { + constantinopleBlock = big.NewInt(int64(*chainParams.Params.ConstantinopleForkBlock)) + } + if chainParams.Params.ConstantinopleFixForkBlock != nil { + petersburgBlock = big.NewInt(int64(*chainParams.Params.ConstantinopleFixForkBlock)) + } + if constantinopleBlock != nil && petersburgBlock == nil { + petersburgBlock = big.NewInt(100000000000) + } + genesis := &core.Genesis{ + Config: ¶ms.ChainConfig{ + ChainID: chainId, + HomesteadBlock: homesteadBlock, + DAOForkBlock: daoForkBlock, + DAOForkSupport: false, + EIP150Block: eip150Block, + EIP155Block: eip155Block, + EIP158Block: eip158Block, + ByzantiumBlock: byzantiumBlock, + ConstantinopleBlock: constantinopleBlock, + PetersburgBlock: petersburgBlock, + }, + Nonce: uint64(chainParams.Genesis.Nonce), + Timestamp: uint64(chainParams.Genesis.Timestamp), + ExtraData: chainParams.Genesis.ExtraData, + GasLimit: uint64(chainParams.Genesis.GasLimit), + Difficulty: big.NewInt(0).Set((*big.Int)(chainParams.Genesis.Difficulty)), + Mixhash: common.BigToHash((*big.Int)(chainParams.Genesis.MixHash)), + Coinbase: chainParams.Genesis.Author, + ParentHash: chainParams.Genesis.ParentHash, + Alloc: accounts, + } + chainConfig, genesisHash, err := core.SetupGenesisBlock(ethDb, genesis) + if err != nil { + return false, err + } + fmt.Printf("Chain config: %v\n", chainConfig) + + var inner consensus.Engine + switch chainParams.SealEngine { + case "NoProof", "NoReward": + inner = ethash.NewFaker() + case "Ethash": + inner = ethash.New(ethash.Config{ + CacheDir: "ethash", + CachesInMem: 2, + CachesOnDisk: 3, + DatasetsInMem: 1, + DatasetsOnDisk: 2, + }, nil, false) + default: + return false, fmt.Errorf("unrecognised seal engine: %s", chainParams.SealEngine) + } + engine := &NoRewardEngine{inner: inner, rewardsOn: chainParams.SealEngine != "NoReward"} + + blockchain, err := core.NewBlockChain(ethDb, nil, chainConfig, engine, vm.Config{}, nil) + if err != nil { + return false, err + } + + api.chainConfig = chainConfig + api.genesisHash = genesisHash + api.author = chainParams.Genesis.Author + api.extraData = chainParams.Genesis.ExtraData + api.ethDb = ethDb + api.engine = engine + api.blockchain = blockchain + api.db = state.NewDatabase(api.ethDb) + api.blockNumber = 0 + api.txMap = make(map[common.Address]map[uint64]*types.Transaction) + api.txSenders = make(map[common.Address]struct{}) + api.blockInterval = 0 + return true, nil +} + +func (api *RetestethAPI) SendRawTransaction(ctx context.Context, rawTx 
hexutil.Bytes) (common.Hash, error) { + tx := new(types.Transaction) + if err := rlp.DecodeBytes(rawTx, tx); err != nil { + // Return nil is not by mistake - some tests include sending transaction where gasLimit overflows uint64 + return common.Hash{}, nil + } + signer := types.MakeSigner(api.chainConfig, big.NewInt(int64(api.blockNumber))) + sender, err := types.Sender(signer, tx) + if err != nil { + return common.Hash{}, err + } + if nonceMap, ok := api.txMap[sender]; ok { + nonceMap[tx.Nonce()] = tx + } else { + nonceMap = make(map[uint64]*types.Transaction) + nonceMap[tx.Nonce()] = tx + api.txMap[sender] = nonceMap + } + api.txSenders[sender] = struct{}{} + return tx.Hash(), nil +} + +func (api *RetestethAPI) MineBlocks(ctx context.Context, number uint64) (bool, error) { + for i := 0; i < int(number); i++ { + if err := api.mineBlock(); err != nil { + return false, err + } + } + fmt.Printf("Mined %d blocks\n", number) + return true, nil +} + +func (api *RetestethAPI) mineBlock() error { + parentHash := rawdb.ReadCanonicalHash(api.ethDb, api.blockNumber) + parent := rawdb.ReadBlock(api.ethDb, parentHash, api.blockNumber) + var timestamp uint64 + if api.blockInterval == 0 { + timestamp = uint64(time.Now().Unix()) + } else { + timestamp = parent.Time() + api.blockInterval + } + gasLimit := core.CalcGasLimit(parent, 9223372036854775807, 9223372036854775807) + header := &types.Header{ + ParentHash: parent.Hash(), + Number: big.NewInt(int64(api.blockNumber + 1)), + GasLimit: gasLimit, + Extra: api.extraData, + Time: timestamp, + } + header.Coinbase = api.author + if api.engine != nil { + api.engine.Prepare(api.blockchain, header) + } + // If we are care about TheDAO hard-fork check whether to override the extra-data or not + if daoBlock := api.chainConfig.DAOForkBlock; daoBlock != nil { + // Check whether the block is among the fork extra-override range + limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) + if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 { + // Depending whether we support or oppose the fork, override differently + if api.chainConfig.DAOForkSupport { + header.Extra = common.CopyBytes(params.DAOForkBlockExtra) + } else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { + header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data + } + } + } + statedb, err := api.blockchain.StateAt(parent.Root()) + if err != nil { + return err + } + if api.chainConfig.DAOForkSupport && api.chainConfig.DAOForkBlock != nil && api.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 { + misc.ApplyDAOHardFork(statedb) + } + gasPool := new(core.GasPool).AddGas(header.GasLimit) + txCount := 0 + var txs []*types.Transaction + var receipts []*types.Receipt + var coalescedLogs []*types.Log + var blockFull = gasPool.Gas() < params.TxGas + for address := range api.txSenders { + if blockFull { + break + } + m := api.txMap[address] + for nonce := statedb.GetNonce(address); ; nonce++ { + if tx, ok := m[nonce]; ok { + // Try to apply transactions to the state + statedb.Prepare(tx.Hash(), common.Hash{}, txCount) + snap := statedb.Snapshot() + + receipt, _, err := core.ApplyTransaction( + api.chainConfig, + api.blockchain, + &api.author, + gasPool, + statedb, + header, tx, &header.GasUsed, *api.blockchain.GetVMConfig(), + ) + if err != nil { + statedb.RevertToSnapshot(snap) + break + } + txs = append(txs, tx) + receipts = append(receipts, receipt) + coalescedLogs = append(coalescedLogs, receipt.Logs...) 
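+				// Transaction applied successfully: drop it from the per-sender pending map and
+				// stop filling the block once the remaining gas pool cannot cover another transaction.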
+ delete(m, nonce) + if len(m) == 0 { + // Last tx for the sender + delete(api.txMap, address) + delete(api.txSenders, address) + } + txCount++ + if gasPool.Gas() < params.TxGas { + blockFull = true + break + } + } else { + break // Gap in the nonces + } + } + } + block, err := api.engine.FinalizeAndAssemble(api.blockchain, header, statedb, txs, []*types.Header{}, receipts) + if err != nil { + return err + } + return api.importBlock(block) +} + +func (api *RetestethAPI) importBlock(block *types.Block) error { + if _, err := api.blockchain.InsertChain([]*types.Block{block}); err != nil { + return err + } + api.blockNumber = block.NumberU64() + fmt.Printf("Imported block %d\n", block.NumberU64()) + return nil +} + +func (api *RetestethAPI) ModifyTimestamp(ctx context.Context, interval uint64) (bool, error) { + api.blockInterval = interval + return true, nil +} + +func (api *RetestethAPI) ImportRawBlock(ctx context.Context, rawBlock hexutil.Bytes) (common.Hash, error) { + block := new(types.Block) + if err := rlp.DecodeBytes(rawBlock, block); err != nil { + return common.Hash{}, err + } + fmt.Printf("Importing block %d with parent hash: %x, genesisHash: %x\n", block.NumberU64(), block.ParentHash(), api.genesisHash) + if err := api.importBlock(block); err != nil { + return common.Hash{}, err + } + return block.Hash(), nil +} + +func (api *RetestethAPI) RewindToBlock(ctx context.Context, newHead uint64) (bool, error) { + if err := api.blockchain.SetHead(newHead); err != nil { + return false, err + } + api.blockNumber = newHead + return true, nil +} + +var emptyListHash common.Hash = common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") + +func (api *RetestethAPI) GetLogHash(ctx context.Context, txHash common.Hash) (common.Hash, error) { + receipt, _, _, _ := rawdb.ReadReceipt(api.ethDb, txHash, api.chainConfig) + if receipt == nil { + return emptyListHash, nil + } else { + if logListRlp, err := rlp.EncodeToBytes(receipt.Logs); err != nil { + return common.Hash{}, err + } else { + return common.BytesToHash(crypto.Keccak256(logListRlp)), nil + } + } +} + +func (api *RetestethAPI) BlockNumber(ctx context.Context) (uint64, error) { + //fmt.Printf("BlockNumber, response: %d\n", api.blockNumber) + return api.blockNumber, nil +} + +func (api *RetestethAPI) GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error) { + block := api.blockchain.GetBlockByNumber(uint64(blockNr)) + if block != nil { + response, err := RPCMarshalBlock(block, true, fullTx) + if err != nil { + return nil, err + } + response["author"] = response["miner"] + response["totalDifficulty"] = (*hexutil.Big)(api.blockchain.GetTd(block.Hash(), uint64(blockNr))) + return response, err + } + return nil, fmt.Errorf("block %d not found", blockNr) +} + +func (api *RetestethAPI) AccountRange(ctx context.Context, + blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, + addressHash *math.HexOrDecimal256, maxResults uint64, +) (AccountRangeResult, error) { + var ( + header *types.Header + block *types.Block + ) + if (*big.Int)(blockHashOrNumber).Cmp(big.NewInt(math.MaxInt64)) > 0 { + blockHash := common.BigToHash((*big.Int)(blockHashOrNumber)) + header = api.blockchain.GetHeaderByHash(blockHash) + block = api.blockchain.GetBlockByHash(blockHash) + //fmt.Printf("Account range: %x, txIndex %d, start: %x, maxResults: %d\n", blockHash, txIndex, common.BigToHash((*big.Int)(addressHash)), maxResults) + } else { + blockNumber := 
(*big.Int)(blockHashOrNumber).Uint64() + header = api.blockchain.GetHeaderByNumber(blockNumber) + block = api.blockchain.GetBlockByNumber(blockNumber) + //fmt.Printf("Account range: %d, txIndex %d, start: %x, maxResults: %d\n", blockNumber, txIndex, common.BigToHash((*big.Int)(addressHash)), maxResults) + } + parentHeader := api.blockchain.GetHeaderByHash(header.ParentHash) + var root common.Hash + var statedb *state.StateDB + var err error + if parentHeader == nil || int(txIndex) >= len(block.Transactions()) { + root = header.Root + statedb, err = api.blockchain.StateAt(root) + if err != nil { + return AccountRangeResult{}, err + } + } else { + root = parentHeader.Root + statedb, err = api.blockchain.StateAt(root) + if err != nil { + return AccountRangeResult{}, err + } + // Recompute transactions up to the target index. + signer := types.MakeSigner(api.blockchain.Config(), block.Number()) + for idx, tx := range block.Transactions() { + // Assemble the transaction call message and return if the requested offset + msg, _ := tx.AsMessage(signer) + context := core.NewEVMContext(msg, block.Header(), api.blockchain, nil) + // Not yet the searched for transaction, execute on top of the current state + vmenv := vm.NewEVM(context, statedb, api.blockchain.Config(), vm.Config{}) + if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { + return AccountRangeResult{}, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + } + // Ensure any modifications are committed to the state + // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect + root = statedb.IntermediateRoot(vmenv.ChainConfig().IsEIP158(block.Number())) + if idx == int(txIndex) { + // This is to make sure root can be opened by OpenTrie + root, err = statedb.Commit(api.chainConfig.IsEIP158(block.Number())) + if err != nil { + return AccountRangeResult{}, err + } + break + } + } + } + accountTrie, err := statedb.Database().OpenTrie(root) + if err != nil { + return AccountRangeResult{}, err + } + it := trie.NewIterator(accountTrie.NodeIterator(common.BigToHash((*big.Int)(addressHash)).Bytes())) + result := AccountRangeResult{AddressMap: make(map[common.Hash]common.Address)} + for i := 0; /*i < int(maxResults) && */ it.Next(); i++ { + if preimage := accountTrie.GetKey(it.Key); preimage != nil { + result.AddressMap[common.BytesToHash(it.Key)] = common.BytesToAddress(preimage) + //fmt.Printf("%x: %x\n", it.Key, preimage) + } else { + //fmt.Printf("could not find preimage for %x\n", it.Key) + } + } + //fmt.Printf("Number of entries returned: %d\n", len(result.AddressMap)) + // Add the 'next key' so clients can continue downloading. 
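+	// A successful it.Next() here means more accounts remain beyond this batch, so expose the following key as NextKey.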
+ if it.Next() { + next := common.BytesToHash(it.Key) + result.NextKey = next + } + return result, nil +} + +func (api *RetestethAPI) GetBalance(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (*math.HexOrDecimal256, error) { + //fmt.Printf("GetBalance %x, block %d\n", address, blockNr) + header := api.blockchain.GetHeaderByNumber(uint64(blockNr)) + statedb, err := api.blockchain.StateAt(header.Root) + if err != nil { + return nil, err + } + return (*math.HexOrDecimal256)(statedb.GetBalance(address)), nil +} + +func (api *RetestethAPI) GetCode(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (hexutil.Bytes, error) { + header := api.blockchain.GetHeaderByNumber(uint64(blockNr)) + statedb, err := api.blockchain.StateAt(header.Root) + if err != nil { + return nil, err + } + return statedb.GetCode(address), nil +} + +func (api *RetestethAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (uint64, error) { + header := api.blockchain.GetHeaderByNumber(uint64(blockNr)) + statedb, err := api.blockchain.StateAt(header.Root) + if err != nil { + return 0, err + } + return statedb.GetNonce(address), nil +} + +func (api *RetestethAPI) StorageRangeAt(ctx context.Context, + blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, + address common.Address, + begin *math.HexOrDecimal256, maxResults uint64, +) (StorageRangeResult, error) { + var ( + header *types.Header + block *types.Block + ) + if (*big.Int)(blockHashOrNumber).Cmp(big.NewInt(math.MaxInt64)) > 0 { + blockHash := common.BigToHash((*big.Int)(blockHashOrNumber)) + header = api.blockchain.GetHeaderByHash(blockHash) + block = api.blockchain.GetBlockByHash(blockHash) + //fmt.Printf("Storage range: %x, txIndex %d, addr: %x, start: %x, maxResults: %d\n", + // blockHash, txIndex, address, common.BigToHash((*big.Int)(begin)), maxResults) + } else { + blockNumber := (*big.Int)(blockHashOrNumber).Uint64() + header = api.blockchain.GetHeaderByNumber(blockNumber) + block = api.blockchain.GetBlockByNumber(blockNumber) + //fmt.Printf("Storage range: %d, txIndex %d, addr: %x, start: %x, maxResults: %d\n", + // blockNumber, txIndex, address, common.BigToHash((*big.Int)(begin)), maxResults) + } + parentHeader := api.blockchain.GetHeaderByHash(header.ParentHash) + var root common.Hash + var statedb *state.StateDB + var err error + if parentHeader == nil || int(txIndex) >= len(block.Transactions()) { + root = header.Root + statedb, err = api.blockchain.StateAt(root) + if err != nil { + return StorageRangeResult{}, err + } + } else { + root = parentHeader.Root + statedb, err = api.blockchain.StateAt(root) + if err != nil { + return StorageRangeResult{}, err + } + // Recompute transactions up to the target index. 
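+		// Replaying the preceding transactions on top of the parent state reproduces the storage layout as of txIndex.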
+ signer := types.MakeSigner(api.blockchain.Config(), block.Number()) + for idx, tx := range block.Transactions() { + // Assemble the transaction call message and return if the requested offset + msg, _ := tx.AsMessage(signer) + context := core.NewEVMContext(msg, block.Header(), api.blockchain, nil) + // Not yet the searched for transaction, execute on top of the current state + vmenv := vm.NewEVM(context, statedb, api.blockchain.Config(), vm.Config{}) + if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { + return StorageRangeResult{}, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + } + // Ensure any modifications are committed to the state + // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect + _ = statedb.IntermediateRoot(vmenv.ChainConfig().IsEIP158(block.Number())) + if idx == int(txIndex) { + // This is to make sure root can be opened by OpenTrie + _, err = statedb.Commit(vmenv.ChainConfig().IsEIP158(block.Number())) + if err != nil { + return StorageRangeResult{}, err + } + } + } + } + storageTrie := statedb.StorageTrie(address) + it := trie.NewIterator(storageTrie.NodeIterator(common.BigToHash((*big.Int)(begin)).Bytes())) + result := StorageRangeResult{Storage: make(map[common.Hash]SRItem)} + for i := 0; /*i < int(maxResults) && */ it.Next(); i++ { + if preimage := storageTrie.GetKey(it.Key); preimage != nil { + key := (*math.HexOrDecimal256)(big.NewInt(0).SetBytes(preimage)) + v, _, err := rlp.SplitString(it.Value) + if err != nil { + return StorageRangeResult{}, err + } + value := (*math.HexOrDecimal256)(big.NewInt(0).SetBytes(v)) + ks, _ := key.MarshalText() + vs, _ := value.MarshalText() + if len(ks)%2 != 0 { + ks = append(append(append([]byte{}, ks[:2]...), byte('0')), ks[2:]...) + } + if len(vs)%2 != 0 { + vs = append(append(append([]byte{}, vs[:2]...), byte('0')), vs[2:]...) + } + result.Storage[common.BytesToHash(it.Key)] = SRItem{ + Key: string(ks), + Value: string(vs), + } + //fmt.Printf("Key: %s, Value: %s\n", ks, vs) + } else { + //fmt.Printf("Did not find preimage for %x\n", it.Key) + } + } + if it.Next() { + result.Complete = false + } else { + result.Complete = true + } + return result, nil +} + +func (api *RetestethAPI) ClientVersion(ctx context.Context) (string, error) { + return "Geth-" + params.VersionWithCommit(gitCommit, gitDate), nil +} + +// splitAndTrim splits input separated by a comma +// and trims excessive white space from the substrings. 
+func splitAndTrim(input string) []string { + result := strings.Split(input, ",") + for i, r := range result { + result[i] = strings.TrimSpace(r) + } + return result +} + +func retesteth(ctx *cli.Context) error { + log.Info("Welcome to retesteth!") + // register signer API with server + var ( + extapiURL string + ) + apiImpl := &RetestethAPI{} + var testApi RetestethTestAPI = apiImpl + var ethApi RetestethEthAPI = apiImpl + var debugApi RetestethDebugAPI = apiImpl + var web3Api RetestWeb3API = apiImpl + rpcAPI := []rpc.API{ + { + Namespace: "test", + Public: true, + Service: testApi, + Version: "1.0", + }, + { + Namespace: "eth", + Public: true, + Service: ethApi, + Version: "1.0", + }, + { + Namespace: "debug", + Public: true, + Service: debugApi, + Version: "1.0", + }, + { + Namespace: "web3", + Public: true, + Service: web3Api, + Version: "1.0", + }, + } + vhosts := splitAndTrim(ctx.GlobalString(utils.RPCVirtualHostsFlag.Name)) + cors := splitAndTrim(ctx.GlobalString(utils.RPCCORSDomainFlag.Name)) + + // start http server + httpEndpoint := fmt.Sprintf("%s:%d", ctx.GlobalString(utils.RPCListenAddrFlag.Name), ctx.Int(rpcPortFlag.Name)) + listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"test", "eth", "debug", "web3"}, cors, vhosts, rpc.DefaultHTTPTimeouts) + if err != nil { + utils.Fatalf("Could not start RPC api: %v", err) + } + extapiURL = fmt.Sprintf("http://%s", httpEndpoint) + log.Info("HTTP endpoint opened", "url", extapiURL) + + defer func() { + listener.Close() + log.Info("HTTP endpoint closed", "url", httpEndpoint) + }() + + abortChan := make(chan os.Signal) + signal.Notify(abortChan, os.Interrupt) + + sig := <-abortChan + log.Info("Exiting...", "signal", sig) + return nil +} diff --git a/cmd/geth/retesteth_copypaste.go b/cmd/geth/retesteth_copypaste.go new file mode 100644 index 0000000..6061702 --- /dev/null +++ b/cmd/geth/retesteth_copypaste.go @@ -0,0 +1,148 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
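A minimal sketch of driving the test_ namespace that retesteth() registers above, assuming the default --rpcaddr/--rpcport (127.0.0.1:8545) and the rpc package's usual lowercase-first-letter method naming (so MineBlocks would be exposed as test_mineBlocks); the endpoint URL and block count below are illustrative only:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

// callRetesteth posts one JSON-RPC request to the retesteth HTTP endpoint and
// returns the raw response body.
func callRetesteth(url, request string) (string, error) {
	resp, err := http.Post(url, "application/json", bytes.NewBufferString(request))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	url := "http://127.0.0.1:8545" // assumed default listen address and port
	// Ask the test namespace to mine a single block.
	reply, err := callRetesteth(url, `{"jsonrpc":"2.0","id":1,"method":"test_mineBlocks","params":[1]}`)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println(reply)
}

The same pattern would apply to the other namespaces registered above (eth_, debug_, web3_).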
+ +package geth + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +) + +// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction +type RPCTransaction struct { + BlockHash common.Hash `json:"blockHash"` + BlockNumber *hexutil.Big `json:"blockNumber"` + From common.Address `json:"from"` + Gas hexutil.Uint64 `json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice"` + Hash common.Hash `json:"hash"` + Input hexutil.Bytes `json:"input"` + Nonce hexutil.Uint64 `json:"nonce"` + To *common.Address `json:"to"` + TransactionIndex hexutil.Uint `json:"transactionIndex"` + Value *hexutil.Big `json:"value"` + V *hexutil.Big `json:"v"` + R *hexutil.Big `json:"r"` + S *hexutil.Big `json:"s"` +} + +// newRPCTransaction returns a transaction that will serialize to the RPC +// representation, with the given location metadata set (if available). +func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction { + var signer types.Signer = types.FrontierSigner{} + if tx.Protected() { + signer = types.NewEIP155Signer(tx.ChainId()) + } + from, _ := types.Sender(signer, tx) + v, r, s := tx.RawSignatureValues() + + result := &RPCTransaction{ + From: from, + Gas: hexutil.Uint64(tx.Gas()), + GasPrice: (*hexutil.Big)(tx.GasPrice()), + Hash: tx.Hash(), + Input: hexutil.Bytes(tx.Data()), + Nonce: hexutil.Uint64(tx.Nonce()), + To: tx.To(), + Value: (*hexutil.Big)(tx.Value()), + V: (*hexutil.Big)(v), + R: (*hexutil.Big)(r), + S: (*hexutil.Big)(s), + } + if blockHash != (common.Hash{}) { + result.BlockHash = blockHash + result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) + result.TransactionIndex = hexutil.Uint(index) + } + return result +} + +// newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation. +func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransaction { + txs := b.Transactions() + if index >= uint64(len(txs)) { + return nil + } + return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index) +} + +// newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation. +func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransaction { + for idx, tx := range b.Transactions() { + if tx.Hash() == hash { + return newRPCTransactionFromBlockIndex(b, uint64(idx)) + } + } + return nil +} + +// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are +// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain +// transaction hashes. 
+func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { + head := b.Header() // copies the header once + fields := map[string]interface{}{ + "number": (*hexutil.Big)(head.Number), + "hash": b.Hash(), + "parentHash": head.ParentHash, + "nonce": head.Nonce, + "mixHash": head.MixDigest, + "sha3Uncles": head.UncleHash, + "logsBloom": head.Bloom, + "stateRoot": head.Root, + "miner": head.Coinbase, + "difficulty": (*hexutil.Big)(head.Difficulty), + "extraData": hexutil.Bytes(head.Extra), + "size": hexutil.Uint64(b.Size()), + "gasLimit": hexutil.Uint64(head.GasLimit), + "gasUsed": hexutil.Uint64(head.GasUsed), + "timestamp": hexutil.Uint64(head.Time), + "transactionsRoot": head.TxHash, + "receiptsRoot": head.ReceiptHash, + } + + if inclTx { + formatTx := func(tx *types.Transaction) (interface{}, error) { + return tx.Hash(), nil + } + if fullTx { + formatTx = func(tx *types.Transaction) (interface{}, error) { + return newRPCTransactionFromBlockHash(b, tx.Hash()), nil + } + } + txs := b.Transactions() + transactions := make([]interface{}, len(txs)) + var err error + for i, tx := range txs { + if transactions[i], err = formatTx(tx); err != nil { + return nil, err + } + } + fields["transactions"] = transactions + } + + uncles := b.Uncles() + uncleHashes := make([]common.Hash, len(uncles)) + for i, uncle := range uncles { + uncleHashes[i] = uncle.Hash() + } + fields["uncles"] = uncleHashes + + return fields, nil +} diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go new file mode 100644 index 0000000..038d967 --- /dev/null +++ b/cmd/geth/usage.go @@ -0,0 +1,370 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +// Contains the geth command usage template and generator. + +package geth + +import ( + "io" + "sort" + + "strings" + + "github.com/Determinant/coreth/cmd/utils" + "github.com/Determinant/coreth/internal/debug" + cli "gopkg.in/urfave/cli.v1" +) + +// AppHelpTemplate is the test template for the default, global app help topic. +var AppHelpTemplate = `NAME: + {{.App.Name}} - {{.App.Usage}} + + Copyright 2013-2019 The go-ethereum Authors + +USAGE: + {{.App.HelpName}} [options]{{if .App.Commands}} command [command options]{{end}} {{if .App.ArgsUsage}}{{.App.ArgsUsage}}{{else}}[arguments...]{{end}} + {{if .App.Version}} +VERSION: + {{.App.Version}} + {{end}}{{if len .App.Authors}} +AUTHOR(S): + {{range .App.Authors}}{{ . }}{{end}} + {{end}}{{if .App.Commands}} +COMMANDS: + {{range .App.Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} + {{end}}{{end}}{{if .FlagGroups}} +{{range .FlagGroups}}{{.Name}} OPTIONS: + {{range .Flags}}{{.}} + {{end}} +{{end}}{{end}}{{if .App.Copyright }} +COPYRIGHT: + {{.App.Copyright}} + {{end}} +` + +// flagGroup is a collection of flags belonging to a single topic. 
+type flagGroup struct { + Name string + Flags []cli.Flag +} + +// AppHelpFlagGroups is the application flags, grouped by functionality. +var AppHelpFlagGroups = []flagGroup{ + { + Name: "ETHEREUM", + Flags: []cli.Flag{ + configFileFlag, + utils.DataDirFlag, + utils.AncientFlag, + utils.KeyStoreDirFlag, + utils.NoUSBFlag, + utils.SmartCardDaemonPathFlag, + utils.NetworkIdFlag, + utils.TestnetFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.SyncModeFlag, + utils.ExitWhenSyncedFlag, + utils.GCModeFlag, + utils.EthStatsURLFlag, + utils.IdentityFlag, + utils.LightKDFFlag, + utils.WhitelistFlag, + }, + }, + { + Name: "LIGHT CLIENT", + Flags: []cli.Flag{ + utils.LightServeFlag, + utils.LightIngressFlag, + utils.LightEgressFlag, + utils.LightMaxPeersFlag, + utils.UltraLightServersFlag, + utils.UltraLightFractionFlag, + utils.UltraLightOnlyAnnounceFlag, + }, + }, + { + Name: "DEVELOPER CHAIN", + Flags: []cli.Flag{ + utils.DeveloperFlag, + utils.DeveloperPeriodFlag, + }, + }, + { + Name: "ETHASH", + Flags: []cli.Flag{ + utils.EthashCacheDirFlag, + utils.EthashCachesInMemoryFlag, + utils.EthashCachesOnDiskFlag, + utils.EthashDatasetDirFlag, + utils.EthashDatasetsInMemoryFlag, + utils.EthashDatasetsOnDiskFlag, + }, + }, + //{ + // Name: "DASHBOARD", + // Flags: []cli.Flag{ + // utils.DashboardEnabledFlag, + // utils.DashboardAddrFlag, + // utils.DashboardPortFlag, + // utils.DashboardRefreshFlag, + // utils.DashboardAssetsFlag, + // }, + //}, + { + Name: "TRANSACTION POOL", + Flags: []cli.Flag{ + utils.TxPoolLocalsFlag, + utils.TxPoolNoLocalsFlag, + utils.TxPoolJournalFlag, + utils.TxPoolRejournalFlag, + utils.TxPoolPriceLimitFlag, + utils.TxPoolPriceBumpFlag, + utils.TxPoolAccountSlotsFlag, + utils.TxPoolGlobalSlotsFlag, + utils.TxPoolAccountQueueFlag, + utils.TxPoolGlobalQueueFlag, + utils.TxPoolLifetimeFlag, + }, + }, + { + Name: "PERFORMANCE TUNING", + Flags: []cli.Flag{ + utils.CacheFlag, + utils.CacheDatabaseFlag, + utils.CacheTrieFlag, + utils.CacheGCFlag, + utils.CacheNoPrefetchFlag, + }, + }, + { + Name: "ACCOUNT", + Flags: []cli.Flag{ + utils.UnlockedAccountFlag, + utils.PasswordFileFlag, + utils.ExternalSignerFlag, + utils.InsecureUnlockAllowedFlag, + }, + }, + { + Name: "API AND CONSOLE", + Flags: []cli.Flag{ + utils.IPCDisabledFlag, + utils.IPCPathFlag, + utils.RPCEnabledFlag, + utils.RPCListenAddrFlag, + utils.RPCPortFlag, + utils.RPCApiFlag, + utils.RPCGlobalGasCap, + utils.RPCCORSDomainFlag, + utils.RPCVirtualHostsFlag, + utils.WSEnabledFlag, + utils.WSListenAddrFlag, + utils.WSPortFlag, + utils.WSApiFlag, + utils.WSAllowedOriginsFlag, + utils.GraphQLEnabledFlag, + utils.GraphQLListenAddrFlag, + utils.GraphQLPortFlag, + utils.GraphQLCORSDomainFlag, + utils.GraphQLVirtualHostsFlag, + utils.JSpathFlag, + utils.ExecFlag, + utils.PreloadJSFlag, + }, + }, + { + Name: "NETWORKING", + Flags: []cli.Flag{ + utils.BootnodesFlag, + utils.BootnodesV4Flag, + utils.BootnodesV5Flag, + utils.ListenPortFlag, + utils.MaxPeersFlag, + utils.MaxPendingPeersFlag, + utils.NATFlag, + utils.NoDiscoverFlag, + utils.DiscoveryV5Flag, + utils.NetrestrictFlag, + utils.NodeKeyFileFlag, + utils.NodeKeyHexFlag, + }, + }, + { + Name: "MINER", + Flags: []cli.Flag{ + utils.MiningEnabledFlag, + utils.MinerThreadsFlag, + utils.MinerNotifyFlag, + utils.MinerGasPriceFlag, + utils.MinerGasTargetFlag, + utils.MinerGasLimitFlag, + utils.MinerEtherbaseFlag, + utils.MinerExtraDataFlag, + utils.MinerRecommitIntervalFlag, + utils.MinerNoVerfiyFlag, + }, + }, + { + Name: "GAS PRICE ORACLE", + Flags: []cli.Flag{ + 
utils.GpoBlocksFlag, + utils.GpoPercentileFlag, + }, + }, + { + Name: "VIRTUAL MACHINE", + Flags: []cli.Flag{ + utils.VMEnableDebugFlag, + utils.EVMInterpreterFlag, + utils.EWASMInterpreterFlag, + }, + }, + { + Name: "LOGGING AND DEBUGGING", + Flags: append([]cli.Flag{ + utils.FakePoWFlag, + utils.NoCompactionFlag, + }, debug.Flags...), + }, + { + Name: "METRICS AND STATS", + Flags: metricsFlags, + }, + { + Name: "WHISPER (EXPERIMENTAL)", + Flags: whisperFlags, + }, + { + Name: "DEPRECATED", + Flags: []cli.Flag{ + utils.LightLegacyServFlag, + utils.LightLegacyPeersFlag, + utils.MinerLegacyThreadsFlag, + utils.MinerLegacyGasTargetFlag, + utils.MinerLegacyGasPriceFlag, + utils.MinerLegacyEtherbaseFlag, + utils.MinerLegacyExtraDataFlag, + }, + }, + { + Name: "MISC", + }, +} + +// byCategory sorts an array of flagGroup by Name in the order +// defined in AppHelpFlagGroups. +type byCategory []flagGroup + +func (a byCategory) Len() int { return len(a) } +func (a byCategory) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byCategory) Less(i, j int) bool { + iCat, jCat := a[i].Name, a[j].Name + iIdx, jIdx := len(AppHelpFlagGroups), len(AppHelpFlagGroups) // ensure non categorized flags come last + + for i, group := range AppHelpFlagGroups { + if iCat == group.Name { + iIdx = i + } + if jCat == group.Name { + jIdx = i + } + } + + return iIdx < jIdx +} + +func flagCategory(flag cli.Flag) string { + for _, category := range AppHelpFlagGroups { + for _, flg := range category.Flags { + if flg.GetName() == flag.GetName() { + return category.Name + } + } + } + return "MISC" +} + +func init() { + // Override the default app help template + cli.AppHelpTemplate = AppHelpTemplate + + // Define a one shot struct to pass to the usage template + type helpData struct { + App interface{} + FlagGroups []flagGroup + } + + // Override the default app help printer, but only for the global app help + originalHelpPrinter := cli.HelpPrinter + cli.HelpPrinter = func(w io.Writer, tmpl string, data interface{}) { + if tmpl == AppHelpTemplate { + // Iterate over all the flags and add any uncategorized ones + categorized := make(map[string]struct{}) + for _, group := range AppHelpFlagGroups { + for _, flag := range group.Flags { + categorized[flag.String()] = struct{}{} + } + } + var uncategorized []cli.Flag + for _, flag := range data.(*cli.App).Flags { + if _, ok := categorized[flag.String()]; !ok { + if strings.HasPrefix(flag.GetName(), "dashboard") { + continue + } + uncategorized = append(uncategorized, flag) + } + } + if len(uncategorized) > 0 { + // Append all ungategorized options to the misc group + miscs := len(AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags) + AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags = append(AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags, uncategorized...) 
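+			// AppHelpFlagGroups is defined with the MISC group last, so appending to the final entry surfaces these flags under MISC.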
+ + // Make sure they are removed afterwards + defer func() { + AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags = AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags[:miscs] + }() + } + // Render out custom usage screen + originalHelpPrinter(w, tmpl, helpData{data, AppHelpFlagGroups}) + } else if tmpl == utils.CommandHelpTemplate { + // Iterate over all command specific flags and categorize them + categorized := make(map[string][]cli.Flag) + for _, flag := range data.(cli.Command).Flags { + if _, ok := categorized[flag.String()]; !ok { + categorized[flagCategory(flag)] = append(categorized[flagCategory(flag)], flag) + } + } + + // sort to get a stable ordering + sorted := make([]flagGroup, 0, len(categorized)) + for cat, flgs := range categorized { + sorted = append(sorted, flagGroup{cat, flgs}) + } + sort.Sort(byCategory(sorted)) + + // add sorted array to data and render with default printer + originalHelpPrinter(w, tmpl, map[string]interface{}{ + "cmd": data, + "categorizedFlags": sorted, + }) + } else { + originalHelpPrinter(w, tmpl, data) + } + } +} diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go new file mode 100644 index 0000000..97597bb --- /dev/null +++ b/cmd/utils/cmd.go @@ -0,0 +1,314 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +// Package utils contains internal helper functions for go-ethereum commands. +package utils + +import ( + "compress/gzip" + "fmt" + "io" + "os" + "os/signal" + "runtime" + "strings" + "syscall" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/Determinant/coreth/internal/debug" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rlp" +) + +const ( + importBatchSize = 2500 +) + +// Fatalf formats a message to standard error and exits the program. +// The message is also printed to standard output if standard error +// is redirected to a different file. +func Fatalf(format string, args ...interface{}) { + w := io.MultiWriter(os.Stdout, os.Stderr) + if runtime.GOOS == "windows" { + // The SameFile check below doesn't work on Windows. + // stdout is unlikely to get redirected though, so just print there. + w = os.Stdout + } else { + outf, _ := os.Stdout.Stat() + errf, _ := os.Stderr.Stat() + if outf != nil && errf != nil && os.SameFile(outf, errf) { + w = os.Stderr + } + } + fmt.Fprintf(w, "Fatal: "+format+"\n", args...) 
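+	// Exit with a non-zero status so wrapping scripts can detect the failure.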
+ os.Exit(1) +} + +func StartNode(stack *node.Node) { + if err := stack.Start(); err != nil { + Fatalf("Error starting protocol stack: %v", err) + } + go func() { + sigc := make(chan os.Signal, 1) + signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(sigc) + <-sigc + log.Info("Got interrupt, shutting down...") + go stack.Stop() + for i := 10; i > 0; i-- { + <-sigc + if i > 1 { + log.Warn("Already shutting down, interrupt more to panic.", "times", i-1) + } + } + debug.Exit() // ensure trace and CPU profile data is flushed. + debug.LoudPanic("boom") + }() +} + +func ImportChain(chain *core.BlockChain, fn string) error { + // Watch for Ctrl-C while the import is running. + // If a signal is received, the import will stop at the next batch. + interrupt := make(chan os.Signal, 1) + stop := make(chan struct{}) + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(interrupt) + defer close(interrupt) + go func() { + if _, ok := <-interrupt; ok { + log.Info("Interrupted during import, stopping at next batch") + } + close(stop) + }() + checkInterrupt := func() bool { + select { + case <-stop: + return true + default: + return false + } + } + + log.Info("Importing blockchain", "file", fn) + + // Open the file handle and potentially unwrap the gzip stream + fh, err := os.Open(fn) + if err != nil { + return err + } + defer fh.Close() + + var reader io.Reader = fh + if strings.HasSuffix(fn, ".gz") { + if reader, err = gzip.NewReader(reader); err != nil { + return err + } + } + stream := rlp.NewStream(reader, 0) + + // Run actual the import. + blocks := make(types.Blocks, importBatchSize) + n := 0 + for batch := 0; ; batch++ { + // Load a batch of RLP blocks. + if checkInterrupt() { + return fmt.Errorf("interrupted") + } + i := 0 + for ; i < importBatchSize; i++ { + var b types.Block + if err := stream.Decode(&b); err == io.EOF { + break + } else if err != nil { + return fmt.Errorf("at block %d: %v", n, err) + } + // don't import first block + if b.NumberU64() == 0 { + i-- + continue + } + blocks[i] = &b + n++ + } + if i == 0 { + break + } + // Import the batch. + if checkInterrupt() { + return fmt.Errorf("interrupted") + } + missing := missingBlocks(chain, blocks[:i]) + if len(missing) == 0 { + log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash()) + continue + } + if _, err := chain.InsertChain(missing); err != nil { + return fmt.Errorf("invalid block %d: %v", n, err) + } + } + return nil +} + +func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block { + head := chain.CurrentBlock() + for i, block := range blocks { + // If we're behind the chain head, only check block, state is available at head + if head.NumberU64() > block.NumberU64() { + if !chain.HasBlock(block.Hash(), block.NumberU64()) { + return blocks[i:] + } + continue + } + // If we're above the chain head, state availability is a must + if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) { + return blocks[i:] + } + } + return nil +} + +// ExportChain exports a blockchain into the specified file, truncating any data +// already present in the file. 
+func ExportChain(blockchain *core.BlockChain, fn string) error { + log.Info("Exporting blockchain", "file", fn) + + // Open the file handle and potentially wrap with a gzip stream + fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + if err != nil { + return err + } + defer fh.Close() + + var writer io.Writer = fh + if strings.HasSuffix(fn, ".gz") { + writer = gzip.NewWriter(writer) + defer writer.(*gzip.Writer).Close() + } + // Iterate over the blocks and export them + if err := blockchain.Export(writer); err != nil { + return err + } + log.Info("Exported blockchain", "file", fn) + + return nil +} + +// ExportAppendChain exports a blockchain into the specified file, appending to +// the file if data already exists in it. +func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error { + log.Info("Exporting blockchain", "file", fn) + + // Open the file handle and potentially wrap with a gzip stream + fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm) + if err != nil { + return err + } + defer fh.Close() + + var writer io.Writer = fh + if strings.HasSuffix(fn, ".gz") { + writer = gzip.NewWriter(writer) + defer writer.(*gzip.Writer).Close() + } + // Iterate over the blocks and export them + if err := blockchain.ExportN(writer, first, last); err != nil { + return err + } + log.Info("Exported blockchain to", "file", fn) + return nil +} + +// ImportPreimages imports a batch of exported hash preimages into the database. +func ImportPreimages(db ethdb.Database, fn string) error { + log.Info("Importing preimages", "file", fn) + + // Open the file handle and potentially unwrap the gzip stream + fh, err := os.Open(fn) + if err != nil { + return err + } + defer fh.Close() + + var reader io.Reader = fh + if strings.HasSuffix(fn, ".gz") { + if reader, err = gzip.NewReader(reader); err != nil { + return err + } + } + stream := rlp.NewStream(reader, 0) + + // Import the preimages in batches to prevent disk trashing + preimages := make(map[common.Hash][]byte) + + for { + // Read the next entry and ensure it's not junk + var blob []byte + + if err := stream.Decode(&blob); err != nil { + if err == io.EOF { + break + } + return err + } + // Accumulate the preimages and flush when enough ws gathered + preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob) + if len(preimages) > 1024 { + rawdb.WritePreimages(db, preimages) + preimages = make(map[common.Hash][]byte) + } + } + // Flush the last batch preimage data + if len(preimages) > 0 { + rawdb.WritePreimages(db, preimages) + } + return nil +} + +// ExportPreimages exports all known hash preimages into the specified file, +// truncating any data already present in the file. 
+func ExportPreimages(db ethdb.Database, fn string) error { + log.Info("Exporting preimages", "file", fn) + + // Open the file handle and potentially wrap with a gzip stream + fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + if err != nil { + return err + } + defer fh.Close() + + var writer io.Writer = fh + if strings.HasSuffix(fn, ".gz") { + writer = gzip.NewWriter(writer) + defer writer.(*gzip.Writer).Close() + } + // Iterate over the preimages and export them + it := db.NewIteratorWithPrefix([]byte("secure-key-")) + defer it.Release() + + for it.Next() { + if err := rlp.Encode(writer, it.Value()); err != nil { + return err + } + } + log.Info("Exported preimages", "file", fn) + return nil +} diff --git a/cmd/utils/customflags.go b/cmd/utils/customflags.go new file mode 100644 index 0000000..e5bf872 --- /dev/null +++ b/cmd/utils/customflags.go @@ -0,0 +1,240 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package utils + +import ( + "encoding" + "errors" + "flag" + "fmt" + "math/big" + "os" + "os/user" + "path" + "strings" + + "github.com/ethereum/go-ethereum/common/math" + "gopkg.in/urfave/cli.v1" +) + +// Custom type which is registered in the flags library which cli uses for +// argument parsing. This allows us to expand Value to an absolute path when +// the argument is parsed +type DirectoryString struct { + Value string +} + +func (self *DirectoryString) String() string { + return self.Value +} + +func (self *DirectoryString) Set(value string) error { + self.Value = expandPath(value) + return nil +} + +// Custom cli.Flag type which expand the received string to an absolute path. +// e.g. ~/.ethereum -> /home/username/.ethereum +type DirectoryFlag struct { + Name string + Value DirectoryString + Usage string +} + +func (self DirectoryFlag) String() string { + fmtString := "%s %v\t%v" + if len(self.Value.Value) > 0 { + fmtString = "%s \"%v\"\t%v" + } + return fmt.Sprintf(fmtString, prefixedNames(self.Name), self.Value.Value, self.Usage) +} + +func eachName(longName string, fn func(string)) { + parts := strings.Split(longName, ",") + for _, name := range parts { + name = strings.Trim(name, " ") + fn(name) + } +} + +// called by cli library, grabs variable from environment (if in env) +// and adds variable to flag set for parsing. 
+func (self DirectoryFlag) Apply(set *flag.FlagSet) { + eachName(self.Name, func(name string) { + set.Var(&self.Value, self.Name, self.Usage) + }) +} + +type TextMarshaler interface { + encoding.TextMarshaler + encoding.TextUnmarshaler +} + +// textMarshalerVal turns a TextMarshaler into a flag.Value +type textMarshalerVal struct { + v TextMarshaler +} + +func (v textMarshalerVal) String() string { + if v.v == nil { + return "" + } + text, _ := v.v.MarshalText() + return string(text) +} + +func (v textMarshalerVal) Set(s string) error { + return v.v.UnmarshalText([]byte(s)) +} + +// TextMarshalerFlag wraps a TextMarshaler value. +type TextMarshalerFlag struct { + Name string + Value TextMarshaler + Usage string +} + +func (f TextMarshalerFlag) GetName() string { + return f.Name +} + +func (f TextMarshalerFlag) String() string { + return fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage) +} + +func (f TextMarshalerFlag) Apply(set *flag.FlagSet) { + eachName(f.Name, func(name string) { + set.Var(textMarshalerVal{f.Value}, f.Name, f.Usage) + }) +} + +// GlobalTextMarshaler returns the value of a TextMarshalerFlag from the global flag set. +func GlobalTextMarshaler(ctx *cli.Context, name string) TextMarshaler { + val := ctx.GlobalGeneric(name) + if val == nil { + return nil + } + return val.(textMarshalerVal).v +} + +// BigFlag is a command line flag that accepts 256 bit big integers in decimal or +// hexadecimal syntax. +type BigFlag struct { + Name string + Value *big.Int + Usage string +} + +// bigValue turns *big.Int into a flag.Value +type bigValue big.Int + +func (b *bigValue) String() string { + if b == nil { + return "" + } + return (*big.Int)(b).String() +} + +func (b *bigValue) Set(s string) error { + int, ok := math.ParseBig256(s) + if !ok { + return errors.New("invalid integer syntax") + } + *b = (bigValue)(*int) + return nil +} + +func (f BigFlag) GetName() string { + return f.Name +} + +func (f BigFlag) String() string { + fmtString := "%s %v\t%v" + if f.Value != nil { + fmtString = "%s \"%v\"\t%v" + } + return fmt.Sprintf(fmtString, prefixedNames(f.Name), f.Value, f.Usage) +} + +func (f BigFlag) Apply(set *flag.FlagSet) { + eachName(f.Name, func(name string) { + set.Var((*bigValue)(f.Value), f.Name, f.Usage) + }) +} + +// GlobalBig returns the value of a BigFlag from the global flag set. +func GlobalBig(ctx *cli.Context, name string) *big.Int { + val := ctx.GlobalGeneric(name) + if val == nil { + return nil + } + return (*big.Int)(val.(*bigValue)) +} + +func prefixFor(name string) (prefix string) { + if len(name) == 1 { + prefix = "-" + } else { + prefix = "--" + } + + return +} + +func prefixedNames(fullName string) (prefixed string) { + parts := strings.Split(fullName, ",") + for i, name := range parts { + name = strings.Trim(name, " ") + prefixed += prefixFor(name) + name + if i < len(parts)-1 { + prefixed += ", " + } + } + return +} + +func (self DirectoryFlag) GetName() string { + return self.Name +} + +func (self *DirectoryFlag) Set(value string) { + self.Value.Value = value +} + +// Expands a file path +// 1. replace tilde with users home dir +// 2. expands embedded environment variables +// 3. cleans the path, e.g. /a/b/../c -> /a/c +// Note, it has limitations, e.g. 
~someuser/tmp will not be expanded +func expandPath(p string) string { + if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") { + if home := homeDir(); home != "" { + p = home + p[1:] + } + } + return path.Clean(os.ExpandEnv(p)) +} + +func homeDir() string { + if home := os.Getenv("HOME"); home != "" { + return home + } + if usr, err := user.Current(); err == nil { + return usr.HomeDir + } + return "" +} diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go new file mode 100644 index 0000000..7e28dff --- /dev/null +++ b/cmd/utils/flags.go @@ -0,0 +1,1747 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +// Package utils contains internal helper functions for go-ethereum commands. +package utils + +import ( + "crypto/ecdsa" + "errors" + "fmt" + "io/ioutil" + "math/big" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/fdlimit" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/clique" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/dashboard" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/gasprice" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/ethstats" + "github.com/ethereum/go-ethereum/graphql" + "github.com/ethereum/go-ethereum/les" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/metrics/influxdb" + "github.com/ethereum/go-ethereum/miner" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/discv5" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/nat" + "github.com/ethereum/go-ethereum/p2p/netutil" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" + whisper "github.com/ethereum/go-ethereum/whisper/whisperv6" + pcsclite "github.com/gballet/go-libpcsclite" + cli "gopkg.in/urfave/cli.v1" +) + +var ( + CommandHelpTemplate = `{{.cmd.Name}}{{if .cmd.Subcommands}} command{{end}}{{if .cmd.Flags}} [command options]{{end}} [arguments...] 
+{{if .cmd.Description}}{{.cmd.Description}} +{{end}}{{if .cmd.Subcommands}} +SUBCOMMANDS: + {{range .cmd.Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} + {{end}}{{end}}{{if .categorizedFlags}} +{{range $idx, $categorized := .categorizedFlags}}{{$categorized.Name}} OPTIONS: +{{range $categorized.Flags}}{{"\t"}}{{.}} +{{end}} +{{end}}{{end}}` +) + +func init() { + cli.AppHelpTemplate = `{{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...] + +VERSION: + {{.Version}} + +COMMANDS: + {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} + {{end}}{{if .Flags}} +GLOBAL OPTIONS: + {{range .Flags}}{{.}} + {{end}}{{end}} +` + + cli.CommandHelpTemplate = CommandHelpTemplate +} + +// NewApp creates an app with sane defaults. +func NewApp(gitCommit, gitDate, usage string) *cli.App { + app := cli.NewApp() + app.Name = filepath.Base(os.Args[0]) + app.Author = "" + app.Email = "" + app.Version = params.VersionWithCommit(gitCommit, gitDate) + app.Usage = usage + return app +} + +// These are all the command line flags we support. +// If you add to this list, please remember to include the +// flag in the appropriate command definition. +// +// The flags are defined here so their names and help texts +// are the same for all commands. + +var ( + // General settings + DataDirFlag = DirectoryFlag{ + Name: "datadir", + Usage: "Data directory for the databases and keystore", + Value: DirectoryString{node.DefaultDataDir()}, + } + AncientFlag = DirectoryFlag{ + Name: "datadir.ancient", + Usage: "Data directory for ancient chain segments (default = inside chaindata)", + } + KeyStoreDirFlag = DirectoryFlag{ + Name: "keystore", + Usage: "Directory for the keystore (default = inside the datadir)", + } + NoUSBFlag = cli.BoolFlag{ + Name: "nousb", + Usage: "Disables monitoring for and managing USB hardware wallets", + } + SmartCardDaemonPathFlag = cli.StringFlag{ + Name: "pcscdpath", + Usage: "Path to the smartcard daemon (pcscd) socket file", + Value: pcsclite.PCSCDSockName, + } + NetworkIdFlag = cli.Uint64Flag{ + Name: "networkid", + Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten, 4=Rinkeby)", + Value: eth.DefaultConfig.NetworkId, + } + TestnetFlag = cli.BoolFlag{ + Name: "testnet", + Usage: "Ropsten network: pre-configured proof-of-work test network", + } + RinkebyFlag = cli.BoolFlag{ + Name: "rinkeby", + Usage: "Rinkeby network: pre-configured proof-of-authority test network", + } + GoerliFlag = cli.BoolFlag{ + Name: "goerli", + Usage: "Görli network: pre-configured proof-of-authority test network", + } + DeveloperFlag = cli.BoolFlag{ + Name: "dev", + Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled", + } + DeveloperPeriodFlag = cli.IntFlag{ + Name: "dev.period", + Usage: "Block period to use in developer mode (0 = mine only if transaction pending)", + } + IdentityFlag = cli.StringFlag{ + Name: "identity", + Usage: "Custom node name", + } + DocRootFlag = DirectoryFlag{ + Name: "docroot", + Usage: "Document Root for HTTPClient file scheme", + Value: DirectoryString{homeDir()}, + } + ExitWhenSyncedFlag = cli.BoolFlag{ + Name: "exitwhensynced", + Usage: "Exits after block synchronisation completes", + } + IterativeOutputFlag = cli.BoolFlag{ + Name: "iterative", + Usage: "Print streaming JSON iteratively, delimited by newlines", + } + ExcludeStorageFlag = cli.BoolFlag{ + Name: "nostorage", + Usage: "Exclude storage entries (save 
db lookups)", + } + IncludeIncompletesFlag = cli.BoolFlag{ + Name: "incompletes", + Usage: "Include accounts for which we don't have the address (missing preimage)", + } + ExcludeCodeFlag = cli.BoolFlag{ + Name: "nocode", + Usage: "Exclude contract code (save db lookups)", + } + defaultSyncMode = eth.DefaultConfig.SyncMode + SyncModeFlag = TextMarshalerFlag{ + Name: "syncmode", + Usage: `Blockchain sync mode ("fast", "full", or "light")`, + Value: &defaultSyncMode, + } + GCModeFlag = cli.StringFlag{ + Name: "gcmode", + Usage: `Blockchain garbage collection mode ("full", "archive")`, + Value: "full", + } + LightKDFFlag = cli.BoolFlag{ + Name: "lightkdf", + Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength", + } + WhitelistFlag = cli.StringFlag{ + Name: "whitelist", + Usage: "Comma separated block number-to-hash mappings to enforce (=)", + } + // Light server and client settings + LightLegacyServFlag = cli.IntFlag{ // Deprecated in favor of light.serve, remove in 2021 + Name: "lightserv", + Usage: "Maximum percentage of time allowed for serving LES requests (deprecated, use --light.serve)", + Value: eth.DefaultConfig.LightServ, + } + LightServeFlag = cli.IntFlag{ + Name: "light.serve", + Usage: "Maximum percentage of time allowed for serving LES requests (multi-threaded processing allows values over 100)", + Value: eth.DefaultConfig.LightServ, + } + LightIngressFlag = cli.IntFlag{ + Name: "light.ingress", + Usage: "Incoming bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)", + Value: eth.DefaultConfig.LightIngress, + } + LightEgressFlag = cli.IntFlag{ + Name: "light.egress", + Usage: "Outgoing bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)", + Value: eth.DefaultConfig.LightEgress, + } + LightLegacyPeersFlag = cli.IntFlag{ // Deprecated in favor of light.maxpeers, remove in 2021 + Name: "lightpeers", + Usage: "Maximum number of light clients to serve, or light servers to attach to (deprecated, use --light.maxpeers)", + Value: eth.DefaultConfig.LightPeers, + } + LightMaxPeersFlag = cli.IntFlag{ + Name: "light.maxpeers", + Usage: "Maximum number of light clients to serve, or light servers to attach to", + Value: eth.DefaultConfig.LightPeers, + } + UltraLightServersFlag = cli.StringFlag{ + Name: "ulc.servers", + Usage: "List of trusted ultra-light servers", + Value: strings.Join(eth.DefaultConfig.UltraLightServers, ","), + } + UltraLightFractionFlag = cli.IntFlag{ + Name: "ulc.fraction", + Usage: "Minimum % of trusted ultra-light servers required to announce a new head", + Value: eth.DefaultConfig.UltraLightFraction, + } + UltraLightOnlyAnnounceFlag = cli.BoolFlag{ + Name: "ulc.onlyannounce", + Usage: "Ultra light server sends announcements only", + } + // Dashboard settings + DashboardEnabledFlag = cli.BoolFlag{ + Name: "dashboard", + Usage: "Enable the dashboard", + } + DashboardAddrFlag = cli.StringFlag{ + Name: "dashboard.addr", + Usage: "Dashboard listening interface", + Value: dashboard.DefaultConfig.Host, + } + DashboardPortFlag = cli.IntFlag{ + Name: "dashboard.host", + Usage: "Dashboard listening port", + Value: dashboard.DefaultConfig.Port, + } + DashboardRefreshFlag = cli.DurationFlag{ + Name: "dashboard.refresh", + Usage: "Dashboard metrics collection refresh rate", + Value: dashboard.DefaultConfig.Refresh, + } + // Ethash settings + EthashCacheDirFlag = DirectoryFlag{ + Name: "ethash.cachedir", + Usage: "Directory to store the ethash verification caches (default = inside the datadir)", + } + 
EthashCachesInMemoryFlag = cli.IntFlag{ + Name: "ethash.cachesinmem", + Usage: "Number of recent ethash caches to keep in memory (16MB each)", + Value: eth.DefaultConfig.Ethash.CachesInMem, + } + EthashCachesOnDiskFlag = cli.IntFlag{ + Name: "ethash.cachesondisk", + Usage: "Number of recent ethash caches to keep on disk (16MB each)", + Value: eth.DefaultConfig.Ethash.CachesOnDisk, + } + EthashDatasetDirFlag = DirectoryFlag{ + Name: "ethash.dagdir", + Usage: "Directory to store the ethash mining DAGs (default = inside home folder)", + Value: DirectoryString{eth.DefaultConfig.Ethash.DatasetDir}, + } + EthashDatasetsInMemoryFlag = cli.IntFlag{ + Name: "ethash.dagsinmem", + Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)", + Value: eth.DefaultConfig.Ethash.DatasetsInMem, + } + EthashDatasetsOnDiskFlag = cli.IntFlag{ + Name: "ethash.dagsondisk", + Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)", + Value: eth.DefaultConfig.Ethash.DatasetsOnDisk, + } + // Transaction pool settings + TxPoolLocalsFlag = cli.StringFlag{ + Name: "txpool.locals", + Usage: "Comma separated accounts to treat as locals (no flush, priority inclusion)", + } + TxPoolNoLocalsFlag = cli.BoolFlag{ + Name: "txpool.nolocals", + Usage: "Disables price exemptions for locally submitted transactions", + } + TxPoolJournalFlag = cli.StringFlag{ + Name: "txpool.journal", + Usage: "Disk journal for local transaction to survive node restarts", + Value: core.DefaultTxPoolConfig.Journal, + } + TxPoolRejournalFlag = cli.DurationFlag{ + Name: "txpool.rejournal", + Usage: "Time interval to regenerate the local transaction journal", + Value: core.DefaultTxPoolConfig.Rejournal, + } + TxPoolPriceLimitFlag = cli.Uint64Flag{ + Name: "txpool.pricelimit", + Usage: "Minimum gas price limit to enforce for acceptance into the pool", + Value: eth.DefaultConfig.TxPool.PriceLimit, + } + TxPoolPriceBumpFlag = cli.Uint64Flag{ + Name: "txpool.pricebump", + Usage: "Price bump percentage to replace an already existing transaction", + Value: eth.DefaultConfig.TxPool.PriceBump, + } + TxPoolAccountSlotsFlag = cli.Uint64Flag{ + Name: "txpool.accountslots", + Usage: "Minimum number of executable transaction slots guaranteed per account", + Value: eth.DefaultConfig.TxPool.AccountSlots, + } + TxPoolGlobalSlotsFlag = cli.Uint64Flag{ + Name: "txpool.globalslots", + Usage: "Maximum number of executable transaction slots for all accounts", + Value: eth.DefaultConfig.TxPool.GlobalSlots, + } + TxPoolAccountQueueFlag = cli.Uint64Flag{ + Name: "txpool.accountqueue", + Usage: "Maximum number of non-executable transaction slots permitted per account", + Value: eth.DefaultConfig.TxPool.AccountQueue, + } + TxPoolGlobalQueueFlag = cli.Uint64Flag{ + Name: "txpool.globalqueue", + Usage: "Maximum number of non-executable transaction slots for all accounts", + Value: eth.DefaultConfig.TxPool.GlobalQueue, + } + TxPoolLifetimeFlag = cli.DurationFlag{ + Name: "txpool.lifetime", + Usage: "Maximum amount of time non-executable transaction are queued", + Value: eth.DefaultConfig.TxPool.Lifetime, + } + // Performance tuning settings + CacheFlag = cli.IntFlag{ + Name: "cache", + Usage: "Megabytes of memory allocated to internal caching (default = 4096 mainnet full node, 128 light mode)", + Value: 1024, + } + CacheDatabaseFlag = cli.IntFlag{ + Name: "cache.database", + Usage: "Percentage of cache memory allowance to use for database io", + Value: 50, + } + CacheTrieFlag = cli.IntFlag{ + Name: "cache.trie", + Usage: "Percentage of cache memory 
allowance to use for trie caching (default = 25% full mode, 50% archive mode)", + Value: 25, + } + CacheGCFlag = cli.IntFlag{ + Name: "cache.gc", + Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)", + Value: 25, + } + CacheNoPrefetchFlag = cli.BoolFlag{ + Name: "cache.noprefetch", + Usage: "Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data)", + } + // Miner settings + MiningEnabledFlag = cli.BoolFlag{ + Name: "mine", + Usage: "Enable mining", + } + MinerThreadsFlag = cli.IntFlag{ + Name: "miner.threads", + Usage: "Number of CPU threads to use for mining", + Value: 0, + } + MinerLegacyThreadsFlag = cli.IntFlag{ + Name: "minerthreads", + Usage: "Number of CPU threads to use for mining (deprecated, use --miner.threads)", + Value: 0, + } + MinerNotifyFlag = cli.StringFlag{ + Name: "miner.notify", + Usage: "Comma separated HTTP URL list to notify of new work packages", + } + MinerGasTargetFlag = cli.Uint64Flag{ + Name: "miner.gastarget", + Usage: "Target gas floor for mined blocks", + Value: eth.DefaultConfig.Miner.GasFloor, + } + MinerLegacyGasTargetFlag = cli.Uint64Flag{ + Name: "targetgaslimit", + Usage: "Target gas floor for mined blocks (deprecated, use --miner.gastarget)", + Value: eth.DefaultConfig.Miner.GasFloor, + } + MinerGasLimitFlag = cli.Uint64Flag{ + Name: "miner.gaslimit", + Usage: "Target gas ceiling for mined blocks", + Value: eth.DefaultConfig.Miner.GasCeil, + } + MinerGasPriceFlag = BigFlag{ + Name: "miner.gasprice", + Usage: "Minimum gas price for mining a transaction", + Value: eth.DefaultConfig.Miner.GasPrice, + } + MinerLegacyGasPriceFlag = BigFlag{ + Name: "gasprice", + Usage: "Minimum gas price for mining a transaction (deprecated, use --miner.gasprice)", + Value: eth.DefaultConfig.Miner.GasPrice, + } + MinerEtherbaseFlag = cli.StringFlag{ + Name: "miner.etherbase", + Usage: "Public address for block mining rewards (default = first account)", + Value: "0", + } + MinerLegacyEtherbaseFlag = cli.StringFlag{ + Name: "etherbase", + Usage: "Public address for block mining rewards (default = first account, deprecated, use --miner.etherbase)", + Value: "0", + } + MinerExtraDataFlag = cli.StringFlag{ + Name: "miner.extradata", + Usage: "Block extra data set by the miner (default = client version)", + } + MinerLegacyExtraDataFlag = cli.StringFlag{ + Name: "extradata", + Usage: "Block extra data set by the miner (default = client version, deprecated, use --miner.extradata)", + } + MinerRecommitIntervalFlag = cli.DurationFlag{ + Name: "miner.recommit", + Usage: "Time interval to recreate the block being mined", + Value: eth.DefaultConfig.Miner.Recommit, + } + MinerNoVerfiyFlag = cli.BoolFlag{ + Name: "miner.noverify", + Usage: "Disable remote sealing verification", + } + // Account settings + UnlockedAccountFlag = cli.StringFlag{ + Name: "unlock", + Usage: "Comma separated list of accounts to unlock", + Value: "", + } + PasswordFileFlag = cli.StringFlag{ + Name: "password", + Usage: "Password file to use for non-interactive password input", + Value: "", + } + ExternalSignerFlag = cli.StringFlag{ + Name: "signer", + Usage: "External signer (url or path to ipc file)", + Value: "", + } + VMEnableDebugFlag = cli.BoolFlag{ + Name: "vmdebug", + Usage: "Record information useful for VM and contract debugging", + } + InsecureUnlockAllowedFlag = cli.BoolFlag{ + Name: "allow-insecure-unlock", + Usage: "Allow insecure account unlocking when account-related RPCs are exposed by 
http", + } + RPCGlobalGasCap = cli.Uint64Flag{ + Name: "rpc.gascap", + Usage: "Sets a cap on gas that can be used in eth_call/estimateGas", + } + // Logging and debug settings + EthStatsURLFlag = cli.StringFlag{ + Name: "ethstats", + Usage: "Reporting URL of a ethstats service (nodename:secret@host:port)", + } + FakePoWFlag = cli.BoolFlag{ + Name: "fakepow", + Usage: "Disables proof-of-work verification", + } + NoCompactionFlag = cli.BoolFlag{ + Name: "nocompaction", + Usage: "Disables db compaction after import", + } + // RPC settings + IPCDisabledFlag = cli.BoolFlag{ + Name: "ipcdisable", + Usage: "Disable the IPC-RPC server", + } + IPCPathFlag = DirectoryFlag{ + Name: "ipcpath", + Usage: "Filename for IPC socket/pipe within the datadir (explicit paths escape it)", + } + RPCEnabledFlag = cli.BoolFlag{ + Name: "rpc", + Usage: "Enable the HTTP-RPC server", + } + RPCListenAddrFlag = cli.StringFlag{ + Name: "rpcaddr", + Usage: "HTTP-RPC server listening interface", + Value: node.DefaultHTTPHost, + } + RPCPortFlag = cli.IntFlag{ + Name: "rpcport", + Usage: "HTTP-RPC server listening port", + Value: node.DefaultHTTPPort, + } + RPCCORSDomainFlag = cli.StringFlag{ + Name: "rpccorsdomain", + Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)", + Value: "", + } + RPCVirtualHostsFlag = cli.StringFlag{ + Name: "rpcvhosts", + Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.", + Value: strings.Join(node.DefaultConfig.HTTPVirtualHosts, ","), + } + RPCApiFlag = cli.StringFlag{ + Name: "rpcapi", + Usage: "API's offered over the HTTP-RPC interface", + Value: "", + } + WSEnabledFlag = cli.BoolFlag{ + Name: "ws", + Usage: "Enable the WS-RPC server", + } + WSListenAddrFlag = cli.StringFlag{ + Name: "wsaddr", + Usage: "WS-RPC server listening interface", + Value: node.DefaultWSHost, + } + WSPortFlag = cli.IntFlag{ + Name: "wsport", + Usage: "WS-RPC server listening port", + Value: node.DefaultWSPort, + } + WSApiFlag = cli.StringFlag{ + Name: "wsapi", + Usage: "API's offered over the WS-RPC interface", + Value: "", + } + WSAllowedOriginsFlag = cli.StringFlag{ + Name: "wsorigins", + Usage: "Origins from which to accept websockets requests", + Value: "", + } + GraphQLEnabledFlag = cli.BoolFlag{ + Name: "graphql", + Usage: "Enable the GraphQL server", + } + GraphQLListenAddrFlag = cli.StringFlag{ + Name: "graphql.addr", + Usage: "GraphQL server listening interface", + Value: node.DefaultGraphQLHost, + } + GraphQLPortFlag = cli.IntFlag{ + Name: "graphql.port", + Usage: "GraphQL server listening port", + Value: node.DefaultGraphQLPort, + } + GraphQLCORSDomainFlag = cli.StringFlag{ + Name: "graphql.corsdomain", + Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)", + Value: "", + } + GraphQLVirtualHostsFlag = cli.StringFlag{ + Name: "graphql.vhosts", + Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). 
Accepts '*' wildcard.", + Value: strings.Join(node.DefaultConfig.GraphQLVirtualHosts, ","), + } + ExecFlag = cli.StringFlag{ + Name: "exec", + Usage: "Execute JavaScript statement", + } + PreloadJSFlag = cli.StringFlag{ + Name: "preload", + Usage: "Comma separated list of JavaScript files to preload into the console", + } + + // Network Settings + MaxPeersFlag = cli.IntFlag{ + Name: "maxpeers", + Usage: "Maximum number of network peers (network disabled if set to 0)", + Value: node.DefaultConfig.P2P.MaxPeers, + } + MaxPendingPeersFlag = cli.IntFlag{ + Name: "maxpendpeers", + Usage: "Maximum number of pending connection attempts (defaults used if set to 0)", + Value: node.DefaultConfig.P2P.MaxPendingPeers, + } + ListenPortFlag = cli.IntFlag{ + Name: "port", + Usage: "Network listening port", + Value: 30303, + } + BootnodesFlag = cli.StringFlag{ + Name: "bootnodes", + Usage: "Comma separated enode URLs for P2P discovery bootstrap (set v4+v5 instead for light servers)", + Value: "", + } + BootnodesV4Flag = cli.StringFlag{ + Name: "bootnodesv4", + Usage: "Comma separated enode URLs for P2P v4 discovery bootstrap (light server, full nodes)", + Value: "", + } + BootnodesV5Flag = cli.StringFlag{ + Name: "bootnodesv5", + Usage: "Comma separated enode URLs for P2P v5 discovery bootstrap (light server, light nodes)", + Value: "", + } + NodeKeyFileFlag = cli.StringFlag{ + Name: "nodekey", + Usage: "P2P node key file", + } + NodeKeyHexFlag = cli.StringFlag{ + Name: "nodekeyhex", + Usage: "P2P node key as hex (for testing)", + } + NATFlag = cli.StringFlag{ + Name: "nat", + Usage: "NAT port mapping mechanism (any|none|upnp|pmp|extip:)", + Value: "any", + } + NoDiscoverFlag = cli.BoolFlag{ + Name: "nodiscover", + Usage: "Disables the peer discovery mechanism (manual peer addition)", + } + DiscoveryV5Flag = cli.BoolFlag{ + Name: "v5disc", + Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism", + } + NetrestrictFlag = cli.StringFlag{ + Name: "netrestrict", + Usage: "Restricts network communication to the given IP networks (CIDR masks)", + } + + // ATM the url is left to the user and deployment to + JSpathFlag = cli.StringFlag{ + Name: "jspath", + Usage: "JavaScript root path for `loadScript`", + Value: ".", + } + + // Gas price oracle settings + GpoBlocksFlag = cli.IntFlag{ + Name: "gpoblocks", + Usage: "Number of recent blocks to check for gas prices", + Value: eth.DefaultConfig.GPO.Blocks, + } + GpoPercentileFlag = cli.IntFlag{ + Name: "gpopercentile", + Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices", + Value: eth.DefaultConfig.GPO.Percentile, + } + WhisperEnabledFlag = cli.BoolFlag{ + Name: "shh", + Usage: "Enable Whisper", + } + WhisperMaxMessageSizeFlag = cli.IntFlag{ + Name: "shh.maxmessagesize", + Usage: "Max message size accepted", + Value: int(whisper.DefaultMaxMessageSize), + } + WhisperMinPOWFlag = cli.Float64Flag{ + Name: "shh.pow", + Usage: "Minimum POW accepted", + Value: whisper.DefaultMinimumPoW, + } + WhisperRestrictConnectionBetweenLightClientsFlag = cli.BoolFlag{ + Name: "shh.restrict-light", + Usage: "Restrict connection between two whisper light clients", + } + + // Metrics flags + MetricsEnabledFlag = cli.BoolFlag{ + Name: "metrics", + Usage: "Enable metrics collection and reporting", + } + MetricsEnabledExpensiveFlag = cli.BoolFlag{ + Name: "metrics.expensive", + Usage: "Enable expensive metrics collection and reporting", + } + MetricsEnableInfluxDBFlag = cli.BoolFlag{ + Name: "metrics.influxdb", + Usage: "Enable 
metrics export/push to an external InfluxDB database", + } + MetricsInfluxDBEndpointFlag = cli.StringFlag{ + Name: "metrics.influxdb.endpoint", + Usage: "InfluxDB API endpoint to report metrics to", + Value: "http://localhost:8086", + } + MetricsInfluxDBDatabaseFlag = cli.StringFlag{ + Name: "metrics.influxdb.database", + Usage: "InfluxDB database name to push reported metrics to", + Value: "geth", + } + MetricsInfluxDBUsernameFlag = cli.StringFlag{ + Name: "metrics.influxdb.username", + Usage: "Username to authorize access to the database", + Value: "test", + } + MetricsInfluxDBPasswordFlag = cli.StringFlag{ + Name: "metrics.influxdb.password", + Usage: "Password to authorize access to the database", + Value: "test", + } + // Tags are part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB. + // For example `host` tag could be used so that we can group all nodes and average a measurement + // across all of them, but also so that we can select a specific node and inspect its measurements. + // https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key + MetricsInfluxDBTagsFlag = cli.StringFlag{ + Name: "metrics.influxdb.tags", + Usage: "Comma-separated InfluxDB tags (key/values) attached to all measurements", + Value: "host=localhost", + } + + EWASMInterpreterFlag = cli.StringFlag{ + Name: "vm.ewasm", + Usage: "External ewasm configuration (default = built-in interpreter)", + Value: "", + } + EVMInterpreterFlag = cli.StringFlag{ + Name: "vm.evm", + Usage: "External EVM configuration (default = built-in interpreter)", + Value: "", + } +) + +// MakeDataDir retrieves the currently requested data directory, terminating +// if none (or the empty string) is specified. If the node is starting a testnet, +// the a subdirectory of the specified datadir will be used. +func MakeDataDir(ctx *cli.Context) string { + if path := ctx.GlobalString(DataDirFlag.Name); path != "" { + if ctx.GlobalBool(TestnetFlag.Name) { + return filepath.Join(path, "testnet") + } + if ctx.GlobalBool(RinkebyFlag.Name) { + return filepath.Join(path, "rinkeby") + } + if ctx.GlobalBool(GoerliFlag.Name) { + return filepath.Join(path, "goerli") + } + return path + } + Fatalf("Cannot determine default data directory, please set manually (--datadir)") + return "" +} + +// setNodeKey creates a node key from set command line flags, either loading it +// from a file or as a specified hex value. If neither flags were provided, this +// method returns nil and an emphemeral key is to be generated. +func setNodeKey(ctx *cli.Context, cfg *p2p.Config) { + var ( + hex = ctx.GlobalString(NodeKeyHexFlag.Name) + file = ctx.GlobalString(NodeKeyFileFlag.Name) + key *ecdsa.PrivateKey + err error + ) + switch { + case file != "" && hex != "": + Fatalf("Options %q and %q are mutually exclusive", NodeKeyFileFlag.Name, NodeKeyHexFlag.Name) + case file != "": + if key, err = crypto.LoadECDSA(file); err != nil { + Fatalf("Option %q: %v", NodeKeyFileFlag.Name, err) + } + cfg.PrivateKey = key + case hex != "": + if key, err = crypto.HexToECDSA(hex); err != nil { + Fatalf("Option %q: %v", NodeKeyHexFlag.Name, err) + } + cfg.PrivateKey = key + } +} + +// setNodeUserIdent creates the user identifier from CLI flags. 
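// A standalone sketch (not part of this patch) of how a value for --nodekeyhex,
// which setNodeKey above parses with crypto.HexToECDSA, can be generated using
// go-ethereum's crypto package:
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey() // fresh secp256k1 private key
	if err != nil {
		panic(err)
	}
	// FromECDSA yields the raw 32-byte scalar; hex-encode it for --nodekeyhex.
	fmt.Println(hex.EncodeToString(crypto.FromECDSA(key)))
}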
+func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) { + if identity := ctx.GlobalString(IdentityFlag.Name); len(identity) > 0 { + cfg.UserIdent = identity + } +} + +// setBootstrapNodes creates a list of bootstrap nodes from the command line +// flags, reverting to pre-configured ones if none have been specified. +func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) { + urls := params.MainnetBootnodes + switch { + case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV4Flag.Name): + if ctx.GlobalIsSet(BootnodesV4Flag.Name) { + urls = strings.Split(ctx.GlobalString(BootnodesV4Flag.Name), ",") + } else { + urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",") + } + case ctx.GlobalBool(TestnetFlag.Name): + urls = params.TestnetBootnodes + case ctx.GlobalBool(RinkebyFlag.Name): + urls = params.RinkebyBootnodes + case ctx.GlobalBool(GoerliFlag.Name): + urls = params.GoerliBootnodes + case cfg.BootstrapNodes != nil: + return // already set, don't apply defaults. + } + + cfg.BootstrapNodes = make([]*enode.Node, 0, len(urls)) + for _, url := range urls { + if url != "" { + node, err := enode.Parse(enode.ValidSchemes, url) + if err != nil { + log.Crit("Bootstrap URL invalid", "enode", url, "err", err) + continue + } + cfg.BootstrapNodes = append(cfg.BootstrapNodes, node) + } + } +} + +// setBootstrapNodesV5 creates a list of bootstrap nodes from the command line +// flags, reverting to pre-configured ones if none have been specified. +func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) { + urls := params.DiscoveryV5Bootnodes + switch { + case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV5Flag.Name): + if ctx.GlobalIsSet(BootnodesV5Flag.Name) { + urls = strings.Split(ctx.GlobalString(BootnodesV5Flag.Name), ",") + } else { + urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",") + } + case ctx.GlobalBool(RinkebyFlag.Name): + urls = params.RinkebyBootnodes + case ctx.GlobalBool(GoerliFlag.Name): + urls = params.GoerliBootnodes + case cfg.BootstrapNodesV5 != nil: + return // already set, don't apply defaults. + } + + cfg.BootstrapNodesV5 = make([]*discv5.Node, 0, len(urls)) + for _, url := range urls { + if url != "" { + node, err := discv5.ParseNode(url) + if err != nil { + log.Error("Bootstrap URL invalid", "enode", url, "err", err) + continue + } + cfg.BootstrapNodesV5 = append(cfg.BootstrapNodesV5, node) + } + } +} + +// setListenAddress creates a TCP listening address string from set command +// line flags. +func setListenAddress(ctx *cli.Context, cfg *p2p.Config) { + if ctx.GlobalIsSet(ListenPortFlag.Name) { + cfg.ListenAddr = fmt.Sprintf(":%d", ctx.GlobalInt(ListenPortFlag.Name)) + } +} + +// setNAT creates a port mapper from command line flags. +func setNAT(ctx *cli.Context, cfg *p2p.Config) { + if ctx.GlobalIsSet(NATFlag.Name) { + natif, err := nat.Parse(ctx.GlobalString(NATFlag.Name)) + if err != nil { + Fatalf("Option %s: %v", NATFlag.Name, err) + } + cfg.NAT = natif + } +} + +// splitAndTrim splits input separated by a comma +// and trims excessive white space from the substrings. +func splitAndTrim(input string) []string { + result := strings.Split(input, ",") + for i, r := range result { + result[i] = strings.TrimSpace(r) + } + return result +} + +// setHTTP creates the HTTP RPC listener interface string from the set +// command line flags, returning empty if the HTTP endpoint is disabled. 
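// Hedged illustration of the splitAndTrim helper above: comma-separated flag
// values such as --rpcapi "eth, net , web3" are split and whitespace-trimmed.
// The helper is re-declared here only so the sketch is self-contained.
package main

import (
	"fmt"
	"strings"
)

func splitAndTrim(input string) []string {
	parts := strings.Split(input, ",")
	for i, p := range parts {
		parts[i] = strings.TrimSpace(p)
	}
	return parts
}

func main() {
	fmt.Println(splitAndTrim("eth, net , web3")) // [eth net web3]
}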
+func setHTTP(ctx *cli.Context, cfg *node.Config) { + if ctx.GlobalBool(RPCEnabledFlag.Name) && cfg.HTTPHost == "" { + cfg.HTTPHost = "127.0.0.1" + if ctx.GlobalIsSet(RPCListenAddrFlag.Name) { + cfg.HTTPHost = ctx.GlobalString(RPCListenAddrFlag.Name) + } + } + if ctx.GlobalIsSet(RPCPortFlag.Name) { + cfg.HTTPPort = ctx.GlobalInt(RPCPortFlag.Name) + } + if ctx.GlobalIsSet(RPCCORSDomainFlag.Name) { + cfg.HTTPCors = splitAndTrim(ctx.GlobalString(RPCCORSDomainFlag.Name)) + } + if ctx.GlobalIsSet(RPCApiFlag.Name) { + cfg.HTTPModules = splitAndTrim(ctx.GlobalString(RPCApiFlag.Name)) + } + if ctx.GlobalIsSet(RPCVirtualHostsFlag.Name) { + cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(RPCVirtualHostsFlag.Name)) + } +} + +// setGraphQL creates the GraphQL listener interface string from the set +// command line flags, returning empty if the GraphQL endpoint is disabled. +func setGraphQL(ctx *cli.Context, cfg *node.Config) { + if ctx.GlobalBool(GraphQLEnabledFlag.Name) && cfg.GraphQLHost == "" { + cfg.GraphQLHost = "127.0.0.1" + if ctx.GlobalIsSet(GraphQLListenAddrFlag.Name) { + cfg.GraphQLHost = ctx.GlobalString(GraphQLListenAddrFlag.Name) + } + } + cfg.GraphQLPort = ctx.GlobalInt(GraphQLPortFlag.Name) + if ctx.GlobalIsSet(GraphQLCORSDomainFlag.Name) { + cfg.GraphQLCors = splitAndTrim(ctx.GlobalString(GraphQLCORSDomainFlag.Name)) + } + if ctx.GlobalIsSet(GraphQLVirtualHostsFlag.Name) { + cfg.GraphQLVirtualHosts = splitAndTrim(ctx.GlobalString(GraphQLVirtualHostsFlag.Name)) + } +} + +// setWS creates the WebSocket RPC listener interface string from the set +// command line flags, returning empty if the HTTP endpoint is disabled. +func setWS(ctx *cli.Context, cfg *node.Config) { + if ctx.GlobalBool(WSEnabledFlag.Name) && cfg.WSHost == "" { + cfg.WSHost = "127.0.0.1" + if ctx.GlobalIsSet(WSListenAddrFlag.Name) { + cfg.WSHost = ctx.GlobalString(WSListenAddrFlag.Name) + } + } + if ctx.GlobalIsSet(WSPortFlag.Name) { + cfg.WSPort = ctx.GlobalInt(WSPortFlag.Name) + } + if ctx.GlobalIsSet(WSAllowedOriginsFlag.Name) { + cfg.WSOrigins = splitAndTrim(ctx.GlobalString(WSAllowedOriginsFlag.Name)) + } + if ctx.GlobalIsSet(WSApiFlag.Name) { + cfg.WSModules = splitAndTrim(ctx.GlobalString(WSApiFlag.Name)) + } +} + +// setIPC creates an IPC path configuration from the set command line flags, +// returning an empty string if IPC was explicitly disabled, or the set path. +func setIPC(ctx *cli.Context, cfg *node.Config) { + CheckExclusive(ctx, IPCDisabledFlag, IPCPathFlag) + switch { + case ctx.GlobalBool(IPCDisabledFlag.Name): + cfg.IPCPath = "" + case ctx.GlobalIsSet(IPCPathFlag.Name): + cfg.IPCPath = ctx.GlobalString(IPCPathFlag.Name) + } +} + +// setLes configures the les server and ultra light client settings from the command line flags. 
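// A hedged sketch of the node.Config fields that setHTTP/setWS above populate,
// mirroring roughly "geth --rpc --rpcaddr 0.0.0.0 --rpcport 8545 --rpcapi eth,net --ws".
// The concrete values are illustrative assumptions; only the field names come from node.Config.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/node"
)

func main() {
	cfg := node.DefaultConfig
	cfg.HTTPHost = "0.0.0.0"                 // --rpcaddr (only honoured when --rpc is given)
	cfg.HTTPPort = 8545                      // --rpcport
	cfg.HTTPModules = []string{"eth", "net"} // --rpcapi, split on commas
	cfg.WSHost = "127.0.0.1"                 // what --ws alone selects in setWS
	fmt.Println(cfg.HTTPEndpoint(), cfg.WSEndpoint())
}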
+func setLes(ctx *cli.Context, cfg *eth.Config) {
+	if ctx.GlobalIsSet(LightLegacyServFlag.Name) {
+		cfg.LightServ = ctx.GlobalInt(LightLegacyServFlag.Name)
+	}
+	if ctx.GlobalIsSet(LightServeFlag.Name) {
+		cfg.LightServ = ctx.GlobalInt(LightServeFlag.Name)
+	}
+	if ctx.GlobalIsSet(LightIngressFlag.Name) {
+		cfg.LightIngress = ctx.GlobalInt(LightIngressFlag.Name)
+	}
+	if ctx.GlobalIsSet(LightEgressFlag.Name) {
+		cfg.LightEgress = ctx.GlobalInt(LightEgressFlag.Name)
+	}
+	if ctx.GlobalIsSet(LightLegacyPeersFlag.Name) {
+		cfg.LightPeers = ctx.GlobalInt(LightLegacyPeersFlag.Name)
+	}
+	if ctx.GlobalIsSet(LightMaxPeersFlag.Name) {
+		cfg.LightPeers = ctx.GlobalInt(LightMaxPeersFlag.Name)
+	}
+	if ctx.GlobalIsSet(UltraLightServersFlag.Name) {
+		cfg.UltraLightServers = strings.Split(ctx.GlobalString(UltraLightServersFlag.Name), ",")
+	}
+	if ctx.GlobalIsSet(UltraLightFractionFlag.Name) {
+		cfg.UltraLightFraction = ctx.GlobalInt(UltraLightFractionFlag.Name)
+	}
+	if cfg.UltraLightFraction <= 0 || cfg.UltraLightFraction > 100 {
+		log.Error("Ultra light fraction is invalid", "had", cfg.UltraLightFraction, "updated", eth.DefaultConfig.UltraLightFraction)
+		cfg.UltraLightFraction = eth.DefaultConfig.UltraLightFraction
+	}
+	if ctx.GlobalIsSet(UltraLightOnlyAnnounceFlag.Name) {
+		cfg.UltraLightOnlyAnnounce = ctx.GlobalBool(UltraLightOnlyAnnounceFlag.Name)
+	}
+}
+
+// makeDatabaseHandles raises the number of allowed file handles per process
+// for Geth and returns half of the allowance to assign to the database.
+func makeDatabaseHandles() int {
+	limit, err := fdlimit.Maximum()
+	if err != nil {
+		Fatalf("Failed to retrieve file descriptor allowance: %v", err)
+	}
+	raised, err := fdlimit.Raise(uint64(limit))
+	if err != nil {
+		Fatalf("Failed to raise file descriptor allowance: %v", err)
+	}
+	return int(raised / 2) // Leave half for networking and other stuff
+}
+
+// MakeAddress converts an account specified directly as a hex encoded string or
+// a key index in the key store to an internal account representation.
+func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error) {
+	// If the specified account is a valid address, return it
+	if common.IsHexAddress(account) {
+		return accounts.Account{Address: common.HexToAddress(account)}, nil
+	}
+	// Otherwise try to interpret the account as a keystore index
+	index, err := strconv.Atoi(account)
+	if err != nil || index < 0 {
+		return accounts.Account{}, fmt.Errorf("invalid account address or index %q", account)
+	}
+	log.Warn("-------------------------------------------------------------------")
+	log.Warn("Referring to accounts by order in the keystore folder is dangerous!")
+	log.Warn("This functionality is deprecated and will be removed in the future!")
+	log.Warn("Please use explicit addresses! (can search via `geth account list`)")
+	log.Warn("-------------------------------------------------------------------")
+
+	accs := ks.Accounts()
+	if len(accs) <= index {
+		return accounts.Account{}, fmt.Errorf("index %d higher than number of accounts %d", index, len(accs))
+	}
+	return accs[index], nil
+}
+
+// setEtherbase retrieves the etherbase either from the directly specified
+// command line flags or from the keystore if CLI indexed.
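// Quick illustration of the two account formats MakeAddress above accepts:
// a 20-byte hex address is used directly, anything else is tried as a keystore index.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	fmt.Println(common.IsHexAddress("0x0000000000000000000000000000000000000001")) // true: direct address
	fmt.Println(common.IsHexAddress("3"))                                           // false: falls back to index lookup
}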
+func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) { + // Extract the current etherbase, new flag overriding legacy one + var etherbase string + if ctx.GlobalIsSet(MinerLegacyEtherbaseFlag.Name) { + etherbase = ctx.GlobalString(MinerLegacyEtherbaseFlag.Name) + } + if ctx.GlobalIsSet(MinerEtherbaseFlag.Name) { + etherbase = ctx.GlobalString(MinerEtherbaseFlag.Name) + } + // Convert the etherbase into an address and configure it + if etherbase != "" { + if ks != nil { + account, err := MakeAddress(ks, etherbase) + if err != nil { + Fatalf("Invalid miner etherbase: %v", err) + } + cfg.Miner.Etherbase = account.Address + } else { + Fatalf("No etherbase configured") + } + } +} + +// MakePasswordList reads password lines from the file specified by the global --password flag. +func MakePasswordList(ctx *cli.Context) []string { + path := ctx.GlobalString(PasswordFileFlag.Name) + if path == "" { + return nil + } + text, err := ioutil.ReadFile(path) + if err != nil { + Fatalf("Failed to read password file: %v", err) + } + lines := strings.Split(string(text), "\n") + // Sanitise DOS line endings. + for i := range lines { + lines[i] = strings.TrimRight(lines[i], "\r") + } + return lines +} + +func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { + setNodeKey(ctx, cfg) + setNAT(ctx, cfg) + setListenAddress(ctx, cfg) + setBootstrapNodes(ctx, cfg) + setBootstrapNodesV5(ctx, cfg) + + lightClient := ctx.GlobalString(SyncModeFlag.Name) == "light" + lightServer := (ctx.GlobalInt(LightLegacyServFlag.Name) != 0 || ctx.GlobalInt(LightServeFlag.Name) != 0) + + lightPeers := ctx.GlobalInt(LightLegacyPeersFlag.Name) + if ctx.GlobalIsSet(LightMaxPeersFlag.Name) { + lightPeers = ctx.GlobalInt(LightMaxPeersFlag.Name) + } + if ctx.GlobalIsSet(MaxPeersFlag.Name) { + cfg.MaxPeers = ctx.GlobalInt(MaxPeersFlag.Name) + if lightServer && !ctx.GlobalIsSet(LightLegacyPeersFlag.Name) && !ctx.GlobalIsSet(LightMaxPeersFlag.Name) { + cfg.MaxPeers += lightPeers + } + } else { + if lightServer { + cfg.MaxPeers += lightPeers + } + if lightClient && (ctx.GlobalIsSet(LightLegacyPeersFlag.Name) || ctx.GlobalIsSet(LightMaxPeersFlag.Name)) && cfg.MaxPeers < lightPeers { + cfg.MaxPeers = lightPeers + } + } + if !(lightClient || lightServer) { + lightPeers = 0 + } + ethPeers := cfg.MaxPeers - lightPeers + if lightClient { + ethPeers = 0 + } + log.Info("Maximum peer count", "ETH", ethPeers, "LES", lightPeers, "total", cfg.MaxPeers) + + if ctx.GlobalIsSet(MaxPendingPeersFlag.Name) { + cfg.MaxPendingPeers = ctx.GlobalInt(MaxPendingPeersFlag.Name) + } + if ctx.GlobalIsSet(NoDiscoverFlag.Name) || lightClient { + cfg.NoDiscovery = true + } + + // if we're running a light client or server, force enable the v5 peer discovery + // unless it is explicitly disabled with --nodiscover note that explicitly specifying + // --v5disc overrides --nodiscover, in which case the later only disables v4 discovery + forceV5Discovery := (lightClient || lightServer) && !ctx.GlobalBool(NoDiscoverFlag.Name) + if ctx.GlobalIsSet(DiscoveryV5Flag.Name) { + cfg.DiscoveryV5 = ctx.GlobalBool(DiscoveryV5Flag.Name) + } else if forceV5Discovery { + cfg.DiscoveryV5 = true + } + + if netrestrict := ctx.GlobalString(NetrestrictFlag.Name); netrestrict != "" { + list, err := netutil.ParseNetlist(netrestrict) + if err != nil { + Fatalf("Option %q: %v", NetrestrictFlag.Name, err) + } + cfg.NetRestrict = list + } + + if ctx.GlobalBool(DeveloperFlag.Name) { + // --dev mode can't use p2p networking. 
+ cfg.MaxPeers = 0 + cfg.ListenAddr = ":0" + cfg.NoDiscovery = true + cfg.DiscoveryV5 = false + } +} + +// SetNodeConfig applies node-related command line flags to the config. +func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { + SetP2PConfig(ctx, &cfg.P2P) + setIPC(ctx, cfg) + setHTTP(ctx, cfg) + setGraphQL(ctx, cfg) + setWS(ctx, cfg) + setNodeUserIdent(ctx, cfg) + setDataDir(ctx, cfg) + setSmartCard(ctx, cfg) + + if ctx.GlobalIsSet(ExternalSignerFlag.Name) { + cfg.ExternalSigner = ctx.GlobalString(ExternalSignerFlag.Name) + } + + if ctx.GlobalIsSet(KeyStoreDirFlag.Name) { + cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name) + } + if ctx.GlobalIsSet(LightKDFFlag.Name) { + cfg.UseLightweightKDF = ctx.GlobalBool(LightKDFFlag.Name) + } + if ctx.GlobalIsSet(NoUSBFlag.Name) { + cfg.NoUSB = ctx.GlobalBool(NoUSBFlag.Name) + } + if ctx.GlobalIsSet(InsecureUnlockAllowedFlag.Name) { + cfg.InsecureUnlockAllowed = ctx.GlobalBool(InsecureUnlockAllowedFlag.Name) + } +} + +func setSmartCard(ctx *cli.Context, cfg *node.Config) { + // Skip enabling smartcards if no path is set + path := ctx.GlobalString(SmartCardDaemonPathFlag.Name) + if path == "" { + return + } + // Sanity check that the smartcard path is valid + fi, err := os.Stat(path) + if err != nil { + log.Info("Smartcard socket not found, disabling", "err", err) + return + } + if fi.Mode()&os.ModeType != os.ModeSocket { + log.Error("Invalid smartcard daemon path", "path", path, "type", fi.Mode().String()) + return + } + // Smartcard daemon path exists and is a socket, enable it + cfg.SmartCardDaemonPath = path +} + +func setDataDir(ctx *cli.Context, cfg *node.Config) { + switch { + case ctx.GlobalIsSet(DataDirFlag.Name): + cfg.DataDir = ctx.GlobalString(DataDirFlag.Name) + case ctx.GlobalBool(DeveloperFlag.Name): + cfg.DataDir = "" // unless explicitly requested, use memory databases + case ctx.GlobalBool(TestnetFlag.Name) && cfg.DataDir == node.DefaultDataDir(): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet") + case ctx.GlobalBool(RinkebyFlag.Name) && cfg.DataDir == node.DefaultDataDir(): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") + case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir(): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli") + } +} + +func setGPO(ctx *cli.Context, cfg *gasprice.Config) { + if ctx.GlobalIsSet(GpoBlocksFlag.Name) { + cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name) + } + if ctx.GlobalIsSet(GpoPercentileFlag.Name) { + cfg.Percentile = ctx.GlobalInt(GpoPercentileFlag.Name) + } +} + +func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { + if ctx.GlobalIsSet(TxPoolLocalsFlag.Name) { + locals := strings.Split(ctx.GlobalString(TxPoolLocalsFlag.Name), ",") + for _, account := range locals { + if trimmed := strings.TrimSpace(account); !common.IsHexAddress(trimmed) { + Fatalf("Invalid account in --txpool.locals: %s", trimmed) + } else { + cfg.Locals = append(cfg.Locals, common.HexToAddress(account)) + } + } + } + if ctx.GlobalIsSet(TxPoolNoLocalsFlag.Name) { + cfg.NoLocals = ctx.GlobalBool(TxPoolNoLocalsFlag.Name) + } + if ctx.GlobalIsSet(TxPoolJournalFlag.Name) { + cfg.Journal = ctx.GlobalString(TxPoolJournalFlag.Name) + } + if ctx.GlobalIsSet(TxPoolRejournalFlag.Name) { + cfg.Rejournal = ctx.GlobalDuration(TxPoolRejournalFlag.Name) + } + if ctx.GlobalIsSet(TxPoolPriceLimitFlag.Name) { + cfg.PriceLimit = ctx.GlobalUint64(TxPoolPriceLimitFlag.Name) + } + if ctx.GlobalIsSet(TxPoolPriceBumpFlag.Name) { + cfg.PriceBump = 
ctx.GlobalUint64(TxPoolPriceBumpFlag.Name) + } + if ctx.GlobalIsSet(TxPoolAccountSlotsFlag.Name) { + cfg.AccountSlots = ctx.GlobalUint64(TxPoolAccountSlotsFlag.Name) + } + if ctx.GlobalIsSet(TxPoolGlobalSlotsFlag.Name) { + cfg.GlobalSlots = ctx.GlobalUint64(TxPoolGlobalSlotsFlag.Name) + } + if ctx.GlobalIsSet(TxPoolAccountQueueFlag.Name) { + cfg.AccountQueue = ctx.GlobalUint64(TxPoolAccountQueueFlag.Name) + } + if ctx.GlobalIsSet(TxPoolGlobalQueueFlag.Name) { + cfg.GlobalQueue = ctx.GlobalUint64(TxPoolGlobalQueueFlag.Name) + } + if ctx.GlobalIsSet(TxPoolLifetimeFlag.Name) { + cfg.Lifetime = ctx.GlobalDuration(TxPoolLifetimeFlag.Name) + } +} + +func setEthash(ctx *cli.Context, cfg *eth.Config) { + if ctx.GlobalIsSet(EthashCacheDirFlag.Name) { + cfg.Ethash.CacheDir = ctx.GlobalString(EthashCacheDirFlag.Name) + } + if ctx.GlobalIsSet(EthashDatasetDirFlag.Name) { + cfg.Ethash.DatasetDir = ctx.GlobalString(EthashDatasetDirFlag.Name) + } + if ctx.GlobalIsSet(EthashCachesInMemoryFlag.Name) { + cfg.Ethash.CachesInMem = ctx.GlobalInt(EthashCachesInMemoryFlag.Name) + } + if ctx.GlobalIsSet(EthashCachesOnDiskFlag.Name) { + cfg.Ethash.CachesOnDisk = ctx.GlobalInt(EthashCachesOnDiskFlag.Name) + } + if ctx.GlobalIsSet(EthashDatasetsInMemoryFlag.Name) { + cfg.Ethash.DatasetsInMem = ctx.GlobalInt(EthashDatasetsInMemoryFlag.Name) + } + if ctx.GlobalIsSet(EthashDatasetsOnDiskFlag.Name) { + cfg.Ethash.DatasetsOnDisk = ctx.GlobalInt(EthashDatasetsOnDiskFlag.Name) + } +} + +func setMiner(ctx *cli.Context, cfg *miner.Config) { + if ctx.GlobalIsSet(MinerNotifyFlag.Name) { + cfg.Notify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",") + } + if ctx.GlobalIsSet(MinerLegacyExtraDataFlag.Name) { + cfg.ExtraData = []byte(ctx.GlobalString(MinerLegacyExtraDataFlag.Name)) + } + if ctx.GlobalIsSet(MinerExtraDataFlag.Name) { + cfg.ExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name)) + } + if ctx.GlobalIsSet(MinerLegacyGasTargetFlag.Name) { + cfg.GasFloor = ctx.GlobalUint64(MinerLegacyGasTargetFlag.Name) + } + if ctx.GlobalIsSet(MinerGasTargetFlag.Name) { + cfg.GasFloor = ctx.GlobalUint64(MinerGasTargetFlag.Name) + } + if ctx.GlobalIsSet(MinerGasLimitFlag.Name) { + cfg.GasCeil = ctx.GlobalUint64(MinerGasLimitFlag.Name) + } + if ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) { + cfg.GasPrice = GlobalBig(ctx, MinerLegacyGasPriceFlag.Name) + } + if ctx.GlobalIsSet(MinerGasPriceFlag.Name) { + cfg.GasPrice = GlobalBig(ctx, MinerGasPriceFlag.Name) + } + if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) { + cfg.Recommit = ctx.Duration(MinerRecommitIntervalFlag.Name) + } + if ctx.GlobalIsSet(MinerNoVerfiyFlag.Name) { + cfg.Noverify = ctx.Bool(MinerNoVerfiyFlag.Name) + } +} + +func setWhitelist(ctx *cli.Context, cfg *eth.Config) { + whitelist := ctx.GlobalString(WhitelistFlag.Name) + if whitelist == "" { + return + } + cfg.Whitelist = make(map[uint64]common.Hash) + for _, entry := range strings.Split(whitelist, ",") { + parts := strings.Split(entry, "=") + if len(parts) != 2 { + Fatalf("Invalid whitelist entry: %s", entry) + } + number, err := strconv.ParseUint(parts[0], 0, 64) + if err != nil { + Fatalf("Invalid whitelist block number %s: %v", parts[0], err) + } + var hash common.Hash + if err = hash.UnmarshalText([]byte(parts[1])); err != nil { + Fatalf("Invalid whitelist hash %s: %v", parts[1], err) + } + cfg.Whitelist[number] = hash + } +} + +// CheckExclusive verifies that only a single instance of the provided flags was +// set by the user. 
Each flag might optionally be followed by a string type to +// specialize it further. +func CheckExclusive(ctx *cli.Context, args ...interface{}) { + set := make([]string, 0, 1) + for i := 0; i < len(args); i++ { + // Make sure the next argument is a flag and skip if not set + flag, ok := args[i].(cli.Flag) + if !ok { + panic(fmt.Sprintf("invalid argument, not cli.Flag type: %T", args[i])) + } + // Check if next arg extends current and expand its name if so + name := flag.GetName() + + if i+1 < len(args) { + switch option := args[i+1].(type) { + case string: + // Extended flag check, make sure value set doesn't conflict with passed in option + if ctx.GlobalString(flag.GetName()) == option { + name += "=" + option + set = append(set, "--"+name) + } + // shift arguments and continue + i++ + continue + + case cli.Flag: + default: + panic(fmt.Sprintf("invalid argument, not cli.Flag or string extension: %T", args[i+1])) + } + } + // Mark the flag if it's set + if ctx.GlobalIsSet(flag.GetName()) { + set = append(set, "--"+name) + } + } + if len(set) > 1 { + Fatalf("Flags %v can't be used at the same time", strings.Join(set, ", ")) + } +} + +// SetShhConfig applies shh-related command line flags to the config. +func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) { + if ctx.GlobalIsSet(WhisperMaxMessageSizeFlag.Name) { + cfg.MaxMessageSize = uint32(ctx.GlobalUint(WhisperMaxMessageSizeFlag.Name)) + } + if ctx.GlobalIsSet(WhisperMinPOWFlag.Name) { + cfg.MinimumAcceptedPOW = ctx.GlobalFloat64(WhisperMinPOWFlag.Name) + } + if ctx.GlobalIsSet(WhisperRestrictConnectionBetweenLightClientsFlag.Name) { + cfg.RestrictConnectionBetweenLightClients = true + } +} + +// SetEthConfig applies eth-related command line flags to the config. +func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { + // Avoid conflicting network flags + CheckExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag, GoerliFlag) + CheckExclusive(ctx, LightLegacyServFlag, LightServeFlag, SyncModeFlag, "light") + CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer + + var ks *keystore.KeyStore + if keystores := stack.AccountManager().Backends(keystore.KeyStoreType); len(keystores) > 0 { + ks = keystores[0].(*keystore.KeyStore) + } + setEtherbase(ctx, ks, cfg) + setGPO(ctx, &cfg.GPO) + setTxPool(ctx, &cfg.TxPool) + setEthash(ctx, cfg) + setMiner(ctx, &cfg.Miner) + setWhitelist(ctx, cfg) + setLes(ctx, cfg) + + if ctx.GlobalIsSet(SyncModeFlag.Name) { + cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) + } + if ctx.GlobalIsSet(NetworkIdFlag.Name) { + cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name) + } + if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) { + cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100 + } + cfg.DatabaseHandles = makeDatabaseHandles() + if ctx.GlobalIsSet(AncientFlag.Name) { + cfg.DatabaseFreezer = ctx.GlobalString(AncientFlag.Name) + } + + if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { + Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) + } + cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive" + cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name) + + if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) { + cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100 + } + if 
ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) { + cfg.TrieDirtyCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 + } + if ctx.GlobalIsSet(DocRootFlag.Name) { + cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name) + } + if ctx.GlobalIsSet(VMEnableDebugFlag.Name) { + // TODO(fjl): force-enable this in --dev mode + cfg.EnablePreimageRecording = ctx.GlobalBool(VMEnableDebugFlag.Name) + } + + if ctx.GlobalIsSet(EWASMInterpreterFlag.Name) { + cfg.EWASMInterpreter = ctx.GlobalString(EWASMInterpreterFlag.Name) + } + + if ctx.GlobalIsSet(EVMInterpreterFlag.Name) { + cfg.EVMInterpreter = ctx.GlobalString(EVMInterpreterFlag.Name) + } + if ctx.GlobalIsSet(RPCGlobalGasCap.Name) { + cfg.RPCGasCap = new(big.Int).SetUint64(ctx.GlobalUint64(RPCGlobalGasCap.Name)) + } + + // Override any default configs for hard coded networks. + switch { + case ctx.GlobalBool(TestnetFlag.Name): + if !ctx.GlobalIsSet(NetworkIdFlag.Name) { + cfg.NetworkId = 3 + } + cfg.Genesis = core.DefaultTestnetGenesisBlock() + case ctx.GlobalBool(RinkebyFlag.Name): + if !ctx.GlobalIsSet(NetworkIdFlag.Name) { + cfg.NetworkId = 4 + } + cfg.Genesis = core.DefaultRinkebyGenesisBlock() + case ctx.GlobalBool(GoerliFlag.Name): + if !ctx.GlobalIsSet(NetworkIdFlag.Name) { + cfg.NetworkId = 5 + } + cfg.Genesis = core.DefaultGoerliGenesisBlock() + case ctx.GlobalBool(DeveloperFlag.Name): + if !ctx.GlobalIsSet(NetworkIdFlag.Name) { + cfg.NetworkId = 1337 + } + // Create new developer account or reuse existing one + var ( + developer accounts.Account + err error + ) + if accs := ks.Accounts(); len(accs) > 0 { + developer = ks.Accounts()[0] + } else { + developer, err = ks.NewAccount("") + if err != nil { + Fatalf("Failed to create developer account: %v", err) + } + } + if err := ks.Unlock(developer, ""); err != nil { + Fatalf("Failed to unlock developer account: %v", err) + } + log.Info("Using developer account", "address", developer.Address) + + cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address) + if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) { + cfg.Miner.GasPrice = big.NewInt(1) + } + } +} + +// SetDashboardConfig applies dashboard related command line flags to the config. +func SetDashboardConfig(ctx *cli.Context, cfg *dashboard.Config) { + cfg.Host = ctx.GlobalString(DashboardAddrFlag.Name) + cfg.Port = ctx.GlobalInt(DashboardPortFlag.Name) + cfg.Refresh = ctx.GlobalDuration(DashboardRefreshFlag.Name) +} + +// RegisterEthService adds an Ethereum client to the stack. +func RegisterEthService(stack *node.Node, cfg *eth.Config) { + var err error + if cfg.SyncMode == downloader.LightSync { + err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { + return les.New(ctx, cfg) + }) + } else { + err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { + fullNode, err := eth.New(ctx, cfg) + if fullNode != nil && cfg.LightServ > 0 { + ls, _ := les.NewLesServer(fullNode, cfg) + fullNode.AddLesServer(ls) + } + return fullNode, err + }) + } + if err != nil { + Fatalf("Failed to register the Ethereum service: %v", err) + } +} + +// RegisterDashboardService adds a dashboard to the stack. 
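// RegisterEthService above relies on the generic service-registration pattern of
// the node package: a constructor closure handed to stack.Register is invoked when
// the stack starts. A minimal sketch with an invented no-op service (noopService
// is an assumption for illustration, not part of this patch):
package main

import (
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
)

type noopService struct{}

func (s *noopService) Protocols() []p2p.Protocol { return nil }
func (s *noopService) APIs() []rpc.API           { return nil }
func (s *noopService) Start(*p2p.Server) error   { return nil }
func (s *noopService) Stop() error               { return nil }

func main() {
	stack, err := node.New(&node.Config{}) // empty DataDir => in-memory databases
	if err != nil {
		panic(err)
	}
	if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
		return &noopService{}, nil
	}); err != nil {
		panic(err)
	}
}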
+func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config, commit string) { + stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { + return dashboard.New(cfg, commit, ctx.ResolvePath("logs")), nil + }) +} + +// RegisterShhService configures Whisper and adds it to the given node. +func RegisterShhService(stack *node.Node, cfg *whisper.Config) { + if err := stack.Register(func(n *node.ServiceContext) (node.Service, error) { + return whisper.New(cfg), nil + }); err != nil { + Fatalf("Failed to register the Whisper service: %v", err) + } +} + +// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to +// the given node. +func RegisterEthStatsService(stack *node.Node, url string) { + if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { + // Retrieve both eth and les services + var ethServ *eth.Ethereum + ctx.Service(ðServ) + + var lesServ *les.LightEthereum + ctx.Service(&lesServ) + + // Let ethstats use whichever is not nil + return ethstats.New(url, ethServ, lesServ) + }); err != nil { + Fatalf("Failed to register the Ethereum Stats service: %v", err) + } +} + +// RegisterGraphQLService is a utility function to construct a new service and register it against a node. +func RegisterGraphQLService(stack *node.Node, endpoint string, cors, vhosts []string, timeouts rpc.HTTPTimeouts) { + if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { + // Try to construct the GraphQL service backed by a full node + var ethServ *eth.Ethereum + if err := ctx.Service(ðServ); err == nil { + return graphql.New(ethServ.APIBackend, endpoint, cors, vhosts, timeouts) + } + // Try to construct the GraphQL service backed by a light node + var lesServ *les.LightEthereum + if err := ctx.Service(&lesServ); err == nil { + return graphql.New(lesServ.ApiBackend, endpoint, cors, vhosts, timeouts) + } + // Well, this should not have happened, bail out + return nil, errors.New("no Ethereum service") + }); err != nil { + Fatalf("Failed to register the GraphQL service: %v", err) + } +} + +func SetupMetrics(ctx *cli.Context) { + if metrics.Enabled { + log.Info("Enabling metrics collection") + var ( + enableExport = ctx.GlobalBool(MetricsEnableInfluxDBFlag.Name) + endpoint = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name) + database = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name) + username = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name) + password = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name) + ) + + if enableExport { + tagsMap := SplitTagsFlag(ctx.GlobalString(MetricsInfluxDBTagsFlag.Name)) + + log.Info("Enabling metrics export to InfluxDB") + + go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", tagsMap) + } + } +} + +func SplitTagsFlag(tagsFlag string) map[string]string { + tags := strings.Split(tagsFlag, ",") + tagsMap := map[string]string{} + + for _, t := range tags { + if t != "" { + kv := strings.Split(t, "=") + + if len(kv) == 2 { + tagsMap[kv[0]] = kv[1] + } + } + } + + return tagsMap +} + +// MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails. 
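// Usage sketch for SplitTagsFlag above, assuming this file lives in the cmd/utils
// package imported elsewhere in this patch (github.com/Determinant/coreth/cmd/utils):
package main

import (
	"fmt"

	"github.com/Determinant/coreth/cmd/utils"
)

func main() {
	tags := utils.SplitTagsFlag("host=node-1,region=eu")
	fmt.Println(tags["host"], tags["region"]) // node-1 eu
}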
+func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { + var ( + cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100 + handles = makeDatabaseHandles() + ) + name := "chaindata" + if ctx.GlobalString(SyncModeFlag.Name) == "light" { + name = "lightchaindata" + } + chainDb, err := stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "") + if err != nil { + Fatalf("Could not open database: %v", err) + } + return chainDb +} + +func MakeGenesis(ctx *cli.Context) *core.Genesis { + var genesis *core.Genesis + switch { + case ctx.GlobalBool(TestnetFlag.Name): + genesis = core.DefaultTestnetGenesisBlock() + case ctx.GlobalBool(RinkebyFlag.Name): + genesis = core.DefaultRinkebyGenesisBlock() + case ctx.GlobalBool(GoerliFlag.Name): + genesis = core.DefaultGoerliGenesisBlock() + case ctx.GlobalBool(DeveloperFlag.Name): + Fatalf("Developer chains are ephemeral") + } + return genesis +} + +// MakeChain creates a chain manager from set command line flags. +func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) { + var err error + chainDb = MakeChainDatabase(ctx, stack) + config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx)) + if err != nil { + Fatalf("%v", err) + } + var engine consensus.Engine + if config.Clique != nil { + engine = clique.New(config.Clique, chainDb) + } else { + engine = ethash.NewFaker() + if !ctx.GlobalBool(FakePoWFlag.Name) { + engine = ethash.New(ethash.Config{ + CacheDir: stack.ResolvePath(eth.DefaultConfig.Ethash.CacheDir), + CachesInMem: eth.DefaultConfig.Ethash.CachesInMem, + CachesOnDisk: eth.DefaultConfig.Ethash.CachesOnDisk, + DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir), + DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem, + DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk, + }, nil, false) + } + } + if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { + Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) + } + cache := &core.CacheConfig{ + TrieCleanLimit: eth.DefaultConfig.TrieCleanCache, + TrieCleanNoPrefetch: ctx.GlobalBool(CacheNoPrefetchFlag.Name), + TrieDirtyLimit: eth.DefaultConfig.TrieDirtyCache, + TrieDirtyDisabled: ctx.GlobalString(GCModeFlag.Name) == "archive", + TrieTimeLimit: eth.DefaultConfig.TrieTimeout, + } + if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) { + cache.TrieCleanLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100 + } + if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) { + cache.TrieDirtyLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 + } + vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)} + chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil) + if err != nil { + Fatalf("Can't create BlockChain: %v", err) + } + return chain, chainDb +} + +// MakeConsolePreloads retrieves the absolute paths for the console JavaScript +// scripts to preload before starting. 
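// The cache sizing in MakeChainDatabase and MakeChain above is plain percentage
// arithmetic over the --cache budget; a small sketch with assumed example values:
package main

import "fmt"

func main() {
	total := 4096                                  // --cache (MB)
	fmt.Println("database:", total*50/100, "MB")   // --cache.database 50 -> 2048
	fmt.Println("trie clean:", total*25/100, "MB") // --cache.trie 25 -> 1024
	fmt.Println("trie dirty:", total*25/100, "MB") // --cache.gc 25 -> 1024
}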
+func MakeConsolePreloads(ctx *cli.Context) []string {
+	// Skip preloading if there's nothing to preload
+	if ctx.GlobalString(PreloadJSFlag.Name) == "" {
+		return nil
+	}
+	// Otherwise resolve absolute paths and return them
+	var preloads []string
+
+	assets := ctx.GlobalString(JSpathFlag.Name)
+	for _, file := range strings.Split(ctx.GlobalString(PreloadJSFlag.Name), ",") {
+		preloads = append(preloads, common.AbsolutePath(assets, strings.TrimSpace(file)))
+	}
+	return preloads
+}
+
+// MigrateFlags sets the global flag from a local flag when it's set.
+// This is a temporary function used for migrating old command/flags to the
+// new format.
+//
+// e.g. geth account new --keystore /tmp/mykeystore --lightkdf
+//
+// is equivalent after calling this method with:
+//
+// geth --keystore /tmp/mykeystore --lightkdf account new
+//
+// This allows the use of the existing configuration functionality.
+// When all flags are migrated this function can be removed and the existing
+// configuration functionality must be changed so that it uses local flags
+func MigrateFlags(action func(ctx *cli.Context) error) func(*cli.Context) error {
+	return func(ctx *cli.Context) error {
+		for _, name := range ctx.FlagNames() {
+			if ctx.IsSet(name) {
+				ctx.GlobalSet(name, ctx.String(name))
+			}
+		}
+		return action(ctx)
+	}
+}
-- cgit v1.2.3-70-g09d2
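// A hedged sketch of how MigrateFlags is typically wired into a subcommand (the
// command name "demo" and the empty action body are assumptions for illustration;
// the flag and the wrapper come from this patch's cmd/utils package):
package main

import (
	"github.com/Determinant/coreth/cmd/utils"
	cli "gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{{
		Name:  "demo",
		Flags: []cli.Flag{utils.DataDirFlag},
		Action: utils.MigrateFlags(func(ctx *cli.Context) error {
			// After MigrateFlags copies set local flags into the global set,
			// "geth --datadir X demo" and "geth demo --datadir X" read the same value.
			return nil
		}),
	}}
	_ = app.Run([]string{"geth", "demo"})
}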