author     Determinant <tederminant@gmail.com>    2020-06-28 14:47:41 -0400
committer  Determinant <tederminant@gmail.com>    2020-06-28 14:47:41 -0400
commit     d235e2c6a5788ec4a6cff15a16f56b38a3876a0d (patch)
tree       5f2727f7a50ee5840f889c82776d3a30a88dd59b /core
parent     13ebd8bd9468e9d769d598b0ca2afb72ba78cb97 (diff)
...
Diffstat (limited to 'core')
-rw-r--r--  core/block_validator.go | 8
-rw-r--r--  core/blockchain.go | 12
-rw-r--r--  core/blockchain_insert.go | 2
-rw-r--r--  core/bloombits/doc.go | 18
-rw-r--r--  core/bloombits/generator.go | 93
-rw-r--r--  core/bloombits/matcher.go | 650
-rw-r--r--  core/bloombits/scheduler.go | 181
-rw-r--r--  core/chain_indexer.go | 4
-rw-r--r--  core/events.go | 2
-rw-r--r--  core/evm.go | 60
-rw-r--r--  core/genesis.go | 156
-rw-r--r--  core/headerchain.go | 8
-rw-r--r--  core/mkalloc.go | 2
-rw-r--r--  core/rawdb/accessors_chain.go | 560
-rw-r--r--  core/rawdb/accessors_indexes.go | 131
-rw-r--r--  core/rawdb/accessors_metadata.go | 98
-rw-r--r--  core/rawdb/database.go | 355
-rw-r--r--  core/rawdb/freezer.go | 393
-rw-r--r--  core/rawdb/freezer_reinit.go | 127
-rw-r--r--  core/rawdb/freezer_table.go | 637
-rw-r--r--  core/rawdb/schema.go | 166
-rw-r--r--  core/rawdb/table.go | 204
-rw-r--r--  core/state/database.go | 163
-rw-r--r--  core/state/dump.go | 158
-rw-r--r--  core/state/iterator.go | 154
-rw-r--r--  core/state/journal.go | 245
-rw-r--r--  core/state/state_object.go | 499
-rw-r--r--  core/state/statedb.go | 793
-rw-r--r--  core/state/sync.go | 42
-rw-r--r--  core/state_prefetcher.go | 10
-rw-r--r--  core/state_processor.go | 14
-rw-r--r--  core/state_transition.go | 13
-rw-r--r--  core/tx_cacher.go | 2
-rw-r--r--  core/tx_journal.go | 2
-rw-r--r--  core/tx_list.go | 2
-rw-r--r--  core/tx_noncer.go | 2
-rw-r--r--  core/tx_pool.go | 6
-rw-r--r--  core/types.go | 6
-rw-r--r--  core/types/block.go | 419
-rw-r--r--  core/types/bloom9.go | 136
-rw-r--r--  core/types/derive_sha.go | 41
-rw-r--r--  core/types/gen_header_json.go | 138
-rw-r--r--  core/types/gen_log_json.go | 92
-rw-r--r--  core/types/gen_receipt_json.go | 104
-rw-r--r--  core/types/gen_tx_json.go | 101
-rw-r--r--  core/types/log.go | 143
-rw-r--r--  core/types/receipt.go | 336
-rw-r--r--  core/types/transaction.go | 440
-rw-r--r--  core/types/transaction_signing.go | 260
-rw-r--r--  core/vm/analysis.go | 62
-rw-r--r--  core/vm/common.go | 99
-rw-r--r--  core/vm/contract.go | 184
-rw-r--r--  core/vm/contracts.go | 497
-rw-r--r--  core/vm/doc.go | 24
-rw-r--r--  core/vm/eips.go | 92
-rw-r--r--  core/vm/errors.go | 31
-rw-r--r--  core/vm/evm.go | 557
-rw-r--r--  core/vm/gas.go | 53
-rw-r--r--  core/vm/gas_table.go | 441
-rw-r--r--  core/vm/gen_structlog.go | 111
-rw-r--r--  core/vm/instructions.go | 1011
-rw-r--r--  core/vm/int_pool_verifier.go | 31
-rw-r--r--  core/vm/int_pool_verifier_empty.go | 23
-rw-r--r--  core/vm/interface.go | 87
-rw-r--r--  core/vm/interpreter.go | 300
-rw-r--r--  core/vm/intpool.go | 106
-rw-r--r--  core/vm/jump_table.go | 1183
-rw-r--r--  core/vm/logger.go | 256
-rw-r--r--  core/vm/logger_json.go | 87
-rw-r--r--  core/vm/memory.go | 124
-rw-r--r--  core/vm/memory_table.go | 113
-rw-r--r--  core/vm/opcodes.go | 555
-rw-r--r--  core/vm/stack.go | 95
-rw-r--r--  core/vm/stack_table.go | 42
74 files changed, 14289 insertions(+), 63 deletions(-)
diff --git a/core/block_validator.go b/core/block_validator.go
index ae6cd4d..f6cdafe 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -19,10 +19,10 @@ package core
import (
"fmt"
- "github.com/ava-labs/go-ethereum/consensus"
- "github.com/ava-labs/go-ethereum/core/state"
- "github.com/ava-labs/go-ethereum/core/types"
- "github.com/ava-labs/go-ethereum/params"
+ "github.com/ava-labs/coreth/consensus"
+ "github.com/ava-labs/coreth/core/state"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/params"
)
// BlockValidator is responsible for validating block headers, uncles and
diff --git a/core/blockchain.go b/core/blockchain.go
index dddca62..64c2451 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -27,19 +27,19 @@ import (
"sync/atomic"
"time"
+ "github.com/ava-labs/coreth/consensus"
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/state"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/core/vm"
+ "github.com/ava-labs/coreth/params"
"github.com/ava-labs/go-ethereum/common"
"github.com/ava-labs/go-ethereum/common/mclock"
"github.com/ava-labs/go-ethereum/common/prque"
- "github.com/ava-labs/go-ethereum/consensus"
- "github.com/ava-labs/go-ethereum/core/rawdb"
- "github.com/ava-labs/go-ethereum/core/state"
- "github.com/ava-labs/go-ethereum/core/types"
- "github.com/ava-labs/go-ethereum/core/vm"
"github.com/ava-labs/go-ethereum/ethdb"
"github.com/ava-labs/go-ethereum/event"
"github.com/ava-labs/go-ethereum/log"
"github.com/ava-labs/go-ethereum/metrics"
- "github.com/ava-labs/go-ethereum/params"
"github.com/ava-labs/go-ethereum/rlp"
"github.com/ava-labs/go-ethereum/trie"
"github.com/hashicorp/golang-lru"
diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go
index afcbb2b..8991a97 100644
--- a/core/blockchain_insert.go
+++ b/core/blockchain_insert.go
@@ -19,9 +19,9 @@ package core
import (
"time"
+ "github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/go-ethereum/common"
"github.com/ava-labs/go-ethereum/common/mclock"
- "github.com/ava-labs/go-ethereum/core/types"
"github.com/ava-labs/go-ethereum/log"
)
diff --git a/core/bloombits/doc.go b/core/bloombits/doc.go
new file mode 100644
index 0000000..3d159e7
--- /dev/null
+++ b/core/bloombits/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package bloombits implements bloom filtering on batches of data.
+package bloombits
diff --git a/core/bloombits/generator.go b/core/bloombits/generator.go
new file mode 100644
index 0000000..3dd54b6
--- /dev/null
+++ b/core/bloombits/generator.go
@@ -0,0 +1,93 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bloombits
+
+import (
+ "errors"
+
+ "github.com/ava-labs/coreth/core/types"
+)
+
+var (
+ // errSectionOutOfBounds is returned if the user tried to add more bloom filters
+ // to the batch than available space, or if it tries to retrieve above the capacity.
+ errSectionOutOfBounds = errors.New("section out of bounds")
+
+ // errBloomBitOutOfBounds is returned if the user tried to retrieve a
+ // bloom bit above the capacity.
+ errBloomBitOutOfBounds = errors.New("bloom bit out of bounds")
+)
+
+// Generator takes a number of bloom filters and generates the rotated bloom bits
+// to be used for batched filtering.
+type Generator struct {
+ blooms [types.BloomBitLength][]byte // Rotated blooms for per-bit matching
+ sections uint // Number of sections to batch together
+ nextSec uint // Next section to set when adding a bloom
+}
+
+// NewGenerator creates a rotated bloom generator that can iteratively fill a
+// batched bloom filter's bits.
+func NewGenerator(sections uint) (*Generator, error) {
+ if sections%8 != 0 {
+ return nil, errors.New("section count not multiple of 8")
+ }
+ b := &Generator{sections: sections}
+ for i := 0; i < types.BloomBitLength; i++ {
+ b.blooms[i] = make([]byte, sections/8)
+ }
+ return b, nil
+}
+
+// AddBloom takes a single bloom filter and sets the corresponding bit column
+// in memory accordingly.
+func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
+ // Make sure we're not adding more bloom filters than our capacity
+ if b.nextSec >= b.sections {
+ return errSectionOutOfBounds
+ }
+ if b.nextSec != index {
+ return errors.New("bloom filter with unexpected index")
+ }
+ // Rotate the bloom and insert into our collection
+ byteIndex := b.nextSec / 8
+ bitMask := byte(1) << byte(7-b.nextSec%8)
+
+ for i := 0; i < types.BloomBitLength; i++ {
+ bloomByteIndex := types.BloomByteLength - 1 - i/8
+ bloomBitMask := byte(1) << byte(i%8)
+
+ if (bloom[bloomByteIndex] & bloomBitMask) != 0 {
+ b.blooms[i][byteIndex] |= bitMask
+ }
+ }
+ b.nextSec++
+
+ return nil
+}
+
+// Bitset returns the bit vector belonging to the given bit index after all
+// blooms have been added.
+func (b *Generator) Bitset(idx uint) ([]byte, error) {
+ if b.nextSec != b.sections {
+ return nil, errors.New("bloom not fully generated yet")
+ }
+ if idx >= types.BloomBitLength {
+ return nil, errBloomBitOutOfBounds
+ }
+ return b.blooms[idx], nil
+}
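
For reference, a minimal usage sketch of the Generator added above (not part of the diff): it feeds one bloom per block of a section and then reads back the rotated bit vector for a single bloom bit. The 4096-block section size and the github.com/ava-labs/coreth/core/bloombits import path are assumptions, not fixed by this change.

package main

import (
	"fmt"
	"log"

	"github.com/ava-labs/coreth/core/bloombits"
	"github.com/ava-labs/coreth/core/types"
)

func main() {
	const sectionSize = 4096 // assumed section size; must be a multiple of 8

	gen, err := bloombits.NewGenerator(sectionSize)
	if err != nil {
		log.Fatal(err)
	}
	// Pretend the first block of the section has bloom bit 0 set; in practice
	// each bloom would come from a block header.
	var first types.Bloom
	first[types.BloomByteLength-1] = 0x01 // lowest-order bit of the last byte == bloom bit 0

	for i := uint(0); i < sectionSize; i++ {
		bloom := types.Bloom{}
		if i == 0 {
			bloom = first
		}
		if err := gen.AddBloom(i, bloom); err != nil {
			log.Fatal(err)
		}
	}
	// Once every bloom is in, each of the 2048 bloom bits has a 512-byte
	// vector: byte k, bit 7-j holds the value for block 8k+j of the section.
	vector, err := gen.Bitset(0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("bit 0 vector: %d bytes, block 0 set: %v\n",
		len(vector), vector[0]&0x80 != 0)
}
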
diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go
new file mode 100644
index 0000000..fdf296a
--- /dev/null
+++ b/core/bloombits/matcher.go
@@ -0,0 +1,650 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bloombits
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "math"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ava-labs/go-ethereum/common/bitutil"
+ "github.com/ava-labs/go-ethereum/crypto"
+)
+
+// bloomIndexes represents the bit indexes inside the bloom filter that belong
+// to some key.
+type bloomIndexes [3]uint
+
+// calcBloomIndexes returns the bloom filter bit indexes belonging to the given key.
+func calcBloomIndexes(b []byte) bloomIndexes {
+ b = crypto.Keccak256(b)
+
+ var idxs bloomIndexes
+ for i := 0; i < len(idxs); i++ {
+ idxs[i] = (uint(b[2*i])<<8)&2047 + uint(b[2*i+1])
+ }
+ return idxs
+}
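// Worked example (editorial sketch, not part of the diff): calcBloomIndexes
// keeps the low 11 bits of each of the first three big-endian byte pairs of
// the Keccak-256 hash, so every index lands in [0, 2047], i.e. one of the
// types.BloomBitLength (2048) bloom filter bits. If the hash starts with
// bytes 0x12, 0x34, the first index is:
//
//	(uint(0x12)<<8)&2047 + uint(0x34) = 0x200 + 0x34 = 564 (= 0x1234 & 0x7FF)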
+
+// partialMatches with a non-nil vector represents a section in which some sub-
+// matchers have already found potential matches. Subsequent sub-matchers will
+// binary AND their matches with this vector. If vector is nil, it represents a
+// section to be processed by the first sub-matcher.
+type partialMatches struct {
+ section uint64
+ bitset []byte
+}
+
+// Retrieval represents a request for retrieval task assignments for a given
+// bit with the given number of fetch elements, or a response for such a request.
+// It can also have the actual results set to be used as a delivery data struct.
+//
+// The context and error fields are used by the light client to terminate matching
+// early if an error is encountered on some path of the pipeline.
+type Retrieval struct {
+ Bit uint
+ Sections []uint64
+ Bitsets [][]byte
+
+ Context context.Context
+ Error error
+}
+
+// Matcher is a pipelined system of schedulers and logic matchers which perform
+// binary AND/OR operations on the bit-streams, creating a stream of potential
+// blocks to inspect for data content.
+type Matcher struct {
+ sectionSize uint64 // Size of the data batches to filter on
+
+ filters [][]bloomIndexes // Filter the system is matching for
+ schedulers map[uint]*scheduler // Retrieval schedulers for loading bloom bits
+
+ retrievers chan chan uint // Retriever processes waiting for bit allocations
+ counters chan chan uint // Retriever processes waiting for task count reports
+ retrievals chan chan *Retrieval // Retriever processes waiting for task allocations
+ deliveries chan *Retrieval // Retriever processes waiting for task response deliveries
+
+ running uint32 // Atomic flag whether a session is live or not
+}
+
+// NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
+// address and topic filtering on them. Setting a filter component to `nil` is
+// allowed and will result in that filter rule being skipped (OR 0x11...1).
+func NewMatcher(sectionSize uint64, filters [][][]byte) *Matcher {
+ // Create the matcher instance
+ m := &Matcher{
+ sectionSize: sectionSize,
+ schedulers: make(map[uint]*scheduler),
+ retrievers: make(chan chan uint),
+ counters: make(chan chan uint),
+ retrievals: make(chan chan *Retrieval),
+ deliveries: make(chan *Retrieval),
+ }
+ // Calculate the bloom bit indexes for the groups we're interested in
+ m.filters = nil
+
+ for _, filter := range filters {
+ // Gather the bit indexes of the filter rule, special casing the nil filter
+ if len(filter) == 0 {
+ continue
+ }
+ bloomBits := make([]bloomIndexes, len(filter))
+ for i, clause := range filter {
+ if clause == nil {
+ bloomBits = nil
+ break
+ }
+ bloomBits[i] = calcBloomIndexes(clause)
+ }
+ // Accumulate the filter rules if no nil rule was within
+ if bloomBits != nil {
+ m.filters = append(m.filters, bloomBits)
+ }
+ }
+ // For every bit, create a scheduler to load/download the bit vectors
+ for _, bloomIndexLists := range m.filters {
+ for _, bloomIndexList := range bloomIndexLists {
+ for _, bloomIndex := range bloomIndexList {
+ m.addScheduler(bloomIndex)
+ }
+ }
+ }
+ return m
+}
+
+// addScheduler adds a bit stream retrieval scheduler for the given bit index if
+// one does not already exist. If the bit is already selected for filtering, the
+// existing scheduler can be used.
+func (m *Matcher) addScheduler(idx uint) {
+ if _, ok := m.schedulers[idx]; ok {
+ return
+ }
+ m.schedulers[idx] = newScheduler(idx)
+}
+
+// Start starts the matching process and returns a stream of bloom matches in
+// a given range of blocks. If there are no more matches in the range, the result
+// channel is closed.
+func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uint64) (*MatcherSession, error) {
+ // Make sure we're not creating concurrent sessions
+ if atomic.SwapUint32(&m.running, 1) == 1 {
+ return nil, errors.New("matcher already running")
+ }
+ defer atomic.StoreUint32(&m.running, 0)
+
+ // Initiate a new matching round
+ session := &MatcherSession{
+ matcher: m,
+ quit: make(chan struct{}),
+ kill: make(chan struct{}),
+ ctx: ctx,
+ }
+ for _, scheduler := range m.schedulers {
+ scheduler.reset()
+ }
+ sink := m.run(begin, end, cap(results), session)
+
+ // Read the output from the result sink and deliver to the user
+ session.pend.Add(1)
+ go func() {
+ defer session.pend.Done()
+ defer close(results)
+
+ for {
+ select {
+ case <-session.quit:
+ return
+
+ case res, ok := <-sink:
+ // New match result found
+ if !ok {
+ return
+ }
+ // Calculate the first and last blocks of the section
+ sectionStart := res.section * m.sectionSize
+
+ first := sectionStart
+ if begin > first {
+ first = begin
+ }
+ last := sectionStart + m.sectionSize - 1
+ if end < last {
+ last = end
+ }
+ // Iterate over all the blocks in the section and return the matching ones
+ for i := first; i <= last; i++ {
+ // Skip the entire byte if no matches are found inside (and we're processing an entire byte!)
+ next := res.bitset[(i-sectionStart)/8]
+ if next == 0 {
+ if i%8 == 0 {
+ i += 7
+ }
+ continue
+ }
+ // Some bit is set, do the actual submatching
+ if bit := 7 - i%8; next&(1<<bit) != 0 {
+ select {
+ case <-session.quit:
+ return
+ case results <- i:
+ }
+ }
+ }
+ }
+ }
+ }()
+ return session, nil
+}
+
+// run creates a daisy-chain of sub-matchers, one for the address set and one
+// for each topic set, each sub-matcher receiving a section only if the previous
+// ones have all found a potential match in one of the blocks of the section,
+// then binary AND-ing its own matches and forwarding the result to the next one.
+//
+// The method starts feeding the section indexes into the first sub-matcher on a
+// new goroutine and returns a sink channel receiving the results.
+func (m *Matcher) run(begin, end uint64, buffer int, session *MatcherSession) chan *partialMatches {
+// Create the source channel and feed section indexes into it
+ source := make(chan *partialMatches, buffer)
+
+ session.pend.Add(1)
+ go func() {
+ defer session.pend.Done()
+ defer close(source)
+
+ for i := begin / m.sectionSize; i <= end/m.sectionSize; i++ {
+ select {
+ case <-session.quit:
+ return
+ case source <- &partialMatches{i, bytes.Repeat([]byte{0xff}, int(m.sectionSize/8))}:
+ }
+ }
+ }()
+ // Assemble the daisy-chained filtering pipeline
+ next := source
+ dist := make(chan *request, buffer)
+
+ for _, bloom := range m.filters {
+ next = m.subMatch(next, dist, bloom, session)
+ }
+ // Start the request distribution
+ session.pend.Add(1)
+ go m.distributor(dist, session)
+
+ return next
+}
+
+// subMatch creates a sub-matcher that filters for a set of addresses or topics, binary OR-s those matches, then
+// binary AND-s the result to the daisy-chain input (source) and forwards it to the daisy-chain output.
+// The matches of each address/topic are calculated by fetching the given sections of the three bloom bit indexes belonging to
+// that address/topic, and binary AND-ing those vectors together.
+func (m *Matcher) subMatch(source chan *partialMatches, dist chan *request, bloom []bloomIndexes, session *MatcherSession) chan *partialMatches {
+ // Start the concurrent schedulers for each bit required by the bloom filter
+ sectionSources := make([][3]chan uint64, len(bloom))
+ sectionSinks := make([][3]chan []byte, len(bloom))