Diffstat (limited to 'plugin/evm')
-rw-r--r--  plugin/evm/block.go            5
-rw-r--r--  plugin/evm/error.go            2
-rw-r--r--  plugin/evm/export_tx_test.go   4
-rw-r--r--  plugin/evm/import_tx_test.go   8
-rw-r--r--  plugin/evm/service.go          5
-rw-r--r--  plugin/evm/vm.go             181
-rw-r--r--  plugin/evm/vm_test.go        400
7 files changed, 525 insertions(+), 80 deletions(-)
diff --git a/plugin/evm/block.go b/plugin/evm/block.go
index 7c23c17..5a0d377 100644
--- a/plugin/evm/block.go
+++ b/plugin/evm/block.go
@@ -75,6 +75,11 @@ func (b *Block) Parent() snowman.Block {
return &missing.Block{BlkID: parentID}
}
+// Height implements the snowman.Block interface
+func (b *Block) Height() uint64 {
+ return b.ethBlock.Number().Uint64()
+}
+
// Verify implements the snowman.Block interface
func (b *Block) Verify() error {
// Only enforce a minimum fee when bootstrapping has finished
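Review note on block.go: Height() simply surfaces the embedded ethBlock's number, completing *Block's implementation of snowman.Block. A compile-time conformance assertion (hypothetical, not part of this change; it could sit anywhere in package evm) is a cheap guard against future signature drift:

// Hypothetical compile-time check, using the snowman package block.go already imports.
// Fails to compile if *Block ever stops satisfying snowman.Block.
var _ snowman.Block = (*Block)(nil)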
diff --git a/plugin/evm/error.go b/plugin/evm/error.go
index 0554349..d65d28b 100644
--- a/plugin/evm/error.go
+++ b/plugin/evm/error.go
@@ -3,7 +3,7 @@
package evm
-// TxError provides the ability for errors to be distinguished as permenant or
+// TxError provides the ability for errors to be distinguished as permanent or
// temporary
type TxError interface {
error
diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go
index 6fdf3a2..c1ddcda 100644
--- a/plugin/evm/export_tx_test.go
+++ b/plugin/evm/export_tx_test.go
@@ -126,7 +126,3 @@ func TestExportTxVerify(t *testing.T) {
t.Fatal("ExportTx should have failed verification due to invalid output")
}
}
-
-func TestExportTxSemanticVerify(t *testing.T) {
-
-}
diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go
index 973802a..139aa4e 100644
--- a/plugin/evm/import_tx_test.go
+++ b/plugin/evm/import_tx_test.go
@@ -178,7 +178,7 @@ func TestImportTxSemanticVerify(t *testing.T) {
Outs: []EVMOutput{evmOutput},
}
- state, err := vm.chain.BlockState(vm.lastAccepted.ethBlock)
+ state, err := vm.chain.CurrentState()
if err != nil {
t.Fatalf("Failed to get last accepted stateDB due to: %s", err)
}
@@ -291,6 +291,12 @@ func TestImportTxSemanticVerify(t *testing.T) {
func TestNewImportTx(t *testing.T) {
_, vm, _, sharedMemory := GenesisVM(t, true)
+ defer func() {
+ if err := vm.Shutdown(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
importAmount := uint64(1000000)
utxoID := avax.UTXOID{
TxID: ids.ID{
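Review note on import_tx_test.go: the deferred Shutdown runs even when the test aborts through t.Fatal, since t.Fatal exits via runtime.Goexit and deferred calls still execute. Where Go 1.14+ is available, t.Cleanup expresses the same teardown next to the setup; a hypothetical wrapper around the existing GenesisVM helper:

// Hypothetical convenience wrapper; GenesisVM and VM are from this package.
func genesisVMWithCleanup(t *testing.T) *VM {
	_, vm, _, _ := GenesisVM(t, true)
	t.Cleanup(func() {
		// Report rather than abort during teardown.
		if err := vm.Shutdown(); err != nil {
			t.Error(err)
		}
	})
	return vm
}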
diff --git a/plugin/evm/service.go b/plugin/evm/service.go
index 2bb06df..8c9e16c 100644
--- a/plugin/evm/service.go
+++ b/plugin/evm/service.go
@@ -24,7 +24,7 @@ import (
)
const (
- version = "coreth-v0.3.15"
+ version = "coreth-v0.3.17"
)
// test constants
@@ -330,7 +330,6 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply
return fmt.Errorf("number of addresses given, %d, exceeds maximum, %d", len(args.Addresses), maxGetUTXOsAddrs)
}
- sourceChain := ids.ID{}
if args.SourceChain == "" {
return errNoSourceChain
}
@@ -339,7 +338,7 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply
if err != nil {
return fmt.Errorf("problem parsing source chainID %q: %w", args.SourceChain, err)
}
- sourceChain = chainID
+ sourceChain := chainID
addrSet := ids.ShortSet{}
for _, addrStr := range args.Addresses {
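Review note on service.go: besides the version bump, GetUTXOs now declares sourceChain at its first use instead of pre-declaring a zero ids.ID and assigning later, removing the window in which the variable exists but holds a meaningless empty value. A minimal standalone sketch of the pattern (all names illustrative, not from the codebase):

package main

import "fmt"

// lookupChainID stands in for the real source-chain resolution.
func lookupChainID(name string) (string, error) {
	if name == "" {
		return "", fmt.Errorf("no source chain given")
	}
	return "id-of-" + name, nil
}

func sourceChainOf(arg string) (string, error) {
	chainID, err := lookupChainID(arg)
	if err != nil {
		return "", fmt.Errorf("problem parsing source chainID %q: %w", arg, err)
	}
	sourceChain := chainID // declared at first use; no zero-value window
	return sourceChain, nil
}

func main() {
	fmt.Println(sourceChainOf("X"))
}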
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 58ab600..9335b51 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -69,19 +69,15 @@ const (
maxBlockTime = 1000 * time.Millisecond
batchSize = 250
maxUTXOsToFetch = 1024
- blockCacheSize = 1 << 10 // 1024
+ blockCacheSize = 1024
codecVersion = uint16(0)
)
const (
- bdTimerStateMin = iota
- bdTimerStateMax
- bdTimerStateLong
+ txFee = units.MilliAvax
)
var (
- txFee = units.MilliAvax
-
errEmptyBlock = errors.New("empty block")
errCreateBlock = errors.New("couldn't create block")
errUnknownBlock = errors.New("unknown block")
@@ -103,6 +99,17 @@ var (
errInvalidNonce = errors.New("invalid nonce")
)
+// mayBuildBlockStatus denotes whether the engine should be notified
+// that a block should be built, or whether more time has to pass
+// before doing so. See VM's [mayBuildBlock].
+type mayBuildBlockStatus uint8
+
+const (
+ waitToBuild mayBuildBlockStatus = iota
+ conditionalWaitToBuild
+ mayBuild
+)
+
func maxDuration(x, y time.Duration) time.Duration {
if x > y {
return x
@@ -145,37 +152,53 @@ type VM struct {
CLIConfig CommandLineConfig
- chainID *big.Int
- networkID uint64
- genesisHash common.Hash
- chain *coreth.ETHChain
- chaindb Database
- newBlockChan chan *Block
- networkChan chan<- commonEng.Message
- newMinedBlockSub *event.TypeMuxSubscription
+ chainID *big.Int
+ networkID uint64
+ genesisHash common.Hash
+ chain *coreth.ETHChain
+ chaindb Database
+ newBlockChan chan *Block
+ // A message is sent on this channel when a new block
+ // is ready to be built. This notifies the consensus engine.
+ notifyBuildBlockChan chan<- commonEng.Message
+ newMinedBlockSub *event.TypeMuxSubscription
acceptedDB database.Database
- txPoolStabilizedHead common.Hash
- txPoolStabilizedOk chan struct{}
- txPoolStabilizedLock sync.Mutex
- txPoolStabilizedShutdownChan chan struct{}
+ txPoolStabilizedLock sync.Mutex
+ txPoolStabilizedHead common.Hash
+ txPoolStabilizedOk chan struct{}
metalock sync.Mutex
blockCache, blockStatusCache cache.LRU
lastAccepted *Block
writingMetadata uint32
- bdlock sync.Mutex
- blockDelayTimer *timer.Timer
- bdTimerState int8
- bdGenWaitFlag bool
- bdGenFlag bool
+ // [buildBlockLock] must be held when accessing [mayBuildBlock],
+ // [tryToBuildBlock] or [awaitingBuildBlock].
+ buildBlockLock sync.Mutex
+ // [buildBlockTimer] periodically fires in order to update [mayBuildBlock]
+ // and to try to build a block, if applicable.
+ buildBlockTimer *timer.Timer
+ // [mayBuildBlock] == [waitToBuild] means that the next block may be built
+ // only after more time has elapsed.
+ // [mayBuildBlock] == [conditionalWaitToBuild] means that the next block may be
+ // built only if it has at least [batchSize] txs in it. Otherwise, wait until
+ // more time has elapsed.
+ // [mayBuildBlock] == [mayBuild] means that the next block may be built
+ // at any time.
+ mayBuildBlock mayBuildBlockStatus
+ // If true, try to notify the engine that a block should be built.
+ // The engine may not be notified if [mayBuildBlock] says to wait.
+ tryToBuildBlock bool
+ // If true, the engine has been notified that it should build a block
+ // but has not done so yet. If this is the case, wait until it has
+ // built a block before notifying it again.
+ awaitingBuildBlock bool
genlock sync.Mutex
txSubmitChan <-chan struct{}
atomicTxSubmitChan chan struct{}
- shutdownSubmitChan chan struct{}
baseCodec codec.Codec
codec codec.Manager
clock timer.Clock
@@ -183,7 +206,8 @@ type VM struct {
pendingAtomicTxs chan *Tx
blockAtomicInputCache cache.LRU
- shutdownWg sync.WaitGroup
+ shutdownChan chan struct{}
+ shutdownWg sync.WaitGroup
fx secp256k1fx.Fx
}
@@ -232,6 +256,7 @@ func (vm *VM) Initialize(
return errUnsupportedFXs
}
+ vm.shutdownChan = make(chan struct{}, 1)
vm.ctx = ctx
vm.chaindb = Database{db}
g := new(core.Genesis)
@@ -329,32 +354,35 @@ func (vm *VM) Initialize(
vm.blockStatusCache = cache.LRU{Size: blockCacheSize}
vm.blockAtomicInputCache = cache.LRU{Size: blockCacheSize}
vm.newBlockChan = make(chan *Block)
- vm.networkChan = toEngine
- vm.blockDelayTimer = timer.NewTimer(func() {
- vm.bdlock.Lock()
- switch vm.bdTimerState {
- case bdTimerStateMin:
- vm.bdTimerState = bdTimerStateMax
- vm.blockDelayTimer.SetTimeoutIn(maxDuration(maxBlockTime-minBlockTime, 0))
- case bdTimerStateMax:
- vm.bdTimerState = bdTimerStateLong
+ vm.notifyBuildBlockChan = toEngine
+
+ // Periodically updates [vm.mayBuildBlock] and tries to notify the engine to build
+ // a new block, if applicable.
+ vm.buildBlockTimer = timer.NewTimer(func() {
+ vm.buildBlockLock.Lock()
+ switch vm.mayBuildBlock {
+ case waitToBuild:
+ // Some time has passed. Allow block to be built if it has enough txs in it.
+ vm.mayBuildBlock = conditionalWaitToBuild
+ vm.buildBlockTimer.SetTimeoutIn(maxDuration(maxBlockTime-minBlockTime, 0))
+ case conditionalWaitToBuild:
+ // More time has passed. Allow block to be built regardless of tx count.
+ vm.mayBuildBlock = mayBuild
}
- tryAgain := vm.bdGenWaitFlag
- vm.bdlock.Unlock()
- if tryAgain {
+ tryBuildBlock := vm.tryToBuildBlock
+ vm.buildBlockLock.Unlock()
+ if tryBuildBlock {
vm.tryBlockGen()
}
})
- go ctx.Log.RecoverAndPanic(vm.blockDelayTimer.Dispatch)
+ go ctx.Log.RecoverAndPanic(vm.buildBlockTimer.Dispatch)
- vm.bdTimerState = bdTimerStateLong
- vm.bdGenWaitFlag = true
+ vm.mayBuildBlock = mayBuild
+ vm.tryToBuildBlock = true
vm.txPoolStabilizedOk = make(chan struct{}, 1)
- vm.txPoolStabilizedShutdownChan = make(chan struct{}, 1) // Signal goroutine to shutdown
// TODO: read size from options
vm.pendingAtomicTxs = make(chan *Tx, 1024)
vm.atomicTxSubmitChan = make(chan struct{}, 1)
- vm.shutdownSubmitChan = make(chan struct{}, 1)
vm.newMinedBlockSub = vm.chain.SubscribeNewMinedBlockEvent()
vm.shutdownWg.Add(1)
go ctx.Log.RecoverAndPanic(vm.awaitTxPoolStabilized)
@@ -401,7 +429,10 @@ func (vm *VM) Bootstrapping() error { return vm.fx.Bootstrapping() }
// Bootstrapped notifies this VM that the consensus engine has finished
// bootstrapping
-func (vm *VM) Bootstrapped() error { return vm.fx.Bootstrapped() }
+func (vm *VM) Bootstrapped() error {
+ vm.ctx.Bootstrapped()
+ return vm.fx.Bootstrapped()
+}
// Shutdown implements the snowman.ChainVM interface
func (vm *VM) Shutdown() error {
@@ -410,8 +441,8 @@ func (vm *VM) Shutdown() error {
}
vm.writeBackMetadata()
- close(vm.txPoolStabilizedShutdownChan)
- close(vm.shutdownSubmitChan)
+ vm.buildBlockTimer.Stop()
+ close(vm.shutdownChan)
vm.chain.Stop()
vm.shutdownWg.Wait()
return nil
@@ -421,18 +452,20 @@ func (vm *VM) Shutdown() error {
func (vm *VM) BuildBlock() (snowman.Block, error) {
vm.chain.GenBlock()
block := <-vm.newBlockChan
+
+ vm.buildBlockLock.Lock()
+ // Specify that we should wait before trying to build another block.
+ vm.mayBuildBlock = waitToBuild
+ vm.tryToBuildBlock = false
+ vm.awaitingBuildBlock = false
+ vm.buildBlockTimer.SetTimeoutIn(minBlockTime)
+ vm.buildBlockLock.Unlock()
+
if block == nil {
return nil, errCreateBlock
}
- // reset the min block time timer
- vm.bdlock.Lock()
- vm.bdTimerState = bdTimerStateMin
- vm.bdGenWaitFlag = false
- vm.bdGenFlag = false
- vm.blockDelayTimer.SetTimeoutIn(minBlockTime)
- vm.bdlock.Unlock()
- log.Debug(fmt.Sprintf("built block %s", block.ID()))
+ log.Debug(fmt.Sprintf("Built block %s", block.ID()))
// make sure Tx Pool is updated
<-vm.txPoolStabilizedOk
return block, nil
@@ -573,13 +606,14 @@ func (vm *VM) updateStatus(blockID ids.ID, status choices.Status) {
}
func (vm *VM) tryBlockGen() error {
- vm.bdlock.Lock()
- defer vm.bdlock.Unlock()
- if vm.bdGenFlag {
- // skip if one call already generates a block in this round
+ vm.buildBlockLock.Lock()
+ defer vm.buildBlockLock.Unlock()
+ if vm.awaitingBuildBlock {
+ // We notified the engine that a block should be built but it hasn't
+ // done so yet. Wait until it has done so before notifying again.
return nil
}
- vm.bdGenWaitFlag = true
+ vm.tryToBuildBlock = true
vm.genlock.Lock()
defer vm.genlock.Unlock()
@@ -592,20 +626,21 @@ func (vm *VM) tryBlockGen() error {
return nil
}
- switch vm.bdTimerState {
- case bdTimerStateMin:
+ switch vm.mayBuildBlock {
+ case waitToBuild: // Wait longer before notifying the engine to build a block
return nil
- case bdTimerStateMax:
+ case conditionalWaitToBuild: // Notify engine only if there are enough pending txs
if size < batchSize {
return nil
}
- case bdTimerStateLong:
- // timeout; go ahead and generate a new block anyway
+ case mayBuild: // Notify engine
+ default:
+ panic(fmt.Sprintf("mayBuildBlock has unexpected value %d", vm.mayBuildBlock))
}
select {
- case vm.networkChan <- commonEng.PendingTxs:
- // successfully push out the notification; this round ends
- vm.bdGenFlag = true
+ case vm.notifyBuildBlockChan <- commonEng.PendingTxs:
+ // Notify engine to build a block
+ vm.awaitingBuildBlock = true
default:
return errBlockFrequency
}
@@ -734,12 +769,18 @@ func (vm *VM) writeBackMetadata() {
// awaitTxPoolStabilized waits for a txPoolHead channel event
// and notifies the VM when the tx pool has stabilized to the
// expected block hash
-// Waits for signal to shutdown from txPoolStabilizedShutdownChan chan
+// Waits for signal to shutdown from [vm.shutdownChan]
func (vm *VM) awaitTxPoolStabilized() {
defer vm.shutdownWg.Done()
for {
select {
- case e := <-vm.newMinedBlockSub.Chan():
+ case e, ok := <-vm.newMinedBlockSub.Chan():
+ if !ok {
+ return
+ }
+ if e == nil {
+ continue
+ }
switch h := e.Data.(type) {
case core.NewMinedBlockEvent:
vm.txPoolStabilizedLock.Lock()
@@ -750,7 +791,7 @@ func (vm *VM) awaitTxPoolStabilized() {
vm.txPoolStabilizedLock.Unlock()
default:
}
- case <-vm.txPoolStabilizedShutdownChan:
+ case <-vm.shutdownChan:
return
}
}
@@ -769,7 +810,7 @@ func (vm *VM) awaitSubmittedTxs() {
vm.tryBlockGen()
case <-time.After(5 * time.Second):
vm.tryBlockGen()
- case <-vm.shutdownSubmitChan:
+ case <-vm.shutdownChan:
return
}
}
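Review note on vm.go: the renamed fields make the block-production throttle legible as a three-state machine. BuildBlock resets the state to waitToBuild and arms buildBlockTimer with minBlockTime; each timer firing relaxes the state one step (waitToBuild → conditionalWaitToBuild → mayBuild); tryBlockGen consults the state, and in the conditional state the pending-tx count against batchSize, before sending commonEng.PendingTxs to the engine, while awaitingBuildBlock suppresses duplicate notifications until the engine actually builds. The two old shutdown channels are also collapsed into the single shutdownChan that both worker loops now select on. A self-contained sketch of the throttle scheme, simplified, with an assumed minBlockTime value since the diff does not show that constant:

package main

import (
	"fmt"
	"sync"
	"time"
)

type mayBuildBlockStatus uint8

const (
	waitToBuild mayBuildBlockStatus = iota
	conditionalWaitToBuild
	mayBuild
)

const (
	minBlockTime = 250 * time.Millisecond // assumed value; not shown in the diff
	maxBlockTime = 1000 * time.Millisecond
	batchSize    = 250
)

// throttle condenses the VM fields guarded by [buildBlockLock] in the diff.
type throttle struct {
	mu       sync.Mutex
	state    mayBuildBlockStatus
	awaiting bool          // engine notified, block not yet built
	notify   chan struct{} // stand-in for notifyBuildBlockChan
}

// onTimer mirrors the buildBlockTimer callback: each firing relaxes the
// throttle by one step, then retries notification.
func (th *throttle) onTimer() {
	th.mu.Lock()
	switch th.state {
	case waitToBuild:
		th.state = conditionalWaitToBuild
		time.AfterFunc(maxBlockTime-minBlockTime, th.onTimer)
	case conditionalWaitToBuild:
		th.state = mayBuild
	}
	th.mu.Unlock()
	th.tryNotify(0)
}

// tryNotify mirrors tryBlockGen: notify the engine only when the state
// (and, in the conditional state, the pending-tx count) permits it, and
// never while a previous notification is still outstanding.
func (th *throttle) tryNotify(pendingTxs int) {
	th.mu.Lock()
	defer th.mu.Unlock()
	if th.awaiting {
		return // engine already notified; wait for it to build
	}
	switch th.state {
	case waitToBuild:
		return
	case conditionalWaitToBuild:
		if pendingTxs < batchSize {
			return
		}
	case mayBuild:
	}
	select {
	case th.notify <- struct{}{}:
		th.awaiting = true
	default: // the real code returns errBlockFrequency here
	}
}

// onBuildBlock mirrors the reset at the top of BuildBlock: tighten the
// throttle again and re-arm the timer.
func (th *throttle) onBuildBlock() {
	th.mu.Lock()
	defer th.mu.Unlock()
	th.state = waitToBuild
	th.awaiting = false
	time.AfterFunc(minBlockTime, th.onTimer)
}

func main() {
	th := &throttle{state: mayBuild, notify: make(chan struct{}, 1)}
	th.tryNotify(1)   // mayBuild: engine is notified immediately
	<-th.notify       // engine wakes up ...
	th.onBuildBlock() // ... builds a block; throttle resets to waitToBuild
	fmt.Println("next notification waits for the timer")
}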
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go
index 0e9c102..dbceb1b 100644
--- a/plugin/evm/vm_test.go
+++ b/plugin/evm/vm_test.go
@@ -4,20 +4,30 @@
package evm
import (
+ "crypto/rand"
"encoding/json"
+ "math/big"
"testing"
+ "time"
"github.com/ava-labs/avalanchego/api/keystore"
+ "github.com/ava-labs/avalanchego/cache"
"github.com/ava-labs/avalanchego/chains/atomic"
"github.com/ava-labs/avalanchego/database/memdb"
"github.com/ava-labs/avalanchego/database/prefixdb"
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/snow"
+ "github.com/ava-labs/avalanchego/snow/choices"
engCommon "github.com/ava-labs/avalanchego/snow/engine/common"
"github.com/ava-labs/avalanchego/utils/crypto"
"github.com/ava-labs/avalanchego/utils/formatting"
"github.com/ava-labs/avalanchego/utils/logging"
+ "github.com/ava-labs/avalanchego/vms/components/avax"
+ "github.com/ava-labs/avalanchego/vms/secp256k1fx"
+ "github.com/ava-labs/coreth"
"github.com/ava-labs/coreth/core"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/params"
"github.com/ethereum/go-ethereum/common"
)
@@ -130,9 +140,397 @@ func GenesisVM(t *testing.T, finishBootstrapping bool) (chan engCommon.Message,
t.Fatal(err)
}
+ if finishBootstrapping {
+ if err := vm.Bootstrapping(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := vm.Bootstrapped(); err != nil {
+ t.Fatal(err)
+ }
+ }
+
return issuer, vm, genesisBytes, m
}
func TestVMGenesis(t *testing.T) {
- _, _, _, _ = GenesisVM(t, true)
+ _, vm, _, _ := GenesisVM(t, true)
+
+ shutdownChan := make(chan error, 1)
+ shutdownFunc := func() {
+ err := vm.Shutdown()
+ shutdownChan <- err
+ }
+
+ go shutdownFunc()
+ shutdownTimeout := 10 * time.Millisecond
+ ticker := time.NewTicker(shutdownTimeout)
+ select {
+ case <-ticker.C:
+ t.Fatalf("VM shutdown took longer than timeout: %v", shutdownTimeout)
+ case err := <-shutdownChan:
+ if err != nil {
+ t.Fatalf("Shutdown errored: %s", err)
+ }
+ }
+}
+
+func TestIssueAtomicTxs(t *testing.T) {
+ issuer, vm, _, sharedMemory := GenesisVM(t, true)
+
+ defer func() {
+ if err := vm.Shutdown(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ importAmount := uint64(10000000)
+ utxoID := avax.UTXOID{
+ TxID: ids.ID{
+ 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee,
+ 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec,
+ 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea,
+ 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8,
+ },
+ }
+
+ utxo := &avax.UTXO{
+ UTXOID: utxoID,
+ Asset: avax.Asset{ID: vm.ctx.AVAXAssetID},
+ Out: &secp256k1fx.TransferOutput{
+ Amt: importAmount,
+ OutputOwners: secp256k1fx.OutputOwners{
+ Threshold: 1,
+ Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()},
+ },
+ },
+ }
+ utxoBytes, err := vm.codec.Marshal(codecVersion, utxo)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID)
+ inputID := utxo.InputID()
+ if err := xChainSharedMemory.Put(vm.ctx.ChainID, []*atomic.Element{{
+ Key: inputID[:],
+ Value: utxoBytes,
+ Traits: [][]byte{
+ testKeys[0].PublicKey().Address().Bytes(),
+ },
+ }}); err != nil {
+ t.Fatal(err)
+ }
+
+ importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], []*crypto.PrivateKeySECP256K1R{testKeys[0]})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := vm.issueTx(importTx); err != nil {
+ t.Fatal(err)
+ }
+
+ <-issuer
+
+ blk, err := vm.BuildBlock()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := blk.Verify(); err != nil {
+ t.Fatal(err)
+ }
+
+ if status := blk.Status(); status != choices.Processing {
+ t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status)
+ }
+
+ if err := blk.Accept(); err != nil {
+ t.Fatal(err)
+ }
+
+ if status := blk.Status(); status != choices.Accepted {
+ t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status)
+ }
+
+ lastAcceptedID := vm.LastAccepted()
+ if lastAcceptedID != blk.ID() {
+ t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID)
+ }
+
+ exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, importAmount-vm.txFee-1, vm.ctx.XChainID, testShortIDAddrs[0], []*crypto.PrivateKeySECP256K1R{testKeys[0]})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := vm.issueTx(exportTx); err != nil {
+ t.Fatal(err)
+ }
+
+ <-issuer
+
+ blk2, err := vm.BuildBlock()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := blk2.Verify(); err != nil {
+ t.Fatal(err)
+ }
+
+ if status := blk2.Status(); status != choices.Processing {
+ t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status)
+ }
+
+ if err := blk2.Accept(); err != nil {
+ t.Fatal(err)
+ }
+
+ if status := blk2.Status(); status != choices.Accepted {
+ t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status)
+ }
+
+ lastAcceptedID = vm.LastAccepted()
+ if lastAcceptedID != blk2.ID() {
+ t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID)
+ }
+}
+
+func TestBuildEthTxBlock(t *testing.T) {
+ issuer, vm, _, sharedMemory := GenesisVM(t, true)
+
+ defer func() {
+ if err := vm.Shutdown(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ key, err := coreth.NewKey(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ importAmount := uint64(10000000)
+ utxoID := avax.UTXOID{
+ TxID: ids.ID{
+ 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee,
+ 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec,
+ 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea,
+ 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8,
+ },
+ }
+
+ utxo := &avax.UTXO{
+ UTXOID: utxoID,
+ Asset: avax.Asset{ID: vm.ctx.AVAXAssetID},
+ Out: &secp256k1fx.TransferOutput{
+ Amt: importAmount,
+ OutputOwners: secp256k1fx.OutputOwners{
+ Threshold: 1,
+ Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()},
+ },
+ },
+ }
+ utxoBytes, err := vm.codec.Marshal(codecVersion, utxo)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID)
+ inputID := utxo.InputID()
+ if err := xChainSharedMemory.Put(vm.ctx.ChainID, []*atomic.Element{{
+ Key: inputID[:],
+ Value: utxoBytes,
+ Traits: [][]byte{
+ testKeys[0].PublicKey().Address().Bytes(),
+ },
+ }}); err != nil {
+ t.Fatal(err)
+ }
+
+ importTx, err := vm.newImportTx(vm.ctx.XChainID, key.Address, []*crypto.PrivateKeySECP256K1R{testKeys[0]})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := vm.issueTx(importTx); err != nil {
+ t.Fatal(err)
+ }
+
+ <-issuer
+
+ blk, err := vm.BuildBlock()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := blk.Verify(); err != nil {
+ t.Fatal(err)
+ }
+
+ if status := blk.Status(); status != choices.Processing {
+ t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status)
+ }
+
+ if err := blk.Accept(); err != nil {
+ t.Fatal(err)
+ }
+
+ txs := make([]*types.Transaction, 10)
+ for i := 0; i < 10; i++ {
+ tx := types.NewTransaction(uint64(i), key.Address, big.NewInt(10), 21000, params.MinGasPrice, nil)
+ signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), key.PrivateKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ txs[i] = signedTx
+ }
+ errs := vm.chain.AddRemoteTxs(txs)
+ for i, err := range errs {
+ if err != nil {
+ t.Fatalf("Failed to add tx at index %d: %s", i, err)
+ }
+ }
+
+ <-issuer
+
+ blk, err = vm.BuildBlock()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := blk.Verify(); err != nil {
+ t.Fatal(err)
+ }
+
+ if status := blk.Status(); status != choices.Processing {
+ t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status)
+ }
+
+ if err := blk.Accept(); err != nil {
+ t.Fatal(err)
+ }
+
+ if status := blk.Status(); status != choices.Accepted {
+ t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status)
+ }
+
+ lastAcceptedID := vm.LastAccepted()
+ if lastAcceptedID != blk.ID() {
+ t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID)
+ }
+}
+
+func TestConflictingImportTxs(t *testing.T) {
+ issuer, vm, _, sharedMemory := GenesisVM(t, true)
+
+ defer func() {
+ if err := vm.Shutdown(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ conflictKey, err := coreth.NewKey(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID)
+ importTxs := make([]*Tx, 0, 3)
+ conflictTxs := make([]*Tx, 0, 3)
+ for i, key := range testKeys {
+ importAmount := uint64(10000000)
+ utxoID := avax.UTXOID{
+ TxID: ids.ID{byte(i)},
+ }
+
+ utxo := &avax.UTXO{
+ UTXOID: utxoID,
+ Asset: avax.Asset{ID: vm.ctx.AVAXAssetID},
+ Out: &secp256k1fx.TransferOutput{
+ Amt: importAmount,
+ OutputOwners: secp256k1fx.OutputOwners{
+ Threshold: 1,
+ Addrs: []ids.ShortID{key.PublicKey().Address()},
+ },
+ },
+ }
+ utxoBytes, err := vm.codec.Marshal(codecVersion, utxo)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ inputID := utxo.InputID()
+ if err := xChainSharedMemory.Put(vm.ctx.ChainID, []*atomic.Element{{
+ Key: inputID[:],
+ Value: utxoBytes,
+ Traits: [][]byte{
+ key.PublicKey().Address().Bytes(),
+ },
+ }}); err != nil {
+ t.Fatal(err)
+ }
+
+ importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[i], []*crypto.PrivateKeySECP256K1R{key})
+ if err != nil {
+ t.Fatal(err)
+ }
+ importTxs = append(importTxs, importTx)
+
+ conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictKey.Address, []*crypto.PrivateKeySECP256K1R{key})
+ if err != nil {
+ t.Fatal(err)
+ }
+ conflictTxs = append(conflictTxs, conflictTx)
+ }
+
+ expectedParentBlkID := vm.LastAccepted()
+ for i, tx := range importTxs {
+ if err := vm.issueTx(tx); err != nil {
+ t.Fatal(err)
+ }
+
+ <-issuer
+
+ blk, err := vm.BuildBlock()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := blk.Verify(); err != nil {
+ t.Fatal(err)
+ }
+
+ if status := blk.Status(); status != choices.Processing {
+ t.Fatalf("Expected status of built block %d to be %s, but found %s", i, choices.Processing, status)
+ }
+
+ if parentID := blk.Parent().ID(); parentID != expectedParentBlkID {
+ t.Fatalf("Expected parent to have blockID %s, but found %s", expectedParentBlkID, parentID)
+ }
+
+ expectedParentBlkID = blk.ID()
+ vm.SetPreference(blk.ID())
+ }
+
+ // Shrink the atomic input cache to ensure that
+ // verification handles cache misses correctly.
+ vm.blockAtomicInputCache = cache.LRU{Size: 1}
+
+ for i, tx := range conflictTxs {
+ if err := vm.issueTx(tx); err != nil {
+ t.Fatal(err)
+ }
+
+ <-issuer
+
+ _, err := vm.BuildBlock()
+ // The new block is verified in BuildBlock, so
+ // BuildBlock should fail due to an attempt to
+ // double spend an atomic UTXO.
+ if err == nil {
+ t.Fatalf("Block verification should have failed in BuildBlock %d due to double spending atomic UTXO", i)
+ }
+ }
}
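Review note on vm_test.go: TestIssueAtomicTxs, TestBuildEthTxBlock, and TestConflictingImportTxs each repeat the same shared-memory funding sequence (build a UTXO, codec-marshal it, Put it into the X-chain shared memory keyed by its InputID). A hypothetical helper using only identifiers visible in this diff would factor that out, assuming GenesisVM's fourth return value is *atomic.Memory, as its NewSharedMemory calls suggest:

// Hypothetical test helper; types and calls mirror the tests above.
func addSharedMemoryUTXO(t *testing.T, vm *VM, sharedMemory *atomic.Memory,
	utxoID avax.UTXOID, amount uint64, addr ids.ShortID) {
	utxo := &avax.UTXO{
		UTXOID: utxoID,
		Asset:  avax.Asset{ID: vm.ctx.AVAXAssetID},
		Out: &secp256k1fx.TransferOutput{
			Amt: amount,
			OutputOwners: secp256k1fx.OutputOwners{
				Threshold: 1,
				Addrs:     []ids.ShortID{addr},
			},
		},
	}
	utxoBytes, err := vm.codec.Marshal(codecVersion, utxo)
	if err != nil {
		t.Fatal(err)
	}
	xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID)
	inputID := utxo.InputID()
	if err := xChainSharedMemory.Put(vm.ctx.ChainID, []*atomic.Element{{
		Key:    inputID[:],
		Value:  utxoBytes,
		Traits: [][]byte{addr.Bytes()},
	}}); err != nil {
		t.Fatal(err)
	}
}

Each call site would then reduce to addSharedMemoryUTXO(t, vm, sharedMemory, utxoID, importAmount, testKeys[0].PublicKey().Address()) followed by the newImportTx call.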