diff --git a/config/config.go b/config/config.go index 1ecbe33f33f8..1b0eff0f96a4 100644 --- a/config/config.go +++ b/config/config.go @@ -928,7 +928,8 @@ func getDatabaseConfig(v *viper.Viper, networkID uint32) (node.DatabaseConfig, e } return node.DatabaseConfig{ - Name: v.GetString(DBTypeKey), + Name: v.GetString(DBTypeKey), + ReadOnly: v.GetBool(DBReadOnlyKey), Path: filepath.Join( GetExpandedArg(v, DBPathKey), constants.NetworkName(networkID), diff --git a/config/flags.go b/config/flags.go index 9fce19ae501f..edece3ada4a3 100644 --- a/config/flags.go +++ b/config/flags.go @@ -105,6 +105,7 @@ func addNodeFlags(fs *pflag.FlagSet) { // Database fs.String(DBTypeKey, leveldb.Name, fmt.Sprintf("Database type to use. Must be one of {%s, %s, %s}", leveldb.Name, memdb.Name, pebble.Name)) + fs.Bool(DBReadOnlyKey, false, "If true, database writes are to memory and never persisted. May still initialize database directory/files on disk if they don't exist") fs.String(DBPathKey, defaultDBDir, "Path to database directory") fs.String(DBConfigFileKey, "", fmt.Sprintf("Path to database config file. 
Ignored if %s is specified", DBConfigContentKey)) fs.String(DBConfigContentKey, "", "Specifies base64 encoded database config content") diff --git a/config/keys.go b/config/keys.go index a62e960f16bb..7263cf0a6bdd 100644 --- a/config/keys.go +++ b/config/keys.go @@ -34,6 +34,7 @@ const ( StakeMintingPeriodKey = "stake-minting-period" StakeSupplyCapKey = "stake-supply-cap" DBTypeKey = "db-type" + DBReadOnlyKey = "db-read-only" DBPathKey = "db-dir" DBConfigFileKey = "db-config-file" DBConfigContentKey = "db-config-file-content" diff --git a/database/corruptabledb/db_test.go b/database/corruptabledb/db_test.go index d4c14f782986..b58c65b4b162 100644 --- a/database/corruptabledb/db_test.go +++ b/database/corruptabledb/db_test.go @@ -26,16 +26,21 @@ func TestInterface(t *testing.T) { } } -func FuzzKeyValue(f *testing.F) { +func newDB() *Database { baseDB := memdb.New() - db := New(baseDB) - database.FuzzKeyValue(f, db) + return New(baseDB) +} + +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, newDB()) } func FuzzNewIteratorWithPrefix(f *testing.F) { - baseDB := memdb.New() - db := New(baseDB) - database.FuzzNewIteratorWithPrefix(f, db) + database.FuzzNewIteratorWithPrefix(f, newDB()) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB()) } // TestCorruption tests to make sure corruptabledb wrapper works as expected. @@ -70,9 +75,7 @@ func TestCorruption(t *testing.T) { return err }, } - baseDB := memdb.New() - // wrap this db - corruptableDB := New(baseDB) + corruptableDB := newDB() _ = corruptableDB.handleError(errTest) for name, testFn := range tests { t.Run(name, func(tt *testing.T) { @@ -176,9 +179,7 @@ func TestIterator(t *testing.T) { ctrl := gomock.NewController(t) // Make a database - baseDB := memdb.New() - corruptableDB := New(baseDB) - + corruptableDB := newDB() // Put a key-value pair in the database. 
require.NoError(corruptableDB.Put([]byte{0}, []byte{1})) diff --git a/database/encdb/db_test.go b/database/encdb/db_test.go index 177259f5c7f2..f49dd7ebb28d 100644 --- a/database/encdb/db_test.go +++ b/database/encdb/db_test.go @@ -24,28 +24,30 @@ func TestInterface(t *testing.T) { } } -func FuzzKeyValue(f *testing.F) { +func newDB(t testing.TB) database.Database { unencryptedDB := memdb.New() db, err := New([]byte(testPassword), unencryptedDB) - require.NoError(f, err) - database.FuzzKeyValue(f, db) + require.NoError(t, err) + return db +} + +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, newDB(f)) } func FuzzNewIteratorWithPrefix(f *testing.F) { - unencryptedDB := memdb.New() - db, err := New([]byte(testPassword), unencryptedDB) - require.NoError(f, err) - database.FuzzNewIteratorWithPrefix(f, db) + database.FuzzNewIteratorWithPrefix(f, newDB(f)) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB(f)) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) for _, bench := range database.Benchmarks { - unencryptedDB := memdb.New() - db, err := New([]byte(testPassword), unencryptedDB) - require.NoError(b, err) - bench(b, db, "encdb", keys, values) + bench(b, newDB(b), "encdb", keys, values) } } } diff --git a/database/leveldb/db_test.go b/database/leveldb/db_test.go index bf6bdeac7f27..23733dacaff4 100644 --- a/database/leveldb/db_test.go +++ b/database/leveldb/db_test.go @@ -26,34 +26,39 @@ func TestInterface(t *testing.T) { } } -func FuzzKeyValue(f *testing.F) { - folder := f.TempDir() +func newDB(t testing.TB) database.Database { + folder := t.TempDir() db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(f, err) + require.NoError(t, err) + return db +} +func FuzzKeyValue(f *testing.F) { + db := newDB(f) defer db.Close() database.FuzzKeyValue(f, 
db) } func FuzzNewIteratorWithPrefix(f *testing.F) { - folder := f.TempDir() - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(f, err) - + db := newDB(f) defer db.Close() database.FuzzNewIteratorWithPrefix(f, db) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := newDB(f) + defer db.Close() + + database.FuzzNewIteratorWithStartAndPrefix(f, db) +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) for _, bench := range database.Benchmarks { - folder := b.TempDir() - - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(b, err) + db := newDB(b) bench(b, db, "leveldb", keys, values) diff --git a/database/memdb/db_test.go b/database/memdb/db_test.go index b0497758f5c2..10d8ebe2be25 100644 --- a/database/memdb/db_test.go +++ b/database/memdb/db_test.go @@ -23,6 +23,10 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { database.FuzzNewIteratorWithPrefix(f, New()) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New()) +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/database/meterdb/db_test.go b/database/meterdb/db_test.go index ddd613353946..a54d25c21542 100644 --- a/database/meterdb/db_test.go +++ b/database/meterdb/db_test.go @@ -24,28 +24,30 @@ func TestInterface(t *testing.T) { } } -func FuzzKeyValue(f *testing.F) { +func newDB(t testing.TB) database.Database { baseDB := memdb.New() db, err := New("", prometheus.NewRegistry(), baseDB) - require.NoError(f, err) - database.FuzzKeyValue(f, db) + require.NoError(t, err) + return db +} + +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, newDB(f)) } func FuzzNewIteratorWithPrefix(f *testing.F) { - baseDB := memdb.New() - db, 
err := New("", prometheus.NewRegistry(), baseDB) - require.NoError(f, err) - database.FuzzNewIteratorWithPrefix(f, db) + database.FuzzNewIteratorWithPrefix(f, newDB(f)) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB(f)) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) for _, bench := range database.Benchmarks { - baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) - require.NoError(b, err) - bench(b, db, "meterdb", keys, values) + bench(b, newDB(b), "meterdb", keys, values) } } } diff --git a/database/pebble/db_test.go b/database/pebble/db_test.go index cba67a79a88f..043caa23c813 100644 --- a/database/pebble/db_test.go +++ b/database/pebble/db_test.go @@ -41,6 +41,12 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { _ = db.Close() } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := newDB(f) + database.FuzzNewIteratorWithStartAndPrefix(f, db) + _ = db.Close() +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/database/prefixdb/db_test.go b/database/prefixdb/db_test.go index 065e1d7a00c8..a0539b8e0105 100644 --- a/database/prefixdb/db_test.go +++ b/database/prefixdb/db_test.go @@ -30,6 +30,10 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { database.FuzzNewIteratorWithPrefix(f, New([]byte(""), memdb.New())) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New([]byte(""), memdb.New())) +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/database/rpcdb/db_test.go b/database/rpcdb/db_test.go index 763b95b83f75..baabc9a69fbf 100644 --- 
a/database/rpcdb/db_test.go +++ b/database/rpcdb/db_test.go @@ -75,6 +75,13 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { db.closeFn() } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := setupDB(f) + database.FuzzNewIteratorWithStartAndPrefix(f, db.client) + + db.closeFn() +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/database/test_database.go b/database/test_database.go index 2e68f53341b8..fc7e644ae5fd 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -1266,7 +1266,74 @@ func FuzzNewIteratorWithPrefix(f *testing.F, db Database) { require.Equal(expected[string(iter.Key())], val) numIterElts++ } - require.Equal(len(expectedList), numIterElts) + require.Len(expectedList, numIterElts) + + // Clear the database for the next fuzz iteration. + require.NoError(AtomicClear(db, db)) + }) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F, db Database) { + const ( + maxKeyLen = 32 + maxValueLen = 32 + ) + + f.Fuzz(func( + t *testing.T, + randSeed int64, + start []byte, + prefix []byte, + numKeyValues uint, + ) { + require := require.New(t) + r := rand.New(rand.NewSource(randSeed)) // #nosec G404 + + expected := map[string][]byte{} + + // Put a bunch of key-values + for i := 0; i < int(numKeyValues); i++ { + key := make([]byte, r.Intn(maxKeyLen)) + _, _ = r.Read(key) // #nosec G404 + + value := make([]byte, r.Intn(maxValueLen)) + _, _ = r.Read(value) // #nosec G404 + + if len(value) == 0 { + // Consistently treat zero length values as nil + // so that we can compare [expected] and [got] with + // require.Equal, which treats nil and empty byte + // as being unequal, whereas the database treats + // them as being equal. 
+ value = nil + } + + if bytes.HasPrefix(key, prefix) && bytes.Compare(key, start) >= 0 { + expected[string(key)] = value + } + + require.NoError(db.Put(key, value)) + } + + expectedList := maps.Keys(expected) + slices.Sort(expectedList) + + iter := db.NewIteratorWithStartAndPrefix(start, prefix) + defer iter.Release() + + // Assert the iterator returns the expected key-values. + numIterElts := 0 + for iter.Next() { + val := iter.Value() + if len(val) == 0 { + val = nil + } + keyStr := string(iter.Key()) + require.Equal(expectedList[numIterElts], keyStr) + require.Equal(expected[keyStr], val) + numIterElts++ + } + require.Len(expectedList, numIterElts) // Clear the database for the next fuzz iteration. require.NoError(AtomicClear(db, db)) diff --git a/database/versiondb/db_test.go b/database/versiondb/db_test.go index fdea2934b8d2..c2f57caacf61 100644 --- a/database/versiondb/db_test.go +++ b/database/versiondb/db_test.go @@ -27,6 +27,10 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { database.FuzzNewIteratorWithPrefix(f, New(memdb.New())) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New(memdb.New())) +} + func TestIterate(t *testing.T) { require := require.New(t) diff --git a/ids/test_generator.go b/ids/test_generator.go index 2f8edf3567d8..29ecfa8d409c 100644 --- a/ids/test_generator.go +++ b/ids/test_generator.go @@ -23,3 +23,12 @@ func GenerateTestShortID() ShortID { return ShortID(GenerateTestID()) } func GenerateTestNodeID() NodeID { return NodeID(GenerateTestShortID()) } + +// BuildTestNodeID is a utility to build NodeID from bytes in UTs +// It must not be used in production code. In production code we should +// use ToNodeID, which performs proper length checking. 
+func BuildTestNodeID(src []byte) NodeID { + res := NodeID{} + copy(res[:], src) + return res +} diff --git a/network/peer/set_test.go b/network/peer/set_test.go index f26b1d19f8ec..fb67d25ef05f 100644 --- a/network/peer/set_test.go +++ b/network/peer/set_test.go @@ -18,24 +18,24 @@ func TestSet(t *testing.T) { set := NewSet() peer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 0}, } updatedPeer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 1}, } peer2 := &peer{ - id: ids.NodeID{0x02}, + id: ids.BuildTestNodeID([]byte{0x02}), } unknownPeer := &peer{ - id: ids.NodeID{0xff}, + id: ids.BuildTestNodeID([]byte{0xff}), } peer3 := &peer{ - id: ids.NodeID{0x03}, + id: ids.BuildTestNodeID([]byte{0x03}), } peer4 := &peer{ - id: ids.NodeID{0x04}, + id: ids.BuildTestNodeID([]byte{0x04}), } // add of first peer is handled @@ -105,10 +105,10 @@ func TestSetSample(t *testing.T) { set := NewSet() peer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), } peer2 := &peer{ - id: ids.NodeID{0x02}, + id: ids.BuildTestNodeID([]byte{0x02}), } // Case: Empty diff --git a/network/peer/upgrader.go b/network/peer/upgrader.go index 47a6d0bc7c68..b601ee370947 100644 --- a/network/peer/upgrader.go +++ b/network/peer/upgrader.go @@ -60,12 +60,12 @@ func (t *tlsClientUpgrader) Upgrade(conn net.Conn) (ids.NodeID, net.Conn, *staki func connToIDAndCert(conn *tls.Conn, invalidCerts prometheus.Counter) (ids.NodeID, net.Conn, *staking.Certificate, error) { if err := conn.Handshake(); err != nil { - return ids.NodeID{}, nil, nil, err + return ids.EmptyNodeID, nil, nil, err } state := conn.ConnectionState() if len(state.PeerCertificates) == 0 { - return ids.NodeID{}, nil, nil, errNoCert + return ids.EmptyNodeID, nil, nil, errNoCert } tlsCert := state.PeerCertificates[0] @@ -75,7 +75,7 @@ func 
connToIDAndCert(conn *tls.Conn, invalidCerts prometheus.Counter) (ids.NodeI peerCert, err := staking.ParseCertificate(tlsCert.Raw) if err != nil { invalidCerts.Inc() - return ids.NodeID{}, nil, nil, err + return ids.EmptyNodeID, nil, nil, err } // We validate the certificate here to attempt to make the validity of the @@ -84,7 +84,7 @@ func connToIDAndCert(conn *tls.Conn, invalidCerts prometheus.Counter) (ids.NodeI // healthy. if err := staking.ValidateCertificate(peerCert); err != nil { invalidCerts.Inc() - return ids.NodeID{}, nil, nil, err + return ids.EmptyNodeID, nil, nil, err } nodeID := ids.NodeIDFromCert(peerCert) diff --git a/node/config.go b/node/config.go index f78987822c33..736bf08b74dc 100644 --- a/node/config.go +++ b/node/config.go @@ -131,6 +131,9 @@ type BootstrapConfig struct { } type DatabaseConfig struct { + // If true, all writes are to memory and are discarded at node shutdown. + ReadOnly bool `json:"readOnly"` + // Path to database Path string `json:"path"` diff --git a/node/insecure_validator_manager.go b/node/insecure_validator_manager.go index bd69529619dc..d2cdab94cc89 100644 --- a/node/insecure_validator_manager.go +++ b/node/insecure_validator_manager.go @@ -27,7 +27,7 @@ func (i *insecureValidatorManager) Connected(vdrID ids.NodeID, nodeVersion *vers // peer as a validator. Because each validator needs a txID associated // with it, we hack one together by padding the nodeID with zeroes. 
dummyTxID := ids.Empty - copy(dummyTxID[:], vdrID[:]) + copy(dummyTxID[:], vdrID.Bytes()) err := i.vdrs.AddStaker(constants.PrimaryNetworkID, vdrID, nil, dummyTxID, i.weight) if err != nil { diff --git a/node/node.go b/node/node.go index cad0073899b8..49548e2fc164 100644 --- a/node/node.go +++ b/node/node.go @@ -42,6 +42,7 @@ import ( "github.com/ava-labs/avalanchego/database/meterdb" "github.com/ava-labs/avalanchego/database/pebble" "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/indexer" @@ -303,7 +304,7 @@ func (n *Node) initNetworking() error { // a validator. Because each validator needs a txID associated with it, // we hack one together by just padding our nodeID with zeroes. dummyTxID := ids.Empty - copy(dummyTxID[:], n.ID[:]) + copy(dummyTxID[:], n.ID.Bytes()) err := n.vdrs.AddStaker( constants.PrimaryNetworkID, @@ -531,6 +532,10 @@ func (n *Node) initDatabase() error { ) } + if n.Config.ReadOnly && n.Config.DatabaseConfig.Name != memdb.Name { + n.DB = versiondb.New(n.DB) + } + var err error n.DB, err = meterdb.New("db", n.MetricsRegisterer, n.DB) if err != nil { diff --git a/snow/consensus/snowman/poll/set_test.go b/snow/consensus/snowman/poll/set_test.go index 70830e3da36c..84ed8f7a5c8c 100644 --- a/snow/consensus/snowman/poll/set_test.go +++ b/snow/consensus/snowman/poll/set_test.go @@ -21,11 +21,11 @@ var ( blkID3 = ids.ID{3} blkID4 = ids.ID{4} - vdr1 = ids.NodeID{1} - vdr2 = ids.NodeID{2} - vdr3 = ids.NodeID{3} - vdr4 = ids.NodeID{4} - vdr5 = ids.NodeID{5} + vdr1 = ids.BuildTestNodeID([]byte{0x01}) + vdr2 = ids.BuildTestNodeID([]byte{0x02}) + vdr3 = ids.BuildTestNodeID([]byte{0x03}) + vdr4 = ids.BuildTestNodeID([]byte{0x04}) + vdr5 = ids.BuildTestNodeID([]byte{0x05}) // k = 5 ) func TestNewSetErrorOnPollsMetrics(t *testing.T) { diff --git 
a/snow/engine/common/appsender/appsender_client.go b/snow/engine/common/appsender/appsender_client.go index a816dd68241e..c74616d71006 100644 --- a/snow/engine/common/appsender/appsender_client.go +++ b/snow/engine/common/appsender/appsender_client.go @@ -52,8 +52,7 @@ func (c *Client) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] nodeIDsBytes := make([][]byte, nodeIDs.Len()) i := 0 for nodeID := range nodeIDs { - nodeID := nodeID // Prevent overwrite in next iteration - nodeIDsBytes[i] = nodeID[:] + nodeIDsBytes[i] = nodeID.Bytes() i++ } _, err := c.client.SendAppRequest( @@ -71,7 +70,7 @@ func (c *Client) SendAppResponse(ctx context.Context, nodeID ids.NodeID, request _, err := c.client.SendAppResponse( ctx, &appsenderpb.SendAppResponseMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Response: response, }, @@ -93,8 +92,7 @@ func (c *Client) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids. nodeIDsBytes := make([][]byte, nodeIDs.Len()) i := 0 for nodeID := range nodeIDs { - nodeID := nodeID // Prevent overwrite in next iteration - nodeIDsBytes[i] = nodeID[:] + nodeIDsBytes[i] = nodeID.Bytes() i++ } _, err := c.client.SendAppGossipSpecific( diff --git a/snow/engine/common/requests_test.go b/snow/engine/common/requests_test.go index 00e648dfa90a..73a98e4ccb94 100644 --- a/snow/engine/common/requests_test.go +++ b/snow/engine/common/requests_test.go @@ -30,7 +30,7 @@ func TestRequests(t *testing.T) { _, removed = req.Remove(ids.EmptyNodeID, 1) require.False(removed) - _, removed = req.Remove(ids.NodeID{1}, 0) + _, removed = req.Remove(ids.BuildTestNodeID([]byte{0x01}), 0) require.False(removed) require.True(req.Contains(ids.Empty)) @@ -42,7 +42,7 @@ func TestRequests(t *testing.T) { _, removed = req.Remove(ids.EmptyNodeID, 1) require.False(removed) - _, removed = req.Remove(ids.NodeID{1}, 0) + _, removed = req.Remove(ids.BuildTestNodeID([]byte{0x01}), 0) require.False(removed) 
require.True(req.Contains(ids.Empty)) diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index 620d85b0ba80..81f04d44ba01 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -405,7 +405,7 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { require.NoError(bs.Ancestors(context.Background(), peerID, *requestID+1, [][]byte{blkBytes1})) // respond with wrong request ID require.Equal(oldReqID, *requestID) - require.NoError(bs.Ancestors(context.Background(), ids.NodeID{1, 2, 3}, *requestID, [][]byte{blkBytes1})) // respond from wrong peer + require.NoError(bs.Ancestors(context.Background(), ids.BuildTestNodeID([]byte{1, 2, 3}), *requestID, [][]byte{blkBytes1})) // respond from wrong peer require.Equal(oldReqID, *requestID) require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes0})) // respond with wrong block diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 6355834dcf78..88c02b9a7f3f 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -312,7 +312,7 @@ func TestReliableMessages(t *testing.T) { ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewManager() - require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.NodeID{1}, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.BuildTestNodeID([]byte{1}), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -443,7 +443,7 @@ func TestReliableMessages(t *testing.T) { go func() { for i := 0; i < queriesToSend; i++ { - vdrIDs := set.Of(ids.NodeID{1}) + vdrIDs := set.Of(ids.BuildTestNodeID([]byte{1})) sender.SendPullQuery(context.Background(), vdrIDs, uint32(i), ids.Empty, 0) time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond))) // #nosec G404 diff 
--git a/snow/networking/timeout/manager_test.go b/snow/networking/timeout/manager_test.go index ce412150b1b6..582da3a9ea1b 100644 --- a/snow/networking/timeout/manager_test.go +++ b/snow/networking/timeout/manager_test.go @@ -39,7 +39,7 @@ func TestManagerFire(t *testing.T) { wg.Add(1) manager.RegisterRequest( - ids.NodeID{}, + ids.EmptyNodeID, ids.ID{}, true, ids.RequestID{}, diff --git a/snow/networking/tracker/resource_tracker_test.go b/snow/networking/tracker/resource_tracker_test.go index 4bc78eb4827a..64a897589f90 100644 --- a/snow/networking/tracker/resource_tracker_test.go +++ b/snow/networking/tracker/resource_tracker_test.go @@ -48,8 +48,8 @@ func TestCPUTracker(t *testing.T) { tracker, err := NewResourceTracker(prometheus.NewRegistry(), mockUser, meter.ContinuousFactory{}, time.Second) require.NoError(err) - node1 := ids.NodeID{1} - node2 := ids.NodeID{2} + node1 := ids.BuildTestNodeID([]byte{1}) + node2 := ids.BuildTestNodeID([]byte{2}) // Note that all the durations between start and end are [halflife]. 
startTime1 := time.Now() diff --git a/snow/networking/tracker/targeter_test.go b/snow/networking/tracker/targeter_test.go index 23096adbed28..55974dbf4ac6 100644 --- a/snow/networking/tracker/targeter_test.go +++ b/snow/networking/tracker/targeter_test.go @@ -46,10 +46,10 @@ func TestNewTargeter(t *testing.T) { func TestTarget(t *testing.T) { ctrl := gomock.NewController(t) - vdr := ids.NodeID{1} + vdr := ids.BuildTestNodeID([]byte{1}) vdrWeight := uint64(1) totalVdrWeight := uint64(10) - nonVdr := ids.NodeID{2} + nonVdr := ids.BuildTestNodeID([]byte{2}) vdrs := validators.NewManager() require.NoError(t, vdrs.AddStaker(constants.PrimaryNetworkID, vdr, nil, ids.Empty, 1)) require.NoError(t, vdrs.AddStaker(constants.PrimaryNetworkID, ids.GenerateTestNodeID(), nil, ids.Empty, totalVdrWeight-vdrWeight)) diff --git a/snow/validators/gvalidators/validator_state_server.go b/snow/validators/gvalidators/validator_state_server.go index 5f0dbc7f46c4..ad9b75197947 100644 --- a/snow/validators/gvalidators/validator_state_server.go +++ b/snow/validators/gvalidators/validator_state_server.go @@ -66,7 +66,7 @@ func (s *Server) GetValidatorSet(ctx context.Context, req *pb.GetValidatorSetReq i := 0 for _, vdr := range vdrs { vdrPB := &pb.Validator{ - NodeId: vdr.NodeID[:], + NodeId: vdr.NodeID.Bytes(), Weight: vdr.Weight, } if vdr.PublicKey != nil { diff --git a/snow/validators/manager_test.go b/snow/validators/manager_test.go index 01a84201f91d..f93ab719e18a 100644 --- a/snow/validators/manager_test.go +++ b/snow/validators/manager_test.go @@ -324,9 +324,9 @@ func TestGetMap(t *testing.T) { func TestWeight(t *testing.T) { require := require.New(t) - vdr0 := ids.NodeID{1} + vdr0 := ids.BuildTestNodeID([]byte{1}) weight0 := uint64(93) - vdr1 := ids.NodeID{2} + vdr1 := ids.BuildTestNodeID([]byte{2}) weight1 := uint64(123) m := NewManager() @@ -411,7 +411,7 @@ func TestString(t *testing.T) { func TestAddCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + 
nodeID0 := ids.BuildTestNodeID([]byte{1}) sk0, err := bls.NewSecretKey() require.NoError(err) pk0 := bls.PublicFromSecretKey(sk0) @@ -442,7 +442,7 @@ func TestAddCallback(t *testing.T) { func TestAddWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(1) weight1 := uint64(93) @@ -480,7 +480,7 @@ func TestAddWeightCallback(t *testing.T) { func TestRemoveWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) weight1 := uint64(92) @@ -518,7 +518,7 @@ func TestRemoveWeightCallback(t *testing.T) { func TestValidatorRemovedCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) diff --git a/snow/validators/set_test.go b/snow/validators/set_test.go index 99651e7930e0..0067a520b5a9 100644 --- a/snow/validators/set_test.go +++ b/snow/validators/set_test.go @@ -273,9 +273,9 @@ func TestSetMap(t *testing.T) { func TestSetWeight(t *testing.T) { require := require.New(t) - vdr0 := ids.NodeID{1} + vdr0 := ids.BuildTestNodeID([]byte{1}) weight0 := uint64(93) - vdr1 := ids.NodeID{2} + vdr1 := ids.BuildTestNodeID([]byte{2}) weight1 := uint64(123) s := newSet() @@ -332,10 +332,10 @@ func TestSetString(t *testing.T) { require := require.New(t) nodeID0 := ids.EmptyNodeID - nodeID1 := ids.NodeID{ + nodeID1 := ids.BuildTestNodeID([]byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - } + }) s := newSet() require.NoError(s.Add(nodeID0, nil, ids.Empty, 1)) @@ -385,7 +385,7 @@ func (c *callbackListener) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight func TestSetAddCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 
:= ids.BuildTestNodeID([]byte{1}) sk0, err := bls.NewSecretKey() require.NoError(err) pk0 := bls.PublicFromSecretKey(sk0) @@ -413,7 +413,7 @@ func TestSetAddCallback(t *testing.T) { func TestSetAddWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(1) weight1 := uint64(93) @@ -447,7 +447,7 @@ func TestSetAddWeightCallback(t *testing.T) { func TestSetRemoveWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) weight1 := uint64(92) @@ -481,7 +481,7 @@ func TestSetRemoveWeightCallback(t *testing.T) { func TestSetValidatorRemovedCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) diff --git a/tests/e2e/banff/suites.go b/tests/e2e/banff/suites.go index 5bc071d6e004..37d0aa90156a 100644 --- a/tests/e2e/banff/suites.go +++ b/tests/e2e/banff/suites.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" diff --git a/tests/e2e/c/dynamic_fees.go b/tests/e2e/c/dynamic_fees.go index c8e005621983..8f15b6d43caf 100644 --- a/tests/e2e/c/dynamic_fees.go +++ b/tests/e2e/c/dynamic_fees.go @@ -19,7 +19,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/tests/fixture/testnet" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) diff --git 
a/tests/e2e/c/interchain_workflow.go b/tests/e2e/c/interchain_workflow.go index d4881255ddff..2c9bd198ec39 100644 --- a/tests/e2e/c/interchain_workflow.go +++ b/tests/e2e/c/interchain_workflow.go @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 2e9a86684df0..3245516262d2 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -4,22 +4,13 @@ package e2e_test import ( - "encoding/json" - "flag" - "fmt" - "os" "testing" ginkgo "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" - "github.com/ava-labs/avalanchego/tests/fixture" - "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" // ensure test packages are scanned by ginkgo _ "github.com/ava-labs/avalanchego/tests/e2e/banff" @@ -36,75 +27,18 @@ func TestE2E(t *testing.T) { ginkgo.RunSpecs(t, "e2e test suites") } -var ( - avalancheGoExecPath string - persistentNetworkDir string - usePersistentNetwork bool -) +var flagVars *e2e.FlagVars func init() { - flag.StringVar( - &avalancheGoExecPath, - "avalanchego-path", - os.Getenv(local.AvalancheGoPathEnvName), - fmt.Sprintf("avalanchego executable path (required if not using a persistent network). Also possible to configure via the %s env variable.", local.AvalancheGoPathEnvName), - ) - flag.StringVar( - &persistentNetworkDir, - "network-dir", - "", - fmt.Sprintf("[optional] the dir containing the configuration of a persistent network to target for testing. Useful for speeding up test development. 
Also possible to configure via the %s env variable.", local.NetworkDirEnvName), - ) - flag.BoolVar( - &usePersistentNetwork, - "use-persistent-network", - false, - "[optional] whether to target the persistent network identified by --network-dir.", - ) + flagVars = e2e.RegisterFlags() } var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Run only once in the first ginkgo process - - require := require.New(ginkgo.GinkgoT()) - - if usePersistentNetwork && len(persistentNetworkDir) == 0 { - persistentNetworkDir = os.Getenv(local.NetworkDirEnvName) - } - - // Load or create a test network - var network *local.LocalNetwork - if len(persistentNetworkDir) > 0 { - tests.Outf("{{yellow}}Using a persistent network configured at %s{{/}}\n", persistentNetworkDir) - - var err error - network, err = local.ReadNetwork(persistentNetworkDir) - require.NoError(err) - } else { - network = e2e.StartLocalNetwork(avalancheGoExecPath, e2e.DefaultNetworkDir) - } - - uris := network.GetURIs() - require.NotEmpty(uris, "network contains no nodes") - tests.Outf("{{green}}network URIs: {{/}} %+v\n", uris) - - testDataServerURI, err := fixture.ServeTestData(fixture.TestData{ - FundedKeys: network.FundedKeys, - }) - tests.Outf("{{green}}test data server URI: {{/}} %+v\n", testDataServerURI) - require.NoError(err) - - env := &e2e.TestEnvironment{ - NetworkDir: network.Dir, - URIs: uris, - TestDataServerURI: testDataServerURI, - } - bytes, err := json.Marshal(env) - require.NoError(err) - return bytes + return e2e.NewTestEnvironment(flagVars).Marshal() }, func(envBytes []byte) { // Run in every ginkgo process // Initialize the local test environment from the global state - e2e.InitTestEnvironment(envBytes) + e2e.InitSharedTestEnvironment(envBytes) }) diff --git a/tests/e2e/faultinjection/duplicate_node_id.go b/tests/e2e/faultinjection/duplicate_node_id.go index bcdd05cb35b0..1d865d840f51 100644 --- a/tests/e2e/faultinjection/duplicate_node_id.go +++ 
b/tests/e2e/faultinjection/duplicate_node_id.go @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/tests/fixture/testnet" "github.com/ava-labs/avalanchego/utils/set" ) diff --git a/tests/e2e/p/interchain_workflow.go b/tests/e2e/p/interchain_workflow.go index 9bea416294cb..44a6912715ef 100644 --- a/tests/e2e/p/interchain_workflow.go +++ b/tests/e2e/p/interchain_workflow.go @@ -18,7 +18,7 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/tests/fixture/testnet" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" diff --git a/tests/e2e/p/permissionless_subnets.go b/tests/e2e/p/permissionless_subnets.go index ab2909228365..0521306b9b40 100644 --- a/tests/e2e/p/permissionless_subnets.go +++ b/tests/e2e/p/permissionless_subnets.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" diff --git a/tests/e2e/p/staking_rewards.go b/tests/e2e/p/staking_rewards.go index 09b169dfb8f4..41a77985729b 100644 --- a/tests/e2e/p/staking_rewards.go +++ b/tests/e2e/p/staking_rewards.go @@ -19,7 +19,7 @@ import ( "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + 
"github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/tests/fixture/testnet" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" diff --git a/tests/e2e/p/workflow.go b/tests/e2e/p/workflow.go index c0fda23775fd..3f0440ac49b6 100644 --- a/tests/e2e/p/workflow.go +++ b/tests/e2e/p/workflow.go @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" diff --git a/tests/e2e/static-handlers/suites.go b/tests/e2e/static-handlers/suites.go index 596a79f3afda..52c0f50a9436 100644 --- a/tests/e2e/static-handlers/suites.go +++ b/tests/e2e/static-handlers/suites.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" diff --git a/tests/e2e/x/interchain_workflow.go b/tests/e2e/x/interchain_workflow.go index 373e567a666a..f0c2951feb84 100644 --- a/tests/e2e/x/interchain_workflow.go +++ b/tests/e2e/x/interchain_workflow.go @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index 
119e28b9d1e9..7a1eb1bb6b91 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/components/avax" diff --git a/tests/e2e/describe.go b/tests/fixture/e2e/describe.go similarity index 100% rename from tests/e2e/describe.go rename to tests/fixture/e2e/describe.go diff --git a/tests/fixture/e2e/env.go b/tests/fixture/e2e/env.go new file mode 100644 index 000000000000..54a4676482e1 --- /dev/null +++ b/tests/fixture/e2e/env.go @@ -0,0 +1,138 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package e2e + +import ( + "encoding/json" + "math/rand" + "os" + "path/filepath" + "time" + + ginkgo "github.com/onsi/ginkgo/v2" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture" + "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +// Env is used to access shared test fixture. Intended to be +// initialized from SynchronizedBeforeSuite. 
+var Env *TestEnvironment + +func InitSharedTestEnvironment(envBytes []byte) { + require := require.New(ginkgo.GinkgoT()) + require.Nil(Env, "env already initialized") + Env = &TestEnvironment{ + require: require, + } + require.NoError(json.Unmarshal(envBytes, Env)) +} + +type TestEnvironment struct { + // The directory where the test network configuration is stored + NetworkDir string + // URIs used to access the API endpoints of nodes of the network + URIs []testnet.NodeURI + // The URI used to access the http server that allocates test data + TestDataServerURI string + + require *require.Assertions +} + +func (te *TestEnvironment) Marshal() []byte { + bytes, err := json.Marshal(te) + require.NoError(ginkgo.GinkgoT(), err) + return bytes +} + +// Initialize a new test environment with a shared network (either pre-existing or newly created). +func NewTestEnvironment(flagVars *FlagVars) *TestEnvironment { + require := require.New(ginkgo.GinkgoT()) + + persistentNetworkDir := flagVars.PersistentNetworkDir() + + // Load or create a test network + var network *local.LocalNetwork + if len(persistentNetworkDir) > 0 { + tests.Outf("{{yellow}}Using a persistent network configured at %s{{/}}\n", persistentNetworkDir) + + var err error + network, err = local.ReadNetwork(persistentNetworkDir) + require.NoError(err) + } else { + network = StartLocalNetwork(flagVars.AvalancheGoExecPath(), DefaultNetworkDir) + } + + uris := network.GetURIs() + require.NotEmpty(uris, "network contains no nodes") + tests.Outf("{{green}}network URIs: {{/}} %+v\n", uris) + + testDataServerURI, err := fixture.ServeTestData(fixture.TestData{ + FundedKeys: network.FundedKeys, + }) + tests.Outf("{{green}}test data server URI: {{/}} %+v\n", testDataServerURI) + require.NoError(err) + + return &TestEnvironment{ + NetworkDir: network.Dir, + URIs: uris, + TestDataServerURI: testDataServerURI, + } +} + +// Retrieve a random URI to naively attempt to spread API load across +// nodes. 
+func (te *TestEnvironment) GetRandomNodeURI() testnet.NodeURI { + r := rand.New(rand.NewSource(time.Now().Unix())) //#nosec G404 + nodeURI := te.URIs[r.Intn(len(te.URIs))] + tests.Outf("{{blue}} targeting node %s with URI: %s{{/}}\n", nodeURI.NodeID, nodeURI.URI) + return nodeURI +} + +// Retrieve the network to target for testing. +func (te *TestEnvironment) GetNetwork() testnet.Network { + network, err := local.ReadNetwork(te.NetworkDir) + te.require.NoError(err) + return network +} + +// Retrieve the specified number of funded keys allocated for the caller's exclusive use. +func (te *TestEnvironment) AllocateFundedKeys(count int) []*secp256k1.PrivateKey { + keys, err := fixture.AllocateFundedKeys(te.TestDataServerURI, count) + te.require.NoError(err) + tests.Outf("{{blue}} allocated funded key(s): %+v{{/}}\n", keys) + return keys +} + +// Retrieve a funded key allocated for the caller's exclusive use. +func (te *TestEnvironment) AllocateFundedKey() *secp256k1.PrivateKey { + return te.AllocateFundedKeys(1)[0] +} + +// Create a new keychain with the specified number of test keys. +func (te *TestEnvironment) NewKeychain(count int) *secp256k1fx.Keychain { + keys := te.AllocateFundedKeys(count) + return secp256k1fx.NewKeychain(keys...) +} + +// Create a new private network that is not shared with other tests. +func (te *TestEnvironment) NewPrivateNetwork() testnet.Network { + // Load the shared network to retrieve its path and exec path + sharedNetwork, err := local.ReadNetwork(te.NetworkDir) + te.require.NoError(err) + + // The private networks dir is under the shared network dir to ensure it + // will be included in the artifact uploaded in CI. 
+ privateNetworksDir := filepath.Join(sharedNetwork.Dir, PrivateNetworksDirName) + te.require.NoError(os.MkdirAll(privateNetworksDir, perms.ReadWriteExecute)) + + return StartLocalNetwork(sharedNetwork.ExecPath, privateNetworksDir) +} diff --git a/tests/fixture/e2e/flags.go b/tests/fixture/e2e/flags.go new file mode 100644 index 000000000000..c7838cb7c761 --- /dev/null +++ b/tests/fixture/e2e/flags.go @@ -0,0 +1,57 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package e2e + +import ( + "flag" + "fmt" + "os" + + "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" +) + +type FlagVars struct { + avalancheGoExecPath string + persistentNetworkDir string + usePersistentNetwork bool +} + +func (v *FlagVars) PersistentNetworkDir() string { + if v.usePersistentNetwork && len(v.persistentNetworkDir) == 0 { + return os.Getenv(local.NetworkDirEnvName) + } + return v.persistentNetworkDir +} + +func (v *FlagVars) AvalancheGoExecPath() string { + return v.avalancheGoExecPath +} + +func (v *FlagVars) UsePersistentNetwork() bool { + return v.usePersistentNetwork +} + +func RegisterFlags() *FlagVars { + vars := FlagVars{} + flag.StringVar( + &vars.avalancheGoExecPath, + "avalanchego-path", + os.Getenv(local.AvalancheGoPathEnvName), + fmt.Sprintf("avalanchego executable path (required if not using a persistent network). Also possible to configure via the %s env variable.", local.AvalancheGoPathEnvName), + ) + flag.StringVar( + &vars.persistentNetworkDir, + "network-dir", + "", + fmt.Sprintf("[optional] the dir containing the configuration of a persistent network to target for testing. Useful for speeding up test development. 
Also possible to configure via the %s env variable.", local.NetworkDirEnvName), + ) + flag.BoolVar( + &vars.usePersistentNetwork, + "use-persistent-network", + false, + "[optional] whether to target the persistent network identified by --network-dir.", + ) + + return &vars +} diff --git a/tests/e2e/e2e.go b/tests/fixture/e2e/helpers.go similarity index 73% rename from tests/e2e/e2e.go rename to tests/fixture/e2e/helpers.go index 44c5d911e8dd..cc1e22ae3aa9 100644 --- a/tests/e2e/e2e.go +++ b/tests/fixture/e2e/helpers.go @@ -1,18 +1,14 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -// e2e implements the e2e tests. package e2e import ( "context" - "encoding/json" "errors" "fmt" "math/big" - "math/rand" "os" - "path/filepath" "strings" "time" @@ -26,11 +22,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/fixture" "github.com/ava-labs/avalanchego/tests/fixture/testnet" "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" @@ -68,79 +61,6 @@ const ( PrivateNetworksDirName = "private_networks" ) -// Env is used to access shared test fixture. Intended to be -// initialized by SynchronizedBeforeSuite. 
-var Env *TestEnvironment - -type TestEnvironment struct { - // The directory where the test network configuration is stored - NetworkDir string - // URIs used to access the API endpoints of nodes of the network - URIs []testnet.NodeURI - // The URI used to access the http server that allocates test data - TestDataServerURI string - - require *require.Assertions -} - -func InitTestEnvironment(envBytes []byte) { - require := require.New(ginkgo.GinkgoT()) - require.Nil(Env, "env already initialized") - Env = &TestEnvironment{ - require: require, - } - require.NoError(json.Unmarshal(envBytes, Env)) -} - -// Retrieve a random URI to naively attempt to spread API load across -// nodes. -func (te *TestEnvironment) GetRandomNodeURI() testnet.NodeURI { - r := rand.New(rand.NewSource(time.Now().Unix())) //#nosec G404 - nodeURI := te.URIs[r.Intn(len(te.URIs))] - tests.Outf("{{blue}} targeting node %s with URI: %s{{/}}\n", nodeURI.NodeID, nodeURI.URI) - return nodeURI -} - -// Retrieve the network to target for testing. -func (te *TestEnvironment) GetNetwork() testnet.Network { - network, err := local.ReadNetwork(te.NetworkDir) - te.require.NoError(err) - return network -} - -// Retrieve the specified number of funded keys allocated for the caller's exclusive use. -func (te *TestEnvironment) AllocateFundedKeys(count int) []*secp256k1.PrivateKey { - keys, err := fixture.AllocateFundedKeys(te.TestDataServerURI, count) - te.require.NoError(err) - tests.Outf("{{blue}} allocated funded key(s): %+v{{/}}\n", keys) - return keys -} - -// Retrieve a funded key allocated for the caller's exclusive use. -func (te *TestEnvironment) AllocateFundedKey() *secp256k1.PrivateKey { - return te.AllocateFundedKeys(1)[0] -} - -// Create a new keychain with the specified number of test keys. -func (te *TestEnvironment) NewKeychain(count int) *secp256k1fx.Keychain { - keys := te.AllocateFundedKeys(count) - return secp256k1fx.NewKeychain(keys...) 
-} - -// Create a new private network that is not shared with other tests. -func (te *TestEnvironment) NewPrivateNetwork() testnet.Network { - // Load the shared network to retrieve its path and exec path - sharedNetwork, err := local.ReadNetwork(te.NetworkDir) - te.require.NoError(err) - - // The private networks dir is under the shared network dir to ensure it - // will be included in the artifact uploaded in CI. - privateNetworksDir := filepath.Join(sharedNetwork.Dir, PrivateNetworksDirName) - te.require.NoError(os.MkdirAll(privateNetworksDir, perms.ReadWriteExecute)) - - return StartLocalNetwork(sharedNetwork.ExecPath, privateNetworksDir) -} - // Create a new wallet for the provided keychain against the specified node URI. func NewWallet(keychain *secp256k1fx.Keychain, nodeURI testnet.NodeURI) primary.Wallet { tests.Outf("{{blue}} initializing a new wallet for node %s with URI: %s {{/}}\n", nodeURI.NodeID, nodeURI.URI) diff --git a/tests/upgrade/upgrade_test.go b/tests/upgrade/upgrade_test.go index 8a633fd6244a..b8f8147c831a 100644 --- a/tests/upgrade/upgrade_test.go +++ b/tests/upgrade/upgrade_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/config" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" ) func TestUpgrade(t *testing.T) { diff --git a/utils/beacon/set_test.go b/utils/beacon/set_test.go index 2dc240404988..3f4d6cbc4053 100644 --- a/utils/beacon/set_test.go +++ b/utils/beacon/set_test.go @@ -16,9 +16,9 @@ import ( func TestSet(t *testing.T) { require := require.New(t) - id0 := ids.NodeID{0} - id1 := ids.NodeID{1} - id2 := ids.NodeID{2} + id0 := ids.BuildTestNodeID([]byte{0}) + id1 := ids.BuildTestNodeID([]byte{1}) + id2 := ids.BuildTestNodeID([]byte{2}) ip0 := ips.IPPort{ IP: net.IPv4zero, diff --git a/utils/linkedhashmap/linkedhashmap.go b/utils/linkedhashmap/linkedhashmap.go index 12e9569c3391..fa5a123a0942 100644 --- 
a/utils/linkedhashmap/linkedhashmap.go +++ b/utils/linkedhashmap/linkedhashmap.go @@ -17,7 +17,7 @@ var _ LinkedHashmap[int, struct{}] = (*linkedHashmap[int, struct{}])(nil) type Hashmap[K, V any] interface { Put(key K, val V) Get(key K) (val V, exists bool) - Delete(key K) + Delete(key K) (deleted bool) Len() int } @@ -63,11 +63,11 @@ func (lh *linkedHashmap[K, V]) Get(key K) (V, bool) { return lh.get(key) } -func (lh *linkedHashmap[K, V]) Delete(key K) { +func (lh *linkedHashmap[K, V]) Delete(key K) bool { lh.lock.Lock() defer lh.lock.Unlock() - lh.delete(key) + return lh.delete(key) } func (lh *linkedHashmap[K, V]) Len() int { @@ -114,11 +114,13 @@ func (lh *linkedHashmap[K, V]) get(key K) (V, bool) { return utils.Zero[V](), false } -func (lh *linkedHashmap[K, V]) delete(key K) { - if e, ok := lh.entryMap[key]; ok { +func (lh *linkedHashmap[K, V]) delete(key K) bool { + e, ok := lh.entryMap[key] + if ok { lh.entryList.Remove(e) delete(lh.entryMap, key) } + return ok } func (lh *linkedHashmap[K, V]) len() int { diff --git a/utils/linkedhashmap/linkedhashmap_test.go b/utils/linkedhashmap/linkedhashmap_test.go index 8bd7239ed5d9..0c95c30b24a8 100644 --- a/utils/linkedhashmap/linkedhashmap_test.go +++ b/utils/linkedhashmap/linkedhashmap_test.go @@ -62,7 +62,7 @@ func TestLinkedHashmap(t *testing.T) { require.Equal(key1, rkey1, "wrong key") require.Equal(1, val1, "wrong value") - lh.Delete(key0) + require.True(lh.Delete(key0)) require.Equal(1, lh.Len(), "wrong hashmap length") _, exists = lh.Get(key0) @@ -132,7 +132,7 @@ func TestIterator(t *testing.T) { // Should be empty require.False(iter.Next()) // Delete id1 - lh.Delete(id1) + require.True(lh.Delete(id1)) iter = lh.NewIterator() require.NotNil(iter) // Should immediately be exhausted @@ -169,8 +169,8 @@ func TestIterator(t *testing.T) { iter := lh.NewIterator() require.True(iter.Next()) require.True(iter.Next()) - lh.Delete(id1) - lh.Delete(id2) + require.True(lh.Delete(id1)) + require.True(lh.Delete(id2)) 
require.True(iter.Next()) require.Equal(id3, iter.Key()) require.Equal(3, iter.Value()) diff --git a/vms/avm/txs/mempool/mempool.go b/vms/avm/txs/mempool/mempool.go index b64002e8f39d..84a3583781ef 100644 --- a/vms/avm/txs/mempool/mempool.go +++ b/vms/avm/txs/mempool/mempool.go @@ -48,8 +48,7 @@ type Mempool interface { Get(txID ids.ID) *txs.Tx Remove(txs []*txs.Tx) - // Peek returns the next first tx that was added to the mempool whose size - // is less than or equal to maxTxSize. + // Peek returns the first tx in the mempool whose size is <= [maxTxSize]. Peek(maxTxSize int) *txs.Tx // RequestBuildBlock notifies the consensus engine that a block should be @@ -162,23 +161,20 @@ func (m *mempool) Has(txID ids.ID) bool { } func (m *mempool) Get(txID ids.ID) *txs.Tx { - unissuedTxs, _ := m.unissuedTxs.Get(txID) - return unissuedTxs + tx, _ := m.unissuedTxs.Get(txID) + return tx } func (m *mempool) Remove(txsToRemove []*txs.Tx) { for _, tx := range txsToRemove { txID := tx.ID() - if _, ok := m.unissuedTxs.Get(txID); !ok { - // If tx isn't in the mempool, there is nothing to do. 
+ if !m.unissuedTxs.Delete(txID) { continue } - txBytes := tx.Bytes() - m.bytesAvailable += len(txBytes) + m.bytesAvailable += len(tx.Bytes()) m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) - m.unissuedTxs.Delete(txID) m.numTxs.Dec() inputs := tx.Unsigned.InputIDs() diff --git a/vms/platformvm/api/static_service_test.go b/vms/platformvm/api/static_service_test.go index 49822d9679d4..12bdb91bfa60 100644 --- a/vms/platformvm/api/static_service_test.go +++ b/vms/platformvm/api/static_service_test.go @@ -18,7 +18,7 @@ import ( func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) @@ -62,7 +62,7 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { func TestBuildGenesisInvalidStakeWeight(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) @@ -106,7 +106,7 @@ func TestBuildGenesisInvalidStakeWeight(t *testing.T) { func TestBuildGenesisInvalidEndtime(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) @@ -151,7 +151,7 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { func TestBuildGenesisReturnsSortedValidators(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1} + nodeID := ids.BuildTestNodeID([]byte{1}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) diff --git a/vms/platformvm/block/builder/builder.go b/vms/platformvm/block/builder/builder.go index 66a90f99e7f5..734762215395 100644 --- a/vms/platformvm/block/builder/builder.go +++ 
b/vms/platformvm/block/builder/builder.go @@ -244,36 +244,6 @@ func (b *builder) ResetBlockTimer() { b.timer.SetTimeoutIn(0) } -// dropExpiredStakerTxs drops add validator/delegator transactions in the -// mempool whose start time is not sufficiently far in the future -// (i.e. within local time plus [MaxFutureStartFrom]). -func (b *builder) dropExpiredStakerTxs(timestamp time.Time) { - minStartTime := timestamp.Add(txexecutor.SyncBound) - for b.Mempool.HasStakerTx() { - tx := b.Mempool.PeekStakerTx() - startTime := tx.Unsigned.(txs.Staker).StartTime() - if !startTime.Before(minStartTime) { - // The next proposal tx in the mempool starts sufficiently far in - // the future. - return - } - - txID := tx.ID() - err := fmt.Errorf( - "synchrony bound (%s) is later than staker start time (%s)", - minStartTime, - startTime, - ) - - b.Mempool.Remove([]*txs.Tx{tx}) - b.Mempool.MarkDropped(txID, err) // cache tx as dropped - b.txExecutorBackend.Ctx.Log.Debug("dropping tx", - zap.Stringer("txID", txID), - zap.Error(err), - ) - } -} - func (b *builder) setNextBuildBlockTime() { ctx := b.txExecutorBackend.Ctx @@ -368,7 +338,13 @@ func buildBlock( } // Clean out the mempool's transactions with invalid timestamps. - builder.dropExpiredStakerTxs(timestamp) + droppedStakerTxIDs := mempool.DropExpiredStakerTxs(builder.Mempool, timestamp.Add(txexecutor.SyncBound)) + for _, txID := range droppedStakerTxIDs { + builder.txExecutorBackend.Ctx.Log.Debug("dropping tx", + zap.Stringer("txID", txID), + zap.Error(err), + ) + } // If there is no reason to build a block, don't. 
if !builder.Mempool.HasTxs() && !forceAdvanceTime { diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index ed55791147d8..a773fc964ef6 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -157,7 +157,7 @@ func newEnvironment(t *testing.T) *environment { metrics, err := metrics.New("", registerer) require.NoError(err) - res.mempool, err = mempool.NewMempool("mempool", registerer, res) + res.mempool, err = mempool.New("mempool", registerer, res) require.NoError(err) res.blkManager = blockexecutor.NewManager( diff --git a/vms/platformvm/block/executor/helpers_test.go b/vms/platformvm/block/executor/helpers_test.go index 5e79d52c9b10..9448cdde44eb 100644 --- a/vms/platformvm/block/executor/helpers_test.go +++ b/vms/platformvm/block/executor/helpers_test.go @@ -189,7 +189,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { metrics := metrics.Noop var err error - res.mempool, err = mempool.NewMempool("mempool", registerer, res) + res.mempool, err = mempool.New("mempool", registerer, res) if err != nil { panic(fmt.Errorf("failed to create mempool: %w", err)) } diff --git a/vms/platformvm/state/disk_staker_diff_iterator.go b/vms/platformvm/state/disk_staker_diff_iterator.go index 44ee1ed87180..efac5ec7b6d7 100644 --- a/vms/platformvm/state/disk_staker_diff_iterator.go +++ b/vms/platformvm/state/disk_staker_diff_iterator.go @@ -43,7 +43,7 @@ func marshalDiffKey(subnetID ids.ID, height uint64, nodeID ids.NodeID) []byte { key := make([]byte, diffKeyLength) copy(key, subnetID[:]) packIterableHeight(key[ids.IDLen:], height) - copy(key[diffKeyNodeIDOffset:], nodeID[:]) + copy(key[diffKeyNodeIDOffset:], nodeID.Bytes()) return key } diff --git a/vms/platformvm/state/disk_staker_diff_iterator_test.go b/vms/platformvm/state/disk_staker_diff_iterator_test.go index 9439428937b5..543f42a4b9c3 100644 --- 
a/vms/platformvm/state/disk_staker_diff_iterator_test.go +++ b/vms/platformvm/state/disk_staker_diff_iterator_test.go @@ -58,8 +58,8 @@ func TestDiffIteration(t *testing.T) { subnetID0 := ids.GenerateTestID() subnetID1 := ids.GenerateTestID() - nodeID0 := ids.NodeID{0x00} - nodeID1 := ids.NodeID{0x01} + nodeID0 := ids.BuildTestNodeID([]byte{0x00}) + nodeID1 := ids.BuildTestNodeID([]byte{0x01}) subnetID0Height0NodeID0 := marshalDiffKey(subnetID0, 0, nodeID0) subnetID0Height1NodeID0 := marshalDiffKey(subnetID0, 1, nodeID0) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index e64d24d434b1..fd842f684eae 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2030,7 +2030,7 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error // // Note: We store the compressed public key here. pkBytes := bls.PublicKeyToBytes(staker.PublicKey) - if err := nestedPKDiffDB.Put(nodeID[:], pkBytes); err != nil { + if err := nestedPKDiffDB.Put(nodeID.Bytes(), pkBytes); err != nil { return err } } @@ -2069,7 +2069,7 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error if err != nil { return fmt.Errorf("failed to serialize validator weight diff: %w", err) } - if err := nestedWeightDiffDB.Put(nodeID[:], weightDiffBytes); err != nil { + if err := nestedWeightDiffDB.Put(nodeID.Bytes(), weightDiffBytes); err != nil { return err } diff --git a/vms/platformvm/txs/add_permissionless_delegator_tx_test.go b/vms/platformvm/txs/add_permissionless_delegator_tx_test.go index 821a3b7da849..c70bf720bfe3 100644 --- a/vms/platformvm/txs/add_permissionless_delegator_tx_test.go +++ b/vms/platformvm/txs/add_permissionless_delegator_tx_test.go @@ -54,11 +54,11 @@ func TestAddPermissionlessPrimaryDelegatorSerialization(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 
0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) simpleAddPrimaryTx := &AddPermissionlessDelegatorTx{ BaseTx: BaseTx{ @@ -768,11 +768,11 @@ func TestAddPermissionlessSubnetDelegatorSerialization(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) subnetID := ids.ID{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, diff --git a/vms/platformvm/txs/add_permissionless_validator_tx_test.go b/vms/platformvm/txs/add_permissionless_validator_tx_test.go index 79b1a64abd00..80e4d3b6ae93 100644 --- a/vms/platformvm/txs/add_permissionless_validator_tx_test.go +++ b/vms/platformvm/txs/add_permissionless_validator_tx_test.go @@ -60,11 +60,11 @@ func TestAddPermissionlessPrimaryValidator(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) simpleAddPrimaryTx := &AddPermissionlessValidatorTx{ BaseTx: BaseTx{ @@ -725,11 +725,11 @@ func TestAddPermissionlessSubnetValidator(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) subnetID := ids.ID{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, diff --git a/vms/platformvm/txs/mempool/mempool.go 
b/vms/platformvm/txs/mempool/mempool.go index 7d1ba9b609bd..91b547cf5414 100644 --- a/vms/platformvm/txs/mempool/mempool.go +++ b/vms/platformvm/txs/mempool/mempool.go @@ -6,6 +6,7 @@ package mempool import ( "errors" "fmt" + "time" "github.com/prometheus/client_golang/prometheus" @@ -18,9 +19,9 @@ import ( ) const ( - // targetTxSize is the maximum number of bytes a transaction can use to be + // MaxTxSize is the maximum number of bytes a transaction can use to be // allowed into the mempool. - targetTxSize = 64 * units.KiB + MaxTxSize = 64 * units.KiB // droppedTxIDsCacheSize is the maximum number of dropped txIDs to cache droppedTxIDsCacheSize = 64 @@ -34,7 +35,10 @@ const ( var ( _ Mempool = (*mempool)(nil) - errMempoolFull = errors.New("mempool is full") + errDuplicateTx = errors.New("duplicate tx") + errTxTooLarge = errors.New("tx too large") + errMempoolFull = errors.New("mempool is full") + errConflictsWithOtherTx = errors.New("tx conflicts with other tx") ) type BlockTimer interface { @@ -97,7 +101,7 @@ type mempool struct { blkTimer BlockTimer } -func NewMempool( +func New( namespace string, registerer prometheus.Registerer, blkTimer BlockTimer, @@ -158,25 +162,30 @@ func (m *mempool) Add(tx *txs.Tx) error { // Note: a previously dropped tx can be re-added txID := tx.ID() if m.Has(txID) { - return fmt.Errorf("duplicate tx %s", txID) + return fmt.Errorf("%w: %s", errDuplicateTx, txID) } - txBytes := tx.Bytes() - if len(txBytes) > targetTxSize { - return fmt.Errorf("tx %s size (%d) > target size (%d)", txID, len(txBytes), targetTxSize) + txSize := len(tx.Bytes()) + if txSize > MaxTxSize { + return fmt.Errorf("%w: %s size (%d) > max size (%d)", + errTxTooLarge, + txID, + txSize, + MaxTxSize, + ) } - if len(txBytes) > m.bytesAvailable { - return fmt.Errorf("%w, tx %s size (%d) exceeds available space (%d)", + if txSize > m.bytesAvailable { + return fmt.Errorf("%w: %s size (%d) > available space (%d)", errMempoolFull, txID, - len(txBytes), + txSize, 
m.bytesAvailable, ) } inputs := tx.Unsigned.InputIDs() if m.consumedUTXOs.Overlaps(inputs) { - return fmt.Errorf("tx %s conflicts with a transaction in the mempool", txID) + return fmt.Errorf("%w: %s", errConflictsWithOtherTx, txID) } if err := tx.Unsigned.Visit(&issuer{ @@ -297,3 +306,34 @@ func (m *mempool) deregister(tx *txs.Tx) { inputs := tx.Unsigned.InputIDs() m.consumedUTXOs.Difference(inputs) } + +// Drops all [txs.Staker] transactions whose [StartTime] is before +// [minStartTime] from [mempool]. The dropped tx ids are returned. +// +// TODO: Remove once [StartTime] field is ignored in staker txs +func DropExpiredStakerTxs(mempool Mempool, minStartTime time.Time) []ids.ID { + var droppedTxIDs []ids.ID + + for mempool.HasStakerTx() { + tx := mempool.PeekStakerTx() + startTime := tx.Unsigned.(txs.Staker).StartTime() + if !startTime.Before(minStartTime) { + // The next proposal tx in the mempool starts sufficiently far in + // the future. + break + } + + txID := tx.ID() + err := fmt.Errorf( + "synchrony bound (%s) is later than staker start time (%s)", + minStartTime, + startTime, + ) + + mempool.Remove([]*txs.Tx{tx}) + mempool.MarkDropped(txID, err) // cache tx as dropped + droppedTxIDs = append(droppedTxIDs, txID) + } + + return droppedTxIDs +} diff --git a/vms/platformvm/txs/mempool/mempool_test.go b/vms/platformvm/txs/mempool/mempool_test.go index bdcd3101233f..dbfe895f9d9b 100644 --- a/vms/platformvm/txs/mempool/mempool_test.go +++ b/vms/platformvm/txs/mempool/mempool_test.go @@ -34,7 +34,7 @@ func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { require := require.New(t) registerer := prometheus.NewRegistry() - mpool, err := NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, &noopBlkTimer{}) require.NoError(err) decisionTxs, err := createTestDecisionTxs(1) @@ -58,7 +58,7 @@ func TestDecisionTxsInMempool(t *testing.T) { require := require.New(t) registerer := prometheus.NewRegistry() - mpool, err := 
NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, &noopBlkTimer{}) require.NoError(err) decisionTxs, err := createTestDecisionTxs(2) @@ -110,7 +110,7 @@ func TestProposalTxsInMempool(t *testing.T) { require := require.New(t) registerer := prometheus.NewRegistry() - mpool, err := NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, &noopBlkTimer{}) require.NoError(err) // The proposal txs are ordered by decreasing start time. This means after diff --git a/vms/platformvm/txs/remove_subnet_validator_tx_test.go b/vms/platformvm/txs/remove_subnet_validator_tx_test.go index 73f2df3cdcf2..4b1f381c039b 100644 --- a/vms/platformvm/txs/remove_subnet_validator_tx_test.go +++ b/vms/platformvm/txs/remove_subnet_validator_tx_test.go @@ -51,11 +51,11 @@ func TestRemoveSubnetValidatorTxSerialization(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) subnetID := ids.ID{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, diff --git a/vms/platformvm/txs/txheap/by_end_time_test.go b/vms/platformvm/txs/txheap/by_end_time_test.go index 8ea152d27e02..5d95a2c66ad7 100644 --- a/vms/platformvm/txs/txheap/by_end_time_test.go +++ b/vms/platformvm/txs/txheap/by_end_time_test.go @@ -23,7 +23,7 @@ func TestByEndTime(t *testing.T) { utx0 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{0}, + NodeID: ids.BuildTestNodeID([]byte{0}), Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 1, }, @@ -34,7 +34,7 @@ func TestByEndTime(t *testing.T) { utx1 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: 
uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 2, }, @@ -45,7 +45,7 @@ func TestByEndTime(t *testing.T) { utx2 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 3, }, diff --git a/vms/platformvm/txs/txheap/by_start_time_test.go b/vms/platformvm/txs/txheap/by_start_time_test.go index 164e2ec35e59..e00d42076015 100644 --- a/vms/platformvm/txs/txheap/by_start_time_test.go +++ b/vms/platformvm/txs/txheap/by_start_time_test.go @@ -23,7 +23,7 @@ func TestByStartTime(t *testing.T) { utx0 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{0}, + NodeID: ids.BuildTestNodeID([]byte{0}), Start: uint64(baseTime.Unix()) + 1, End: uint64(baseTime.Unix()) + 1, }, @@ -34,7 +34,7 @@ func TestByStartTime(t *testing.T) { utx1 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: uint64(baseTime.Unix()) + 2, End: uint64(baseTime.Unix()) + 2, }, @@ -45,7 +45,7 @@ func TestByStartTime(t *testing.T) { utx2 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: uint64(baseTime.Unix()) + 3, End: uint64(baseTime.Unix()) + 3, }, diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 8522d3ae4715..72201c16e42f 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -177,7 +177,7 @@ func (vm *VM) Initialize( // Note: There is a circular dependency between the mempool and block // builder which is broken by passing in the vm. 
- mempool, err := mempool.NewMempool("mempool", registerer, vm) + mempool, err := mempool.New("mempool", registerer, vm) if err != nil { return fmt.Errorf("failed to create mempool: %w", err) } diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index ea8d43891dd6..31dfcfbda183 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -1388,7 +1388,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { advanceTimeBlkID := advanceTimeBlk.ID() advanceTimeBlkBytes := advanceTimeBlk.Bytes() - peerID := ids.NodeID{1, 2, 3, 4, 5, 4, 3, 2, 1} + peerID := ids.BuildTestNodeID([]byte{1, 2, 3, 4, 5, 4, 3, 2, 1}) beacons := validators.NewManager() require.NoError(beacons.AddStaker(ctx.SubnetID, peerID, nil, ids.Empty, 1)) diff --git a/vms/proposervm/batched_vm_test.go b/vms/proposervm/batched_vm_test.go index 499025c6b2f4..326272275dac 100644 --- a/vms/proposervm/batched_vm_test.go +++ b/vms/proposervm/batched_vm_test.go @@ -1038,21 +1038,27 @@ func initTestRemoteProposerVM( return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil diff --git a/vms/proposervm/proposer/validators_test.go b/vms/proposervm/proposer/validators_test.go index a0703d498ec8..2f7913d01e2e 100644 --- a/vms/proposervm/proposer/validators_test.go +++ b/vms/proposervm/proposer/validators_test.go @@ -19,7 +19,7 @@ 
func TestValidatorDataLess(t *testing.T) { require.False(v2.Less(v1)) v1 = validatorData{ - id: ids.NodeID{1}, + id: ids.BuildTestNodeID([]byte{1}), } require.False(v1.Less(v2)) require.True(v2.Less(v1)) diff --git a/vms/proposervm/proposer/windower_test.go b/vms/proposervm/proposer/windower_test.go index ec2225003230..961398c78867 100644 --- a/vms/proposervm/proposer/windower_test.go +++ b/vms/proposervm/proposer/windower_test.go @@ -72,7 +72,7 @@ func TestWindowerChangeByHeight(t *testing.T) { chainID := ids.ID{0, 2} validatorIDs := make([]ids.NodeID, MaxWindows) for i := range validatorIDs { - validatorIDs[i] = ids.NodeID{byte(i + 1)} + validatorIDs[i] = ids.BuildTestNodeID([]byte{byte(i) + 1}) } vdrState := &validators.TestState{ T: t, @@ -134,7 +134,7 @@ func TestWindowerChangeByChain(t *testing.T) { validatorIDs := make([]ids.NodeID, MaxWindows) for i := range validatorIDs { - validatorIDs[i] = ids.NodeID{byte(i + 1)} + validatorIDs[i] = ids.BuildTestNodeID([]byte{byte(i) + 1}) } vdrState := &validators.TestState{ T: t, diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index b3c22862f9bf..b75493f26ac6 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -152,21 +152,27 @@ func initTestProposerVM( return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil @@ -892,9 
+898,10 @@ func TestExpiredBuildBlock(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + nodeID := ids.BuildTestNodeID([]byte{1}) return map[ids.NodeID]*validators.GetValidatorOutput{ - {1}: { - NodeID: ids.NodeID{1}, + nodeID: { + NodeID: nodeID, Weight: 100, }, }, nil @@ -1160,9 +1167,10 @@ func TestInnerVMRollback(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + nodeID := ids.BuildTestNodeID([]byte{1}) return map[ids.NodeID]*validators.GetValidatorOutput{ - {1}: { - NodeID: ids.NodeID{1}, + nodeID: { + NodeID: nodeID, Weight: 100, }, }, nil @@ -1813,21 +1821,27 @@ func TestRejectedHeightNotIndexed(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil @@ -2014,21 +2028,27 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = 
ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil diff --git a/vms/rpcchainvm/vm_client.go b/vms/rpcchainvm/vm_client.go index 9a7823979348..d9915bfbfcd6 100644 --- a/vms/rpcchainvm/vm_client.go +++ b/vms/rpcchainvm/vm_client.go @@ -396,7 +396,7 @@ func (vm *VMClient) CreateStaticHandlers(ctx context.Context) (map[string]http.H func (vm *VMClient) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { _, err := vm.client.Connected(ctx, &vmpb.ConnectedRequest{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), Version: nodeVersion.String(), }) return err @@ -404,7 +404,7 @@ func (vm *VMClient) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersio func (vm *VMClient) Disconnected(ctx context.Context, nodeID ids.NodeID) error { _, err := vm.client.Disconnected(ctx, &vmpb.DisconnectedRequest{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), }) return err } @@ -568,7 +568,7 @@ func (vm *VMClient) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID _, err := vm.client.AppRequest( ctx, &vmpb.AppRequestMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Request: request, Deadline: grpcutils.TimestampFromTime(deadline), @@ -581,7 +581,7 @@ func (vm *VMClient) AppResponse(ctx context.Context, nodeID ids.NodeID, requestI _, err := vm.client.AppResponse( ctx, &vmpb.AppResponseMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Response: response, }, @@ -593,7 +593,7 @@ func (vm *VMClient) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, req _, err := vm.client.AppRequestFailed( ctx, 
&vmpb.AppRequestFailedMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, }, ) @@ -604,7 +604,7 @@ func (vm *VMClient) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte _, err := vm.client.AppGossip( ctx, &vmpb.AppGossipMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), Msg: msg, }, ) diff --git a/x/merkledb/cache.go b/x/merkledb/cache.go index 57d674ed63ef..7b280c1208d4 100644 --- a/x/merkledb/cache.go +++ b/x/merkledb/cache.go @@ -48,7 +48,7 @@ func (c *onEvictCache[K, V]) Get(key K) (V, bool) { // Put an element into this cache. If this causes an element // to be evicted, calls [c.onEviction] on the evicted element -// and returns the error from [c.onEviction]. Otherwise returns nil. +// and returns the error from [c.onEviction]. Otherwise, returns nil. func (c *onEvictCache[K, V]) Put(key K, value V) error { c.lock.Lock() defer c.lock.Unlock() diff --git a/x/merkledb/codec.go b/x/merkledb/codec.go index a7decc6f6436..c9837abb509f 100644 --- a/x/merkledb/codec.go +++ b/x/merkledb/codec.go @@ -83,7 +83,7 @@ func newCodec() encoderDecoder { } } -// Note that bytes.Buffer.Write always returns nil so we +// Note that bytes.Buffer.Write always returns nil, so we // can ignore its return values in [codecImpl] methods. 
type codecImpl struct { // Invariant: Every byte slice returned by [varIntPool] has @@ -277,12 +277,12 @@ func (c *codecImpl) decodeMaybeByteSlice(src *bytes.Reader) (maybe.Maybe[[]byte] return maybe.Nothing[[]byte](), err } - bytes, err := c.decodeByteSlice(src) + rawBytes, err := c.decodeByteSlice(src) if err != nil { return maybe.Nothing[[]byte](), err } - return maybe.Some(bytes), nil + return maybe.Some(rawBytes), nil } func (c *codecImpl) decodeByteSlice(src *bytes.Reader) ([]byte, error) { diff --git a/x/merkledb/db.go b/x/merkledb/db.go index 88dd667ae22a..7e52e1fa9ecf 100644 --- a/x/merkledb/db.go +++ b/x/merkledb/db.go @@ -73,7 +73,7 @@ type ChangeProofer interface { maxLength int, ) (*ChangeProof, error) - // Returns nil iff all of the following hold: + // Returns nil iff all the following hold: // - [start] <= [end]. // - [proof] is non-empty. // - All keys in [proof.KeyValues] and [proof.DeletedKeys] are in [start, end]. @@ -175,7 +175,7 @@ type merkleDB struct { // Should be held before taking [db.lock] commitLock sync.RWMutex - // Contains all of the key-value pairs stored by this database, + // Contains all the key-value pairs stored by this database, // including metadata, intermediate nodes and value nodes. baseDB database.Database @@ -495,11 +495,11 @@ func (db *merkleDB) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []e defer db.lock.RUnlock() values := make([][]byte, len(keys)) - errors := make([]error, len(keys)) + getErrors := make([]error, len(keys)) for i, key := range keys { - values[i], errors[i] = db.getValueCopy(ToKey(key)) + values[i], getErrors[i] = db.getValueCopy(ToKey(key)) } - return values, errors + return values, getErrors } // GetValue returns the value associated with [key]. 
@@ -778,7 +778,7 @@ func (db *merkleDB) Has(k []byte) (bool, error) { } _, err := db.getValueWithoutLock(ToKey(k)) - if err == database.ErrNotFound { + if errors.Is(err, database.ErrNotFound) { return false, nil } return err == nil, err @@ -862,7 +862,7 @@ func (db *merkleDB) DeleteContext(ctx context.Context, key []byte) error { return view.commitToDB(ctx) } -// Assumes values inside of [ops] are safe to reference after the function +// Assumes values inside [ops] are safe to reference after the function // returns. Assumes [db.lock] isn't held. func (db *merkleDB) commitBatch(ops []database.BatchOp) error { db.commitLock.Lock() @@ -1144,7 +1144,7 @@ func (db *merkleDB) initializeRootIfNeeded() (ids.ID, error) { // check under both prefixes var err error db.root, err = db.intermediateNodeDB.Get(Key{}) - if err == database.ErrNotFound { + if errors.Is(err, database.ErrNotFound) { db.root, err = db.valueNodeDB.Get(Key{}) } if err == nil { @@ -1152,7 +1152,7 @@ func (db *merkleDB) initializeRootIfNeeded() (ids.ID, error) { db.root.calculateID(db.metrics) return db.root.id, nil } - if err != database.ErrNotFound { + if !errors.Is(err, database.ErrNotFound) { return ids.Empty, err } diff --git a/x/merkledb/history.go b/x/merkledb/history.go index 103c4c9357e8..c52385445cd2 100644 --- a/x/merkledb/history.go +++ b/x/merkledb/history.go @@ -49,7 +49,7 @@ type changeSummaryAndInsertNumber struct { insertNumber uint64 } -// Tracks all of the node and value changes that resulted in the rootID. +// Tracks all the node and value changes that resulted in the rootID. 
type changeSummary struct { rootID ids.ID nodes map[Key]*change[*node] diff --git a/x/merkledb/key.go b/x/merkledb/key.go index b92ac2d7ceec..d65d9b74a0a6 100644 --- a/x/merkledb/key.go +++ b/x/merkledb/key.go @@ -68,7 +68,7 @@ func ToToken(val byte, tokenSize int) Key { } // Token returns the token at the specified index, -// Assumes that bitindex + tokenSize doesn't cross a byte boundary +// Assumes that bitIndex + tokenSize doesn't cross a byte boundary func (k Key) Token(bitIndex int, tokenSize int) byte { storageByte := k.value[bitIndex/8] // Shift the byte right to get the last bit to the rightmost position. @@ -145,7 +145,7 @@ func (k Key) HasPrefix(prefix Key) bool { } // Note that this will never be an index OOB because len(prefix.value) > 0. - // If len(prefix.value) == 0 were true, [remainderTokens] would be 0 so we + // If len(prefix.value) == 0 were true, [remainderTokens] would be 0, so we // would have returned above. prefixWithoutPartialByte := prefix.value[:len(prefix.value)-1] return strings.HasPrefix(k.value, prefixWithoutPartialByte) @@ -167,7 +167,7 @@ func (k Key) Greater(other Key) bool { return k.value > other.value || (k.value == other.value && k.length > other.length) } -// Less returns true if current Key is less than other Key +// Less will return true if current Key is less than other Key func (k Key) Less(other Key) bool { return k.value < other.value || (k.value == other.value && k.length < other.length) } diff --git a/x/merkledb/proof.go b/x/merkledb/proof.go index f750158d4c11..e348a83f0f13 100644 --- a/x/merkledb/proof.go +++ b/x/merkledb/proof.go @@ -50,7 +50,6 @@ var ( ErrNilProof = errors.New("proof is nil") ErrNilValue = errors.New("value is nil") ErrUnexpectedEndProof = errors.New("end proof should be empty") - ErrInconsistentBranchFactor = errors.New("all keys in proof nodes should have the same branch factor") ) type ProofNode struct { @@ -62,6 +61,7 @@ type ProofNode struct { Children map[byte]ids.ID } +// ToProto converts 
the ProofNode into the protobuf version of a proof node // Assumes [node.Key.Key.length] <= math.MaxUint64. func (node *ProofNode) ToProto() *pb.ProofNode { pbNode := &pb.ProofNode{ @@ -127,11 +127,11 @@ type Proof struct { Key Key // Nothing if [Key] isn't in the trie. - // Otherwise the value corresponding to [Key]. + // Otherwise, the value corresponding to [Key]. Value maybe.Maybe[[]byte] } -// Returns nil if the trie given in [proof] has root [expectedRootID]. +// Verify returns nil if the trie given in [proof] has root [expectedRootID]. // That is, this is a valid proof that [proof.Key] exists/doesn't exist // in the trie with root [expectedRootID]. func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID, tokenSize int) error { @@ -172,11 +172,11 @@ func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID, tokenSize } // Insert all proof nodes. - // [provenPath] is the path that we are proving exists, or the path - // that is where the path we are proving doesn't exist should be. - provenPath := maybe.Some(proof.Path[len(proof.Path)-1].Key) + // [provenKey] is the key that we are proving exists, or the key + // that is the next key along the node path, proving that [proof.Key] doesn't exist in the trie. + provenKey := maybe.Some(proof.Path[len(proof.Path)-1].Key) - if err = addPathInfo(view, proof.Path, provenPath, provenPath); err != nil { + if err = addPathInfo(view, proof.Path, provenKey, provenKey); err != nil { return err } @@ -240,7 +240,7 @@ type KeyValue struct { Value []byte } -// A proof that a given set of key-value pairs are in a trie. +// RangeProof is a proof that a given set of key-value pairs are in a trie. type RangeProof struct { // Invariant: At least one of [StartProof], [EndProof], [KeyValues] is non-empty. @@ -302,21 +302,21 @@ func (proof *RangeProof) Verify( } // [proof] allegedly provides and proves all key-value - // pairs in [smallestProvenPath, largestProvenPath]. 
- // If [smallestProvenPath] is Nothing, [proof] should - // provide and prove all keys < [largestProvenPath]. - // If [largestProvenPath] is Nothing, [proof] should - // provide and prove all keys > [smallestProvenPath]. + // pairs in [smallestProvenKey, largestProvenKey]. + // If [smallestProvenKey] is Nothing, [proof] should + // provide and prove all keys < [largestProvenKey]. + // If [largestProvenKey] is Nothing, [proof] should + // provide and prove all keys > [smallestProvenKey]. // If both are Nothing, [proof] should prove the entire trie. - smallestProvenPath := maybe.Bind(start, ToKey) + smallestProvenKey := maybe.Bind(start, ToKey) - largestProvenPath := maybe.Bind(end, ToKey) + largestProvenKey := maybe.Bind(end, ToKey) if len(proof.KeyValues) > 0 { // If [proof] has key-value pairs, we should insert children - // greater than [largestProvenPath] to ancestors of the node containing - // [largestProvenPath] so that we get the expected root ID. - largestProvenPath = maybe.Some(ToKey(proof.KeyValues[len(proof.KeyValues)-1].Key)) + // greater than [largestProvenKey] to ancestors of the node containing + // [largestProvenKey] so that we get the expected root ID. + largestProvenKey = maybe.Some(ToKey(proof.KeyValues[len(proof.KeyValues)-1].Key)) } // The key-value pairs (allegedly) proven by [proof]. @@ -327,13 +327,13 @@ func (proof *RangeProof) Verify( // Ensure that the start proof is valid and contains values that // match the key/values that were sent. 
- if err := verifyProofPath(proof.StartProof, smallestProvenPath); err != nil { + if err := verifyProofPath(proof.StartProof, smallestProvenKey); err != nil { return err } if err := verifyAllRangeProofKeyValuesPresent( proof.StartProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, keyValues, ); err != nil { return err @@ -341,13 +341,13 @@ func (proof *RangeProof) Verify( // Ensure that the end proof is valid and contains values that // match the key/values that were sent. - if err := verifyProofPath(proof.EndProof, largestProvenPath); err != nil { + if err := verifyProofPath(proof.EndProof, largestProvenKey); err != nil { return err } if err := verifyAllRangeProofKeyValuesPresent( proof.EndProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, keyValues, ); err != nil { return err @@ -369,24 +369,24 @@ func (proof *RangeProof) Verify( } // For all the nodes along the edges of the proof, insert children - // < [smallestProvenPath] and > [largestProvenPath] + // < [smallestProvenKey] and > [largestProvenKey] // into the trie so that we get the expected root ID (if this proof is valid). - // By inserting all children < [smallestProvenPath], we prove that there are no keys - // > [smallestProvenPath] but less than the first key given. + // By inserting all children < [smallestProvenKey], we prove that there are no keys + // > [smallestProvenKey] but less than the first key given. // That is, the peer who gave us this proof is not omitting nodes. 
if err := addPathInfo( view, proof.StartProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, ); err != nil { return err } if err := addPathInfo( view, proof.EndProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, ); err != nil { return err } @@ -462,13 +462,13 @@ func (proof *RangeProof) UnmarshalProto(pbProof *pb.RangeProof) error { func verifyAllRangeProofKeyValuesPresent(proof []ProofNode, start maybe.Maybe[Key], end maybe.Maybe[Key], keysValues map[Key][]byte) error { for i := 0; i < len(proof); i++ { var ( - node = proof[i] - nodePath = node.Key + node = proof[i] + nodeKey = node.Key ) // Skip keys that cannot have a value (enforced by [verifyProofPath]). - if !nodePath.hasPartialByte() && (start.IsNothing() || !nodePath.Less(start.Value())) && (end.IsNothing() || !nodePath.Greater(end.Value())) { - value, ok := keysValues[nodePath] + if !nodeKey.hasPartialByte() && (start.IsNothing() || !nodeKey.Less(start.Value())) && (end.IsNothing() || !nodeKey.Greater(end.Value())) { + value, ok := keysValues[nodeKey] if !ok && node.ValueOrHash.HasValue() { // We didn't get a key-value pair for this key, but the proof node has a value. return ErrProofNodeHasUnincludedValue @@ -488,7 +488,7 @@ type KeyChange struct { Value maybe.Maybe[[]byte] } -// A change proof proves that a set of key-value changes occurred +// ChangeProof proves that a set of key-value changes occurred // between two trie roots, where each key-value pair's key is // between some lower and upper bound (inclusive). 
type ChangeProof struct { @@ -622,8 +622,8 @@ func (proof *ChangeProof) UnmarshalProto(pbProof *pb.ChangeProof) error { } // Verifies that all values present in the [proof]: -// - Are nothing when deleted, not in the db, or the node has path partial byte length -// - if the node's path is within the key range, that has a value that matches the value passed in the change list or in the db +// - Are nothing when deleted, not in the db, or the node has key partial byte length +// - if the node's key is within the key range, that has a value that matches the value passed in the change list or in the db func verifyAllChangeProofKeyValuesPresent( ctx context.Context, db MerkleDB, @@ -634,19 +634,19 @@ func verifyAllChangeProofKeyValuesPresent( ) error { for i := 0; i < len(proof); i++ { var ( - node = proof[i] - nodePath = node.Key + node = proof[i] + nodeKey = node.Key ) // Check the value of any node with a key that is within the range. // Skip keys that cannot have a value (enforced by [verifyProofPath]). - if !nodePath.hasPartialByte() && (start.IsNothing() || !nodePath.Less(start.Value())) && (end.IsNothing() || !nodePath.Greater(end.Value())) { - value, ok := keysValues[nodePath] + if !nodeKey.hasPartialByte() && (start.IsNothing() || !nodeKey.Less(start.Value())) && (end.IsNothing() || !nodeKey.Greater(end.Value())) { + value, ok := keysValues[nodeKey] if !ok { // This value isn't in the list of key-value pairs we got. - dbValue, err := db.GetValue(ctx, nodePath.Bytes()) + dbValue, err := db.GetValue(ctx, nodeKey.Bytes()) if err != nil { - if err != database.ErrNotFound { + if !errors.Is(err, database.ErrNotFound) { return err } // This key isn't in the database so proof node should have Nothing. @@ -669,7 +669,7 @@ func (proof *ChangeProof) Empty() bool { len(proof.StartProof) == 0 && len(proof.EndProof) == 0 } -// Exactly one of [ChangeProof] or [RangeProof] is non-nil. +// ChangeOrRangeProof has exactly one of [ChangeProof] or [RangeProof] is non-nil. 
type ChangeOrRangeProof struct { ChangeProof *ChangeProof RangeProof *RangeProof @@ -840,9 +840,9 @@ func addPathInfo( if existingChild, ok := n.children[index]; ok { compressedKey = existingChild.compressedKey } - childPath := key.Extend(ToToken(index, t.tokenSize), compressedKey) - if (shouldInsertLeftChildren && childPath.Less(insertChildrenLessThan.Value())) || - (shouldInsertRightChildren && childPath.Greater(insertChildrenGreaterThan.Value())) { + childKey := key.Extend(ToToken(index, t.tokenSize), compressedKey) + if (shouldInsertLeftChildren && childKey.Less(insertChildrenLessThan.Value())) || + (shouldInsertRightChildren && childKey.Greater(insertChildrenGreaterThan.Value())) { // We didn't set the other values on the child entry, but it doesn't matter. // We only need the IDs to be correct so that the calculated hash is correct. n.setChildEntry( diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go index e278456649b1..9bce417a7b1a 100644 --- a/x/merkledb/trie_test.go +++ b/x/merkledb/trie_test.go @@ -43,7 +43,7 @@ func getNodeValue(t ReadOnlyTrie, key string) ([]byte, error) { if err != nil { return nil, err } - if result.key != path || result == nil { + if result == nil || result.key != path { return nil, database.ErrNotFound } diff --git a/x/merkledb/trieview.go b/x/merkledb/trieview.go index c905cb82c218..d8d9cfbdeb28 100644 --- a/x/merkledb/trieview.go +++ b/x/merkledb/trieview.go @@ -149,7 +149,7 @@ func newTrieView( ) (*trieView, error) { root, err := parentTrie.getEditableNode(Key{}, false /* hasValue */) if err != nil { - if err == database.ErrNotFound { + if errors.Is(err, database.ErrNotFound) { return nil, ErrNoValidRoot } return nil, err @@ -273,8 +273,8 @@ func (t *trieView) calculateNodeIDsHelper(n *node) { ) for childIndex, child := range n.children { - childPath := n.key.Extend(ToToken(childIndex, t.tokenSize), child.compressedKey) - childNodeChange, ok := t.changes.nodes[childPath] + childKey := n.key.Extend(ToToken(childIndex, 
t.tokenSize), child.compressedKey) + childNodeChange, ok := t.changes.nodes[childKey] if !ok { // This child wasn't changed. continue @@ -811,7 +811,7 @@ func (t *trieView) insert( // have the existing path node and the value being inserted as children. // generate the new branch node - // find how many tokens are common between the existing child's compressed path and + // find how many tokens are common between the existing child's compressed key and // the current key(offset by the closest node's key), // then move all the common tokens into the branch node commonPrefixLength := getLengthOfCommonPrefix( @@ -910,7 +910,7 @@ func (t *trieView) recordKeyChange(key Key, after *node, hadValue bool, newNode } before, err := t.getParentTrie().getEditableNode(key, hadValue) - if err != nil && err != database.ErrNotFound { + if err != nil && !errors.Is(err, database.ErrNotFound) { return err } t.changes.nodes[key] = &change[*node]{ diff --git a/x/sync/client.go b/x/sync/client.go index e86c37485c7f..6605a5089935 100644 --- a/x/sync/client.go +++ b/x/sync/client.go @@ -256,9 +256,6 @@ func (c *client) GetRangeProof( return nil, err } - startKey := maybeBytesToMaybe(req.StartKey) - endKey := maybeBytesToMaybe(req.EndKey) - var rangeProof merkledb.RangeProof if err := rangeProof.UnmarshalProto(&rangeProofProto); err != nil { return nil, err @@ -268,8 +265,8 @@ func (c *client) GetRangeProof( ctx, &rangeProof, int(req.KeyLimit), - startKey, - endKey, + maybeBytesToMaybe(req.StartKey), + maybeBytesToMaybe(req.EndKey), req.RootHash, c.tokenSize, ); err != nil { diff --git a/x/sync/client_test.go b/x/sync/client_test.go index c9f473bea53e..f6c67debe5ee 100644 --- a/x/sync/client_test.go +++ b/x/sync/client_test.go @@ -812,7 +812,7 @@ func TestAppRequestSendFailed(t *testing.T) { gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(ids.NodeID{}, nil, errAppSendFailed).Times(2) + ).Return(ids.EmptyNodeID, nil, errAppSendFailed).Times(2) _, err = client.GetChangeProof( 
context.Background(), diff --git a/x/sync/network_server.go b/x/sync/network_server.go index 6f21702ce397..c213bee6a739 100644 --- a/x/sync/network_server.go +++ b/x/sync/network_server.go @@ -39,7 +39,6 @@ const ( // TODO: refine this estimate. This is almost certainly a large overestimate. estimatedMessageOverhead = 4 * units.KiB maxByteSizeLimit = constants.DefaultMaxMessageSize - estimatedMessageOverhead - endProofSizeBufferAmount = 2 * units.KiB ) var (