diff --git a/.github/workflows/docker-stable.yml b/.github/workflows/docker-stable.yml deleted file mode 100644 index 29f88b4be8..0000000000 --- a/.github/workflows/docker-stable.yml +++ /dev/null @@ -1,33 +0,0 @@ -on: - pull_request: - # Commented paths to avoid skipping required workflow - # See https://github.community/t/feature-request-conditional-required-checks/16761 - # paths: - # - .github/workflows/docker-stable.yml - # - "**/*.go" - # - "chain/**" - # - "cmd/**" - # - "dot/**" - # - "internal/**" - # - "lib/**" - # - "pkg/**" - # - scripts/integration-test-all.sh - # - go.mod - # - go.sum -name: docker-stable - -jobs: - docker-stable-tests: - runs-on: ubuntu-latest - env: - DOCKER_BUILDKIT: "1" - steps: - - uses: docker/build-push-action@v3 - with: - load: true - target: builder - tags: chainsafe/gossamer:test - - - name: Run stable tests - run: | - docker run chainsafe/gossamer:test sh -c "make it-stable" diff --git a/.gitignore b/.gitignore index 8995c9de36..d5d719704f 100644 --- a/.gitignore +++ b/.gitignore @@ -23,9 +23,6 @@ test_data trie_putandget_failed_test_data_* tmp -tests/utils/config* -tests/utils/genesis* - # node_modules used by polkadot.js/api tests tests/polkadotjs_test/node_modules !tests/polkadotjs_test/test/*.wasm diff --git a/Dockerfile b/Dockerfile index 2eb2c33cc8..c3e678117f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,11 +17,6 @@ RUN wget -qO- https://deb.nodesource.com/setup_14.x | bash - && \ RUN wget -O /usr/local/bin/subkey https://chainbridge.ams3.digitaloceanspaces.com/subkey-v2.0.0 && \ chmod +x /usr/local/bin/subkey -# Polkadot JS dependencies -WORKDIR /go/src/github.com/ChainSafe/gossamer/tests/polkadotjs_test -COPY tests/polkadotjs_test/package.json tests/polkadotjs_test/package-lock.json ./ -RUN npm install - WORKDIR /go/src/github.com/ChainSafe/gossamer # Go dependencies diff --git a/Makefile b/Makefile index 40926cb548..fef16e8427 100644 --- a/Makefile +++ b/Makefile @@ -36,32 +36,26 @@ test: git lfs pull go test 
-short -coverprofile c.out ./... -timeout=30m -## it-stable: Runs Integration Tests Stable mode -it-stable: - @echo " > \033[32mRunning Integration Tests...\033[0m " - @chmod +x scripts/integration-test-all.sh - ./scripts/integration-test-all.sh -q 3 -s 10 - ## it-stress: Runs Integration Tests stress mode it-stress: build @echo " > \033[32mRunning stress tests...\033[0m " - HOSTNAME=0.0.0.0 MODE=stress go test ./tests/stress/... -timeout=15m -v -short -run TestSync_ + MODE=stress go test ./tests/stress/... -timeout=15m -v -short -run TestSync_ it-grandpa: build @echo " > \033[32mRunning GRANDPA stress tests...\033[0m " - HOSTNAME=0.0.0.0 MODE=stress go test ./tests/stress/... -timeout=12m -v -short -run TestStress_Grandpa_ + MODE=stress go test ./tests/stress/... -timeout=12m -v -short -run TestStress_Grandpa_ it-rpc: build @echo " > \033[32mRunning Integration Tests RPC Specs mode...\033[0m " - HOSTNAME=0.0.0.0 MODE=rpc go test ./tests/rpc/... -timeout=10m -v + MODE=rpc go test ./tests/rpc/... -timeout=10m -v it-sync: build @echo " > \033[32mRunning Integration Tests sync mode...\033[0m " - HOSTNAME=0.0.0.0 MODE=sync go test ./tests/sync/... -timeout=5m -v + MODE=sync go test ./tests/sync/... -timeout=5m -v it-polkadotjs: build @echo " > \033[32mRunning Integration Tests polkadot.js/api mode...\033[0m " - HOSTNAME=0.0.0.0 MODE=polkadot go test ./tests/polkadotjs_test/... -timeout=5m -v + MODE=polkadot go test ./tests/polkadotjs_test/... -timeout=5m -v ## test: Runs `go test -race` on project test files. test-state-race: diff --git a/docs/docs/testing-and-debugging/test-suite.md b/docs/docs/testing-and-debugging/test-suite.md index 5e5c7ab31b..ca6a3fb6e6 100644 --- a/docs/docs/testing-and-debugging/test-suite.md +++ b/docs/docs/testing-and-debugging/test-suite.md @@ -25,13 +25,6 @@ Proceed to open `cover.html` in your preferred browser. 
### Gossamer Integration Tests Running Gossamer's integration tests with the below commands will build a Gossamer binary, install required dependencies, and then proceeds to run the provided set of tests. Integration tests can also be run within a docker container. - - -To run Gossamer integration tests in **stable** mode run the following command: - -``` -make it-stable -``` To run Gossamer integration tests in **stress** mode run the following command: diff --git a/dot/utils.go b/dot/utils.go index 1748c7c9dd..596fe397d7 100644 --- a/dot/utils.go +++ b/dot/utils.go @@ -9,37 +9,9 @@ import ( "os" "strings" - ctoml "github.com/ChainSafe/gossamer/dot/config/toml" "github.com/cosmos/go-bip39" - "github.com/naoina/toml" ) -// exportConfig exports a dot configuration to a toml configuration file -func exportConfig(cfg *Config, fp string) { - raw, err := toml.Marshal(*cfg) - if err != nil { - logger.Errorf("failed to marshal configuration: %s", err) - os.Exit(1) - } - if err := os.WriteFile(fp, raw, 0600); err != nil { - logger.Errorf("failed to write file: %s", err) - os.Exit(1) - } -} - -// ExportTomlConfig exports a dot configuration to a toml configuration file -func ExportTomlConfig(cfg *ctoml.Config, fp string) { - raw, err := toml.Marshal(*cfg) - if err != nil { - logger.Errorf("failed to marshal configuration: %s", err) - os.Exit(1) - } - if err := os.WriteFile(fp, raw, 0600); err != nil { - logger.Errorf("failed to write file: %s", err) - os.Exit(1) - } -} - // CreateJSONRawFile will generate a JSON genesis file with raw storage func CreateJSONRawFile(bs *BuildSpec, fp string) { data, err := bs.ToJSONRaw() diff --git a/dot/utils_test.go b/dot/utils_test.go index 638d003c21..38c7c8c01a 100644 --- a/dot/utils_test.go +++ b/dot/utils_test.go @@ -12,7 +12,6 @@ import ( "path/filepath" "testing" - ctoml "github.com/ChainSafe/gossamer/dot/config/toml" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" 
"github.com/ChainSafe/gossamer/lib/genesis" @@ -80,154 +79,6 @@ func TestCreateJSONRawFile(t *testing.T) { } } -func TestExportConfig(t *testing.T) { - filepath := filepath.Join(t.TempDir(), "test.json") - type args struct { - cfg *Config - fp string - } - tests := []struct { - name string - args args - want *os.File - wantedContent string - }{ - { - name: "working example", - args: args{ - cfg: &Config{}, - fp: filepath, - }, - want: &os.File{}, - wantedContent: `[global] -name = "" -id = "" -base_path = "" -log_lvl = 0 -publish_metrics = false -metrics_address = "" -no_telemetry = false -telemetry_urls = [] -retain_blocks = 0 -pruning = "" - -[log] -core_lvl = 0 -digest_lvl = 0 -sync_lvl = 0 -network_lvl = 0 -rpc_lvl = 0 -state_lvl = 0 -runtime_lvl = 0 -block_producer_lvl = 0 -finality_gadget_lvl = 0 - -[init] -genesis = "" - -[account] -key = "" -unlock = "" - -[core] -roles = 0 -babe_authority = false -b_a_b_e_lead = false -grandpa_authority = false -wasm_interpreter = "" -grandpa_interval = 0 - -[network] -port = 0 -bootnodes = [] -protocol_id = "" -no_bootstrap = false -no_m_dns = false -min_peers = 0 -max_peers = 0 -persistent_peers = [] -discovery_interval = 0 -public_ip = "" -public_dns = "" - -[rpc] -enabled = false -external = false -unsafe = false -unsafe_external = false -port = 0 -host = "" -modules = [] -w_s_port = 0 -w_s = false -w_s_external = false -w_s_unsafe = false -w_s_unsafe_external = false - -[system] -system_name = "" -system_version = "" - -[state] -rewind = 0 - -[pprof] -enabled = false - -[pprof.settings] -listening_address = "" -block_profile_rate = 0 -mutex_profile_rate = 0 -`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - exportConfig(tt.args.cfg, tt.args.fp) - - content, err := ioutil.ReadFile(tt.args.fp) - require.NoError(t, err) - require.Equal(t, tt.wantedContent, string(content)) - - }) - } -} - -func TestExportTomlConfig(t *testing.T) { - filepath := filepath.Join(t.TempDir(), "test.json") - type 
args struct { - cfg *ctoml.Config - fp string - } - tests := []struct { - name string - args args - wantedContent string - }{ - { - name: "working example", - args: args{ - cfg: &ctoml.Config{}, - fp: filepath, - }, - wantedContent: `[core] -babe-authority = false -grandpa-authority = false -`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ExportTomlConfig(tt.args.cfg, tt.args.fp) - - content, err := ioutil.ReadFile(tt.args.fp) - require.NoError(t, err) - require.Equal(t, tt.wantedContent, string(content)) - - }) - } -} - func TestNewTestConfig(t *testing.T) { basePath := t.TempDir() incBasePath := basePath[:len(basePath)-1] + "2" diff --git a/lib/utils/utils.go b/lib/utils/utils.go index 69f5454e50..8b1ab8440f 100644 --- a/lib/utils/utils.go +++ b/lib/utils/utils.go @@ -181,6 +181,11 @@ func GetDevGenesisPath(t *testing.T) string { return filepath.Join(GetProjectRootPathTest(t), "./chain/dev/genesis.json") } +// GetDevGenesisSpecPathTest gets the dev genesis spec path +func GetDevGenesisSpecPathTest(t *testing.T) string { + return filepath.Join(GetProjectRootPathTest(t), "./chain/dev/genesis-spec.json") +} + // GetGssmrGenesisPath gets the gssmr genesis path // and returns an error if it cannot find it. func GetGssmrGenesisPath() (path string, err error) { diff --git a/scripts/integration-test-all.sh b/scripts/integration-test-all.sh deleted file mode 100755 index 7c1cf9d81a..0000000000 --- a/scripts/integration-test-all.sh +++ /dev/null @@ -1,112 +0,0 @@ -#!/bin/bash - -# "stable" mode tests assume data is static -# "live" mode tests assume data dynamic - -SCRIPT=$(basename ${BASH_SOURCE[0]}) -TEST="" -QTD=1 -SLEEP_TIMEOUT=5 -TEST_QTD=3 - -PORT=7000 -RPC_PORT=8540 -HOSTNAME="0.0.0.0" -MODE="stable" - -declare -a keys=("alice" "bob" "charlie" "dave" "eve" "ferdie" "george" "heather" "ian") - -usage() { - echo "Usage: $SCRIPT" - echo "Optional command line arguments" - echo "-t -- Test to run. 
eg: rpc" - echo "-q -- Quantity of nodes to run. eg: 3" - echo "-z -- Quantity of nodes to run tests against eg: 3" - echo "-s -- Sleep between operations in secs. eg: 5" - exit 1 -} - -while getopts "h?t:q:z:s:" args; do -case $args in - h|\?) - usage; - exit;; - t ) TEST=${OPTARG};; - q ) QTD=${OPTARG};; - z ) TEST_QTD=${OPTARG};; - s ) SLEEP_TIMEOUT=${OPTARG};; - esac -done - -set -euxo pipefail - -BASE_PATH=$(mktemp -d -t gossamer-basepath.XXXXX) - -if [[ ! "$BASE_PATH" ]]; then - echo "Could not create $BASE_PATH" - exit 1 -fi - -# Compile gossamer -echo "compiling gossamer" -make build - -# PID array declaration -arr=() - -start_func() { - echo "starting gossamer node $i in background ..." - "$PWD"/bin/gossamer --port=$(($PORT + $i)) --key=${keys[$i-1]} --basepath="$BASE_PATH$i" \ - --rpc --rpchost=$HOSTNAME --rpcport=$(($RPC_PORT + $i)) --roles=1 --rpcmods=system,author,chain >"$BASE_PATH"/node"$i".log 2>&1 & disown - - GOSSAMER_PID=$! - echo "started gossamer node, pid=$GOSSAMER_PID" - # add PID to array - arr+=("$GOSSAMER_PID") -} - -# Run node with static blockchain database -# For loop N times -for i in $(seq 1 "$QTD"); do - start_func "$i" - echo "sleeping $SLEEP_TIMEOUT seconds for startup" - sleep "$SLEEP_TIMEOUT" - echo "done sleeping" -done - -echo "sleeping $SLEEP_TIMEOUT seconds before running tests ... " -sleep "$SLEEP_TIMEOUT" -echo "done sleeping" - -set +e - -if [[ -z $TEST || $TEST == "rpc" ]]; then - - for i in $(seq 1 "$TEST_QTD"); do - echo "going to test gossamer node $(($RPC_PORT + $i))..." - MODE=$MODE NETWORK_SIZE=$QTD HOSTNAME=$HOSTNAME PORT=$(($RPC_PORT + $i)) go test ./tests/rpc/... -timeout=60s -v -count=1 - - RPC_FAIL=$? - done - -fi - -stop_func() { - GOSSAMER_PID=$i - echo "shutting down gossamer node, pid=$GOSSAMER_PID ..." 
- - # Shutdown gossamer node - kill -9 "$GOSSAMER_PID" - wait "$GOSSAMER_PID" -} - - -for i in "${arr[@]}"; do - stop_func "$i" -done - -if [[ (-z $TEST || $TEST == "rpc") && $RPC_FAIL -ne 0 ]]; then - exit $RPC_FAIL -else - exit 0 -fi diff --git a/tests/polkadotjs_test/start_polkadotjs_test.go b/tests/polkadotjs_test/start_polkadotjs_test.go index 61714c2b6c..26e3bf775a 100644 --- a/tests/polkadotjs_test/start_polkadotjs_test.go +++ b/tests/polkadotjs_test/start_polkadotjs_test.go @@ -4,12 +4,15 @@ package polkadotjs_test import ( - "os" + "context" "os/exec" "strings" "testing" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -21,23 +24,38 @@ func TestStartGossamerAndPolkadotAPI(t *testing.T) { t.Log("Going to skip polkadot.js/api suite tests") return } - t.Log("starting gossamer for polkadot.js/api tests...") - utils.CreateDefaultConfig() - defer os.Remove(utils.ConfigDefault) + const nodePackageManager = "npm" + t.Logf("Checking %s is available...", nodePackageManager) + _, err := exec.LookPath(nodePackageManager) + if err != nil { + t.Fatalf("%s is not available: %s", nodePackageManager, err) + } - nodes, err := utils.InitializeAndStartNodesWebsocket(t, 1, utils.GenesisDev, utils.ConfigDefault) + t.Log("Installing Node dependencies...") + cmd := exec.Command(nodePackageManager, "install") + testWriter := utils.NewTestWriter(t) + cmd.Stdout = testWriter + cmd.Stderr = testWriter + err = cmd.Run() require.NoError(t, err) + t.Log("starting gossamer for polkadot.js/api tests...") + + tomlConfig := config.Default() + tomlConfig.Init.Genesis = libutils.GetDevGenesisSpecPathTest(t) + tomlConfig.Core.BABELead = true + tomlConfig.RPC.WS = true + n := node.New(t, tomlConfig) + + ctx, cancel := context.WithCancel(context.Background()) + 
n.InitAndStartTest(ctx, t, cancel) + command := "npx mocha ./test --timeout 30000" parts := strings.Fields(command) - data, err := exec.Command(parts[0], parts[1:]...).Output() + data, err := exec.CommandContext(ctx, parts[0], parts[1:]...).CombinedOutput() assert.NoError(t, err, string(data)) //uncomment this to see log results from javascript tests //fmt.Printf("%s\n", data) - - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) } diff --git a/tests/rpc/rpc_00_test.go b/tests/rpc/rpc_00_test.go index e1b9b462e0..6c4d1e30ff 100644 --- a/tests/rpc/rpc_00_test.go +++ b/tests/rpc/rpc_00_test.go @@ -6,54 +6,46 @@ package rpc import ( "context" "fmt" - "os" - "reflect" - "strconv" "testing" + "time" - "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/rpc" "github.com/stretchr/testify/require" ) var ( - currentPort = strconv.Itoa(utils.BaseRPCPort) - rpcSuite = "rpc" + rpcSuite = "rpc" ) -func TestMain(m *testing.M) { - fmt.Println("Going to start RPC suite test") - - utils.CreateDefaultConfig() - defer os.Remove(utils.ConfigDefault) - - // Start all tests - code := m.Run() - os.Exit(code) -} - type testCase struct { description string method string params string expected interface{} - skip bool } -func getResponse(ctx context.Context, t *testing.T, test *testCase) interface{} { - if test.skip { - t.Skip("RPC endpoint not yet implemented") - return nil - } +func fetchWithTimeout(ctx context.Context, t *testing.T, + method, params string, target interface{}) { + t.Helper() - endpoint := utils.NewEndpoint(currentPort) - respBody, err := utils.PostRPC(ctx, endpoint, test.method, test.params) + getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second) + defer getResponseCancel() + err := getResponse(getResponseCtx, method, params, target) require.NoError(t, err) +} - target := reflect.New(reflect.TypeOf(test.expected)).Interface() - err = utils.DecodeRPC(t, 
respBody, target) - require.Nil(t, err, "Could not DecodeRPC", string(respBody)) +func getResponse(ctx context.Context, method, params string, target interface{}) (err error) { + const currentPort = "8540" + endpoint := rpc.NewEndpoint(currentPort) + respBody, err := rpc.Post(ctx, endpoint, method, params) + if err != nil { + return fmt.Errorf("cannot RPC post: %w", err) + } - require.NotNil(t, target) + err = rpc.Decode(respBody, &target) + if err != nil { + return fmt.Errorf("cannot decode RPC response: %w", err) + } - return target + return nil } diff --git a/tests/rpc/rpc_01-system_test.go b/tests/rpc/rpc_01-system_test.go index bf43135990..6b386187c7 100644 --- a/tests/rpc/rpc_01-system_test.go +++ b/tests/rpc/rpc_01-system_test.go @@ -9,137 +9,190 @@ import ( "time" "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/lib/common" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/retry" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +const peerIDRegex = `^[a-zA-Z0-9]{52}$` + func TestSystemRPC(t *testing.T) { if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test system_name", - method: "system_name", - skip: true, - }, - { //TODO - description: "test system_version", - method: "system_version", - skip: true, - }, - { //TODO - description: "test system_chain", - method: "system_chain", - skip: true, - }, - { //TODO - description: "test system_properties", - method: "system_properties", - skip: true, - }, - { - description: "test system_health", - method: "system_health", - expected: modules.SystemHealthResponse{ - Peers: 2, - IsSyncing: true, - ShouldHavePeers: true, - }, - params: "{}", - }, - { - description: "test 
system_peers", - method: "system_peers", - expected: modules.SystemPeersResponse{}, - params: "{}", - }, - { - description: "test system_network_state", - method: "system_networkState", - expected: modules.SystemNetworkStateResponse{ - NetworkState: modules.NetworkStateString{ - PeerID: "", - }, - }, - params: "{}", - }, - { //TODO - description: "test system_addReservedPeer", - method: "system_addReservedPeer", - skip: true, - }, - { //TODO - description: "test system_removeReservedPeer", - method: "system_removeReservedPeer", - skip: true, - }, - { //TODO - description: "test system_nodeRoles", - method: "system_nodeRoles", - skip: true, - }, - { //TODO - description: "test system_accountNextIndex", - method: "system_accountNextIndex", - skip: true, - }, - } - - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 3, utils.GenesisDefault, utils.ConfigDefault) - - //use only first server for tests - require.NoError(t, err) + const testTimeout = 8 * time.Minute + ctx, cancel := context.WithTimeout(context.Background(), testTimeout) - time.Sleep(time.Second) // give server a second to start + const numberOfNodes = 3 - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - target := getResponse(getResponseCtx, t, test) + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numberOfNodes, tomlConfig) - switch v := target.(type) { - case *modules.SystemHealthResponse: - t.Log("Will assert SystemHealthResponse", "target", target) + nodes.InitAndStartTest(ctx, t, cancel) - require.Equal(t, test.expected.(modules.SystemHealthResponse).IsSyncing, v.IsSyncing) - require.Equal(t, test.expected.(modules.SystemHealthResponse).ShouldHavePeers, v.ShouldHavePeers) - require.GreaterOrEqual(t, v.Peers, 
test.expected.(modules.SystemHealthResponse).Peers) + t.Run("system_health", func(t *testing.T) { + t.Parallel() - case *modules.SystemNetworkStateResponse: - t.Log("Will assert SystemNetworkStateResponse", "target", target) + const method = "system_health" + const params = "{}" - require.NotNil(t, v.NetworkState) - require.NotNil(t, v.NetworkState.PeerID) + expected := modules.SystemHealthResponse{ + Peers: numberOfNodes - 1, + ShouldHavePeers: true, + } - case *modules.SystemPeersResponse: - t.Log("Will assert SystemPeersResponse", "target", target) + var response modules.SystemHealthResponse + err := retry.UntilOK(ctx, time.Second, func() (ok bool, err error) { + getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second) + err = getResponse(getResponseCtx, method, params, &response) + getResponseCancel() + if err != nil { + return false, err + } + return response.Peers == expected.Peers, nil + }) + require.NoError(t, err) + + // IsSyncing can be true or false + response.IsSyncing = false + + assert.Equal(t, expected, response) + }) + + t.Run("system_peers", func(t *testing.T) { + t.Parallel() + + // Wait for N-1 peers connected and no syncing + err := retry.UntilOK(ctx, time.Second, func() (ok bool, err error) { + getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second) + const method = "system_health" + const params = "{}" + var healthResponse modules.SystemHealthResponse + err = getResponse(getResponseCtx, method, params, &healthResponse) + getResponseCancel() + if err != nil { + return false, err // error and stop retrying + } - require.NotNil(t, v) + ok = healthResponse.Peers == numberOfNodes-1 && !healthResponse.IsSyncing + return ok, nil + }) + require.NoError(t, err) + + var response modules.SystemPeersResponse + // Wait for N-1 peers with peer IDs set + err = retry.UntilOK(ctx, time.Second, func() (ok bool, err error) { + getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second) + const method = 
"system_peers" + const params = "{}" + err = getResponse(getResponseCtx, method, params, &response) + getResponseCancel() + if err != nil { + return false, err // error and stop retrying + } - //TODO: #807 - //this assertion requires more time on init to be enabled - //require.GreaterOrEqual(t, len(v.Peers), 2) + if len(response) != numberOfNodes-1 { + return false, nil // retry + } - for _, vv := range *v { - require.NotNil(t, vv.PeerID) - require.NotNil(t, vv.Roles) - require.NotNil(t, vv.BestHash) - require.NotNil(t, vv.BestNumber) + for _, peer := range response { + // wait for all peers to have the same best block number + if peer.PeerID == "" || peer.BestHash.IsEmpty() { + return false, nil // retry } - } + return true, nil // success, stop retrying }) - } - - t.Log("going to tear down gossamer...") - - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + require.NoError(t, err) + + expectedResponse := modules.SystemPeersResponse{ + // Assert they all have the same best block number and hash + {Roles: 4, PeerID: ""}, + {Roles: 4, PeerID: ""}, + } + for i := range response { + // Check randomly generated peer IDs and clear them + assert.Regexp(t, peerIDRegex, response[i].PeerID) + response[i].PeerID = "" + // TODO assert these are all the same, + // see https://github.com/ChainSafe/gossamer/issues/2498 + response[i].BestHash = common.Hash{} + response[i].BestNumber = 0 + } + + assert.Equal(t, expectedResponse, response) + }) + + t.Run("system_networkState", func(t *testing.T) { + t.Parallel() + + const method = "system_networkState" + const params = "{}" + + var response modules.SystemNetworkStateResponse + fetchWithTimeout(ctx, t, method, params, &response) + + assert.Regexp(t, peerIDRegex, response.NetworkState.PeerID) + response.NetworkState.PeerID = "" + + assert.NotEmpty(t, response.NetworkState.Multiaddrs) + for _, addr := range response.NetworkState.Multiaddrs { + assert.Regexp(t, "^/ip[4|6]/.+/tcp/[0-9]{1,5}/p2p/[a-zA-Z0-9]{52}$", 
addr) + } + response.NetworkState.Multiaddrs = nil + + // Ensure we don't need to assert other fields + expectedResponse := modules.SystemNetworkStateResponse{} + assert.Equal(t, expectedResponse, response) + }) + + t.Run("system_name", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_version", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_chain", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_properties", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_addReservedPeer", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_removeReservedPeer", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_nodeRoles", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_accountNextIndex", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) } diff --git a/tests/rpc/rpc_02-author_test.go b/tests/rpc/rpc_02-author_test.go index 9db0aabddd..1716eef32e 100644 --- a/tests/rpc/rpc_02-author_test.go +++ b/tests/rpc/rpc_02-author_test.go @@ -12,7 +12,11 @@ import ( "github.com/centrifuge/go-substrate-rpc-client/v3/scale" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/retry" gsrpc "github.com/centrifuge/go-substrate-rpc-client/v3" "github.com/centrifuge/go-substrate-rpc-client/v3/signature" "github.com/centrifuge/go-substrate-rpc-client/v3/types" @@ -25,20 +29,27 @@ func TestAuthorSubmitExtrinsic(t *testing.T) { return } - t.Log("starting gossamer...") + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + 
tomlConfig.Core.BABELead = true - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDev, utils.ConfigDefault) - require.NoError(t, err) - - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) - }() + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - time.Sleep(30 * time.Second) // wait for server to start and block 1 to be produced + api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("http://localhost:%s", node.RPCPort())) + require.NoError(t, err) - api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("http://localhost:%s", nodes[0].RPCPort)) + // Wait for the first block to be produced. + const retryWait = time.Second + err = retry.UntilOK(ctx, retryWait, func() (ok bool, err error) { + block, err := api.RPC.Chain.GetBlockLatest() + if err != nil { + return false, err + } + return block.Block.Header.Number > 0, nil + }) require.NoError(t, err) meta, err := api.RPC.State.GetMetadataLatest() @@ -94,60 +105,75 @@ func TestAuthorRPC(t *testing.T) { return } - testCases := []*testCase{ - { //TODO - description: "test author_submitExtrinsic", - method: "author_submitExtrinsic", - skip: true, - }, - { //TODO - description: "test author_pendingExtrinsics", - method: "author_pendingExtrinsics", - skip: true, - }, - { //TODO - description: "test author_removeExtrinsic", - method: "author_removeExtrinsic", - skip: true, - }, - { //TODO - description: "test author_insertKey", - method: "author_insertKey", - skip: true, - }, - { //TODO - description: "test author_rotateKeys", - method: "author_rotateKeys", - skip: true, - }, - { //TODO - description: "test author_hasSessionKeys", - method: "author_hasSessionKeys", - skip: true, - }, - { //TODO - description: "test author_hasKey", - method: "author_hasKey", - skip: true, - }, - } - - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, 
utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) - - time.Sleep(time.Second) // give server a second to start - - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } - - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) + + t.Run("author_pendingExtrinsics", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_pendingExtrinsics", "", target) + }) + + t.Run("author_submitExtrinsic", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_submitExtrinsic", "", target) + }) + + t.Run("author_pendingExtrinsics", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_pendingExtrinsics", "", target) + }) + + t.Run("author_removeExtrinsic", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_removeExtrinsic", "", target) + }) + + t.Run("author_insertKey", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_insertKey", "", target) + }) + + t.Run("author_rotateKeys", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_rotateKeys", "", target) + }) + + t.Run("author_hasSessionKeys", 
func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_hasSessionKeys", "", target) + }) + + t.Run("author_hasKey", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_hasKey", "", target) + }) } diff --git a/tests/rpc/rpc_03-chain_test.go b/tests/rpc/rpc_03-chain_test.go index 31111ccb5d..6f6be2e145 100644 --- a/tests/rpc/rpc_03-chain_test.go +++ b/tests/rpc/rpc_03-chain_test.go @@ -5,283 +5,356 @@ package rpc import ( "context" - "log" + "errors" + "fmt" + "math/rand" "testing" "time" "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/dot/rpc/subscription" + "github.com/ChainSafe/gossamer/lib/common" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/retry" + "github.com/ChainSafe/gossamer/tests/utils/rpc" "github.com/gorilla/websocket" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +const ( + regex32BytesHex = `^0x[0-9a-f]{64}$` + regexBytesHex = `^0x[0-9a-f]{2}[0-9a-f]*$` + regexBytesHexOrEmpty = `^0x[0-9a-f]*$` +) + func TestChainRPC(t *testing.T) { if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { - description: "test chain_getFinalizedHead", - method: "chain_getFinalizedHead", - expected: "", - params: "[]", - }, - { - description: "test chain_getHeader", - method: "chain_getHeader", - expected: modules.ChainBlockHeaderResponse{ - Number: "1", - }, - params: "[]", - }, - { - description: "test chain_getBlock", - method: "chain_getBlock", - expected: modules.ChainBlockResponse{ - Block: modules.ChainBlock{ - Header: modules.ChainBlockHeaderResponse{ - Number: "1", - }, - Body: []string{}, - }, + 
genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) + + // Wait for Gossamer to produce block 2 + errBlockNumberTooHigh := errors.New("block number is too high") + const retryWaitDuration = 200 * time.Millisecond + err := retry.UntilOK(ctx, retryWaitDuration, func() (ok bool, err error) { + var header modules.ChainBlockHeaderResponse + fetchWithTimeout(ctx, t, "chain_getHeader", "[]", &header) + number, err := common.HexToUint(header.Number) + if err != nil { + return false, fmt.Errorf("cannot convert header number to uint: %w", err) + } + + switch number { + case 0, 1: + return false, nil + case 2: + return true, nil + default: + return false, fmt.Errorf("%w: %d", errBlockNumberTooHigh, number) + } + }) + require.NoError(t, err) + + var finalizedHead string + fetchWithTimeout(ctx, t, "chain_getFinalizedHead", "[]", &finalizedHead) + assert.Regexp(t, regex32BytesHex, finalizedHead) + + var header modules.ChainBlockHeaderResponse + fetchWithTimeout(ctx, t, "chain_getHeader", "[]", &header) + + // Check and clear unpredictable fields + assert.Regexp(t, regex32BytesHex, header.StateRoot) + header.StateRoot = "" + assert.Regexp(t, regex32BytesHex, header.ExtrinsicsRoot) + header.ExtrinsicsRoot = "" + assert.Len(t, header.Digest.Logs, 2) + for _, digestLog := range header.Digest.Logs { + assert.Regexp(t, regexBytesHex, digestLog) + } + header.Digest.Logs = nil + + // Assert remaining struct with predictable fields + expectedHeader := modules.ChainBlockHeaderResponse{ + ParentHash: finalizedHead, + Number: "0x02", + } + assert.Equal(t, expectedHeader, header) + + var block modules.ChainBlockResponse + fetchWithTimeout(ctx, t, "chain_getBlock", fmt.Sprintf(`["`+header.ParentHash+`"]`), &block) + + // Check and clear unpredictable fields 
+ assert.Regexp(t, regex32BytesHex, block.Block.Header.ParentHash) + block.Block.Header.ParentHash = "" + assert.Regexp(t, regex32BytesHex, block.Block.Header.StateRoot) + block.Block.Header.StateRoot = "" + assert.Regexp(t, regex32BytesHex, block.Block.Header.ExtrinsicsRoot) + block.Block.Header.ExtrinsicsRoot = "" + assert.Len(t, block.Block.Header.Digest.Logs, 3) + for _, digestLog := range block.Block.Header.Digest.Logs { + assert.Regexp(t, regexBytesHex, digestLog) + } + block.Block.Header.Digest.Logs = nil + assert.Len(t, block.Block.Body, 1) + const bodyRegex = `^0x280403000b[0-9a-z]{8}8101$` + assert.Regexp(t, bodyRegex, block.Block.Body[0]) + block.Block.Body = nil + + // Assert remaining struct with predictable fields + expectedBlock := modules.ChainBlockResponse{ + Block: modules.ChainBlock{ + Header: modules.ChainBlockHeaderResponse{ + Number: "0x01", }, - params: "[]", - }, - { - description: "test chain_getBlockHash", - method: "chain_getBlockHash", - expected: "", - params: "[]", }, } + assert.Equal(t, expectedBlock, block) - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDev, utils.ConfigDefault) - require.NoError(t, err) + var blockHash string + fetchWithTimeout(ctx, t, "chain_getBlockHash", "[]", &blockHash) + assert.Regexp(t, regex32BytesHex, blockHash) + assert.NotEqual(t, finalizedHead, blockHash) +} - time.Sleep(time.Second * 5) // give server a few seconds to start +func TestChainSubscriptionRPC(t *testing.T) { + if utils.MODE != rpcSuite { + t.Log("Going to skip RPC suite tests") + return + } - chainBlockHeaderHash := "" - for _, test := range testCases { + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + tomlConfig.RPC.WS = true // WS port is set in the node.New constructor + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + 
node.InitAndStartTest(ctx, t, cancel) - t.Run(test.description, func(t *testing.T) { + const endpoint = "ws://localhost:8546/" - // set params for chain_getBlock from previous chain_getHeader call - if chainBlockHeaderHash != "" { - test.params = "[\"" + chainBlockHeaderHash + "\"]" - } + t.Run("chain_subscribeNewHeads", func(t *testing.T) { + t.Parallel() - ctx := context.Background() + const numberOfMesages = 2 + messages := callAndSubscribeWebsocket(ctx, t, endpoint, "chain_subscribeNewHeads", "[]", numberOfMesages) - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - target := getResponse(getResponseCtx, t, test) + allParams := make([]subscription.Params, numberOfMesages) + for i, message := range messages { + err := rpc.Decode(message, &allParams[i]) + require.NoError(t, err, "cannot decode websocket message for message index %d", i) + } - switch v := target.(type) { - case *modules.ChainBlockHeaderResponse: - t.Log("Will assert ChainBlockHeaderResponse", "value", v) + for i, params := range allParams { + result := getResultMapFromParams(t, params) - require.GreaterOrEqual(t, test.expected.(modules.ChainBlockHeaderResponse).Number, v.Number) + number := getResultNumber(t, result) + assert.Equal(t, uint(i+1), number) - require.NotNil(t, test.expected.(modules.ChainBlockHeaderResponse).ParentHash) - require.NotNil(t, test.expected.(modules.ChainBlockHeaderResponse).StateRoot) - require.NotNil(t, test.expected.(modules.ChainBlockHeaderResponse).ExtrinsicsRoot) - require.NotNil(t, test.expected.(modules.ChainBlockHeaderResponse).Digest) + assertResultRegex(t, result, "parentHash", regex32BytesHex) + assertResultRegex(t, result, "stateRoot", regex32BytesHex) + assertResultRegex(t, result, "extrinsicsRoot", regex32BytesHex) + assertResultDigest(t, result) - //save for chain_getBlock - chainBlockHeaderHash = v.ParentHash - case *modules.ChainBlockResponse: - t.Log("Will assert ChainBlockResponse", "value", v.Block) + remainingExpected 
:= subscription.Params{ + Result: map[string]interface{}{}, + SubscriptionID: 1, + } + assert.Equal(t, remainingExpected, params) + } + }) - //reset - chainBlockHeaderHash = "" + t.Run("state_subscribeStorage", func(t *testing.T) { + t.Parallel() - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block) + const numberOfMesages = 2 + messages := callAndSubscribeWebsocket(ctx, t, endpoint, "state_subscribeStorage", "[]", numberOfMesages) - require.GreaterOrEqual(t, test.expected.(modules.ChainBlockResponse).Block.Header.Number, v.Block.Header.Number) + allParams := make([]subscription.Params, numberOfMesages) + for i := range allParams { + message := messages[i] + err := rpc.Decode(message, &allParams[i]) + require.NoError(t, err, "cannot decode websocket message for message index %d", i) + } + + for i, params := range allParams { + errorContext := fmt.Sprintf("for response at index %d", i) - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block.Header.ParentHash) - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block.Header.StateRoot) - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block.Header.ExtrinsicsRoot) - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block.Header.Digest) + result := getResultMapFromParams(t, params) - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block.Body) - require.GreaterOrEqual(t, len(test.expected.(modules.ChainBlockResponse).Block.Body), 0) + blockHex, ok := result["block"].(string) + require.True(t, ok, errorContext) + assert.Regexp(t, regex32BytesHex, blockHex, errorContext) + delete(result, "block") - case *string: - t.Log("Will assert ChainBlockNumberRequest", "value", *v) - require.NotNil(t, v) - require.GreaterOrEqual(t, len(*v), 66) + changes, ok := result["changes"].([]interface{}) + require.True(t, ok, errorContext) + for _, change := range changes { + fromTo, ok := change.([]interface{}) + require.Truef(t, ok, "%s and change: %v", 
errorContext, change) + from, ok := fromTo[0].(string) + require.Truef(t, ok, "%s and from: %v", errorContext, fromTo[0]) + to, ok := fromTo[1].(string) + require.Truef(t, ok, "%s and to: %v", errorContext, fromTo[1]) + assert.Regexp(t, regexBytesHexOrEmpty, from, errorContext) + assert.Regexp(t, regexBytesHexOrEmpty, to, errorContext) } + delete(result, "changes") - }) - } + remainingExpected := map[string]interface{}{} + assert.Equal(t, remainingExpected, result, errorContext) + } + }) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) -} + t.Run("chain_subscribeFinalizedHeads", func(t *testing.T) { + t.Parallel() -func TestChainSubscriptionRPC(t *testing.T) { - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } + const numberOfMesages = 4 + messages := callAndSubscribeWebsocket(ctx, t, endpoint, "chain_subscribeFinalizedHeads", "[]", numberOfMesages) - testCases := []*testCase{ - { - description: "test chain_subscribeNewHeads", - method: "chain_subscribeNewHeads", - expected: []interface{}{1, - map[string](interface{}){ - "subscription": float64(1), - "result": map[string](interface{}){ - "number": "0x01", - "parentHash": "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21", - "stateRoot": "0x3b1a31d10d4d8a444579fd5a3fb17cbe6bebba9d939d88fe7bafb9d48036abb5", - "extrinsicsRoot": "0x8025c0d64df303f79647611c8c2b0a77bc2247ee12d851df4624e1f71ebb3aed", - //nolint:lll - "digest": map[string](interface{}){"logs": []interface{}{ - "0x0642414245c101c809062df1d1271d6a50232754baa64870515a7ada927886467748a220972c6d58347fd7317e286045604c5ddb78b84018c4b3a3836ee6626c8da6957338720053588d9f29c307fade658661d8d6a57c525f48553a253cf6e1475dbd319ca90200000000000000000e00000000000000", - "0x054241424501017cac567e5b5688260d9d0a1f7fe6a9f81ae0f1900a382e1c73a4929fcaf6e33ed9e7347eb81ebb2699d58f6c8b01c7bdf0714e5f6f4495bc4b5fb3becb287580"}}}}}, - params: "[]", - skip: false, - }, - { - 
description: "test state_subscribeStorage", - method: "state_subscribeStorage", - expected: "", - params: "[]", - skip: true, - }, - { - description: "test chain_finalizedHeads", - method: "chain_subscribeFinalizedHeads", - expected: []interface{}{1, - map[string](interface{}){ - "subscription": float64(1), - "result": map[string](interface{}){ - "number": "0x01", - "parentHash": "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21", - "stateRoot": "0x3b1a31d10d4d8a444579fd5a3fb17cbe6bebba9d939d88fe7bafb9d48036abb5", - "extrinsicsRoot": "0x8025c0d64df303f79647611c8c2b0a77bc2247ee12d851df4624e1f71ebb3aed", - //nolint:lll - "digest": map[string](interface{}){"logs": []interface{}{ - "0x0642414245c101c809062df1d1271d6a50232754baa64870515a7ada927886467748a220972c6d58347fd7317e286045604c5ddb78b84018c4b3a3836ee6626c8da6957338720053588d9f29c307fade658661d8d6a57c525f48553a253cf6e1475dbd319ca90200000000000000000e00000000000000", - "0x054241424501017cac567e5b5688260d9d0a1f7fe6a9f81ae0f1900a382e1c73a4929fcaf6e33ed9e7347eb81ebb2699d58f6c8b01c7bdf0714e5f6f4495bc4b5fb3becb287580"}}}}}, - params: "[]", - skip: false, - }, - } + allParams := make([]subscription.Params, numberOfMesages) + for i, message := range messages { + err := rpc.Decode(message, &allParams[i]) + require.NoError(t, err, "cannot decode websocket message for message index %d", i) + } - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodesWebsocket(t, 1, utils.GenesisDev, utils.ConfigDefault) - require.NoError(t, err) + var blockNumbers []uint + for _, params := range allParams { + result := getResultMapFromParams(t, params) - time.Sleep(time.Second) // give server a second to start + number := getResultNumber(t, result) + blockNumbers = append(blockNumbers, number) - for _, test := range testCases { + assertResultRegex(t, result, "parentHash", regex32BytesHex) + assertResultRegex(t, result, "stateRoot", regex32BytesHex) + assertResultRegex(t, result, "extrinsicsRoot", 
regex32BytesHex) + assertResultDigest(t, result) - t.Run(test.description, func(t *testing.T) { - callWebsocket(t, test) - }) - } + remainingExpected := subscription.Params{ + Result: map[string]interface{}{}, + SubscriptionID: 1, + } + assert.Equal(t, remainingExpected, params) + } - time.Sleep(time.Second * 2) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + // Check block numbers grow by zero or one in order of responses. + for i, blockNumber := range blockNumbers { + if i == 0 { + assert.Equal(t, uint(1), blockNumber) + continue + } + assert.GreaterOrEqual(t, blockNumber, blockNumbers[i-1]) + } + }) } -func callWebsocket(t *testing.T, test *testCase) { - if test.skip { - t.Skip("Websocket endpoint not yet implemented") - } - url := "ws://localhost:8546/" // todo don't hard code this - ws, _, err := websocket.DefaultDialer.Dial(url, nil) - require.NoError(t, err) - defer ws.Close() +func getResultMapFromParams(t *testing.T, params subscription.Params) ( + resultMap map[string]interface{}) { + t.Helper() - done := make(chan struct{}) + resultMap, ok := params.Result.(map[string]interface{}) + require.True(t, ok) - vals := make(chan []byte) - go wsListener(t, ws, vals, done, len(test.expected.([]interface{}))) + return resultMap +} - err = ws.WriteMessage(websocket.TextMessage, []byte(`{ - "jsonrpc": "2.0", - "method": "`+test.method+`", - "params": [`+test.params+`], - "id": 1 -}`)) +// getResultNumber returns the number value from the result map +// and deletes the "number" key from the map. 
+func getResultNumber(t *testing.T, result map[string]interface{}) uint { + t.Helper() + + hexNumber, ok := result["number"].(string) + require.True(t, ok) + + number, err := common.HexToUint(hexNumber) require.NoError(t, err) - resCount := 0 - for { - select { - case v := <-vals: - resCount++ - switch exp := test.expected.([]interface{})[resCount-1].(type) { - case int: - // check for result subscription number - resNum := 0 - err = utils.DecodeWebsocket(t, v, &resNum) - require.NoError(t, err) - - case map[string]interface{}: - // check result map response - resMap := make(map[string]interface{}) - err = utils.DecodeWebsocket(t, v, &resMap) - require.NoError(t, err) - - // check values in map are expected type - for eKey, eVal := range exp { - rVal := resMap[eKey] - require.NotNil(t, rVal) - require.IsType(t, eVal, rVal) - switch evt := eVal.(type) { - case map[string]interface{}: - checkMap(t, evt, rVal.(map[string]interface{})) - } - } - } + delete(result, "number") - case <-done: - return - } - } + return number } -func wsListener(t *testing.T, ws *websocket.Conn, val chan []byte, done chan struct{}, msgCount int) { - defer close(done) - count := 0 - for { - _, message, err := ws.ReadMessage() - require.NoError(t, err) - - count++ - log.Printf("recv: %v: %s\n", count, message) - - val <- message - if count == msgCount { - err := ws.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - require.NoError(t, err) - return - } +// assertResultRegex gets the value from the map and asserts that it matches the regex. +// It then removes the key from the map. 
+func assertResultRegex(t *testing.T, result map[string]interface{}, key, regex string) { + t.Helper() + + value, ok := result[key] + require.True(t, ok, "cannot find key %q in result", key) + assert.Regexp(t, regex, value, "at result key %q", key) + delete(result, key) +} + +func assertResultDigest(t *testing.T, result map[string]interface{}) { + t.Helper() + + digest, ok := result["digest"].(map[string]interface{}) + require.True(t, ok) + + logs, ok := digest["logs"].([]interface{}) + require.True(t, ok) + + assert.NotEmpty(t, logs) + for _, log := range logs { + assert.Regexp(t, regexBytesHex, log) } + + delete(result, "digest") } -func checkMap(t *testing.T, expMap map[string]interface{}, ckMap map[string]interface{}) { - for eKey, eVal := range expMap { - cVal := ckMap[eKey] +func callAndSubscribeWebsocket(ctx context.Context, t *testing.T, + endpoint, method, params string, numberOfMesages uint) ( + messages [][]byte) { + t.Helper() - require.NotNil(t, cVal) - require.IsType(t, eVal, cVal) - switch evt := eVal.(type) { - case map[string]interface{}: - checkMap(t, evt, cVal.(map[string]interface{})) - } + connection, _, err := websocket.DefaultDialer.Dial(endpoint, nil) + require.NoError(t, err, "cannot dial websocket") + defer connection.Close() // in case of failed required assertion + + const maxid = 100000 // otherwise it becomes a float64 + id := rand.Intn(maxid) + messageData := fmt.Sprintf(`{ + "jsonrpc": "2.0", + "method": %q, + "params": [%s], + "id": %d +}`, method, params, id) + err = connection.WriteMessage(websocket.TextMessage, []byte(messageData)) + require.NoError(t, err, "cannot write websocket message") + + // Read subscription id result + var target subscription.ResponseJSON + err = connection.ReadJSON(&target) + require.NoError(t, err, "cannot read websocket message") + assert.Equal(t, float64(id), target.ID, "request id mismatch") + assert.NotZero(t, target.Result, "subscription id is 0") + + for i := uint(0); i < numberOfMesages; i++ { + 
_, data, err := connection.ReadMessage() + require.NoError(t, err, "cannot read websocket message") + + messages = append(messages, data) } + // Close connection + const messageType = websocket.CloseMessage + data := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") + err = connection.WriteMessage(messageType, data) + assert.NoError(t, err, "cannot write close websocket message") + err = connection.Close() + assert.NoError(t, err, "cannot close websocket connection") + + return messages } diff --git a/tests/rpc/rpc_04-offchain_test.go b/tests/rpc/rpc_04-offchain_test.go index b31dba704c..78ec0b4739 100644 --- a/tests/rpc/rpc_04-offchain_test.go +++ b/tests/rpc/rpc_04-offchain_test.go @@ -6,52 +6,56 @@ package rpc import ( "context" "testing" - "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - "github.com/stretchr/testify/require" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" ) func TestOffchainRPC(t *testing.T) { + t.SkipNow() // TODO + if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test offchain_localStorageSet", - method: "offchain_localStorageSet", - skip: true, - }, - { //TODO - description: "test offchain_localStorageGet", - method: "offchain_localStorageGet", - skip: true, - }, - { //TODO - description: "test offchain_localStorageGet", - method: "offchain_localStorageGet", - skip: true, - }, - } + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Core.BABELead = true + tomlConfig.Init.Genesis = genesisPath + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + 
t.Run("offchain_localStorageSet", func(t *testing.T) { + t.Parallel() - time.Sleep(time.Second) // give server a second to start + var response struct{} // TODO - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } + fetchWithTimeout(ctx, t, "offchain_localStorageSet", "", &response) + + // TODO assert response + }) + + t.Run("offchain_localStorageGet", func(t *testing.T) { + t.Parallel() + + var response struct{} // TODO + + fetchWithTimeout(ctx, t, "offchain_localStorageGet", "", &response) + + // TODO assert response + }) + + t.Run("offchain_localStorageGet", func(t *testing.T) { + t.Parallel() + + var response struct{} // TODO + + fetchWithTimeout(ctx, t, "offchain_localStorageGet", "", &response) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + // TODO assert response + }) } diff --git a/tests/rpc/rpc_05-state_test.go b/tests/rpc/rpc_05-state_test.go index 1d22a6a8e0..943c5b79f5 100644 --- a/tests/rpc/rpc_05-state_test.go +++ b/tests/rpc/rpc_05-state_test.go @@ -11,7 +11,11 @@ import ( "github.com/ChainSafe/gossamer/dot/rpc/modules" "github.com/ChainSafe/gossamer/lib/common" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/rpc" "github.com/stretchr/testify/require" ) @@ -21,115 +25,146 @@ func TestStateRPCResponseValidation(t *testing.T) { return } - t.Log("starting gossamer...") + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := 
context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) + getBlockHashCtx, getBlockHashCancel := context.WithTimeout(ctx, time.Second) + blockHash, err := rpc.GetBlockHash(getBlockHashCtx, node.RPCPort(), "") + getBlockHashCancel() require.NoError(t, err) - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) - }() + t.Run("state_call", func(t *testing.T) { + t.Parallel() - time.Sleep(time.Second) // give server a second to start + const params = `["", "","0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21"]` + var response modules.StateCallResponse - ctx := context.Background() + fetchWithTimeout(ctx, t, "state_call", params, &response) - getBlockHashCtx, cancel := context.WithTimeout(ctx, time.Second) - blockHash, err := utils.GetBlockHash(getBlockHashCtx, t, nodes[0].RPCPort, "") - cancel() - require.NoError(t, err) + // TODO assert stateCallResponse + }) - testCases := []*testCase{ - { - description: "Test state_call", - method: "state_call", - params: `["", "","0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21"]`, - expected: modules.StateCallResponse{}, - }, - { //TODO disable skip when implemented - description: "Test state_getKeysPaged", - method: "state_getKeysPaged", - skip: true, - }, - { - description: "Test state_queryStorage", - method: "state_queryStorage", - params: fmt.Sprintf( - `[["0xf2794c22e353e9a839f12faab03a911bf68967d635641a7087e53f2bff1ecad3c6756fee45ec79ead60347fffb770bcdf0ec74da701ab3d6495986fe1ecc3027"], "%s", null]`, //nolint:lll - blockHash), - expected: modules.StorageChangeSetResponse{ - Block: &blockHash, - Changes: [][]string{}, - }, - skip: true, - }, - { - description: "Test valid block hash state_getRuntimeVersion", - method: "state_getRuntimeVersion", - params: fmt.Sprintf(`["%s"]`, blockHash.String()), - 
expected: modules.StateRuntimeVersionResponse{}, - }, - { - description: "Test valid block hash state_getPairs", - method: "state_getPairs", - params: fmt.Sprintf(`["0x", "%s"]`, blockHash.String()), - expected: modules.StatePairResponse{}, - }, - { - description: "Test valid block hash state_getMetadata", - method: "state_getMetadata", - params: fmt.Sprintf(`["%s"]`, blockHash.String()), - expected: modules.StateMetadataResponse(""), - }, - { - description: "Test optional param state_getRuntimeVersion", - method: "state_getRuntimeVersion", - params: `[]`, - expected: modules.StateRuntimeVersionResponse{}, - }, - { - description: "Test optional params hash state_getPairs", - method: "state_getPairs", - params: `["0x"]`, - expected: modules.StatePairResponse{}, - }, - { - description: "Test optional param hash state_getMetadata", - method: "state_getMetadata", - params: `[]`, - expected: modules.StateMetadataResponse(""), - }, - { - description: "Test optional param value as null state_getRuntimeVersion", - method: "state_getRuntimeVersion", - params: `[null]`, - expected: modules.StateRuntimeVersionResponse{}, - }, - { - description: "Test optional param value as null state_getMetadata", - method: "state_getMetadata", - params: `[null]`, - expected: modules.StateMetadataResponse(""), - }, - { - description: "Test optional param value as null state_getPairs", - method: "state_getPairs", - params: `["0x", null]`, - expected: modules.StatePairResponse{}, - }, - } + t.Run("state_getKeysPaged", func(t *testing.T) { + t.Parallel() + t.SkipNow() - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } + var response struct{} // TODO + fetchWithTimeout(ctx, t, "state_getKeysPaged", "", &response) + + // TODO assert response + }) + + t.Run("state_queryStorage", func(t *testing.T) { + 
t.Parallel() + t.SkipNow() // TODO disable skip + + params := fmt.Sprintf( + `[["0xf2794c22e353e9a839f12faab03a911bf68967d635641a7087e53f2bff1ecad3c6756fee45ec79ead60347fffb770bcdf0ec74da701ab3d6495986fe1ecc3027"], "%s", null]`, //nolint:lll + blockHash) + var response modules.StorageChangeSetResponse + + fetchWithTimeout(ctx, t, "state_queryStorage", params, &response) + + // TODO assert response + }) + t.Run("state_getRuntimeVersion", func(t *testing.T) { + t.Parallel() + + params := fmt.Sprintf(`[%q]`, blockHash) + var response modules.StateRuntimeVersionResponse + + fetchWithTimeout(ctx, t, "state_getRuntimeVersion", params, &response) + + // TODO assert response + }) + + t.Run("valid block hash state_getPairs", func(t *testing.T) { + t.Parallel() + + params := fmt.Sprintf(`["0x", "%s"]`, blockHash) + var response modules.StatePairResponse + + fetchWithTimeout(ctx, t, "state_getPairs", params, &response) + + // TODO assert response + }) + + t.Run("valid block hash state_getMetadata", func(t *testing.T) { + t.Parallel() + + params := fmt.Sprintf(`["%s"]`, blockHash) + var response modules.StateMetadataResponse + + fetchWithTimeout(ctx, t, "state_getMetadata", params, &response) + + // TODO assert response + }) + + t.Run("valid block hash state_getRuntimeVersion", func(t *testing.T) { + t.Parallel() + + var response modules.StateRuntimeVersionResponse + + fetchWithTimeout(ctx, t, "state_getRuntimeVersion", "[]", &response) + + // TODO assert response + }) + + t.Run("optional params hash state_getPairs", func(t *testing.T) { + t.Parallel() + + var response modules.StatePairResponse + + fetchWithTimeout(ctx, t, "state_getPairs", `["0x"]`, &response) + + // TODO assert response + }) + + t.Run("optional param hash state_getMetadata", func(t *testing.T) { + t.Parallel() + + var response modules.StateMetadataResponse + + fetchWithTimeout(ctx, t, "state_getMetadata", "[]", &response) + + // TODO assert response + }) + + t.Run("optional param value as null 
state_getRuntimeVersion", func(t *testing.T) { + t.Parallel() + + var response modules.StateRuntimeVersionResponse + + fetchWithTimeout(ctx, t, "state_getRuntimeVersion", "[null]", &response) + + // TODO assert response + }) + + t.Run("optional param value as null state_getMetadata", func(t *testing.T) { + t.Parallel() + + var response modules.StateMetadataResponse + + fetchWithTimeout(ctx, t, "state_getMetadata", "[null]", &response) + + // TODO assert response + }) + + t.Run("optional param value as null state_getPairs", func(t *testing.T) { + t.Parallel() + + var response modules.StatePairResponse + + fetchWithTimeout(ctx, t, "state_getPairs", `["0x", null]`, &response) + + // TODO assert response + }) } func TestStateRPCAPI(t *testing.T) { @@ -138,23 +173,19 @@ func TestStateRPCAPI(t *testing.T) { return } - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) - - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) - }() + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) time.Sleep(5 * time.Second) // Wait for block production - ctx := context.Background() - - getBlockHashCtx, cancel := context.WithTimeout(ctx, time.Second) - blockHash, err := utils.GetBlockHash(getBlockHashCtx, t, nodes[0].RPCPort, "") - cancel() + getBlockHashCtx, getBlockHashCancel := context.WithTimeout(ctx, time.Second) + blockHash, err := rpc.GetBlockHash(getBlockHashCtx, node.RPCPort(), "") + getBlockHashCancel() require.NoError(t, err) const ( @@ -331,10 +362,9 @@ func TestStateRPCAPI(t *testing.T) { // Cases for valid block hash in RPC params for _, test := range testCases { 
t.Run(test.description, func(t *testing.T) { - ctx := context.Background() postRPCCtx, cancel := context.WithTimeout(ctx, time.Second) - endpoint := utils.NewEndpoint(nodes[0].RPCPort) - respBody, err := utils.PostRPC(postRPCCtx, endpoint, test.method, test.params) + endpoint := rpc.NewEndpoint(node.RPCPort()) + respBody, err := rpc.Post(postRPCCtx, endpoint, test.method, test.params) cancel() require.NoError(t, err) @@ -349,15 +379,13 @@ func TestRPCStructParamUnmarshal(t *testing.T) { return } - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDev, utils.ConfigDefault) - require.NoError(t, err) - - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) - }() + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.Default() + tomlConfig.Core.BABELead = true + tomlConfig.Init.Genesis = genesisPath + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) time.Sleep(2 * time.Second) // Wait for block production @@ -367,12 +395,10 @@ func TestRPCStructParamUnmarshal(t *testing.T) { params: `[["0xf2794c22e353e9a839f12faab03a911bf68967d635641a7087e53f2bff1ecad3c6756fee45ec79ead60347fffb770bcdf0ec74da701ab3d6495986fe1ecc3027"],"0xa32c60dee8647b07435ae7583eb35cee606209a595718562dd4a486a07b6de15", null]`, //nolint:lll } t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - - postRPCCtx, cancel := context.WithTimeout(ctx, time.Second) - endpoint := utils.NewEndpoint(nodes[0].RPCPort) - respBody, err := utils.PostRPC(postRPCCtx, endpoint, test.method, test.params) - cancel() + postRPCCtx, postRPCCancel := context.WithTimeout(ctx, time.Second) + endpoint := rpc.NewEndpoint(node.RPCPort()) + respBody, err := rpc.Post(postRPCCtx, endpoint, test.method, test.params) + postRPCCancel() require.NoError(t, err) require.NotContains(t, string(respBody), 
"json: cannot unmarshal") fmt.Println(string(respBody)) diff --git a/tests/rpc/rpc_06-engine_test.go b/tests/rpc/rpc_06-engine_test.go index 4f5a3020ab..53b9c5d2c8 100644 --- a/tests/rpc/rpc_06-engine_test.go +++ b/tests/rpc/rpc_06-engine_test.go @@ -6,47 +6,44 @@ package rpc import ( "context" "testing" - "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - "github.com/stretchr/testify/require" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" ) func TestEngineRPC(t *testing.T) { + t.SkipNow() + if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test engine_createBlock", - method: "engine_createBlock", - skip: true, - }, - { //TODO - description: "test engine_finalizeBlock", - method: "engine_finalizeBlock", - skip: true, - }, - } + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + t.Run("engine_createBlock", func(t *testing.T) { + t.Parallel() - time.Sleep(time.Second) // give server a second to start + var response struct{} // TODO + fetchWithTimeout(ctx, t, "engine_createBlock", "", &response) - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } + // TODO assert response + }) + + t.Run("engine_finalizeBlock", func(t *testing.T) { + t.Parallel() + + var response struct{} // TODO + 
fetchWithTimeout(ctx, t, "engine_finalizeBlock", "", &response) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + // TODO assert response + }) } diff --git a/tests/rpc/rpc_07-payment_test.go b/tests/rpc/rpc_07-payment_test.go index 8639ff417d..f75d0b347c 100644 --- a/tests/rpc/rpc_07-payment_test.go +++ b/tests/rpc/rpc_07-payment_test.go @@ -6,42 +6,36 @@ package rpc import ( "context" "testing" - "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - "github.com/stretchr/testify/require" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" ) func TestPaymentRPC(t *testing.T) { + t.SkipNow() // TODO + if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test payment_queryInfo", - method: "payment_queryInfo", - skip: true, - }, - } + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + t.Run("payment_queryInfo", func(t *testing.T) { + t.Parallel() - time.Sleep(time.Second) // give server a second to start + var response struct{} // TODO - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } + fetchWithTimeout(ctx, t, "payment_queryInfo", "", &response) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, 
errList, 0) + // TODO assert response + }) } diff --git a/tests/rpc/rpc_08-contracts_test.go b/tests/rpc/rpc_08-contracts_test.go index 21b33d1283..0dd42ac095 100644 --- a/tests/rpc/rpc_08-contracts_test.go +++ b/tests/rpc/rpc_08-contracts_test.go @@ -6,47 +6,35 @@ package rpc import ( "context" "testing" - "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - "github.com/stretchr/testify/require" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" ) func TestContractsRPC(t *testing.T) { + t.SkipNow() // TODO + if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test contracts_getStorage", - method: "contracts_getStorage", - skip: true, - }, - { //TODO - description: "test contracts_getStorage", - method: "contracts_getStorage", - skip: true, - }, - } - - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - time.Sleep(time.Second) // give server a second to start + t.Run("contracts_getStorage", func(t *testing.T) { + t.Parallel() - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } + var response struct{} // TODO + fetchWithTimeout(ctx, t, "contracts_getStorage", "", &response) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + // TODO assert 
response + }) } diff --git a/tests/rpc/rpc_09-babe_test.go b/tests/rpc/rpc_09-babe_test.go index 0f8318ffea..e97185c0ab 100644 --- a/tests/rpc/rpc_09-babe_test.go +++ b/tests/rpc/rpc_09-babe_test.go @@ -6,42 +6,36 @@ package rpc import ( "context" "testing" - "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - "github.com/stretchr/testify/require" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" ) func TestBabeRPC(t *testing.T) { + t.SkipNow() // TODO + if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test babe_epochAuthorship", - method: "babe_epochAuthorship", - skip: true, - }, - } + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + t.Run("babe_epochAuthorship", func(t *testing.T) { + t.Parallel() - time.Sleep(time.Second) // give server a second to start + var response struct{} // TODO - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } + fetchWithTimeout(ctx, t, "babe_epochAuthorship", "", &response) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + // TODO assert response + }) } diff --git a/tests/rpc/system_integration_test.go b/tests/rpc/system_integration_test.go index 809e172b28..83d4baff08 100644 --- 
a/tests/rpc/system_integration_test.go +++ b/tests/rpc/system_integration_test.go @@ -5,94 +5,113 @@ package rpc import ( "context" - "fmt" - "reflect" - "strconv" "testing" + "time" + "github.com/ChainSafe/gossamer/dot/config/toml" "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/rpc" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestStableNetworkRPC(t *testing.T) { - if utils.MODE != "stable" { - t.Skip("Integration tests are disabled, going to skip.") + if utils.MODE != "rpc" { + t.Skip("RPC tests are disabled, going to skip.") } - t.Log("Running NetworkAPI tests with HOSTNAME=" + utils.HOSTNAME + " and PORT=" + utils.PORT) - networkSize, err := strconv.Atoi(utils.NETWORK_SIZE) - if err != nil { - networkSize = 0 - } - - testsCases := []*testCase{ - { - description: "test system_health", - method: "system_health", - expected: modules.SystemHealthResponse{ - Peers: networkSize - 1, - IsSyncing: true, - ShouldHavePeers: true, - }, - }, - { - description: "test system_network_state", - method: "system_networkState", - expected: modules.SystemNetworkStateResponse{ - NetworkState: modules.NetworkStateString{ - PeerID: "", - }, - }, + const numberOfNodes = 3 + config := toml.Config{ + RPC: toml.RPCConfig{ + Enabled: true, + Modules: []string{"system", "author", "chain"}, }, - { - description: "test system_peers", - method: "system_peers", - expected: modules.SystemPeersResponse{}, + Core: toml.CoreConfig{ + Roles: types.FullNodeRole, }, } - for _, test := range testsCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() + nodes := make(node.Nodes, numberOfNodes) + for i := range nodes { + nodes[i] = node.New(t, config, node.SetIndex(i)) + } + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + 
+ for _, node := range nodes { + node.InitAndStartTest(ctx, t, cancel) + const timeBetweenStart = 0 * time.Second + timer := time.NewTimer(timeBetweenStart) + select { + case <-timer.C: + case <-ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } - endpoint := fmt.Sprintf("http://%s:%s", utils.HOSTNAME, utils.PORT) - const params = "{}" - respBody, err := utils.PostRPC(ctx, endpoint, test.method, params) - require.NoError(t, err) + for _, node := range nodes { + node := node + t.Run(node.String(), func(t *testing.T) { + t.Parallel() + endpoint := rpc.NewEndpoint(node.RPCPort()) - target := reflect.New(reflect.TypeOf(test.expected)).Interface() - err = utils.DecodeRPC(t, respBody, target) - require.NoError(t, err) + t.Run("system_health", func(t *testing.T) { + t.Parallel() - switch v := target.(type) { - case *modules.SystemHealthResponse: - t.Log("Will assert SystemHealthResponse", "target", target) + var response modules.SystemHealthResponse - require.Equal(t, test.expected.(modules.SystemHealthResponse).IsSyncing, v.IsSyncing) - require.Equal(t, test.expected.(modules.SystemHealthResponse).ShouldHavePeers, v.ShouldHavePeers) - require.GreaterOrEqual(t, v.Peers, test.expected.(modules.SystemHealthResponse).Peers) + fetchWithTimeoutFromEndpoint(t, endpoint, "system_health", "{}", &response) - case *modules.SystemNetworkStateResponse: - t.Log("Will assert SystemNetworkStateResponse", "target", target) + expectedResponse := modules.SystemHealthResponse{ + Peers: numberOfNodes - 1, + IsSyncing: true, + ShouldHavePeers: true, + } + assert.Equal(t, expectedResponse, response) + }) - require.NotNil(t, v.NetworkState) - require.NotNil(t, v.NetworkState.PeerID) + t.Run("system_networkState", func(t *testing.T) { + t.Parallel() - case *modules.SystemPeersResponse: - t.Log("Will assert SystemPeersResponse", "target", target) + var response modules.SystemNetworkStateResponse - require.NotNil(t, *v) - require.GreaterOrEqual(t, len(*v), networkSize-2) + 
fetchWithTimeoutFromEndpoint(t, endpoint, "system_networkState", "{}", &response) - for _, vv := range *v { - require.NotNil(t, vv.PeerID) - require.NotNil(t, vv.Roles) - require.NotNil(t, vv.BestHash) - require.NotNil(t, vv.BestNumber) - } - } + // TODO assert response + }) + + t.Run("system_peers", func(t *testing.T) { + t.Parallel() + + var response modules.SystemPeersResponse + + fetchWithTimeoutFromEndpoint(t, endpoint, "system_peers", "{}", &response) + + assert.GreaterOrEqual(t, len(response), numberOfNodes-2) + + // TODO assert response + }) }) } } + +func fetchWithTimeoutFromEndpoint(t *testing.T, endpoint, method, + params string, target interface{}) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + body, err := rpc.Post(ctx, endpoint, method, params) + cancel() + require.NoError(t, err) + + err = rpc.Decode(body, target) + require.NoError(t, err) +} diff --git a/tests/stress/grandpa_test.go b/tests/stress/grandpa_test.go index a98935619b..ddd52585a8 100644 --- a/tests/stress/grandpa_test.go +++ b/tests/stress/grandpa_test.go @@ -5,31 +5,33 @@ package stress import ( "context" - "os" "testing" "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/retry" "github.com/stretchr/testify/require" ) func TestStress_Grandpa_OneAuthority(t *testing.T) { - numNodes := 1 - nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisDev, utils.ConfigDefault) - require.NoError(t, err) + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.Default() + tomlConfig.Core.BABELead = true + tomlConfig.Init.Genesis = genesisPath + n := node.New(t, tomlConfig) - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + ctx, cancel := context.WithCancel(context.Background()) 
- time.Sleep(time.Second * 10) + n.InitAndStartTest(ctx, t, cancel) + nodes := node.Nodes{n} - ctx := context.Background() + time.Sleep(time.Second * 10) const getChainHeadTimeout = time.Second - compareChainHeadsWithRetry(ctx, t, nodes, getChainHeadTimeout) + compareChainHeadsWithRetry(ctx, nodes, getChainHeadTimeout) const getFinalizedHeadTimeout = time.Second prev, _ := compareFinalizedHeads(ctx, t, nodes, getFinalizedHeadTimeout) @@ -42,53 +44,51 @@ func TestStress_Grandpa_OneAuthority(t *testing.T) { func TestStress_Grandpa_ThreeAuthorities(t *testing.T) { t.Skip() - utils.GenerateGenesisThreeAuth() - defer os.Remove(utils.GenesisThreeAuths) + const numNodes = 3 + + genesisPath := utils.GenerateGenesisAuths(t, numNodes) - numNodes := 3 - nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisThreeAuths, utils.ConfigDefault) - require.NoError(t, err) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + ctx, cancel := context.WithCancel(context.Background()) - ctx := context.Background() + nodes.InitAndStartTest(ctx, t, cancel) - numRounds := 5 - for i := 1; i < numRounds+1; i++ { - const getFinalizedHeadByRoundTimeout = time.Second - fin, err := compareFinalizedHeadsWithRetry(ctx, t, - nodes, uint64(i), getFinalizedHeadByRoundTimeout) + const numRounds uint64 = 5 + for round := uint64(1); round < numRounds+1; round++ { + const retryWait = time.Second + err := retry.UntilNoError(ctx, retryWait, func() (err error) { + const getFinalizedHeadByRoundTimeout = time.Second + _, err = compareFinalizedHeadsByRound(ctx, nodes, round, getFinalizedHeadByRoundTimeout) + return err + }) require.NoError(t, err) - t.Logf("finalised hash in round %d: %s", i, fin) } } func TestStress_Grandpa_SixAuthorities(t *testing.T) { t.Skip() - utils.GenerateGenesisSixAuth(t) - defer 
os.Remove(utils.GenesisSixAuths) - - numNodes := 6 - nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisSixAuths, utils.ConfigDefault) - require.NoError(t, err) - - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() - - ctx := context.Background() - numRounds := 10 - for i := 1; i < numRounds+1; i++ { - const getFinalizedHeadByRoundTimeout = time.Second - fin, err := compareFinalizedHeadsWithRetry(ctx, t, nodes, - uint64(i), getFinalizedHeadByRoundTimeout) + const numNodes = 6 + genesisPath := utils.GenerateGenesisAuths(t, numNodes) + + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) + + const numRounds uint64 = 10 + for round := uint64(1); round < numRounds+1; round++ { + const retryWait = time.Second + err := retry.UntilNoError(ctx, retryWait, func() (err error) { + const getFinalizedHeadByRoundTimeout = time.Second + _, err = compareFinalizedHeadsByRound(ctx, nodes, round, getFinalizedHeadByRoundTimeout) + return err + }) require.NoError(t, err) - t.Logf("finalised hash in round %d: %s", i, fin) } } @@ -97,27 +97,24 @@ func TestStress_Grandpa_NineAuthorities(t *testing.T) { t.Skip("skipping TestStress_Grandpa_NineAuthorities") } - utils.CreateConfigLogGrandpa() - defer os.Remove(utils.ConfigLogGrandpa) - - numNodes := 9 - nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisDefault, utils.ConfigLogGrandpa) - require.NoError(t, err) - - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() - - ctx := context.Background() - - numRounds := 3 - for i := 1; i < numRounds+1; i++ { - const getFinalizedHeadByRoundTimeout = time.Second - fin, err := compareFinalizedHeadsWithRetry(ctx, t, nodes, - uint64(i), getFinalizedHeadByRoundTimeout) + const numNodes = 9 + genesisPath := 
libutils.GetGssmrGenesisRawPathTest(t) + + tomlConfig := config.LogGrandpa() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) + + const numRounds uint64 = 3 + for round := uint64(1); round < numRounds+1; round++ { + const retryWait = time.Second + err := retry.UntilNoError(ctx, retryWait, func() (err error) { + const getFinalizedHeadByRoundTimeout = time.Second + _, err = compareFinalizedHeadsByRound(ctx, nodes, round, getFinalizedHeadByRoundTimeout) + return err + }) require.NoError(t, err) - t.Logf("finalised hash in round %d: %s", i, fin) } } @@ -126,34 +123,29 @@ func TestStress_Grandpa_CatchUp(t *testing.T) { t.Skip("skipping TestStress_Grandpa_CatchUp") } - utils.GenerateGenesisSixAuth(t) - defer os.Remove(utils.GenesisSixAuths) - - numNodes := 6 - nodes, err := utils.InitializeAndStartNodes(t, numNodes-1, utils.GenesisSixAuths, utils.ConfigDefault) - require.NoError(t, err) + const numNodes = 6 + genesisPath := utils.GenerateGenesisAuths(t, numNodes) - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) time.Sleep(time.Second * 70) // let some rounds run - node, err := utils.RunGossamer(t, numNodes-1, - utils.TestDir(t, utils.KeyList[numNodes-1]), - utils.GenesisSixAuths, utils.ConfigDefault, - false, false) - require.NoError(t, err) + node := node.New(t, tomlConfig, node.SetIndex(numNodes-1)) + node.InitAndStartTest(ctx, t, cancel) nodes = append(nodes, node) - ctx := context.Background() - - numRounds := 10 - for i := 1; i < numRounds+1; i++ { - const getFinalizedHeadByRoundTimeout = time.Second - fin, err := compareFinalizedHeadsWithRetry(ctx, t, nodes, 
uint64(i), getFinalizedHeadByRoundTimeout) + const numRounds uint64 = 10 + for round := uint64(1); round < numRounds+1; round++ { + const retryWait = time.Second + err := retry.UntilNoError(ctx, retryWait, func() (err error) { + const getFinalizedHeadByRoundTimeout = time.Second + _, err = compareFinalizedHeadsByRound(ctx, nodes, round, getFinalizedHeadByRoundTimeout) + return err + }) require.NoError(t, err) - t.Logf("finalised hash in round %d: %s", i, fin) } } diff --git a/tests/stress/helpers.go b/tests/stress/helpers.go index 1af4f77701..f47bbe2020 100644 --- a/tests/stress/helpers.go +++ b/tests/stress/helpers.go @@ -7,35 +7,39 @@ import ( "context" "errors" "fmt" + "strings" "testing" "time" "github.com/ChainSafe/gossamer/dot/rpc/modules" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/retry" + "github.com/ChainSafe/gossamer/tests/utils/rpc" "github.com/stretchr/testify/require" ) var ( - maxRetries = 32 - testTimeout = time.Minute * 3 - logger = log.NewFromGlobal(log.AddContext("pkg", "tests/stress")) + logger = log.NewFromGlobal(log.AddContext("pkg", "tests/stress")) ) // compareChainHeads calls getChainHead for each node in the array // it returns a map of chainHead hashes to node key names, and an error if the hashes don't all match -func compareChainHeads(ctx context.Context, t *testing.T, nodes []utils.Node, +func compareChainHeads(ctx context.Context, nodes node.Nodes, getChainHeadTimeout time.Duration) (hashes map[common.Hash][]string, err error) { hashes = make(map[common.Hash][]string) for _, node := range nodes { getChainHeadCtx, cancel := context.WithTimeout(ctx, getChainHeadTimeout) - header := utils.GetChainHead(getChainHeadCtx, t, node.RPCPort) + header, err := rpc.GetChainHead(getChainHeadCtx, node.RPCPort()) cancel() + if err != nil { + return nil, 
fmt.Errorf("cannot get chain head for node %s: %w", node, err) + } - logger.Infof("got header with hash %s from node with key %s", header.Hash(), node.Key) - hashes[header.Hash()] = append(hashes[header.Hash()], node.Key) + logger.Infof("got header with hash %s from node %s", header.Hash(), node) + hashes[header.Hash()] = append(hashes[header.Hash()], node.Key()) } if len(hashes) != 1 { @@ -45,16 +49,14 @@ func compareChainHeads(ctx context.Context, t *testing.T, nodes []utils.Node, return hashes, err } -// compareChainHeadsWithRetry calls compareChainHeads, retrying up to maxRetries times if it errors. -func compareChainHeadsWithRetry(ctx context.Context, t *testing.T, nodes []utils.Node, +// compareChainHeadsWithRetry calls compareChainHeads, +// retrying until the context gets canceled. +func compareChainHeadsWithRetry(ctx context.Context, nodes node.Nodes, getChainHeadTimeout time.Duration) error { - var hashes map[common.Hash][]string - var err error - - for i := 0; i < maxRetries; i++ { - hashes, err = compareChainHeads(ctx, t, nodes, getChainHeadTimeout) + for { + hashes, err := compareChainHeads(ctx, nodes, getChainHeadTimeout) if err == nil { - break + return nil } timer := time.NewTimer(time.Second) @@ -64,87 +66,60 @@ func compareChainHeadsWithRetry(ctx context.Context, t *testing.T, nodes []utils if !timer.Stop() { <-timer.C } - return err // last error + return fmt.Errorf("%w: hashes=%v", err, hashes) // last error } } - - if err != nil { - err = fmt.Errorf("%w: hashes=%v", err, hashes) - } - - return err } +var errBlockHashNotOne = errors.New("expected 1 block hash") + // compareBlocksByNumber calls getBlockByNumber for each node in the array // it returns a map of block hashes to node key names, and an error if the hashes don't all match -func compareBlocksByNumber(ctx context.Context, t *testing.T, nodes []utils.Node, - num string) (hashToKeys map[common.Hash][]string) { - type resultContainer struct { - hash common.Hash - nodeKey string - err error 
- } - results := make(chan resultContainer) - - for _, node := range nodes { - go func(node utils.Node) { - result := resultContainer{ - nodeKey: node.Key, - } - - for { // retry until context gets canceled - result.hash, result.err = utils.GetBlockHash(ctx, t, node.RPCPort, num) - - if err := ctx.Err(); err != nil { - result.err = err - break - } - - if result.err == nil { - break +func compareBlocksByNumber(ctx context.Context, nodes node.Nodes, + num string) (nodeKeys []string, err error) { + blockHashes := make(map[common.Hash]struct{}, 1) + for _, n := range nodes { + const retryWait = time.Second + err := retry.UntilOK(ctx, retryWait, func() (ok bool, err error) { + hash, err := rpc.GetBlockHash(ctx, n.RPCPort(), num) + if err != nil { + const blockDoesNotExistString = "cannot find node with number greater than highest in blocktree" + if strings.Contains(err.Error(), blockDoesNotExistString) { + return false, nil // retry after retryWait has elapsed. } + return false, err // stop retrying } - results <- result - }(node) - } - - var err error - hashToKeys = make(map[common.Hash][]string, len(nodes)) - for range nodes { - result := <-results + blockHashes[hash] = struct{}{} + nodeKeys = append(nodeKeys, n.Key()) + return true, nil + }) if err != nil { - continue // one failed, we don't care anymore - } - - if result.err != nil { - err = result.err - continue + return nil, fmt.Errorf("for node %s and block number %s: %w", n, num, err) } - - hashToKeys[result.hash] = append(hashToKeys[result.hash], result.nodeKey) } - require.NoError(t, err) - require.Lenf(t, hashToKeys, 1, - "expected 1 block found for number %s but got %d block(s)", - num, len(hashToKeys)) + if len(blockHashes) != 1 { + return nil, fmt.Errorf("%w: but got %d block hashes for block number %s", + errBlockHashNotOne, len(blockHashes), num) + } - return hashToKeys + return nodeKeys, nil } // compareFinalizedHeads calls getFinalizedHeadByRound for each node in the array // it returns a map of 
finalisedHead hashes to node key names, and an error if the hashes don't all match -func compareFinalizedHeads(ctx context.Context, t *testing.T, nodes []utils.Node, +func compareFinalizedHeads(ctx context.Context, t *testing.T, nodes node.Nodes, getFinalizedHeadTimeout time.Duration) (hashes map[common.Hash][]string, err error) { hashes = make(map[common.Hash][]string) for _, node := range nodes { getFinalizedHeadCtx, cancel := context.WithTimeout(ctx, getFinalizedHeadTimeout) - hash := utils.GetFinalizedHead(getFinalizedHeadCtx, t, node.RPCPort) + hash, err := rpc.GetFinalizedHead(getFinalizedHeadCtx, node.RPCPort()) cancel() + require.NoError(t, err) - logger.Infof("got finalised head with hash %s from node with key %s", hash, node.Key) - hashes[hash] = append(hashes[hash], node.Key) + logger.Infof("got finalised head with hash %s from node %s", hash, node) + hashes[hash] = append(hashes[hash], node.Key()) } if len(hashes) == 0 { @@ -160,21 +135,21 @@ func compareFinalizedHeads(ctx context.Context, t *testing.T, nodes []utils.Node // compareFinalizedHeadsByRound calls getFinalizedHeadByRound for each node in the array // it returns a map of finalisedHead hashes to node key names, and an error if the hashes don't all match -func compareFinalizedHeadsByRound(ctx context.Context, t *testing.T, nodes []utils.Node, +func compareFinalizedHeadsByRound(ctx context.Context, nodes node.Nodes, round uint64, getFinalizedHeadByRoundTimeout time.Duration) ( hashes map[common.Hash][]string, err error) { hashes = make(map[common.Hash][]string) for _, node := range nodes { getFinalizedHeadByRoundCtx, cancel := context.WithTimeout(ctx, getFinalizedHeadByRoundTimeout) - hash, err := utils.GetFinalizedHeadByRound(getFinalizedHeadByRoundCtx, t, node.RPCPort, round) + hash, err := rpc.GetFinalizedHeadByRound(getFinalizedHeadByRoundCtx, node.RPCPort(), round) cancel() if err != nil { - return nil, err + return nil, fmt.Errorf("cannot get finalized head for round %d: %w", round, err) } 
- logger.Infof("got finalised head with hash %s from node with key %s at round %d", hash, node.Key, round) - hashes[hash] = append(hashes[hash], node.Key) + logger.Infof("got finalised head with hash %s from node %s at round %d", hash, node, round) + hashes[hash] = append(hashes[hash], node.Key()) } if len(hashes) == 0 { @@ -188,47 +163,15 @@ func compareFinalizedHeadsByRound(ctx context.Context, t *testing.T, nodes []uti return hashes, err } -// compareFinalizedHeadsWithRetry calls compareFinalizedHeadsByRound, retrying up to maxRetries times if it errors. -// it returns the finalised hash if it succeeds -func compareFinalizedHeadsWithRetry(ctx context.Context, t *testing.T, - nodes []utils.Node, round uint64, - getFinalizedHeadByRoundTimeout time.Duration) ( - hash common.Hash, err error) { - var hashes map[common.Hash][]string - - for i := 0; i < maxRetries; i++ { - hashes, err = compareFinalizedHeadsByRound(ctx, t, nodes, round, getFinalizedHeadByRoundTimeout) - if err == nil { - break - } - - if errors.Is(err, errFinalizedBlockMismatch) { - return common.Hash{}, fmt.Errorf("%w: round=%d hashes=%v", err, round, hashes) - } - - time.Sleep(3 * time.Second) - } - - if err != nil { - return common.Hash{}, fmt.Errorf("%w: round=%d hashes=%v", err, round, hashes) - } - - for h := range hashes { - return h, nil - } - - return common.Hash{}, nil -} - -func getPendingExtrinsics(ctx context.Context, t *testing.T, node utils.Node) []string { - endpoint := utils.NewEndpoint(node.RPCPort) - method := utils.AuthorPendingExtrinsics +func getPendingExtrinsics(ctx context.Context, t *testing.T, node node.Node) []string { + endpoint := rpc.NewEndpoint(node.RPCPort()) + const method = "author_pendingExtrinsics" const params = "[]" - respBody, err := utils.PostRPC(ctx, endpoint, method, params) + respBody, err := rpc.Post(ctx, endpoint, method, params) require.NoError(t, err) exts := new(modules.PendingExtrinsicsResponse) - err = utils.DecodeRPC(t, respBody, exts) + err = 
rpc.Decode(respBody, exts) require.NoError(t, err) return *exts diff --git a/tests/stress/network_test.go b/tests/stress/network_test.go index 3f1302b446..5398aa6147 100644 --- a/tests/stress/network_test.go +++ b/tests/stress/network_test.go @@ -8,7 +8,11 @@ import ( "testing" "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/rpc" "github.com/ChainSafe/gossamer/internal/log" "github.com/stretchr/testify/require" @@ -16,25 +20,24 @@ import ( func TestNetwork_MaxPeers(t *testing.T) { numNodes := 9 // 9 block producers + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) utils.Logger.Patch(log.SetLevel(log.Info)) - nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) - - defer func() { - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) - }() + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) // wait for nodes to connect time.Sleep(time.Second * 10) - ctx := context.Background() - for i, node := range nodes { const getPeersTimeout = time.Second - getPeersCtx, cancel := context.WithTimeout(ctx, getPeersTimeout) - peers := utils.GetPeers(getPeersCtx, t, node.RPCPort) - cancel() + getPeersCtx, getPeersCancel := context.WithTimeout(ctx, getPeersTimeout) + peers, err := rpc.GetPeers(getPeersCtx, node.RPCPort()) + getPeersCancel() + + require.NoError(t, err) t.Logf("node %d: peer count=%d", i, len(peers)) require.LessOrEqual(t, len(peers), 5) diff --git a/tests/stress/stress_test.go b/tests/stress/stress_test.go index 869937dadc..9b0fe52abd 100644 --- a/tests/stress/stress_test.go +++ b/tests/stress/stress_test.go 
@@ -9,7 +9,7 @@ import ( "math/big" "math/rand" "os" - "strconv" + "path/filepath" "strings" "testing" "time" @@ -24,7 +24,11 @@ import ( gosstypes "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/rpc" ) func TestMain(m *testing.M) { @@ -33,22 +37,6 @@ func TestMain(m *testing.M) { return } - if utils.HOSTNAME == "" { - utils.HOSTNAME = "localhost" - } - - utils.CreateConfigNoBabe() - utils.CreateDefaultConfig() - utils.CreateConfigNoGrandpa() - utils.CreateConfigNotAuthority() - - defer func() { - os.Remove(utils.ConfigNoBABE) - os.Remove(utils.ConfigDefault) - os.Remove(utils.ConfigNoGrandpa) - os.Remove(utils.ConfigNotAuthority) - }() - logLvl := log.Info if utils.LOGLEVEL != "" { var err error @@ -61,127 +49,130 @@ func TestMain(m *testing.M) { utils.Logger.Patch(log.SetLevel(logLvl)) logger.Patch(log.SetLevel(logLvl)) - utils.GenerateGenesisThreeAuth() - // Start all tests code := m.Run() os.Exit(code) } func TestRestartNode(t *testing.T) { - numNodes := 1 - nodes, err := utils.InitNodes(numNodes, utils.ConfigDefault) - require.NoError(t, err) + const numNodes = 1 + defaultConfig := config.Default() + nodes := node.MakeNodes(t, numNodes, defaultConfig) - err = utils.StartNodes(t, nodes) + err := nodes.Init(context.Background()) require.NoError(t, err) - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) + ctx, cancel := context.WithCancel(context.Background()) - err = utils.StartNodes(t, nodes) - require.NoError(t, err) + runtimeErrors, startErr := nodes.Start(ctx) + if startErr != nil { + stopNodes(cancel, runtimeErrors) + t.Fatalf("failed to start nodes: %s", startErr) + } - errList = utils.StopNodes(t, nodes) - 
require.Len(t, errList, 0) -} + stopNodes(cancel, runtimeErrors) -func TestSync_SingleBlockProducer(t *testing.T) { - numNodes := 4 - utils.Logger.Patch(log.SetLevel(log.Info)) + ctx, cancel = context.WithCancel(context.Background()) - // start block producing node first - node, err := utils.RunGossamer(t, numNodes-1, - utils.TestDir(t, utils.KeyList[numNodes-1]), - utils.GenesisDev, utils.ConfigNoGrandpa, - false, true) - require.NoError(t, err) + runtimeErrors, startErr = nodes.Start(ctx) + if startErr != nil { + stopNodes(cancel, runtimeErrors) + t.Fatalf("failed to start nodes: %s", startErr) + } - // wait and start rest of nodes - if they all start at the same time the first round usually doesn't complete since - // all nodes vote for different blocks. - time.Sleep(time.Second * 15) - nodes, err := utils.InitializeAndStartNodes(t, numNodes-1, utils.GenesisDev, utils.ConfigNotAuthority) - require.NoError(t, err) - nodes = append(nodes, node) + stopNodes(cancel, runtimeErrors) +} - time.Sleep(time.Second * 30) +func stopNodes(cancel context.CancelFunc, runtimeErrors []<-chan error) { + cancel() + for _, runtimeError := range runtimeErrors { + <-runtimeError + } +} - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() +func TestSync_SingleBlockProducer(t *testing.T) { + const numNodes = 4 + genesisPath := libutils.GetDevGenesisSpecPathTest(t) - numCmps := 10 - ctx := context.Background() + configNoGrandpa := config.NoGrandpa() + configNoGrandpa.Init.Genesis = genesisPath + configNoGrandpa.Core.BABELead = true + babeLeadNode := node.New(t, configNoGrandpa, node.SetIndex(numNodes-1)) - for i := 0; i < numCmps; i++ { - time.Sleep(3 * time.Second) - t.Log("comparing...", i) + configNoAuthority := config.NotAuthority() + configNoAuthority.Init.Genesis = genesisPath + noAuthorityNodes := node.MakeNodes(t, numNodes-1, configNoAuthority) - const comparisonTimeout = 5 * time.Second - compareCtx, cancel := context.WithTimeout(ctx, 
comparisonTimeout) + nodes := make(node.Nodes, 0, numNodes) + nodes = append(nodes, babeLeadNode) + nodes = append(nodes, noAuthorityNodes...) - hashes := compareBlocksByNumber(compareCtx, t, nodes, strconv.Itoa(i)) + const testTimeout = 20 * time.Minute + ctx, cancel := context.WithTimeout(context.Background(), testTimeout) - cancel() + nodes.InitAndStartTest(ctx, t, cancel) - // there will only be one key in the mapping - for _, nodesWithHash := range hashes { - // allow 1 node to potentially not have synced. this is due to the need to increase max peer count - require.GreaterOrEqual(t, len(nodesWithHash), numNodes-1) - } + const blockNumbers = 10 + for blockNumber := 0; blockNumber < blockNumbers; blockNumber++ { + t.Logf("comparing block number %d...", blockNumber) + + nodeKeys, err := compareBlocksByNumber(ctx, nodes, fmt.Sprint(blockNumber)) + require.NoError(t, err) + require.Equal(t, len(nodeKeys), numNodes) } } func TestSync_Basic(t *testing.T) { - nodes, err := utils.InitializeAndStartNodes(t, 3, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + config := config.Default() + config.Init.Genesis = genesisPath + const numNodes = 3 + nodes := node.MakeNodes(t, numNodes, config) + + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) - ctx := context.Background() const getChainHeadTimeout = time.Second - err = compareChainHeadsWithRetry(ctx, t, nodes, getChainHeadTimeout) + err := compareChainHeadsWithRetry(ctx, nodes, getChainHeadTimeout) require.NoError(t, err) } func TestSync_MultipleEpoch(t *testing.T) { t.Skip("skipping TestSync_MultipleEpoch") + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) numNodes := 3 utils.Logger.Patch(log.SetLevel(log.Info)) // wait and start rest of nodes - if they all start at the same time the first round usually 
doesn't complete since - nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) time.Sleep(time.Second * 10) - ctx := context.Background() - slotDurationCtx, cancel := context.WithTimeout(ctx, time.Second) - slotDuration := utils.SlotDuration(slotDurationCtx, t, nodes[0].RPCPort) + slotDuration, err := rpc.SlotDuration(slotDurationCtx, nodes[0].RPCPort()) cancel() + require.NoError(t, err) epochLengthCtx, cancel := context.WithTimeout(ctx, time.Second) - epochLength := utils.EpochLength(epochLengthCtx, t, nodes[0].RPCPort) + epochLength, err := rpc.EpochLength(epochLengthCtx, nodes[0].RPCPort()) cancel() + require.NoError(t, err) // Wait for epoch to pass time.Sleep(time.Duration(uint64(slotDuration.Nanoseconds()) * epochLength)) // Just checking that everythings operating as expected getChainHeadCtx, cancel := context.WithTimeout(ctx, time.Second) - header := utils.GetChainHead(getChainHeadCtx, t, nodes[0].RPCPort) + header, err := rpc.GetChainHead(getChainHeadCtx, nodes[0].RPCPort()) cancel() + require.NoError(t, err) currentHeight := header.Number for i := uint(0); i < currentHeight; i++ { @@ -190,7 +181,8 @@ func TestSync_MultipleEpoch(t *testing.T) { const compareTimeout = 5 * time.Second compareCtx, cancel := context.WithTimeout(ctx, compareTimeout) - _ = compareBlocksByNumber(compareCtx, t, nodes, strconv.Itoa(int(i))) + _, err := compareBlocksByNumber(compareCtx, nodes, fmt.Sprint(i)) + require.NoError(t, err) cancel() } @@ -201,26 +193,27 @@ func TestSync_SingleSyncingNode(t *testing.T) { t.Skip("skipping TestSync_SingleSyncingNode") utils.Logger.Patch(log.SetLevel(log.Info)) + 
ctx, cancel := context.WithCancel(context.Background()) + // start block producing node - alice, err := utils.RunGossamer(t, 0, - utils.TestDir(t, utils.KeyList[0]), utils.GenesisDev, - utils.ConfigDefault, false, true) - require.NoError(t, err) + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + blockProducingConfig := config.Default() + blockProducingConfig.Init.Genesis = genesisPath + blockProducingConfig.Core.BABELead = true + alice := node.New(t, blockProducingConfig, node.SetIndex(0)) + + alice.InitAndStartTest(ctx, t, cancel) + time.Sleep(time.Second * 15) // start syncing node - bob, err := utils.RunGossamer(t, 1, - utils.TestDir(t, utils.KeyList[1]), utils.GenesisDev, - utils.ConfigNoBABE, false, false) - require.NoError(t, err) + syncingNodeConfig := config.NoBabe() + syncingNodeConfig.Init.Genesis = genesisPath + bob := node.New(t, syncingNodeConfig, node.SetIndex(1)) - nodes := []utils.Node{alice, bob} - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + bob.InitAndStartTest(ctx, t, cancel) - ctx := context.Background() + nodes := node.Nodes{alice, bob} numCmps := 100 for i := 0; i < numCmps; i++ { @@ -229,7 +222,8 @@ func TestSync_SingleSyncingNode(t *testing.T) { const compareTimeout = 5 * time.Second compareCtx, cancel := context.WithTimeout(ctx, compareTimeout) - _ = compareBlocksByNumber(compareCtx, t, nodes, strconv.Itoa(i)) + _, err := compareBlocksByNumber(compareCtx, nodes, fmt.Sprint(i)) + require.NoError(t, err) cancel() } @@ -240,18 +234,20 @@ func TestSync_Bench(t *testing.T) { const numBlocks uint = 64 // start block producing node - alice, err := utils.RunGossamer(t, 0, - utils.TestDir(t, utils.KeyList[1]), - utils.GenesisDev, utils.ConfigNoGrandpa, - false, true) - require.NoError(t, err) + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + configNoGrandpa := config.NoGrandpa() + configNoGrandpa.Init.Genesis = genesisPath + configNoGrandpa.Core.BABELead = true - ctx := context.Background() 
+ alice := node.New(t, configNoGrandpa, node.SetIndex(0)) + + ctx, cancel := context.WithCancel(context.Background()) + alice.InitAndStartTest(ctx, t, cancel) for { - getChainHeadCtx, cancel := context.WithTimeout(ctx, time.Second) - header, err := utils.GetChainHeadWithError(getChainHeadCtx, t, alice.RPCPort) - cancel() + getChainHeadCtx, getChainCancel := context.WithTimeout(ctx, time.Second) + header, err := rpc.GetChainHead(getChainHeadCtx, alice.RPCPort()) + getChainCancel() if err != nil { continue } @@ -263,47 +259,46 @@ func TestSync_Bench(t *testing.T) { time.Sleep(3 * time.Second) } - pauseBabeCtx, cancel := context.WithTimeout(ctx, time.Second) - err = utils.PauseBABE(pauseBabeCtx, alice.RPCPort) - cancel() + pauseBabeCtx, pauseBabeCancel := context.WithTimeout(ctx, time.Second) + err := rpc.PauseBABE(pauseBabeCtx, alice.RPCPort()) + pauseBabeCancel() require.NoError(t, err) t.Log("BABE paused") // start syncing node - bob, err := utils.RunGossamer(t, 1, - utils.TestDir(t, utils.KeyList[0]), utils.GenesisDev, - utils.ConfigNotAuthority, false, true) + configNoAuthority := config.NotAuthority() + configNoAuthority.Init.Genesis = genesisPath + configNoAuthority.Core.BABELead = true + bob := node.New(t, configNoAuthority, node.SetIndex(1)) + + bob.InitAndStartTest(ctx, t, cancel) require.NoError(t, err) - nodes := []utils.Node{alice, bob} - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + nodes := node.Nodes{alice, bob} // see how long it takes to sync to block numBlocks last := numBlocks start := time.Now() var end time.Time + const retryWait = time.Second + const syncWaitTimeout = 3 * time.Minute + syncWaitCtx, syncWaitCancel := context.WithTimeout(ctx, syncWaitTimeout) for { - if time.Since(start) >= testTimeout { - t.Fatal("did not sync") - } - - getChainHeadCtx, getChainHeadCancel := context.WithTimeout(ctx, time.Second) - head, err := utils.GetChainHeadWithError(getChainHeadCtx, t, bob.RPCPort) + 
getChainHeadCtx, getChainHeadCancel := context.WithTimeout(syncWaitCtx, time.Second) + head, err := rpc.GetChainHead(getChainHeadCtx, bob.RPCPort()) getChainHeadCancel() - if err != nil { - continue - } - - if head.Number >= last { + if err == nil && head.Number >= last { end = time.Now() + syncWaitCancel() break } + + retryWaitCtx, retryWaitCancel := context.WithTimeout(syncWaitCtx, retryWait) + <-retryWaitCtx.Done() + retryWaitCancel() } maxTime := time.Second * 85 @@ -319,11 +314,12 @@ func TestSync_Bench(t *testing.T) { t.Log("comparing block...", numBlocks) const compareTimeout = 5 * time.Second - compareCtx, cancel := context.WithTimeout(ctx, compareTimeout) + compareCtx, pauseBabeCancel := context.WithTimeout(ctx, compareTimeout) - _ = compareBlocksByNumber(compareCtx, t, nodes, fmt.Sprint(numBlocks)) + _, err = compareBlocksByNumber(compareCtx, nodes, fmt.Sprint(numBlocks)) + require.NoError(t, err) - cancel() + pauseBabeCancel() time.Sleep(time.Second * 3) } @@ -334,98 +330,135 @@ func TestSync_Restart(t *testing.T) { numNodes := 3 utils.Logger.Patch(log.SetLevel(log.Info)) + mainCtx, mainCancel := context.WithCancel(context.Background()) + + nodeCtxs := make([]context.Context, numNodes) + nodeCancels := make([]context.CancelFunc, numNodes) + nodeWaitErrs := make([]<-chan error, numNodes) + for i := 0; i < numNodes; i++ { + nodeCtxs[i], nodeCancels[i] = context.WithCancel(mainCtx) + } + + // Note we assume no runtime error in this test otherwise + // it gets rather complex to handle runtime errors and stop + // the test. 
+ // start block producing node first - node, err := utils.RunGossamer(t, numNodes-1, - utils.TestDir(t, utils.KeyList[numNodes-1]), - utils.GenesisDefault, utils.ConfigDefault, - false, true) + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + blockProducingConfig := config.Default() + blockProducingConfig.Init.Genesis = genesisPath + blockProducingConfig.Core.BABELead = true + producingNode := node.New(t, blockProducingConfig, node.SetIndex(numNodes-1)) + + err := producingNode.Init(mainCtx) + require.NoError(t, err) + + nodeWaitErrs[0], err = producingNode.StartAndWait(nodeCtxs[0]) + t.Cleanup(func() { + // note we need to use indexes since these + // slice elements might change. + nodeCancels[0]() + <-nodeWaitErrs[0] + }) require.NoError(t, err) // wait and start rest of nodes time.Sleep(time.Second * 5) - nodes, err := utils.InitializeAndStartNodes(t, numNodes-1, utils.GenesisDefault, utils.ConfigNoBABE) - require.NoError(t, err) - nodes = append(nodes, node) + noBabeConfig := config.NoBabe() + noBabeConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes-1, noBabeConfig) + for i, node := range nodes { + err := node.Init(mainCtx) + require.NoError(t, err) - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + nodeWaitErrs[i+1], err = node.StartAndWait(nodeCtxs[i+1]) + t.Cleanup(func() { + // note we need to use indexes since these + // slice elements might change. 
+ nodeCancels[i+1]() + <-nodeWaitErrs[i+1] + }) + require.NoError(t, err) + } - done := make(chan struct{}) + nodes = append(nodes, producingNode) // randomly turn off and on nodes + onOffRoutineDone := make(chan struct{}) go func() { + defer close(onOffRoutineDone) for { select { case <-time.After(time.Second * 10): idx := rand.Intn(numNodes) - errList := utils.StopNodes(t, nodes[idx:idx+1]) - require.Len(t, errList, 0) - - time.Sleep(time.Second) - - err = utils.StartNodes(t, nodes[idx:idx+1]) - require.NoError(t, err) - case <-done: + // Stop node + nodeCancels[idx]() + <-nodeWaitErrs[idx] + + // Start node + nodeCtxs[idx], nodeCancels[idx] = context.WithCancel(mainCtx) + nodeWaitErrs[idx], err = nodes[idx].Start(nodeCtxs[idx]) + if err != nil { + assert.NoError(t, err) // cannot use require.NoError from a goroutine + mainCancel() // stop all operations + return + } + case <-mainCtx.Done(): return } } }() - ctx := context.Background() - numCmps := 12 for i := 0; i < numCmps; i++ { t.Log("comparing...", i) const compareTimeout = 5 * time.Second - compareCtx, cancel := context.WithTimeout(ctx, compareTimeout) + compareCtx, cancel := context.WithTimeout(mainCtx, compareTimeout) - _ = compareBlocksByNumber(compareCtx, t, nodes, strconv.Itoa(i)) + _, err := compareBlocksByNumber(compareCtx, nodes, fmt.Sprint(i)) + require.NoError(t, err) cancel() time.Sleep(time.Second * 5) } - close(done) + + mainCancel() + <-onOffRoutineDone } func TestSync_SubmitExtrinsic(t *testing.T) { t.Skip() - t.Log("starting gossamer...") + + ctx, cancel := context.WithCancel(context.Background()) // index of node to submit tx to idx := 0 // TODO: randomise this // start block producing node first - node, err := utils.RunGossamer(t, 0, - utils.TestDir(t, utils.KeyList[0]), utils.GenesisDev, - utils.ConfigNoGrandpa, false, true) - require.NoError(t, err) - nodes := []utils.Node{node} + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + configNoGrandpa := config.NoGrandpa() + 
configNoGrandpa.Init.Genesis = genesisPath + configNoGrandpa.Core.BABELead = true + producingNode := node.New(t, configNoGrandpa, node.SetIndex(0)) + producingNode.InitAndStartTest(ctx, t, cancel) + + nodes := node.Nodes{producingNode} + + configNoAuthority := config.NotAuthority() // Start rest of nodes - node, err = utils.RunGossamer(t, 1, - utils.TestDir(t, utils.KeyList[1]), utils.GenesisDev, - utils.ConfigNotAuthority, false, false) - require.NoError(t, err) - nodes = append(nodes, node) - node, err = utils.RunGossamer(t, 2, - utils.TestDir(t, utils.KeyList[2]), utils.GenesisDev, - utils.ConfigNotAuthority, false, false) - require.NoError(t, err) - nodes = append(nodes, node) + configNoAuthority.Init.Genesis = genesisPath + n := node.New(t, configNoAuthority, node.SetIndex(1)) + nodes = append(nodes, n) - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + n = node.New(t, configNoAuthority, node.SetIndex(2)) + nodes = append(nodes, n) // send tx to non-authority node - api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("http://localhost:%s", nodes[idx].RPCPort)) + api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("http://localhost:%s", nodes[idx].RPCPort())) require.NoError(t, err) meta, err := api.RPC.State.GetMetadataLatest() @@ -468,12 +501,11 @@ func TestSync_SubmitExtrinsic(t *testing.T) { extEnc, err := types.EncodeToHexString(ext) require.NoError(t, err) - ctx := context.Background() - // get starting header so that we can lookup blocks by number later getChainHeadCtx, getChainHeadCancel := context.WithTimeout(ctx, time.Second) - prevHeader := utils.GetChainHead(getChainHeadCtx, t, nodes[idx].RPCPort) + prevHeader, err := rpc.GetChainHead(getChainHeadCtx, nodes[idx].RPCPort()) getChainHeadCancel() + require.NoError(t, err) // Send the extrinsic hash, err := api.RPC.Author.SubmitExtrinsic(ext) @@ -483,21 +515,33 @@ func TestSync_SubmitExtrinsic(t *testing.T) { time.Sleep(time.Second 
* 20) // wait until there's no more pending extrinsics - for i := 0; i < maxRetries; i++ { - getPendingExtsCtx, getPendingExtsCancel := context.WithTimeout(ctx, time.Second) + const waitNoExtTimeout = 30 * time.Second + waitNoExtCtx, waitNoExtCancel := context.WithTimeout(ctx, waitNoExtTimeout) + for { + getPendingExtsCtx, getPendingExtsCancel := context.WithTimeout(waitNoExtCtx, time.Second) exts := getPendingExtrinsics(getPendingExtsCtx, t, nodes[idx]) getPendingExtsCancel() if len(exts) == 0 { + waitNoExtCancel() break } - time.Sleep(time.Second) + timer := time.NewTimer(time.Second) + select { + case <-timer.C: + case <-waitNoExtCtx.Done(): + if !timer.Stop() { + <-timer.C + } + require.NoError(t, waitNoExtCtx.Err()) + } } - getChainHeadCtx, cancel := context.WithTimeout(ctx, time.Second) - header := utils.GetChainHead(getChainHeadCtx, t, nodes[idx].RPCPort) - cancel() + getChainHeadCtx, getChainHeadCancel = context.WithTimeout(ctx, time.Second) + header, err := rpc.GetChainHead(getChainHeadCtx, nodes[idx].RPCPort()) + getChainHeadCancel() + require.NoError(t, err) // search from child -> parent blocks for extrinsic var ( @@ -505,18 +549,22 @@ func TestSync_SubmitExtrinsic(t *testing.T) { extInBlock uint ) - for i := 0; i < maxRetries; i++ { - getBlockCtx, getBlockCancel := context.WithTimeout(ctx, time.Second) - block := utils.GetBlock(getBlockCtx, t, nodes[idx].RPCPort, header.ParentHash) + const extrinsicSearchTimeout = 10 * time.Second + extrinsicSearchCtx, extrinsicSearchCancel := context.WithTimeout(ctx, extrinsicSearchTimeout) + for { + getBlockCtx, getBlockCancel := context.WithTimeout(extrinsicSearchCtx, time.Second) + block, err := rpc.GetBlock(getBlockCtx, nodes[idx].RPCPort(), header.ParentHash) getBlockCancel() + require.NoError(t, err) + if block == nil { // couldn't get block, increment retry counter continue } header = &block.Header - logger.Debugf("got block with header %s and body %v from node with key %s", header, block.Body, nodes[idx].Key) 
+ logger.Debugf("got block with header %s and body %v from node with key %s", header, block.Body, nodes[idx].Key()) if block.Body != nil { resExts = block.Body @@ -524,6 +572,7 @@ func TestSync_SubmitExtrinsic(t *testing.T) { logger.Debugf("extrinsics: %v", resExts) if len(resExts) >= 2 { extInBlock = block.Header.Number + extrinsicSearchCancel() break } } @@ -546,32 +595,25 @@ func TestSync_SubmitExtrinsic(t *testing.T) { const compareTimeout = 5 * time.Second compareCtx, cancel := context.WithTimeout(ctx, compareTimeout) - _ = compareBlocksByNumber(compareCtx, t, nodes, fmt.Sprint(extInBlock)) + _, err = compareBlocksByNumber(compareCtx, nodes, fmt.Sprint(extInBlock)) + require.NoError(t, err) cancel() } func Test_SubmitAndWatchExtrinsic(t *testing.T) { - t.Log("starting gossamer...") - - // index of node to submit tx to - idx := 0 // TODO: randomise this - // start block producing node first - node, err := utils.RunGossamer(t, 0, - utils.TestDir(t, utils.KeyList[0]), - utils.GenesisDev, utils.ConfigNoGrandpa, true, true) - require.NoError(t, err) - nodes := []utils.Node{node} - - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.NoGrandpa() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.RPC.WS = true + tomlConfig.Core.BABELead = true + producingNode := node.New(t, tomlConfig, node.SetIndex(0)) + ctx, cancel := context.WithCancel(context.Background()) + producingNode.InitAndStartTest(ctx, t, cancel) // send tx to non-authority node - api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("ws://localhost:%s", nodes[idx].WSPort)) + api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("ws://localhost:%s", producingNode.WSPort())) require.NoError(t, err) meta, err := api.RPC.State.GetMetadataLatest() @@ -716,6 +758,13 @@ func TestSync_SubmitExtrinsicLoad(t *testing.T) { } func TestStress_SecondarySlotProduction(t 
*testing.T) { + rootPath, err := libutils.GetProjectRootPath() + require.NoError(t, err) + + // genesis_two_auths_secondaryvrf_0_9_10.json has 2 authorities and block production by + // secondary VRF slots enabled + genesisTwoAuthsSecondaryVRF0_9_10 := filepath.Join(rootPath, "tests/utils/genesis_two_auths_secondaryvrf_0_9_10.json") + testcases := []struct { description string genesis string @@ -723,35 +772,38 @@ func TestStress_SecondarySlotProduction(t *testing.T) { }{ { description: "with secondary vrf slots enabled", - genesis: utils.GenesisTwoAuthsSecondaryVRF0_9_10, + genesis: genesisTwoAuthsSecondaryVRF0_9_10, allowedSlots: gosstypes.PrimaryAndSecondaryVRFSlots, }, } const numNodes = 2 for _, c := range testcases { t.Run(c.description, func(t *testing.T) { - nodes, err := utils.InitializeAndStartNodes(t, numNodes, c.genesis, utils.ConfigDefault) - require.NoError(t, err) - defer utils.StopNodes(t, nodes) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = c.genesis + + nodes := node.MakeNodes(t, numNodes, tomlConfig) + + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) primaryCount := 0 secondaryPlainCount := 0 secondaryVRFCount := 0 - ctx := context.Background() - for i := 1; i < 10; i++ { fmt.Printf("%d iteration\n", i) getBlockHashCtx, cancel := context.WithTimeout(ctx, time.Second) - hash, err := utils.GetBlockHash(getBlockHashCtx, t, nodes[0].RPCPort, fmt.Sprintf("%d", i)) + hash, err := rpc.GetBlockHash(getBlockHashCtx, nodes[0].RPCPort(), fmt.Sprint(i)) cancel() require.NoError(t, err) getBlockCtx, cancel := context.WithTimeout(ctx, time.Second) - block := utils.GetBlock(getBlockCtx, t, nodes[0].RPCPort, hash) + block, err := rpc.GetBlock(getBlockCtx, nodes[0].RPCPort(), hash) cancel() + require.NoError(t, err) header := block.Header diff --git a/tests/sync/sync_test.go b/tests/sync/sync_test.go index ff9512b8f2..69880ef743 100644 --- a/tests/sync/sync_test.go +++ b/tests/sync/sync_test.go @@ 
-5,18 +5,14 @@ package sync import ( "context" - "fmt" - "log" - "os" "testing" "time" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" "github.com/stretchr/testify/require" ) -var framework utils.Framework - type testRPCCall struct { nodeIdx int method string @@ -46,27 +42,33 @@ var checks = []checkDBCall{ {call1idx: 3, call2idx: 5, field: "parentHash"}, } -func TestMain(m *testing.M) { +// this starts nodes and runs RPC calls (which loads db) +func TestCalls(t *testing.T) { if utils.MODE != "sync" { - fmt.Println("Going to skip stress test") - return - } - fw, err := utils.InitFramework(3) - if err != nil { - log.Fatal(fmt.Errorf("error initialising test framework")) + t.Skip("MODE != 'sync', skipping stress test") } - framework = *fw - // Start all tests - code := m.Run() - os.Exit(code) -} -// this starts nodes and runs RPC calls (which loads db) -func TestCalls(t *testing.T) { ctx := context.Background() - err := framework.StartNodes(t) - require.Len(t, err, 0) + const qtyNodes = 3 + tomlConfig := config.Default() + framework, err := utils.InitFramework(ctx, t, qtyNodes, tomlConfig) + + require.NoError(t, err) + + nodesCtx, nodesCancel := context.WithCancel(ctx) + + runtimeErrors, startErr := framework.StartNodes(nodesCtx, t) + + t.Cleanup(func() { + nodesCancel() + for _, runtimeError := range runtimeErrors { + <-runtimeError + } + }) + + require.NoError(t, startErr) + for _, call := range tests { time.Sleep(call.delay) @@ -87,7 +89,4 @@ func TestCalls(t *testing.T) { res := framework.CheckEqual(check.call1idx, check.call2idx, check.field) require.True(t, res) } - - err = framework.KillNodes(t) - require.Len(t, err, 0) } diff --git a/tests/utils/chain.go b/tests/utils/chain.go deleted file mode 100644 index 055d888316..0000000000 --- a/tests/utils/chain.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package utils - -import ( - "context" 
- "fmt" - "strconv" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/rpc/modules" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/stretchr/testify/require" -) - -// GetChainHead calls the endpoint chain_getHeader to get the latest chain head -func GetChainHead(ctx context.Context, t *testing.T, rpcPort string) *types.Header { - endpoint := NewEndpoint(rpcPort) - const params = "[]" - respBody, err := PostRPC(ctx, endpoint, ChainGetHeader, params) - require.NoError(t, err) - - header := new(modules.ChainBlockHeaderResponse) - err = DecodeRPC(t, respBody, header) - require.NoError(t, err) - - return headerResponseToHeader(t, header) -} - -// GetChainHeadWithError calls the endpoint chain_getHeader to get the latest chain head -func GetChainHeadWithError(ctx context.Context, t *testing.T, rpcPort string) (*types.Header, error) { - endpoint := NewEndpoint(rpcPort) - const params = "[]" - respBody, err := PostRPC(ctx, endpoint, ChainGetHeader, params) - require.NoError(t, err) - - header := new(modules.ChainBlockHeaderResponse) - err = DecodeRPC(t, respBody, header) - if err != nil { - return nil, err - } - - return headerResponseToHeader(t, header), nil -} - -// GetBlockHash calls the endpoint chain_getBlockHash to get the latest chain head. -// It will block until a response is received or the context gets canceled. 
-func GetBlockHash(ctx context.Context, t *testing.T, rpcPort, num string) (common.Hash, error) { - endpoint := NewEndpoint(rpcPort) - params := "[" + num + "]" - const requestWait = time.Second - respBody, err := PostRPCWithRetry(ctx, endpoint, ChainGetBlockHash, params, requestWait) - require.NoError(t, err) - - var hash string - err = DecodeRPC(t, respBody, &hash) - if err != nil { - return common.Hash{}, err - } - return common.MustHexToHash(hash), nil -} - -// GetFinalizedHead calls the endpoint chain_getFinalizedHead to get the latest finalised head -func GetFinalizedHead(ctx context.Context, t *testing.T, rpcPort string) common.Hash { - endpoint := NewEndpoint(rpcPort) - method := ChainGetFinalizedHead - const params = "[]" - respBody, err := PostRPC(ctx, endpoint, method, params) - require.NoError(t, err) - - var hash string - err = DecodeRPC(t, respBody, &hash) - require.NoError(t, err) - return common.MustHexToHash(hash) -} - -// GetFinalizedHeadByRound calls the endpoint chain_getFinalizedHeadByRound to get the finalised head at a given round -// TODO: add setID, hard-coded at 1 for now -func GetFinalizedHeadByRound(ctx context.Context, t *testing.T, rpcPort string, round uint64) (common.Hash, error) { - p := strconv.Itoa(int(round)) - endpoint := NewEndpoint(rpcPort) - method := ChainGetFinalizedHeadByRound - params := "[" + p + ",1]" - respBody, err := PostRPC(ctx, endpoint, method, params) - require.NoError(t, err) - - var hash string - err = DecodeRPC(t, respBody, &hash) - if err != nil { - return common.Hash{}, err - } - - return common.MustHexToHash(hash), nil -} - -// GetBlock calls the endpoint chain_getBlock -func GetBlock(ctx context.Context, t *testing.T, rpcPort string, hash common.Hash) *types.Block { - endpoint := NewEndpoint(rpcPort) - method := ChainGetBlock - params := fmt.Sprintf(`["%s"]`, hash) - respBody, err := PostRPC(ctx, endpoint, method, params) - require.NoError(t, err) - - block := new(modules.ChainBlockResponse) - err = 
DecodeRPC(t, respBody, block) - if err != nil { - return nil - } - - header := block.Block.Header - - parentHash, err := common.HexToHash(header.ParentHash) - require.NoError(t, err) - - nb, err := common.HexToBytes(header.Number) - require.NoError(t, err) - number := common.BytesToUint(nb) - - stateRoot, err := common.HexToHash(header.StateRoot) - require.NoError(t, err) - - extrinsicsRoot, err := common.HexToHash(header.ExtrinsicsRoot) - require.NoError(t, err) - - h, err := types.NewHeader(parentHash, stateRoot, extrinsicsRoot, number, rpcLogsToDigest(t, header.Digest.Logs)) - require.NoError(t, err) - - b, err := types.NewBodyFromExtrinsicStrings(block.Block.Body) - require.NoError(t, err, fmt.Sprintf("%v", block.Block.Body)) - - return &types.Block{ - Header: *h, - Body: *b, - } -} diff --git a/tests/utils/common.go b/tests/utils/common.go index 7948583c4b..1a4b4805b7 100644 --- a/tests/utils/common.go +++ b/tests/utils/common.go @@ -4,7 +4,6 @@ package utils import ( - "encoding/json" "os" ) @@ -12,59 +11,9 @@ var ( // MODE is the value for the environnent variable MODE. MODE = os.Getenv("MODE") - // HOSTNAME is the value for the environnent variable HOSTNAME. - HOSTNAME = os.Getenv("HOSTNAME") // PORT is the value for the environnent variable PORT. PORT = os.Getenv("PORT") // LOGLEVEL is the value for the environnent variable LOGLEVEL. LOGLEVEL = os.Getenv("LOG") - - // NETWORK_SIZE is the value for the environnent variable NETWORK_SIZE. 
- NETWORK_SIZE = os.Getenv("NETWORK_SIZE") //nolint:revive ) - -// ServerResponse wraps the RPC response -type ServerResponse struct { - // JSON-RPC Version - Version string `json:"jsonrpc"` - // Resulting values - Result json.RawMessage `json:"result"` - // Any generated errors - Error *Error `json:"error"` - // Request id - ID *json.RawMessage `json:"id"` -} - -// WebsocketResponse wraps the Websocket response -type WebsocketResponse struct { - // JSON-RPC Version - Version string `json:"jsonrpc"` - // Method name called - Method string `json:"method"` - // Resulting values - Result json.RawMessage `json:"result"` - // Params values including results - Params json.RawMessage `json:"params"` - // Any generated errors - Error *Error `json:"error"` - // Request id - Subscription *json.RawMessage `json:"subscription"` - // Request id - ID *json.RawMessage `json:"id"` -} - -// ErrCode is a int type used for the rpc error codes -type ErrCode int - -// Error is a struct that holds the error message and the error code for a error -type Error struct { - Message string `json:"message"` - ErrorCode ErrCode `json:"code"` - Data map[string]interface{} `json:"data"` -} - -// Error returns the error Message string -func (e *Error) Error() string { - return e.Message -} diff --git a/tests/utils/config/config.go b/tests/utils/config/config.go new file mode 100644 index 0000000000..81499e8ac6 --- /dev/null +++ b/tests/utils/config/config.go @@ -0,0 +1,51 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package config + +import ( + "github.com/ChainSafe/gossamer/dot/config/toml" +) + +// LogGrandpa generates a grandpa config. +func LogGrandpa() (cfg toml.Config) { + cfg = Default() + cfg.Log = toml.LogConfig{ + CoreLvl: "crit", + NetworkLvl: "debug", + RuntimeLvl: "crit", + BlockProducerLvl: "info", + FinalityGadgetLvl: "debug", + } + return cfg +} + +// NoBabe generates a no-babe config. 
+func NoBabe() (cfg toml.Config) { + cfg = Default() + cfg.Global.LogLvl = "info" + cfg.Log = toml.LogConfig{ + SyncLvl: "debug", + NetworkLvl: "debug", + } + cfg.Core.BabeAuthority = false + return cfg +} + +// NoGrandpa generates an no-grandpa config. +func NoGrandpa() (cfg toml.Config) { + cfg = Default() + cfg.Core.GrandpaAuthority = false + cfg.Core.BABELead = true + cfg.Core.GrandpaInterval = 1 + return cfg +} + +// NotAuthority generates an non-authority config. +func NotAuthority() (cfg toml.Config) { + cfg = Default() + cfg.Core.Roles = 1 + cfg.Core.BabeAuthority = false + cfg.Core.GrandpaAuthority = false + return cfg +} diff --git a/tests/utils/config/default.go b/tests/utils/config/default.go new file mode 100644 index 0000000000..ba18ce7757 --- /dev/null +++ b/tests/utils/config/default.go @@ -0,0 +1,52 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package config + +import ( + "github.com/ChainSafe/gossamer/dot/config/toml" +) + +// Default returns a default TOML configuration for Gossamer. 
+func Default() toml.Config { + return toml.Config{ + Global: toml.GlobalConfig{ + Name: "Gossamer", + ID: "gssmr", + LogLvl: "info", + MetricsAddress: "localhost:9876", + RetainBlocks: 256, + Pruning: "archive", + }, + Log: toml.LogConfig{ + CoreLvl: "info", + SyncLvl: "info", + }, + Account: toml.AccountConfig{ + Key: "", + Unlock: "", + }, + Core: toml.CoreConfig{ + Roles: 4, + BabeAuthority: true, + GrandpaAuthority: true, + GrandpaInterval: 1, + }, + Network: toml.NetworkConfig{ + Bootnodes: nil, + ProtocolID: "/gossamer/gssmr/0", + NoBootstrap: false, + NoMDNS: false, + MinPeers: 1, + MaxPeers: 3, + }, + RPC: toml.RPCConfig{ + Enabled: true, + Unsafe: true, + WSUnsafe: true, + Host: "localhost", + Modules: []string{"system", "author", "chain", "state", "dev", "rpc"}, + WS: false, + }, + } +} diff --git a/tests/utils/config/write.go b/tests/utils/config/write.go new file mode 100644 index 0000000000..e4f1dc4c75 --- /dev/null +++ b/tests/utils/config/write.go @@ -0,0 +1,27 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package config + +import ( + "os" + "path/filepath" + "testing" + + ctoml "github.com/ChainSafe/gossamer/dot/config/toml" + "github.com/naoina/toml" + "github.com/stretchr/testify/require" +) + +// Write writes the toml configuration to a file +// in a temporary test directory which gets removed at +// the end of the test. 
+func Write(t *testing.T, cfg ctoml.Config) (configPath string) { + t.Helper() + configPath = filepath.Join(t.TempDir(), "config.toml") + raw, err := toml.Marshal(cfg) + require.NoError(t, err) + err = os.WriteFile(configPath, raw, os.ModePerm) + require.NoError(t, err) + return configPath +} diff --git a/tests/utils/dev.go b/tests/utils/dev.go deleted file mode 100644 index 43680e10dd..0000000000 --- a/tests/utils/dev.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package utils - -import ( - "context" - "encoding/binary" - "strconv" - "testing" - "time" - - "github.com/ChainSafe/gossamer/lib/common" - "github.com/stretchr/testify/require" -) - -// PauseBABE calls the endpoint dev_control with the params ["babe", "stop"] -func PauseBABE(ctx context.Context, rpcPort string) error { - endpoint := NewEndpoint(rpcPort) - const params = `["babe", "stop"]` - _, err := PostRPC(ctx, endpoint, DevControl, params) - return err -} - -// SlotDuration Calls dev endpoint for slot duration -func SlotDuration(ctx context.Context, t *testing.T, rpcPort string) time.Duration { - endpoint := NewEndpoint(rpcPort) - const method = "dev_slotDuration" - const params = "[]" - slotDuration, err := PostRPC(ctx, endpoint, method, params) - - if err != nil { - require.NoError(t, err) - } - - slotDurationDecoded := new(string) - err = DecodeRPC(t, slotDuration, slotDurationDecoded) - require.NoError(t, err) - - slotDurationParsed := binary.LittleEndian.Uint64(common.MustHexToBytes(*slotDurationDecoded)) - duration, err := time.ParseDuration(strconv.Itoa(int(slotDurationParsed)) + "ms") - require.NoError(t, err) - return duration -} - -// EpochLength Calls dev endpoint for epoch length -func EpochLength(ctx context.Context, t *testing.T, rpcPort string) uint64 { - endpoint := NewEndpoint(rpcPort) - const method = "dev_epochLength" - const params = "[]" - epochLength, err := PostRPC(ctx, endpoint, method, params) - if err 
!= nil { - require.NoError(t, err) - } - - epochLengthDecoded := new(string) - err = DecodeRPC(t, epochLength, epochLengthDecoded) - require.NoError(t, err) - - epochLengthParsed := binary.LittleEndian.Uint64(common.MustHexToBytes(*epochLengthDecoded)) - return epochLengthParsed -} diff --git a/tests/utils/framework.go b/tests/utils/framework.go index 135aae3698..febb137a88 100644 --- a/tests/utils/framework.go +++ b/tests/utils/framework.go @@ -6,34 +6,40 @@ package utils import ( "context" "fmt" - "os" "strconv" "testing" + "github.com/ChainSafe/gossamer/dot/config/toml" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/rpc" scribble "github.com/nanobox-io/golang-scribble" ) // Framework struct to hold references to framework data type Framework struct { - nodes []Node + nodes node.Nodes db *scribble.Driver callQty int } -// InitFramework creates given quanity of nodes -func InitFramework(qtyNodes int) (*Framework, error) { +// NewFramework creates a new framework. 
+func NewFramework() (framework *Framework) { + return &Framework{} +} + +// InitFramework creates given quantity of nodes +func InitFramework(ctx context.Context, t *testing.T, qtyNodes int, + tomlConfig toml.Config) (*Framework, error) { f := &Framework{} - nodes, err := InitNodes(qtyNodes, ConfigDefault) - if err != nil { - return nil, err - } - f.nodes = nodes - tempDir, err := os.MkdirTemp("", "gossamer-stress-db") + f.nodes = node.MakeNodes(t, qtyNodes, tomlConfig) + + err := f.nodes.Init(ctx) if err != nil { - return nil, err + return nil, fmt.Errorf("cannot init nodes: %w", err) } - db, err := scribble.New(tempDir, nil) + + db, err := scribble.New(t.TempDir(), nil) if err != nil { return nil, err } @@ -43,20 +49,9 @@ func InitFramework(qtyNodes int) (*Framework, error) { } // StartNodes calls RestartGossamor for all nodes -func (fw *Framework) StartNodes(t *testing.T) (errorList []error) { - for i, node := range fw.nodes { - var err error - fw.nodes[i], err = startGossamer(t, node, false) - if err != nil { - errorList = append(errorList, err) - } - } - return errorList -} - -// KillNodes stops all running nodes -func (fw *Framework) KillNodes(t *testing.T) []error { - return TearDown(t, fw.nodes) +func (fw *Framework) StartNodes(ctx context.Context, t *testing.T) ( + runtimeErrors []<-chan error, startErr error) { + return fw.nodes.Start(ctx) } // CallRPC call RPC method with given params for node at idx @@ -66,12 +61,12 @@ func (fw *Framework) CallRPC(ctx context.Context, idx int, method, params string return nil, fmt.Errorf("node index greater than quantity of nodes") } node := fw.nodes[idx] - respBody, err := PostRPC(ctx, NewEndpoint(node.RPCPort), method, params) + respBody, err := rpc.Post(ctx, rpc.NewEndpoint(node.RPCPort()), method, params) if err != nil { return nil, err } - err = DecodeRPC_NT(respBody, &respJSON) + err = rpc.Decode(respBody, &respJSON) if err != nil { return nil, fmt.Errorf("error making RPC call %v", err) } diff --git 
a/tests/utils/gossamer_utils.go b/tests/utils/gossamer_utils.go index b229920bb1..d513b05010 100644 --- a/tests/utils/gossamer_utils.go +++ b/tests/utils/gossamer_utils.go @@ -4,572 +4,34 @@ package utils import ( - "bufio" - "context" - "fmt" - "io" "os" - "os/exec" "path/filepath" - "strconv" - "sync" "testing" - "time" "github.com/ChainSafe/gossamer/dot" - ctoml "github.com/ChainSafe/gossamer/dot/config/toml" - "github.com/ChainSafe/gossamer/dot/rpc/modules" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/utils" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // Logger is the utils package local logger. var Logger = log.NewFromGlobal(log.AddContext("pkg", "test/utils")) -var maxRetries = 24 -var ( - // KeyList is the list of built-in keys - KeyList = []string{"alice", "bob", "charlie", "dave", "eve", "ferdie", "george", "heather", "ian"} - basePort = 7000 +// GenerateGenesisAuths generates a genesis file with numAuths authorities +// and returns the file path to the genesis file. The genesis file is +// automatically removed when the test ends. 
+func GenerateGenesisAuths(t *testing.T, numAuths int) (genesisPath string) { + gssmrGenesisPath := utils.GetGssmrGenesisPathTest(t) - // BaseRPCPort is the starting RPC port for test nodes - BaseRPCPort = 8540 + buildSpec, err := dot.BuildFromGenesis(gssmrGenesisPath, numAuths) + require.NoError(t, err) - // BaseWSPort is the starting Websocket port for test nodes - BaseWSPort = 8546 + buildSpecJSON, err := buildSpec.ToJSONRaw() + require.NoError(t, err) - currentDir, _ = os.Getwd() - gossamerCMD = filepath.Join(currentDir, "../..", "bin/gossamer") + genesisPath = filepath.Join(t.TempDir(), "genesis.json") + err = os.WriteFile(genesisPath, buildSpecJSON, os.ModePerm) + require.NoError(t, err) - // GenesisOneAuth is the genesis file that has 1 authority - GenesisOneAuth = filepath.Join(currentDir, "../utils/genesis_oneauth.json") - // GenesisThreeAuths is the genesis file that has 3 authorities - GenesisThreeAuths = filepath.Join(currentDir, "../utils/genesis_threeauths.json") - // GenesisTwoAuthsSecondaryVRF0_9_10 is the genesis file that has 2 authorities and block production by - // secondary VRF slots enabled - GenesisTwoAuthsSecondaryVRF0_9_10 = filepath.Join(currentDir, "../utils/genesis_two_auths_secondaryvrf_0_9_10.json") - - // GenesisSixAuths is the genesis file that has 6 authorities - GenesisSixAuths = filepath.Join(currentDir, "../utils/genesis_sixauths.json") - // GenesisDefault is the default gssmr genesis file - GenesisDefault = filepath.Join(currentDir, "../..", "chain/gssmr/genesis.json") - // GenesisDev is the default dev genesis file - GenesisDev = filepath.Join(currentDir, "../..", "chain/dev/genesis-spec.json") - - // ConfigDefault is the default config file - ConfigDefault = filepath.Join(currentDir, "../utils/config_default.toml") - // ConfigLogGrandpa is a config file where log levels are set to CRIT except for GRANDPA - ConfigLogGrandpa = filepath.Join(currentDir, "../utils/config_log_grandpa.toml") - // ConfigNoBABE is a config file with 
BABE disabled - ConfigNoBABE = filepath.Join(currentDir, "../utils/config_nobabe.toml") - // ConfigNoGrandpa is a config file with grandpa disabled - ConfigNoGrandpa = filepath.Join(currentDir, "../utils/config_nograndpa.toml") - // ConfigNotAuthority is a config file with no authority functionality - ConfigNotAuthority = filepath.Join(currentDir, "../utils/config_notauthority.toml") -) - -// Node represents a gossamer process -type Node struct { - Process *exec.Cmd - Key string - RPCPort string - Idx int - basePath string - config string - WSPort string - BABELead bool -} - -// InitGossamer initialises given node number and returns node reference -func InitGossamer(idx int, basePath, genesis, config string) ( - node Node, err error) { - cmdInit := exec.Command(gossamerCMD, "init", - "--config", config, - "--basepath", basePath, - "--genesis", genesis, - "--force", - ) - - Logger.Info("initialising gossamer using " + cmdInit.String() + "...") - stdOutInit, err := cmdInit.CombinedOutput() - if err != nil { - fmt.Printf("%s", stdOutInit) - return node, err - } - - Logger.Infof("initialised gossamer node %d!", idx) - return Node{ - Idx: idx, - RPCPort: strconv.Itoa(BaseRPCPort + idx), - WSPort: strconv.Itoa(BaseWSPort + idx), - basePath: basePath, - config: config, - }, nil -} - -// startGossamer starts given node -func startGossamer(t *testing.T, node Node, websocket bool) ( - updatedNode Node, err error) { - var key string - var params = []string{"--port", strconv.Itoa(basePort + node.Idx), - "--config", node.config, - "--basepath", node.basePath, - "--rpchost", HOSTNAME, - "--rpcport", node.RPCPort, - "--rpcmods", "system,author,chain,state,dev,rpc", - "--rpc", - "--no-telemetry", - "--log", "info"} - - if node.BABELead { - params = append(params, "--babe-lead") - } - - if node.Idx >= len(KeyList) { - params = append(params, "--roles", "1") - } else { - key = KeyList[node.Idx] - params = append(params, "--roles", "4", - "--key", key) - } - - if websocket { - params 
= append(params, "--ws", - "--wsport", node.WSPort) - } - node.Process = exec.Command(gossamerCMD, params...) - - node.Key = key - - Logger.Infof("node basepath: %s", node.basePath) - // create log file - outfile, err := os.Create(filepath.Join(node.basePath, "log.out")) - if err != nil { - Logger.Errorf("Error when trying to set a log file for gossamer output: %s", err) - return node, err - } - - // create error log file - errfile, err := os.Create(filepath.Join(node.basePath, "error.out")) - if err != nil { - Logger.Errorf("Error when trying to set a log file for gossamer output: %s", err) - return node, err - } - - t.Cleanup(func() { - time.Sleep(time.Second) // wait for goroutine to finish writing - err = outfile.Close() - assert.NoError(t, err) - err = errfile.Close() - assert.NoError(t, err) - }) - - stdoutPipe, err := node.Process.StdoutPipe() - if err != nil { - Logger.Errorf("failed to get stdoutPipe from node %d: %s", node.Idx, err) - return node, err - } - - stderrPipe, err := node.Process.StderrPipe() - if err != nil { - Logger.Errorf("failed to get stderrPipe from node %d: %s", node.Idx, err) - return node, err - } - - Logger.Infof("starting gossamer at %s...", node.Process) - err = node.Process.Start() - if err != nil { - Logger.Errorf("Could not execute gossamer cmd: %s", err) - return node, err - } - - writer := bufio.NewWriter(outfile) - go func() { - _, err := io.Copy(writer, stdoutPipe) - if err != nil { - Logger.Errorf("failed copying stdout to writer: %s", err) - } - }() - errWriter := bufio.NewWriter(errfile) - go func() { - _, err := io.Copy(errWriter, stderrPipe) - if err != nil { - Logger.Errorf("failed copying stderr to writer: %s", err) - } - }() - - ctx := context.Background() - - var started bool - for i := 0; i < maxRetries; i++ { - time.Sleep(time.Second * 5) - - const checkNodeStartedTimeout = time.Second - checkNodeCtx, cancel := context.WithTimeout(ctx, checkNodeStartedTimeout) - - addr := fmt.Sprintf("http://%s:%s", HOSTNAME, 
node.RPCPort) - err = checkNodeStarted(checkNodeCtx, t, addr) - - cancel() - - if err == nil { - started = true - break - } - } - - if started { - Logger.Infof("node started with key %s and cmd.Process.Pid %d", key, node.Process.Process.Pid) - } else { - Logger.Criticalf("node didn't start: %s", err) - errFileContents, _ := os.ReadFile(errfile.Name()) - t.Logf("%s\n", errFileContents) - return node, err - } - - return node, nil -} - -// RunGossamer will initialise and start a gossamer instance -func RunGossamer(t *testing.T, idx int, basepath, genesis, config string, websocket, babeLead bool) ( - node Node, err error) { - node, err = InitGossamer(idx, basepath, genesis, config) - if err != nil { - return node, fmt.Errorf("could not initialise gossamer: %w", err) - } - - if idx == 0 || babeLead { - node.BABELead = true - } - - node, err = startGossamer(t, node, websocket) - if err != nil { - return node, fmt.Errorf("could not start gossamer: %w", err) - } - - return node, nil -} - -// checkNodeStarted check if gossamer node is started -func checkNodeStarted(ctx context.Context, t *testing.T, gossamerHost string) error { - const method = "system_health" - const params = "{}" - respBody, err := PostRPC(ctx, gossamerHost, method, params) - if err != nil { - return err - } - - target := new(modules.SystemHealthResponse) - err = DecodeRPC(t, respBody, target) - if err != nil { - return err - } - - if !target.ShouldHavePeers { - return fmt.Errorf("no peers") - } - - return nil -} - -// killProcess kills a instance of gossamer -func killProcess(t *testing.T, cmd *exec.Cmd) error { - err := cmd.Process.Kill() - if err != nil { - t.Log("failed to kill process", "cmd", cmd) - } - return err -} - -// InitNodes initialises given number of nodes -func InitNodes(num int, config string) (nodes []Node, err error) { - tempDir, err := os.MkdirTemp("", "gossamer-stress-") - if err != nil { - return nil, err - } - - for i := 0; i < num; i++ { - node, err := InitGossamer(i, 
tempDir+strconv.Itoa(i), GenesisDefault, config) - if err != nil { - Logger.Errorf("failed to initialise Gossamer for node index %d", i) - return nil, err - } - - nodes = append(nodes, node) - } - return nodes, nil -} - -// StartNodes starts given array of nodes -func StartNodes(t *testing.T, nodes []Node) (err error) { - for i, n := range nodes { - nodes[i], err = startGossamer(t, n, false) - if err != nil { - return fmt.Errorf("node %d of %d: %w", - i+1, len(nodes), err) - } - } - return nil -} - -// InitializeAndStartNodes will spin up `num` gossamer nodes -func InitializeAndStartNodes(t *testing.T, num int, genesis, config string) ( - nodes []Node, err error) { - var wg sync.WaitGroup - var nodesMutex, errMutex sync.Mutex - wg.Add(num) - - for i := 0; i < num; i++ { - go func(i int) { - defer wg.Done() - name := strconv.Itoa(i) - if i < len(KeyList) { - name = KeyList[i] - } - node, runErr := RunGossamer(t, i, TestDir(t, name), genesis, config, false, false) - if runErr != nil { - errMutex.Lock() - if err == nil { - err = fmt.Errorf("failed to run Gossamer for node index %d: %w", i, runErr) - } - errMutex.Unlock() - return - } - - nodesMutex.Lock() - nodes = append(nodes, node) - nodesMutex.Unlock() - }(i) - } - - wg.Wait() - - if err != nil { - _ = StopNodes(t, nodes) - return nil, err - } - - return nodes, nil -} - -// InitializeAndStartNodesWebsocket will spin up `num` gossamer nodes running with Websocket rpc enabled -func InitializeAndStartNodesWebsocket(t *testing.T, num int, genesis, config string) ( - nodes []Node, err error) { - var nodesMutex, errMutex sync.Mutex - var wg sync.WaitGroup - - wg.Add(num) - - for i := 0; i < num; i++ { - go func(i int) { - defer wg.Done() - name := strconv.Itoa(i) - if i < len(KeyList) { - name = KeyList[i] - } - node, runErr := RunGossamer(t, i, TestDir(t, name), genesis, config, true, false) - if runErr != nil { - errMutex.Lock() - if err == nil { - err = fmt.Errorf("failed to run Gossamer for node index %d: %w", i, 
runErr) - } - errMutex.Unlock() - return - } - - nodesMutex.Lock() - nodes = append(nodes, node) - nodesMutex.Unlock() - }(i) - } - - wg.Wait() - - if err != nil { - _ = StopNodes(t, nodes) - return nil, err - } - - return nodes, nil -} - -// StopNodes stops the given nodes -func StopNodes(t *testing.T, nodes []Node) (errs []error) { - for i := range nodes { - cmd := nodes[i].Process - err := killProcess(t, cmd) - if err != nil { - Logger.Errorf("failed to kill Gossamer (cmd %s) for node index %d", cmd, i) - errs = append(errs, err) - } - } - - return errs -} - -// TearDown stops the given nodes and remove their datadir -func TearDown(t *testing.T, nodes []Node) (errorList []error) { - for i, node := range nodes { - cmd := nodes[i].Process - err := killProcess(t, cmd) - if err != nil { - Logger.Errorf("failed to kill Gossamer (cmd %s) for node index %d", cmd, i) - errorList = append(errorList, err) - } - - err = os.RemoveAll(node.basePath) - if err != nil { - Logger.Error("failed to remove base path directory " + node.basePath) - errorList = append(errorList, err) - } - } - - return errorList -} - -// TestDir returns the test directory path /test_data// -func TestDir(t *testing.T, name string) string { - return filepath.Join("/tmp/", t.Name(), name) -} - -// GenerateGenesisThreeAuth generates Genesis file with three authority. -func GenerateGenesisThreeAuth() { - genesisPath, err := utils.GetGssmrGenesisPath() - if err != nil { - panic(err) - } - - bs, err := dot.BuildFromGenesis(genesisPath, 3) - if err != nil { - Logger.Errorf("genesis file not found: %s", err) - os.Exit(1) - } - dot.CreateJSONRawFile(bs, GenesisThreeAuths) -} - -// GenerateGenesisSixAuth generates Genesis file with six authority. 
-func GenerateGenesisSixAuth(t *testing.T) { - bs, err := dot.BuildFromGenesis(utils.GetGssmrGenesisPathTest(t), 6) - if err != nil { - Logger.Errorf("genesis file not found: %s", err) - os.Exit(1) - } - dot.CreateJSONRawFile(bs, GenesisSixAuths) -} - -func generateDefaultConfig() *ctoml.Config { - return &ctoml.Config{ - Global: ctoml.GlobalConfig{ - Name: "Gossamer", - ID: "gssmr", - LogLvl: "crit", - MetricsAddress: "localhost:9876", - RetainBlocks: 256, - Pruning: "archive", - }, - Log: ctoml.LogConfig{ - CoreLvl: "info", - SyncLvl: "info", - }, - Init: ctoml.InitConfig{ - Genesis: "./chain/gssmr/genesis.json", - }, - Account: ctoml.AccountConfig{ - Key: "", - Unlock: "", - }, - Core: ctoml.CoreConfig{ - Roles: 4, - BabeAuthority: true, - GrandpaAuthority: true, - GrandpaInterval: 1, - }, - Network: ctoml.NetworkConfig{ - Bootnodes: nil, - ProtocolID: "/gossamer/gssmr/0", - NoBootstrap: false, - NoMDNS: false, - MinPeers: 1, - MaxPeers: 3, - }, - RPC: ctoml.RPCConfig{ - Enabled: false, - Unsafe: true, - WSUnsafe: true, - Host: "localhost", - Modules: []string{"system", "author", "chain", "state"}, - WS: false, - }, - } -} - -// CreateDefaultConfig generates and creates default config file. -func CreateDefaultConfig() { - cfg := generateDefaultConfig() - dot.ExportTomlConfig(cfg, ConfigDefault) -} - -func generateConfigLogGrandpa() *ctoml.Config { - cfg := generateDefaultConfig() - cfg.Log = ctoml.LogConfig{ - CoreLvl: "crit", - NetworkLvl: "debug", - RuntimeLvl: "crit", - BlockProducerLvl: "info", - FinalityGadgetLvl: "debug", - } - return cfg -} - -// CreateConfigLogGrandpa generates and creates grandpa config file. 
-func CreateConfigLogGrandpa() { - cfg := generateConfigLogGrandpa() - dot.ExportTomlConfig(cfg, ConfigLogGrandpa) -} - -func generateConfigNoBabe() *ctoml.Config { - cfg := generateDefaultConfig() - cfg.Global.LogLvl = "info" - cfg.Log = ctoml.LogConfig{ - SyncLvl: "debug", - NetworkLvl: "debug", - } - - cfg.Core.BabeAuthority = false - return cfg -} - -// CreateConfigNoBabe generates and creates no babe config file. -func CreateConfigNoBabe() { - cfg := generateConfigNoBabe() - dot.ExportTomlConfig(cfg, ConfigNoBABE) -} - -func generateConfigNoGrandpa() *ctoml.Config { - cfg := generateDefaultConfig() - cfg.Core.GrandpaAuthority = false - cfg.Core.BABELead = true - cfg.Core.GrandpaInterval = 1 - return cfg -} - -// CreateConfigNoGrandpa generates and creates no grandpa config file. -func CreateConfigNoGrandpa() { - cfg := generateConfigNoGrandpa() - dot.ExportTomlConfig(cfg, ConfigNoGrandpa) -} - -func generateConfigNotAuthority() *ctoml.Config { - cfg := generateDefaultConfig() - cfg.Core.Roles = 1 - cfg.Core.BabeAuthority = false - cfg.Core.GrandpaAuthority = false - return cfg -} - -// CreateConfigNotAuthority generates and creates non-authority config file. 
-func CreateConfigNotAuthority() { - cfg := generateConfigNotAuthority() - dot.ExportTomlConfig(cfg, ConfigNotAuthority) + return genesisPath } diff --git a/tests/utils/header.go b/tests/utils/header.go deleted file mode 100644 index a3eccf0b77..0000000000 --- a/tests/utils/header.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package utils - -import ( - "testing" - - "github.com/ChainSafe/gossamer/dot/rpc/modules" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/stretchr/testify/require" -) - -// headerResponseToHeader converts a *ChainBlockHeaderResponse to a *types.Header -func headerResponseToHeader(t *testing.T, header *modules.ChainBlockHeaderResponse) *types.Header { - parentHash, err := common.HexToHash(header.ParentHash) - require.NoError(t, err) - - nb, err := common.HexToBytes(header.Number) - require.NoError(t, err) - number := common.BytesToUint(nb) - - stateRoot, err := common.HexToHash(header.StateRoot) - require.NoError(t, err) - - extrinsicsRoot, err := common.HexToHash(header.ExtrinsicsRoot) - require.NoError(t, err) - - h, err := types.NewHeader(parentHash, stateRoot, extrinsicsRoot, number, rpcLogsToDigest(t, header.Digest.Logs)) - require.NoError(t, err) - return h -} diff --git a/tests/utils/node/errors.go b/tests/utils/node/errors.go new file mode 100644 index 0000000000..c55bd48b01 --- /dev/null +++ b/tests/utils/node/errors.go @@ -0,0 +1,156 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "context" + "errors" + "fmt" + "sync" + "time" +) + +// errorsFanIn takes care of fanning runtime errors from +// different error channels to a single error channel. +// It also handles removal of specific runtime error channels +// from the fan in, which can be useful if one node crashes +// or is stopped on purpose. 
+type errorsFanIn struct { + nodeToRuntimeError map[string]<-chan error + nodeToFaninCancel map[string]context.CancelFunc + nodeToFaninDone map[string]<-chan struct{} + fifo chan nodeError + mutex sync.RWMutex +} + +type nodeError struct { + node string + err error +} + +// newErrorsFanIn returns a new errors fan in object. +func newErrorsFanIn() *errorsFanIn { + return &errorsFanIn{ + nodeToRuntimeError: make(map[string]<-chan error), + nodeToFaninCancel: make(map[string]context.CancelFunc), + nodeToFaninDone: make(map[string]<-chan struct{}), + fifo: make(chan nodeError), + } +} + +// Add adds a runtime error receiving channel to the fan in mechanism +// for the particular node string given. Note each node string must be +// unique or the code will panic. +func (e *errorsFanIn) Add(node string, runtimeError <-chan error) { + e.mutex.Lock() + defer e.mutex.Unlock() + + // check for duplicate node string + _, exists := e.nodeToRuntimeError[node] + if exists { + panic(fmt.Sprintf("node %q was already added", node)) + } + + e.nodeToRuntimeError[node] = runtimeError + ctx, cancel := context.WithCancel(context.Background()) + e.nodeToFaninCancel[node] = cancel + fanInDone := make(chan struct{}) + e.nodeToFaninDone[node] = fanInDone + + go fanIn(ctx, node, runtimeError, e.fifo, fanInDone) +} + +func fanIn(ctx context.Context, node string, + runtimeError <-chan error, fifo chan<- nodeError, + fanInDone chan<- struct{}) { + defer close(fanInDone) + + select { + case <-ctx.Done(): + return + case err := <-runtimeError: + fifo <- nodeError{ + node: node, + err: err, + } + } +} + +// len returns how many nodes are being monitored +// for runtime errors. +func (e *errorsFanIn) len() (length int) { + e.mutex.RLock() + defer e.mutex.RUnlock() + + return len(e.nodeToRuntimeError) +} + +// remove removes a node from the fan in mechanism +// and clears it from the internal maps. 
+func (e *errorsFanIn) remove(node string) { + e.mutex.Lock() + defer e.mutex.Unlock() + + e.removeWithoutLock(node) +} + +func (e *errorsFanIn) removeWithoutLock(node string) { + // Stop fanning in + cancelFanIn := e.nodeToFaninCancel[node] + fanInDone := e.nodeToFaninDone[node] + cancelFanIn() + <-fanInDone + + // Clear from maps + delete(e.nodeToRuntimeError, node) + delete(e.nodeToFaninCancel, node) + delete(e.nodeToFaninDone, node) +} + +var ( + ErrWaitTimedOut = errors.New("waiting for all nodes timed out") +) + +// waitForAll waits to collect all the runtime errors from all the +// nodes added and which did not crash previously. +// If the timeout duration specified is reached, all internal +// fan in operations are stopped and all the nodes are cleared from +// the internal maps, and an error is returned. +func (e *errorsFanIn) waitForAll(timeout time.Duration) (err error) { + e.mutex.Lock() + defer e.mutex.Unlock() + + timer := time.NewTimer(timeout) + + length := len(e.nodeToRuntimeError) + for i := 0; i < length; i++ { + select { + case <-timer.C: + for node := range e.nodeToRuntimeError { + e.removeWithoutLock(node) + } + return fmt.Errorf("%w: for %d nodes after %s", + ErrWaitTimedOut, len(e.nodeToRuntimeError), timeout) + case identifiedError := <-e.fifo: // one error per node max + node := identifiedError.node + e.removeWithoutLock(node) + } + } + + _ = timer.Stop() + + return nil +} + +// watch returns the next runtime error from the N runtime +// error channels, in a first in first out mechanism. 
+func (e *errorsFanIn) watch(ctx context.Context) (err error) { + select { + case <-ctx.Done(): + return ctx.Err() + case identifiedErr := <-e.fifo: // single fatal error + e.remove(identifiedErr.node) + return identifiedErr.err + } +} diff --git a/tests/utils/node/node.go b/tests/utils/node/node.go new file mode 100644 index 0000000000..8bb9d652bc --- /dev/null +++ b/tests/utils/node/node.go @@ -0,0 +1,289 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "testing" + + "github.com/ChainSafe/gossamer/dot/config/toml" + "github.com/ChainSafe/gossamer/lib/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/pathfinder" + "github.com/stretchr/testify/require" +) + +// Node is a structure holding all the settings to +// configure a Gossamer node. +type Node struct { + index *int + configPath string + tomlConfig toml.Config + writer io.Writer + logsBuffer *bytes.Buffer + binPath string +} + +// New returns a node configured using the +// toml configuration and options given. +func New(t *testing.T, tomlConfig toml.Config, + options ...Option) (node Node) { + node.tomlConfig = tomlConfig + for _, option := range options { + option(&node) + } + node.setDefaults(t) + node.setWriterPrefix() + node.configPath = config.Write(t, node.tomlConfig) + return node +} + +func (n Node) String() string { + indexString := fmt.Sprint(*n.index) + return fmt.Sprintf("%s-%s", n.tomlConfig.Account.Key, indexString) +} + +// RPCPort returns the rpc port of the node. +func (n Node) RPCPort() (port string) { return fmt.Sprint(n.tomlConfig.RPC.Port) } + +// WSPort returns the websocket port of the node. +func (n Node) WSPort() (port string) { return fmt.Sprint(n.tomlConfig.RPC.WSPort) } + +// Key returns the key of the node. 
+func (n Node) Key() (key string) { return n.tomlConfig.Account.Key } + +func intPtr(n int) *int { return &n } + +func (n *Node) setDefaults(t *testing.T) { + if n.index == nil { + n.index = intPtr(0) + } + + if n.tomlConfig.Global.BasePath == "" { + n.tomlConfig.Global.BasePath = t.TempDir() + } + + if n.tomlConfig.Init.Genesis == "" { + n.tomlConfig.Init.Genesis = utils.GetGssmrGenesisRawPathTest(t) + } + + if n.tomlConfig.Account.Key == "" { + keyList := []string{"alice", "bob", "charlie", "dave", "eve", "ferdie", "george", "heather", "ian"} + if *n.index < len(keyList) { + n.tomlConfig.Account.Key = keyList[*n.index] + } else { + n.tomlConfig.Account.Key = "default-key" + } + } + + if n.tomlConfig.Network.Port == 0 { + const basePort uint16 = 7000 + n.tomlConfig.Network.Port = basePort + uint16(*n.index) + } + + if n.tomlConfig.RPC.Enabled && n.tomlConfig.RPC.Port == 0 { + const basePort uint32 = 8540 + n.tomlConfig.RPC.Port = basePort + uint32(*n.index) + } + + if n.tomlConfig.RPC.WS && n.tomlConfig.RPC.WSPort == 0 { + const basePort uint32 = 8546 + n.tomlConfig.RPC.WSPort = basePort + uint32(*n.index) + } + + userSetWriter := n.writer != nil && n.writer != io.Discard + if !userSetWriter { + n.logsBuffer = bytes.NewBuffer(nil) + } + + if n.writer == nil { + n.writer = io.Discard + } + + if n.binPath == "" { + n.binPath = pathfinder.GetGossamer(t) + } +} + +// Init initialises the Gossamer node. 
+func (n *Node) Init(ctx context.Context) (err error) { + cmdInit := exec.CommandContext(ctx, n.binPath, "init", //nolint:gosec + "--config", n.configPath, + ) + + if n.logsBuffer != nil { + n.logsBuffer.Reset() + n.writer = io.MultiWriter(n.writer, n.logsBuffer) + } + + cmdInit.Stdout = n.writer + cmdInit.Stderr = n.writer + + err = cmdInit.Start() + if err != nil { + return fmt.Errorf("cannot start command: %w", err) + } + + err = cmdInit.Wait() + return n.wrapRuntimeError(ctx, cmdInit, err) +} + +// Start starts a Gossamer node using the node configuration of +// the receiving struct. It returns a start error if the node cannot +// be started, and runs the node until the context gets canceled. +// When the node crashes or is stopped, an error (nil or not) is sent +// in the waitErrCh. +func (n *Node) Start(ctx context.Context) (runtimeError <-chan error, startErr error) { + cmd := exec.CommandContext(ctx, n.binPath, //nolint:gosec + "--config", n.configPath, + "--no-telemetry") + + if n.logsBuffer != nil { + n.logsBuffer.Reset() + n.writer = io.MultiWriter(n.writer, n.logsBuffer) + } + + cmd.Stdout = n.writer + cmd.Stderr = cmd.Stdout // we assume no race between stdout and stderr + + err := cmd.Start() + if err != nil { + return nil, fmt.Errorf("cannot start %s: %w", cmd, err) + } + + waitErrCh := make(chan error) + go func(cmd *exec.Cmd, node *Node, waitErr chan<- error) { + err = cmd.Wait() + waitErr <- node.wrapRuntimeError(ctx, cmd, err) + }(cmd, n, waitErrCh) + + return waitErrCh, nil +} + +// StartAndWait starts a Gossamer node using the node configuration of +// the receiving struct. It returns a start error if the node cannot +// be started, and runs the node until the context gets canceled. +// When the node crashes or is stopped, an error (nil or not) is sent +// in the waitErrCh. +// It waits for the node to respond to an RPC health call before returning. 
+func (n *Node) StartAndWait(ctx context.Context) ( + runtimeError <-chan error, startErr error) { + runtimeError, startErr = n.Start(ctx) + if startErr != nil { + return nil, startErr + } + + err := waitForNode(ctx, n.RPCPort()) + if err != nil { + return nil, fmt.Errorf("failed waiting: %s", err) + } + + return runtimeError, nil +} + +// InitAndStartTest is a test helper method to initialise and start the node, +// as well as registering appriopriate test handlers. +// If initialising or starting fails, cleanup is done and the test fails instantly. +// If the node crashes during runtime, the passed `signalTestToStop` argument is +// called since the test cannot be failed from outside the main test goroutine. +func (n Node) InitAndStartTest(ctx context.Context, t *testing.T, + signalTestToStop context.CancelFunc) { + t.Helper() + + err := n.Init(ctx) + require.NoError(t, err) + + nodeCtx, nodeCancel := context.WithCancel(ctx) + + waitErr, err := n.StartAndWait(nodeCtx) + if err != nil { + t.Errorf("failed to start node %s: %s", n, err) + // Release resources and fail the test + nodeCancel() + t.FailNow() + } + + t.Logf("Node %s is ready", n) + + // watch for runtime fatal node error + watchDogCtx, watchDogCancel := context.WithCancel(ctx) + watchDogDone := make(chan struct{}) + go func() { + defer close(watchDogDone) + select { + case <-watchDogCtx.Done(): + return + case err := <-waitErr: // the node crashed + if watchDogCtx.Err() != nil { + // make sure the runtime watchdog is not meant + // to be disengaged, in case of signal racing. + return + } + t.Errorf("node %s crashed: %s", n, err) + // Release resources + nodeCancel() + // we cannot stop the test with t.FailNow() from a goroutine + // other than the test goroutine, so we call the following function + // to signal the test goroutine to stop the test. 
+ signalTestToStop() + } + }() + + t.Cleanup(func() { + t.Helper() + // Disengage node watchdog goroutine + watchDogCancel() + <-watchDogDone + // Stop the node and wait for it to exit + nodeCancel() + <-waitErr + t.Logf("Node %s terminated", n) + }) +} + +func (n *Node) setWriterPrefix() { + if n.writer == io.Discard { + return // no need to wrap it + } + + n.writer = &prefixedWriter{ + prefix: []byte(n.String() + " "), + writer: n.writer, + } +} + +// wrapRuntimeError wraps the error given using the context available +// such as the command string or the log buffer. It returns nil if the +// argument error is nil. +func (n *Node) wrapRuntimeError(ctx context.Context, cmd *exec.Cmd, + waitErr error) (wrappedErr error) { + if waitErr == nil { + return nil + } + + if ctx.Err() != nil { + return fmt.Errorf("%s: %w: %s", n, ctx.Err(), waitErr) + } + + var logInformation string + if n.logsBuffer != nil { + // Add log information to error if no writer is set + // for this node. + logInformation = "\nLogs:\n" + n.logsBuffer.String() + } + + configData, configReadErr := os.ReadFile(n.configPath) + configString := string(configData) + if configReadErr != nil { + configString = configReadErr.Error() + } + + return fmt.Errorf("%s encountered a runtime error: %w\ncommand: %s\n\n%s\n\n%s", + n, waitErr, cmd, configString, logInformation) +} diff --git a/tests/utils/node/node_test.go b/tests/utils/node/node_test.go new file mode 100644 index 0000000000..42cc20a514 --- /dev/null +++ b/tests/utils/node/node_test.go @@ -0,0 +1,28 @@ +//go:build endtoend + +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "context" + "testing" + "time" + + "github.com/ChainSafe/gossamer/tests/utils/config" +) + +func Test_Node_InitAndStartTest(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + t.Cleanup(cancel) + + tomlConfig := config.Default() + tomlConfig.Core.BABELead = true + + n := 
New(t, tomlConfig) + + n.InitAndStartTest(ctx, t, cancel) + + cancel() +} diff --git a/tests/utils/node/nodes.go b/tests/utils/node/nodes.go new file mode 100644 index 0000000000..04515cef03 --- /dev/null +++ b/tests/utils/node/nodes.go @@ -0,0 +1,176 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/ChainSafe/gossamer/dot/config/toml" +) + +// Nodes is a slice of nodes. +type Nodes []Node + +// MakeNodes creates `num` nodes using the `tomlConfig` +// as a base config for each node. It overrides some of configuration: +// - the first node is always the BABE lead (overrides the toml configuration) +// - the index of each node is incremented per node (overrides the SetIndex option, if set) +func MakeNodes(t *testing.T, num int, tomlConfig toml.Config, + options ...Option) (nodes Nodes) { + nodes = make(Nodes, num) + for i := range nodes { + options = append(options, SetIndex(i)) + tomlConfig.Core.BABELead = i == 0 + nodes[i] = New(t, tomlConfig, options...) + } + return nodes +} + +// Init initialises all nodes and returns an error if any +// init operation failed. +func (nodes Nodes) Init(ctx context.Context) (err error) { + initErrors := make(chan error) + for _, node := range nodes { + go func(node Node) { + err := node.Init(ctx) // takes 2 seconds + if err != nil { + err = fmt.Errorf("node %s failed to initialise: %w", node, err) + } + initErrors <- err + }(node) + } + + for range nodes { + initErr := <-initErrors + if err == nil && initErr != nil { + err = initErr + } + } + + return err +} + +// Start starts all the nodes and returns the number of started nodes +// and an eventual start error. The started number should be used by +// the caller to wait for `started` errors coming from the wait error +// channel. All the nodes are stopped when the context is canceled, +// and `started` errors will be sent in the waitErr channel. 
+func (nodes Nodes) Start(ctx context.Context) ( + runtimeErrors []<-chan error, startErr error) { + runtimeErrors = make([]<-chan error, 0, len(nodes)) + for _, node := range nodes { + runtimeError, err := node.Start(ctx) + if err != nil { + return runtimeErrors, fmt.Errorf("node with index %d: %w", + *node.index, err) + } + + runtimeErrors = append(runtimeErrors, runtimeError) + } + + for _, node := range nodes { + port := node.RPCPort() + err := waitForNode(ctx, port) + if err != nil { + return runtimeErrors, fmt.Errorf("node with index %d: %w", *node.index, err) + } + } + + return runtimeErrors, nil +} + +// InitAndStartTest is a test helper method to initialise and start nodes, +// as well as registering appriopriate test handlers. +// If any node fails to initialise or start, cleanup is done and the test +// is instantly failed. +// If any node crashes at runtime, all other nodes are shutdown, +// cleanup is done and the passed argument `signalTestToStop` +// is called to signal to the main test goroutine to stop. +func (nodes Nodes) InitAndStartTest(ctx context.Context, t *testing.T, + signalTestToStop context.CancelFunc) { + t.Helper() + + err := nodes.Init(ctx) + if err != nil { + t.Fatal(err) + } + + nodesCtx, nodesCancel := context.WithCancel(ctx) + runtimeErrors := newErrorsFanIn() + + for _, node := range nodes { + runtimeError, err := node.Start(nodesCtx) // takes little time + if err == nil { + runtimeErrors.Add(node.String(), runtimeError) + continue + } + + t.Errorf("Node %s failed to start: %s", node, err) + + stopNodes(t, nodesCancel, runtimeErrors) + t.FailNow() + } + + // this is run sequentially since all nodes start almost at the same time + // so waiting for one node will also wait for all the others. + // You can see this since the test logs out that all the nodes are ready + // at the same time. 
+	for _, node := range nodes {
+		err := waitForNode(ctx, node.RPCPort())
+		if err == nil {
+			t.Logf("Node %s is ready", node)
+			continue
+		}
+
+		t.Errorf("Node %s failed to be ready: %s", node, err)
+		stopNodes(t, nodesCancel, runtimeErrors)
+		t.FailNow()
+	}
+
+	// watch for runtime fatal error from any of the nodes
+	watchDogCtx, watchDogCancel := context.WithCancel(ctx)
+	watchDogDone := make(chan struct{})
+	go func() {
+		defer close(watchDogDone)
+		err := runtimeErrors.watch(watchDogCtx)
+		watchDogWasStopped := errors.Is(err, context.Canceled) ||
+			errors.Is(err, context.DeadlineExceeded)
+		if watchDogWasStopped {
+			return
+		}
+
+		t.Errorf("one node has crashed: %s", err)
+		// we cannot stop the test with t.FailNow() from a goroutine
+		// other than the test goroutine, so we call signalTestToStop to signal
+		// it to the test goroutine.
+		signalTestToStop()
+	}()
+
+	t.Cleanup(func() {
+		t.Helper()
+		// Disengage node watchdog goroutine
+		watchDogCancel()
+		<-watchDogDone
+		// Stop and wait for nodes to exit
+		stopNodes(t, nodesCancel, runtimeErrors)
+	})
+}
+
+func stopNodes(t *testing.T, nodesCancel context.CancelFunc,
+	runtimeErrors *errorsFanIn) {
+	t.Helper()
+
+	// Stop the nodes and wait for them to exit
+	nodesCancel()
+	t.Logf("waiting on %d nodes to terminate...", runtimeErrors.len())
+	const waitTimeout = 10 * time.Second
+	err := runtimeErrors.waitForAll(waitTimeout)
+	if err != nil {
+		t.Logf("WARNING: %s", err)
+	}
+}
diff --git a/tests/utils/node/options.go b/tests/utils/node/options.go
new file mode 100644
index 0000000000..0e7923b37f
--- /dev/null
+++ b/tests/utils/node/options.go
@@ -0,0 +1,23 @@
+// Copyright 2022 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package node
+
+import "io"
+
+// Option is an option to use with the `New` constructor.
+type Option func(node *Node)
+
+// SetIndex sets the index for the node.
+func SetIndex(index int) Option {
+	return func(node *Node) {
+		node.index = intPtr(index)
+	}
+}
+
+// SetWriter sets the writer for the node.
+func SetWriter(writer io.Writer) Option {
+	return func(node *Node) {
+		node.writer = writer
+	}
+}
diff --git a/tests/utils/node/waitnode.go b/tests/utils/node/waitnode.go
new file mode 100644
index 0000000000..41f0411d51
--- /dev/null
+++ b/tests/utils/node/waitnode.go
@@ -0,0 +1,47 @@
+// Copyright 2022 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package node
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/ChainSafe/gossamer/tests/utils/retry"
+	"github.com/ChainSafe/gossamer/tests/utils/rpc"
+)
+
+func waitForNode(ctx context.Context, rpcPort string) (err error) {
+	const retryWait = time.Second
+	err = retry.UntilNoError(ctx, retryWait, func() (err error) {
+		const checkNodeStartedTimeout = time.Second
+		checkNodeCtx, checkNodeCancel := context.WithTimeout(ctx, checkNodeStartedTimeout)
+		err = checkNodeStarted(checkNodeCtx, "http://localhost:"+rpcPort)
+		checkNodeCancel()
+		return err
+	})
+
+	if err != nil {
+		return fmt.Errorf("node did not start: %w", err)
+	}
+
+	return nil
+}
+
+var errNodeNotExpectingPeers = errors.New("node should expect to have peers")
+
+// checkNodeStarted checks if the gossamer node has started
+func checkNodeStarted(ctx context.Context, gossamerHost string) error {
+	health, err := rpc.GetHealth(ctx, gossamerHost)
+	if err != nil {
+		return fmt.Errorf("cannot get health: %w", err)
+	}
+
+	if !health.ShouldHavePeers {
+		return errNodeNotExpectingPeers
+	}
+
+	return nil
+}
diff --git a/tests/utils/node/writer.go b/tests/utils/node/writer.go
new file mode 100644
index 0000000000..7dd0228161
--- /dev/null
+++ b/tests/utils/node/writer.go
@@ -0,0 +1,28 @@
+// Copyright 2022 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package node
+
+import (
+	"io"
+)
+
+type prefixedWriter struct {
+	prefix []byte
+	writer io.Writer
+}
+
+func
(w *prefixedWriter) Write(p []byte) (n int, err error) { + toWrite := make([]byte, 0, len(w.prefix)+len(p)) + toWrite = append(toWrite, w.prefix...) + toWrite = append(toWrite, p...) + n, err = w.writer.Write(toWrite) + + // n has to match the length of p + n -= len(w.prefix) + if n < 0 { + n = 0 + } + + return n, err +} diff --git a/tests/utils/node/writer_test.go b/tests/utils/node/writer_test.go new file mode 100644 index 0000000000..c53487d2cb --- /dev/null +++ b/tests/utils/node/writer_test.go @@ -0,0 +1,38 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_prefixedWriter(t *testing.T) { + t.Parallel() + + writer := bytes.NewBuffer(nil) + prefixWriter := &prefixedWriter{ + prefix: []byte("prefix: "), + writer: writer, + } + + message := []byte("message\n") + n, err := prefixWriter.Write(message) + require.NoError(t, err) + expectedBytesWrittenCount := 8 + assert.Equal(t, expectedBytesWrittenCount, n) + expectedWritten := "prefix: message\n" + assert.Equal(t, expectedWritten, writer.String()) + + message = []byte("message two\n") + n, err = prefixWriter.Write(message) + require.NoError(t, err) + expectedBytesWrittenCount = 12 + assert.Equal(t, expectedBytesWrittenCount, n) + expectedWritten = "prefix: message\nprefix: message two\n" + assert.Equal(t, expectedWritten, writer.String()) +} diff --git a/tests/utils/pathfinder/gossamer.go b/tests/utils/pathfinder/gossamer.go new file mode 100644 index 0000000000..7164940dcd --- /dev/null +++ b/tests/utils/pathfinder/gossamer.go @@ -0,0 +1,22 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package pathfinder + +import ( + "path/filepath" + "testing" + + "github.com/ChainSafe/gossamer/lib/utils" + "github.com/stretchr/testify/require" +) + +// GetGossamer returns the path to the Gossamer binary 
+// as /bin/gossamer. +func GetGossamer(t *testing.T) (binPath string) { + t.Helper() + + projectRootPath, err := utils.GetProjectRootPath() + require.NoError(t, err, "cannot get project root path") + return filepath.Join(projectRootPath, "bin/gossamer") +} diff --git a/tests/utils/request_utils.go b/tests/utils/request_utils.go deleted file mode 100644 index 4e07bbab80..0000000000 --- a/tests/utils/request_utils.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package utils - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/pkg/scale" - - "github.com/stretchr/testify/require" -) - -// PostRPC sends a payload using the method, host and params string given. -// It returns the response bytes and an eventual error. -func PostRPC(ctx context.Context, endpoint, method, params string) (data []byte, err error) { - requestBody := fmt.Sprintf(`{"jsonrpc":"2.0","method":"%s","params":%s,"id":1}`, method, params) - requestBuffer := bytes.NewBuffer([]byte(requestBody)) - - request, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, requestBuffer) - if err != nil { - return nil, fmt.Errorf("cannot create HTTP request: %w", err) - } - - const contentType = "application/json" - request.Header.Set("Content-Type", contentType) - request.Header.Set("Accept", contentType) - - response, err := http.DefaultClient.Do(request) - if err != nil { - return nil, fmt.Errorf("cannot do HTTP request: %w", err) - } - - data, err = io.ReadAll(response.Body) - if err != nil { - _ = response.Body.Close() - return nil, fmt.Errorf("cannot read HTTP response body: %w", err) - } - - err = response.Body.Close() - if err != nil { - return nil, fmt.Errorf("cannot close HTTP response body: %w", err) - } - - return data, nil -} - -// 
PostRPCWithRetry repeatitively calls `PostRPC` repeatitively -// until it succeeds within the requestWait duration or returns -// the last error if the context is canceled. -func PostRPCWithRetry(ctx context.Context, endpoint, method, params string, - requestWait time.Duration) (data []byte, err error) { - try := 0 - for { - try++ - - postRPCCtx, postRPCCancel := context.WithTimeout(ctx, requestWait) - - data, err = PostRPC(postRPCCtx, endpoint, method, params) - - if err == nil { - postRPCCancel() - return data, nil - } - - // wait for full requestWait duration or main context cancelation - <-postRPCCtx.Done() - postRPCCancel() - - if ctx.Err() != nil { - break - } - } - - totalTime := time.Duration(try) * requestWait - tryWord := "try" - if try > 1 { - tryWord = "tries" - } - return nil, fmt.Errorf("after %d %s totalling %s: %w", try, tryWord, totalTime, err) -} - -// DecodeRPC will decode []body into target interface -func DecodeRPC(t *testing.T, body []byte, target interface{}) error { - decoder := json.NewDecoder(bytes.NewReader(body)) - decoder.DisallowUnknownFields() - - var response ServerResponse - err := decoder.Decode(&response) - require.Nil(t, err, string(body)) - require.Equal(t, response.Version, "2.0") - - if response.Error != nil { - return errors.New(response.Error.Message) - } - - decoder = json.NewDecoder(bytes.NewReader(response.Result)) - decoder.DisallowUnknownFields() - - err = decoder.Decode(target) - require.Nil(t, err, string(body)) - return nil -} - -// DecodeWebsocket will decode body into target interface -func DecodeWebsocket(t *testing.T, body []byte, target interface{}) error { - decoder := json.NewDecoder(bytes.NewReader(body)) - decoder.DisallowUnknownFields() - - var response WebsocketResponse - err := decoder.Decode(&response) - require.Nil(t, err, string(body)) - require.Equal(t, response.Version, "2.0") - - if response.Error != nil { - return errors.New(response.Error.Message) - } - - if response.Result != nil { - decoder = 
json.NewDecoder(bytes.NewReader(response.Result)) - } else { - decoder = json.NewDecoder(bytes.NewReader(response.Params)) - } - - decoder.DisallowUnknownFields() - - err = decoder.Decode(target) - require.Nil(t, err, string(body)) - return nil -} - -// DecodeRPC_NT will decode []body into target interface (NT is Not Test testing required) -func DecodeRPC_NT(body []byte, target interface{}) error { //nolint:revive - decoder := json.NewDecoder(bytes.NewReader(body)) - decoder.DisallowUnknownFields() - - var response ServerResponse - err := decoder.Decode(&response) - if err != nil { - return err - } - - if response.Error != nil { - return errors.New(response.Error.Message) - } - - decoder = json.NewDecoder(bytes.NewReader(response.Result)) - decoder.DisallowUnknownFields() - - err = decoder.Decode(target) - return err -} - -// NewEndpoint will create a new endpoint string based on utils.HOSTNAME and port -func NewEndpoint(port string) string { - return "http://" + HOSTNAME + ":" + port -} - -func rpcLogsToDigest(t *testing.T, logs []string) scale.VaryingDataTypeSlice { - digest := types.NewDigest() - - for _, l := range logs { - itemBytes, err := common.HexToBytes(l) - require.NoError(t, err) - - var di = types.NewDigestItem() - err = scale.Unmarshal(itemBytes, &di) - require.NoError(t, err) - - err = digest.Add(di.Value()) - require.NoError(t, err) - } - - return digest -} diff --git a/tests/utils/retry/common.go b/tests/utils/retry/common.go new file mode 100644 index 0000000000..5562614ba9 --- /dev/null +++ b/tests/utils/retry/common.go @@ -0,0 +1,28 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package retry + +import ( + "context" + "fmt" + "time" +) + +func waitAfterFail(ctx context.Context, retryWait time.Duration, + failedTries *int) { + *failedTries++ + waitCtx, waitCancel := context.WithTimeout(ctx, retryWait) + <-waitCtx.Done() + waitCancel() +} + +func makeError(failedTries int, retryWait time.Duration, ctxErr 
error) (err error) { + totalRetryTime := time.Duration(failedTries) * retryWait + tryWord := "try" + if failedTries > 1 { + tryWord = "tries" + } + return fmt.Errorf("failed after %d %s during %s (%w)", + failedTries, tryWord, totalRetryTime, ctxErr) +} diff --git a/tests/utils/retry/untilnoerror.go b/tests/utils/retry/untilnoerror.go new file mode 100644 index 0000000000..c176344544 --- /dev/null +++ b/tests/utils/retry/untilnoerror.go @@ -0,0 +1,29 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package retry + +import ( + "context" + "time" +) + +// UntilNoError retries the function `f` until it returns a nil error. +// It waits `retryWait` after each failed call to `f`. +// If the context `ctx` is canceled, the function returns +// immediately an error stating the number of failed tries, +// for how long it retried and the last error returned by `f`. +func UntilNoError(ctx context.Context, retryWait time.Duration, + f func() (err error)) (err error) { + failedTries := 0 + for ctx.Err() == nil { + err = f() + if err == nil { + return nil + } + + waitAfterFail(ctx, retryWait, &failedTries) + } + + return makeError(failedTries, retryWait, ctx.Err()) +} diff --git a/tests/utils/retry/untilok.go b/tests/utils/retry/untilok.go new file mode 100644 index 0000000000..9b85d89248 --- /dev/null +++ b/tests/utils/retry/untilok.go @@ -0,0 +1,33 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package retry + +import ( + "context" + "fmt" + "time" +) + +// UntilOK retries the function `f` until it returns a true +// value for `ok` or a non nil error. +// It waits `retryWait` after each failed call to `f`. +// If the context `ctx` is canceled, the function returns +// immediately an error stating the number of failed tries, +// for how long it retried and the context error. 
+func UntilOK(ctx context.Context, retryWait time.Duration, + f func() (ok bool, err error)) (err error) { + failedTries := 0 + for ctx.Err() == nil { + ok, err := f() + if ok { + return nil + } else if err != nil { + return fmt.Errorf("stop retrying function: %w", err) + } + + waitAfterFail(ctx, retryWait, &failedTries) + } + + return makeError(failedTries, retryWait, ctx.Err()) +} diff --git a/tests/utils/rpc/chain.go b/tests/utils/rpc/chain.go new file mode 100644 index 0000000000..74094ff7cb --- /dev/null +++ b/tests/utils/rpc/chain.go @@ -0,0 +1,131 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import ( + "context" + "fmt" + "strconv" + + "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" +) + +// GetChainHead calls the endpoint chain_getHeader to get the latest chain head +func GetChainHead(ctx context.Context, rpcPort string) (header *types.Header, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "chain_getHeader" + const params = "[]" + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return nil, fmt.Errorf("cannot post RPC: %w", err) + } + + var rpcHeader modules.ChainBlockHeaderResponse + err = Decode(respBody, &rpcHeader) + if err != nil { + return nil, fmt.Errorf("cannot decode RPC response: %w", err) + } + + header, err = headerResponseToHeader(rpcHeader) + if err != nil { + return nil, fmt.Errorf("malformed block header received: %w", err) + } + + return header, nil +} + +// GetBlockHash calls the endpoint chain_getBlockHash to get the latest chain head. +// It will block until a response is received or the context gets canceled. 
+func GetBlockHash(ctx context.Context, rpcPort, num string) (hash common.Hash, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "chain_getBlockHash" + params := "[" + num + "]" + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return hash, fmt.Errorf("cannot post RPC: %w", err) + } + + return hexStringBodyToHash(respBody) +} + +// GetFinalizedHead calls the endpoint chain_getFinalizedHead to get the latest finalised head +func GetFinalizedHead(ctx context.Context, rpcPort string) ( + hash common.Hash, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "chain_getFinalizedHead" + const params = "[]" + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return hash, fmt.Errorf("cannot post RPC: %w", err) + } + + return hexStringBodyToHash(respBody) +} + +// GetFinalizedHeadByRound calls the endpoint chain_getFinalizedHeadByRound to get the finalised head at a given round +// TODO: add setID, hard-coded at 1 for now +func GetFinalizedHeadByRound(ctx context.Context, rpcPort string, round uint64) ( + hash common.Hash, err error) { + p := strconv.Itoa(int(round)) + endpoint := NewEndpoint(rpcPort) + const method = "chain_getFinalizedHeadByRound" + params := "[" + p + ",1]" + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return hash, fmt.Errorf("cannot post RPC: %w", err) + } + + return hexStringBodyToHash(respBody) +} + +// GetBlock calls the endpoint chain_getBlock +func GetBlock(ctx context.Context, rpcPort string, hash common.Hash) ( + block *types.Block, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "chain_getBlock" + params := fmt.Sprintf(`["%s"]`, hash) + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return nil, fmt.Errorf("cannot post RPC: %w", err) + } + + rpcBlock := new(modules.ChainBlockResponse) + err = Decode(respBody, rpcBlock) + if err != nil { + return nil, fmt.Errorf("cannot decode RPC response body: %w", 
err) + } + + rpcHeader := rpcBlock.Block.Header + header, err := headerResponseToHeader(rpcHeader) + if err != nil { + return nil, fmt.Errorf("malformed block header received: %w", err) + } + + body, err := types.NewBodyFromExtrinsicStrings(rpcBlock.Block.Body) + if err != nil { + return nil, fmt.Errorf("cannot create body from RPC block body: %w", err) + } + + return &types.Block{ + Header: *header, + Body: *body, + }, nil +} + +func hexStringBodyToHash(body []byte) (hash common.Hash, err error) { + var hexHashString string + err = Decode(body, &hexHashString) + if err != nil { + return common.Hash{}, fmt.Errorf("cannot decode RPC: %w", err) + } + + hash, err = common.HexToHash(hexHashString) + if err != nil { + return common.Hash{}, fmt.Errorf("malformed block hash hex string: %w", err) + } + + return hash, nil +} diff --git a/tests/utils/rpc/dev.go b/tests/utils/rpc/dev.go new file mode 100644 index 0000000000..4c4e81efae --- /dev/null +++ b/tests/utils/rpc/dev.go @@ -0,0 +1,76 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import ( + "context" + "encoding/binary" + "fmt" + "time" + + "github.com/ChainSafe/gossamer/lib/common" +) + +// PauseBABE calls the endpoint dev_control with the params ["babe", "stop"] +func PauseBABE(ctx context.Context, rpcPort string) error { + endpoint := NewEndpoint(rpcPort) + const method = "dev_control" + const params = `["babe", "stop"]` + _, err := Post(ctx, endpoint, method, params) + return err +} + +// SlotDuration Calls dev endpoint for slot duration +func SlotDuration(ctx context.Context, rpcPort string) ( + slotDuration time.Duration, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "dev_slotDuration" + const params = "[]" + data, err := Post(ctx, endpoint, method, params) + if err != nil { + return 0, fmt.Errorf("cannot post RPC: %w", err) + } + + var slotDurationString string + err = Decode(data, &slotDurationString) + if err != nil { + return 0, 
fmt.Errorf("cannot decode RPC response: %w", err) + } + + b, err := common.HexToBytes(slotDurationString) + if err != nil { + return 0, fmt.Errorf("malformed slot duration hex string: %w", err) + } + + slotDurationUint64 := binary.LittleEndian.Uint64(b) + + slotDuration = time.Millisecond * time.Duration(slotDurationUint64) + + return slotDuration, nil +} + +// EpochLength Calls dev endpoint for epoch length +func EpochLength(ctx context.Context, rpcPort string) (epochLength uint64, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "dev_epochLength" + const params = "[]" + data, err := Post(ctx, endpoint, method, params) + if err != nil { + return 0, fmt.Errorf("cannot post RPC: %w", err) + } + + var epochLengthHexString string + err = Decode(data, &epochLengthHexString) + if err != nil { + return 0, fmt.Errorf("cannot decode RPC response: %w", err) + } + + b, err := common.HexToBytes(epochLengthHexString) + if err != nil { + return 0, fmt.Errorf("malformed epoch length hex string: %w", err) + } + + epochLength = binary.LittleEndian.Uint64(b) + return epochLength, nil +} diff --git a/tests/utils/rpc/header.go b/tests/utils/rpc/header.go new file mode 100644 index 0000000000..63e3184e86 --- /dev/null +++ b/tests/utils/rpc/header.go @@ -0,0 +1,49 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import ( + "fmt" + + "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" +) + +// headerResponseToHeader converts a *ChainBlockHeaderResponse to a *types.Header +func headerResponseToHeader(rpcHeader modules.ChainBlockHeaderResponse) (header *types.Header, err error) { + parentHash, err := common.HexToHash(rpcHeader.ParentHash) + if err != nil { + return nil, fmt.Errorf("malformed parent hash: %w", err) + } + + nb, err := common.HexToBytes(rpcHeader.Number) + if err != nil { + return nil, fmt.Errorf("malformed number hex 
string: %w", err) + } + + number := common.BytesToUint(nb) + + stateRoot, err := common.HexToHash(rpcHeader.StateRoot) + if err != nil { + return nil, fmt.Errorf("malformed state root: %w", err) + } + + extrinsicsRoot, err := common.HexToHash(rpcHeader.ExtrinsicsRoot) + if err != nil { + return nil, fmt.Errorf("malformed extrinsic root: %w", err) + } + + digest, err := rpcLogsToDigest(rpcHeader.Digest.Logs) + if err != nil { + return nil, fmt.Errorf("malformed digest logs: %w", err) + } + + header, err = types.NewHeader(parentHash, stateRoot, extrinsicsRoot, number, digest) + if err != nil { + return nil, fmt.Errorf("cannot create new header: %w", err) + } + + return header, nil +} diff --git a/tests/utils/rpc/request.go b/tests/utils/rpc/request.go new file mode 100644 index 0000000000..1e0a5c3cc5 --- /dev/null +++ b/tests/utils/rpc/request.go @@ -0,0 +1,123 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/pkg/scale" +) + +// Post sends a payload using the method, host and params string given. +// It returns the response bytes and an eventual error. 
+func Post(ctx context.Context, endpoint, method, params string) (data []byte, err error) { + requestBody := fmt.Sprintf(`{"jsonrpc":"2.0","method":"%s","params":%s,"id":1}`, method, params) + requestBuffer := bytes.NewBuffer([]byte(requestBody)) + + request, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, requestBuffer) + if err != nil { + return nil, fmt.Errorf("cannot create HTTP request: %w", err) + } + + const contentType = "application/json" + request.Header.Set("Content-Type", contentType) + request.Header.Set("Accept", contentType) + + response, err := http.DefaultClient.Do(request) + if err != nil { + return nil, fmt.Errorf("cannot do HTTP request: %w", err) + } + + data, err = io.ReadAll(response.Body) + if err != nil { + _ = response.Body.Close() + return nil, fmt.Errorf("cannot read HTTP response body: %w", err) + } + + err = response.Body.Close() + if err != nil { + return nil, fmt.Errorf("cannot close HTTP response body: %w", err) + } + + return data, nil +} + +var ( + ErrResponseVersion = errors.New("unexpected response version received") + ErrResponseError = errors.New("response error received") +) + +// Decode decodes []body into the target interface. 
+func Decode(body []byte, target interface{}) error { + decoder := json.NewDecoder(bytes.NewReader(body)) + decoder.DisallowUnknownFields() + + var response ServerResponse + err := decoder.Decode(&response) + if err != nil { + return fmt.Errorf("cannot decode response: %s: %w", + string(body), err) + } + + if response.Version != "2.0" { + return fmt.Errorf("%w: %s", ErrResponseVersion, response.Version) + } + + if response.Error != nil { + return fmt.Errorf("%w: %s (error code %d)", + ErrResponseError, response.Error.Message, response.Error.ErrorCode) + } + + jsonRawMessage := response.Result + if jsonRawMessage == nil { + jsonRawMessage = response.Params + } + decoder = json.NewDecoder(bytes.NewReader(jsonRawMessage)) + decoder.DisallowUnknownFields() + + err = decoder.Decode(target) + if err != nil { + return fmt.Errorf("cannot decode response result: %s: %w", + string(response.Result), err) + } + + return nil +} + +// NewEndpoint returns http://localhost: +func NewEndpoint(port string) string { + return "http://localhost:" + port +} + +func rpcLogsToDigest(logs []string) (digest scale.VaryingDataTypeSlice, err error) { + digest = types.NewDigest() + + for _, l := range logs { + itemBytes, err := common.HexToBytes(l) + if err != nil { + return digest, fmt.Errorf("malformed digest item hex string: %w", err) + } + + di := types.NewDigestItem() + err = scale.Unmarshal(itemBytes, &di) + if err != nil { + return digest, fmt.Errorf("malformed digest item bytes: %w", err) + } + + err = digest.Add(di.Value()) + if err != nil { + return digest, fmt.Errorf("cannot add digest item to digest: %w", err) + } + } + + return digest, nil +} diff --git a/tests/utils/rpc/system.go b/tests/utils/rpc/system.go new file mode 100644 index 0000000000..332190409b --- /dev/null +++ b/tests/utils/rpc/system.go @@ -0,0 +1,49 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import ( + "context" + "fmt" + + 
"github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/lib/common" +) + +// GetPeers calls the endpoint system_peers +func GetPeers(ctx context.Context, rpcPort string) (peers []common.PeerInfo, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "system_peers" + const params = "[]" + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return nil, fmt.Errorf("cannot post RPC: %w", err) + } + + var peersResponse modules.SystemPeersResponse + err = Decode(respBody, &peersResponse) + if err != nil { + return nil, fmt.Errorf("cannot decode RPC: %w", err) + } + + return peersResponse, nil +} + +// GetHealth sends an RPC request to `system_health`. +func GetHealth(ctx context.Context, address string) ( + health modules.SystemHealthResponse, err error) { + const method = "system_health" + const params = "{}" + respBody, err := Post(ctx, address, method, params) + if err != nil { + return health, fmt.Errorf("cannot post RPC: %w", err) + } + + err = Decode(respBody, &health) + if err != nil { + return health, fmt.Errorf("cannot decode RPC: %w", err) + } + + return health, nil +} diff --git a/tests/utils/rpc/types.go b/tests/utils/rpc/types.go new file mode 100644 index 0000000000..b26590a37b --- /dev/null +++ b/tests/utils/rpc/types.go @@ -0,0 +1,30 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import "encoding/json" + +// ServerResponse wraps the RPC response +type ServerResponse struct { + // JSON-RPC Version + Version string `json:"jsonrpc"` + // Method name called + Method string `json:"method"` + // Resulting values + Result json.RawMessage `json:"result"` + // Params values including results + Params json.RawMessage `json:"params"` + // Any generated errors + Error *Error `json:"error"` + Subscription *json.RawMessage `json:"subscription"` + // Request id + ID *json.RawMessage `json:"id"` +} + +// Error is a struct that holds the error message and 
// Error holds the error message and the error code
// from a JSON-RPC response error object.
type Error struct {
	// Message is the human-readable error description.
	Message string `json:"message"`
	// ErrorCode is the JSON-RPC numeric error code.
	ErrorCode int `json:"code"`
	// Data carries optional additional error information.
	Data map[string]interface{} `json:"data"`
}
// TestWriter is a writer implementing `io.Writer`
// using the Go test logger `t.Log()`.
type TestWriter struct {
	t *testing.T
}

// Write logs p as one test log line and reports the full
// length of p consumed, with a nil error.
func (tw *TestWriter) Write(p []byte) (n int, err error) {
	tw.t.Helper()
	line := string(p)
	// t.Log appends its own newline, so strip a single trailing
	// newline from the chunk to avoid blank lines in test output.
	if last := len(line) - 1; last >= 0 && line[last] == '\n' {
		line = line[:last]
	}
	tw.t.Log(line)
	return len(p), nil
}

// NewTestWriter creates a new writer which uses
// the Go test logger to write out.
func NewTestWriter(t *testing.T) (writer io.Writer) {
	return &TestWriter{
		t: t,
	}
}