From 56bca71c206c28af2cfc87701b2a2e63d6305f17 Mon Sep 17 00:00:00 2001 From: lystopad Date: Wed, 16 Oct 2024 10:23:51 +0200 Subject: [PATCH 01/13] Do not cancel in-progress jobs for 'main' and latest release branches. (#12330) See https://github.com/erigontech/erigon/issues/12125 for more details. --- .github/workflows/ci.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1440eec2f12..65139178337 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,7 +17,11 @@ on: concurrency: group: ${{ github.ref }} - cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + cancel-in-progress: ${{ !contains(fromJSON('[ + "refs/heads/release/2.60", + "refs/heads/release/2.61", + "refs/heads/main" + ]'), github.ref) }} jobs: tests: From 235d7379402683b941e271fb607216e9aa3a29fe Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Wed, 16 Oct 2024 11:53:15 +0200 Subject: [PATCH 02/13] qa-tests: reschedule constrained-tip-tracking (#12333) Performing this test every day is too expensive and blocks other, more important tests, so it has been rescheduled to run only on Sunday. --- .github/workflows/qa-constrained-tip-tracking.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/qa-constrained-tip-tracking.yml b/.github/workflows/qa-constrained-tip-tracking.yml index 9d21670377a..d461a492bfb 100644 --- a/.github/workflows/qa-constrained-tip-tracking.yml +++ b/.github/workflows/qa-constrained-tip-tracking.yml @@ -2,7 +2,7 @@ name: QA - Constrained Tip tracking on: schedule: - - cron: '0 0 * * 1-6' # Run every night at 00:00 AM UTC except Sunday + - cron: '0 0 * * 0' # Run on Sunday at 00:00 AM UTC workflow_dispatch: # Run manually pull_request: branches: From cfa039e6834089ddf6193d349fe6f0fa28966f0e Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Wed, 16 Oct 2024 11:54:22 +0200 Subject: [PATCH 03/13] qa-tests: improve rpc bisection tool (#12334) - avoid cloning rpc-tests at each step - bump rpc-tests version --- .github/workflows/qa-rpc-test-bisection-tool.yml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/qa-rpc-test-bisection-tool.yml b/.github/workflows/qa-rpc-test-bisection-tool.yml index 1fbb90e3861..282c9faa667 100644 --- a/.github/workflows/qa-rpc-test-bisection-tool.yml +++ b/.github/workflows/qa-rpc-test-bisection-tool.yml @@ -36,6 +36,13 @@ jobs: - name: Create scripts directory run: mkdir -p $GITHUB_WORKSPACE/.github/scripts + - name: Checkout RPC Tests Repository & Install Requirements + run: | + rm -rf $GITHUB_WORKSPACE/rpc-tests + git -c advice.detachedHead=false clone --depth 1 --branch v1.00.0 https://github.com/erigontech/rpc-tests $GITHUB_WORKSPACE/rpc-tests + cd $GITHUB_WORKSPACE/rpc-tests + pip3 install -r requirements.txt + - name: Create test script run: | cat << 'EOF' > $GITHUB_WORKSPACE/.github/scripts/test_script.sh @@ -75,12 +82,6 @@ jobs: exit 125 # Skip this commit fi - # Checkout RPC Tests Repository & Install Requirements - rm -rf $GITHUB_WORKSPACE/rpc-tests - git -c advice.detachedHead=false clone --depth 1 --branch v0.42.0 https://github.com/erigontech/rpc-tests $GITHUB_WORKSPACE/rpc-tests - cd $GITHUB_WORKSPACE/rpc-tests - pip3 install -r requirements.txt - # Run the specified test cd $GITHUB_WORKSPACE/rpc-tests/integration From 9a2a792743e236553f28ca76bcdcc28cb6fcff7a Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Wed, 16 Oct 2024 
12:09:59 +0200 Subject: [PATCH 04/13] qa-tests: enable ots_getTransactionBySenderAndNonce tests (#12335) The PR https://github.com/erigontech/erigon/pull/12322 fixed the behavior of erigon relative to these tests so we re-enable them --- .github/workflows/qa-rpc-integration-tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index c7815a761e1..23e0bb2d178 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -191,8 +191,6 @@ jobs: trace_replayTransaction/test_23.tar,\ trace_replayTransaction/test_24.json,\ trace_replayTransaction/test_29.tar,\ - ots_getTransactionBySenderAndNonce/test_05.json,\ - ots_getTransactionBySenderAndNonce/test_11.json,\ ots_searchTransactionsAfter/test_01.json,\ ots_searchTransactionsAfter/test_03.json,\ ots_searchTransactionsAfter/test_04.json,\ From e0eb32221326524737c3f67ed26c3b59ea1b6165 Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Wed, 16 Oct 2024 12:20:31 +0200 Subject: [PATCH 05/13] qa-tests: bump rpc-tests version (#12336) Update the rpc test suite to a new version that better supports Erigon v3 --- .github/workflows/qa-rpc-integration-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index 23e0bb2d178..fc6c0e689fc 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -31,7 +31,7 @@ jobs: - name: Checkout RPC Tests Repository & Install Requirements run: | rm -rf ${{ runner.workspace }}/rpc-tests - git -c advice.detachedHead=false clone --depth 1 --branch v0.42.0 https://github.com/erigontech/rpc-tests ${{runner.workspace}}/rpc-tests + git -c advice.detachedHead=false clone --depth 1 --branch v1.00.0 https://github.com/erigontech/rpc-tests ${{runner.workspace}}/rpc-tests cd ${{ runner.workspace }}/rpc-tests pip3 install -r requirements.txt From dca92a5da3345bf8541a57102470fb7484701bd1 Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Wed, 16 Oct 2024 14:22:03 +0200 Subject: [PATCH 06/13] qa-tests: fix rpc-tests version (#12340) --- .github/workflows/qa-rpc-integration-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index fc6c0e689fc..11c7b0c556b 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -31,7 +31,7 @@ jobs: - name: Checkout RPC Tests Repository & Install Requirements run: | rm -rf ${{ runner.workspace }}/rpc-tests - git -c advice.detachedHead=false clone --depth 1 --branch v1.00.0 https://github.com/erigontech/rpc-tests ${{runner.workspace}}/rpc-tests + git -c advice.detachedHead=false clone --depth 1 --branch v1.0.0 https://github.com/erigontech/rpc-tests ${{runner.workspace}}/rpc-tests cd ${{ runner.workspace }}/rpc-tests pip3 install -r requirements.txt From 1b50e16681ccce9ca575f0b71f4bbecf788dbc36 Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Wed, 16 Oct 2024 15:38:54 +0200 Subject: [PATCH 07/13] qa-tests: disable failing rpc tests (#12341) --- .github/workflows/qa-rpc-integration-tests.yml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/workflows/qa-rpc-integration-tests.yml 
b/.github/workflows/qa-rpc-integration-tests.yml index 11c7b0c556b..455590d49bb 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -278,7 +278,19 @@ jobs: trace_call/test_18.json,\ txpool_content/test_01.json,\ txpool_status/test_1.json,\ - web3_clientVersion/test_1.json + web3_clientVersion/test_1.json,\ + eth_estimateGas/test_14.json,\ + trace_replayBlockTransactions/test_26.tar,\ + trace_replayBlockTransactions/test_28.tar,\ + trace_replayBlockTransactions/test_29.tar,\ + trace_replayBlockTransactions/test_31.tar,\ + trace_replayBlockTransactions/test_32.tar,\ + trace_replayBlockTransactions/test_33.tar,\ + trace_replayBlockTransactions/test_34.tar,\ + trace_replayBlockTransactions/test_35.tar,\ + trace_replayTransaction/test_31.json,\ + trace_replayTransaction/test_32.json,\ + trace_replayTransaction/test_34.json # Capture test runner script exit status test_exit_status=$? From 315f6387f1532027350ca587da458465c019104c Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Wed, 16 Oct 2024 15:31:16 +0100 Subject: [PATCH 08/13] polygon/bridge: fix BorTxLookup encoding discrepancy (#12345) ``` EROR[10-16|01:26:00.055] Staged Sync err="runtime error: index out of range [7] with length 3, trace: [stageloop.go:202 panic.go:770 panic.go:114 binary.go:183 db.go:548 db.go:474 stage_polygon_sync.go:1306 stage_polygon_sync.go:1816 stage_polygon_sync.go:578 stage_polygon_sync.g o:196 default_stages.go:479 sync.go:531 sync.go:410 stageloop.go:249 stageloop.go:101 asm_arm64.s:1222]" ``` Fixes an index out of range found while running Astrid stage integration. The problem was due to encoding mismatch in the BorTxLookup table. The current code uses big.Int byte encoding for storing the block number, while the Astrid Bridge assumed binary.BigEndian.Uint64 encoding. This PR fixes the runtime error and brings the 2 implementations in sync by keeping the new one backward compatible. --- polygon/bridge/db.go | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/polygon/bridge/db.go b/polygon/bridge/db.go index 0892326ca8e..3e7c3ab194e 100644 --- a/polygon/bridge/db.go +++ b/polygon/bridge/db.go @@ -22,6 +22,7 @@ import ( "encoding/binary" "errors" "fmt" + "math/big" "time" libcommon "github.com/erigontech/erigon-lib/common" @@ -217,12 +218,9 @@ func (s *MdbxStore) PutEventTxnToBlockNum(ctx context.Context, eventTxnToBlockNu } defer tx.Rollback() - vByte := make([]byte, 8) - + vBigNum := new(big.Int) for k, v := range eventTxnToBlockNum { - binary.BigEndian.PutUint64(vByte, v) - - err = tx.Put(kv.BorTxLookup, k.Bytes(), vByte) + err = tx.Put(kv.BorTxLookup, k.Bytes(), vBigNum.SetUint64(v).Bytes()) if err != nil { return err } @@ -248,7 +246,7 @@ func (s *MdbxStore) EventTxnToBlockNum(ctx context.Context, borTxHash libcommon. return blockNum, false, nil } - blockNum = binary.BigEndian.Uint64(v) + blockNum = new(big.Int).SetBytes(v).Uint64() return blockNum, true, nil } @@ -474,7 +472,7 @@ func Unwind(tx kv.RwTx, blockNum uint64) error { return UnwindEventTxnToBlockNum(tx, blockNum) } -// UnwindEventProcessedBlocks deletes data in kv.BorEventProcessedBlocks. +// UnwindBlockNumToEventID deletes data in kv.BorEventProcessedBlocks. // The blockNum parameter is exclusive, i.e. only data in the range (blockNum, last] is deleted. 
func UnwindBlockNumToEventID(tx kv.RwTx, blockNum uint64) error { c, err := tx.RwCursor(kv.BorEventNums) @@ -532,7 +530,7 @@ func UnwindEventProcessedBlocks(tx kv.RwTx, blockNum uint64) error { return err } -// UnwindEventProcessedBlocks deletes data in kv.BorTxLookup. +// UnwindEventTxnToBlockNum deletes data in kv.BorTxLookup. // The blockNum parameter is exclusive, i.e. only data in the range (blockNum, last] is deleted. func UnwindEventTxnToBlockNum(tx kv.RwTx, blockNum uint64) error { c, err := tx.RwCursor(kv.BorTxLookup) @@ -541,11 +539,10 @@ func UnwindEventTxnToBlockNum(tx kv.RwTx, blockNum uint64) error { } defer c.Close() - blockNumBytes := make([]byte, 8) - binary.BigEndian.PutUint64(blockNumBytes, blockNum) + blockNumBig := new(big.Int) var k, v []byte for k, v, err = c.Last(); err == nil && k != nil; k, v, err = c.Prev() { - if currentBlockNum := binary.BigEndian.Uint64(v); currentBlockNum <= blockNum { + if currentBlockNum := blockNumBig.SetBytes(v).Uint64(); currentBlockNum <= blockNum { break } From 67edaa82133a4bef1760c7227c6f306864a708ef Mon Sep 17 00:00:00 2001 From: Mark Holt <135143369+mh0lt@users.noreply.github.com> Date: Wed, 16 Oct 2024 15:38:42 +0100 Subject: [PATCH 09/13] Dont reopen idx with refs (#12346) It looks like the closeIdx function here will close indexes while they are referenced by transactions. This adds a check to stop that. --- turbo/snapshotsync/freezeblocks/block_snapshots.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index b7d44c1e304..aabeb4d88e7 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -354,6 +354,10 @@ func (s *DirtySegment) reopenIdxIfNeed(dir string, optimistic bool) (err error) } func (s *DirtySegment) reopenIdx(dir string) (err error) { + if s.refcount.Load() > 0 { + return nil + } + s.closeIdx() if s.Decompressor == nil { return nil From 29fc481071314960d5859a595064b79cc1a41f66 Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Wed, 16 Oct 2024 19:20:03 +0100 Subject: [PATCH 10/13] astrid: fix ReportBadHeaderPoS nil ptr in standalone (#12347) Run into below error when running Astrid standalone. Handling blocks in Astrid is done by the driver, so we should not be calling ReportBadHeaderPoS in execution when run outside of Astrid. Additionally, ReportBadHeaderPoS should currently only be called for Ethereum-based PoS chains. ``` [INFO] [10-16|14:34:46.953] [4/6 Execution] Done blk=4540047 blks=4895 blk/s=1091.2 txs=10820 tx/s=2.41k gas/s=47.01M buf=566.5KB/512.0MB stepsInDB=0.00 step=5.9 alloc=307.2MB sys=920.5MB panic: assignment to entry in nil map goroutine 862285 [running]: github.com/erigontech/erigon/turbo/stages/headerdownload.(*HeaderDownload).ReportBadHeaderPoS(0xc000b46a00, {0x9d, 0x6f, 0x66, 0x23, 0x29, 0x7d, 0x90, 0x89, 0x20, ...}, ...) github.com/erigontech/erigon/turbo/stages/headerdownload/header_algos.go:167 +0x87 github.com/erigontech/erigon/eth/stagedsync.ExecV3({_, _}, _, {_, _}, _, {{0x32f5f90, 0xc001a840a8}, 0x20000000, {0x1, ...}, ...}, ...) github.com/erigontech/erigon/eth/stagedsync/exec3.go:916 +0x4e39 github.com/erigontech/erigon/eth/stagedsync.ExecBlockV3(_, {_, _}, {{_, _}, {_, _}, _}, _, {0x32de288, ...}, ...) 
github.com/erigontech/erigon/eth/stagedsync/stage_execute.go:163 +0x20f github.com/erigontech/erigon/eth/stagedsync.SpawnExecuteBlocksStage(_, {_, _}, {{_, _}, {_, _}, _}, _, {0x32de288, ...}, ...) github.com/erigontech/erigon/eth/stagedsync/stage_execute.go:253 +0x10e github.com/erigontech/erigon/eth/stagedsync.PipelineStages.func10(0x9?, 0x7fb39933fe18?, {0x32d7fe0?, 0xc000a08640?}, {{0x3316c90, 0xc00cbc6700}, {0x0, 0x0}, 0x0}, {0x32f4a08, ...}) github.com/erigontech/erigon/eth/stagedsync/default_stages.go:238 +0xee github.com/erigontech/erigon/eth/stagedsync.(*Sync).runStage(0xc000a08640, 0xc001d53810, {0x32f5f90, 0xc001a840a8}, {{0x3316c90, 0xc00cbc6700}, {0x0, 0x0}, 0x0}, 0x1, ...) github.com/erigontech/erigon/eth/stagedsync/sync.go:531 +0x190 github.com/erigontech/erigon/eth/stagedsync.(*Sync).Run(0xc000a08640, {0x32f5f90, 0xc001a840a8}, {{0x3316c90, 0xc00cbc6700}, {0x0, 0x0}, 0x0}, 0x73?, 0x0) github.com/erigontech/erigon/eth/stagedsync/sync.go:410 +0x2ad github.com/erigontech/erigon/turbo/execution/eth1.(*EthereumExecutionModule).updateForkChoice(0xc000a08780, {0x32de288, 0xc001d52190}, {0xb5, 0x5d, 0xb5, 0x42, 0x65, 0x2a, 0x58, ...}, ...) github.com/erigontech/erigon/turbo/execution/eth1/forkchoice.go:439 +0x13e5 created by github.com/erigontech/erigon/turbo/execution/eth1.(*EthereumExecutionModule).UpdateForkChoice in goroutine 206 github.com/erigontech/erigon/turbo/execution/eth1/forkchoice.go:129 +0x316 ``` --- eth/stagedsync/exec3.go | 7 ++++--- eth/stagedsync/stage_execute.go | 1 + eth/stagedsync/stage_senders.go | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go index 57a56b38c3f..1dc5ca0ef19 100644 --- a/eth/stagedsync/exec3.go +++ b/eth/stagedsync/exec3.go @@ -30,10 +30,11 @@ import ( "time" "github.com/c2h5oh/datasize" - "github.com/erigontech/erigon/core/rawdb/rawtemporaldb" "github.com/erigontech/mdbx-go/mdbx" "golang.org/x/sync/errgroup" + "github.com/erigontech/erigon/core/rawdb/rawtemporaldb" + "github.com/erigontech/erigon-lib/chain" "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/cmp" @@ -912,7 +913,7 @@ Loop: return err } logger.Warn(fmt.Sprintf("[%s] Execution failed", execStage.LogPrefix()), "block", blockNum, "txNum", txTask.TxNum, "hash", header.Hash().String(), "err", err) - if cfg.hd != nil && errors.Is(err, consensus.ErrInvalidBlock) { + if cfg.hd != nil && cfg.hd.POSSync() && errors.Is(err, consensus.ErrInvalidBlock) { cfg.hd.ReportBadHeaderPoS(header.Hash(), header.ParentHash) } if cfg.badBlockHalt { @@ -1226,7 +1227,7 @@ func flushAndCheckCommitmentV3(ctx context.Context, header *types.Header, applyT if cfg.badBlockHalt { return false, errors.New("wrong trie root") } - if cfg.hd != nil { + if cfg.hd != nil && cfg.hd.POSSync() { cfg.hd.ReportBadHeaderPoS(header.Hash(), header.ParentHash) } minBlockNum := e.BlockNumber diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index d1c02c64333..8004a9dc247 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -63,6 +63,7 @@ const ( type headerDownloader interface { ReportBadHeaderPoS(badHeader, lastValidAncestor common.Hash) + POSSync() bool } type ExecuteBlockCfg struct { diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index c159095a728..61845af230f 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -282,7 +282,7 @@ Loop: return minBlockErr } minHeader := rawdb.ReadHeader(tx, 
minBlockHash, minBlockNum) - if cfg.hd != nil && errors.Is(minBlockErr, consensus.ErrInvalidBlock) { + if cfg.hd != nil && cfg.hd.POSSync() && errors.Is(minBlockErr, consensus.ErrInvalidBlock) { cfg.hd.ReportBadHeaderPoS(minBlockHash, minHeader.ParentHash) } From 8d8aa90773af53814d762757d0851b88eaa649a3 Mon Sep 17 00:00:00 2001 From: Mark Holt <135143369+mh0lt@users.noreply.github.com> Date: Wed, 16 Oct 2024 19:46:44 +0100 Subject: [PATCH 11/13] Optionally disable fcu timer (#12348) For internal users of the execution api allow the option of not timing out so they can wait indefinitely. To avoid api changes this is signaled by passing a 0 timeout which is interpreted as wait forever rather than return immediately. --- polygon/sync/execution_client.go | 3 +-- turbo/execution/eth1/forkchoice.go | 29 +++++++++++++++++++++-------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/polygon/sync/execution_client.go b/polygon/sync/execution_client.go index 60958103fbc..6e5bca2d4c0 100644 --- a/polygon/sync/execution_client.go +++ b/polygon/sync/execution_client.go @@ -84,13 +84,12 @@ func (e *executionClient) InsertBlocks(ctx context.Context, blocks []*types.Bloc func (e *executionClient) UpdateForkChoice(ctx context.Context, tip *types.Header, finalizedHeader *types.Header) (common.Hash, error) { tipHash := tip.Hash() - const timeout = 5 * time.Second request := executionproto.ForkChoice{ HeadBlockHash: gointerfaces.ConvertHashToH256(tipHash), SafeBlockHash: gointerfaces.ConvertHashToH256(tipHash), FinalizedBlockHash: gointerfaces.ConvertHashToH256(finalizedHeader.Hash()), - Timeout: uint64(timeout.Milliseconds()), + Timeout: 0, } response, err := e.client.UpdateForkChoice(ctx, &request) diff --git a/turbo/execution/eth1/forkchoice.go b/turbo/execution/eth1/forkchoice.go index d24203e57f9..5a83991be80 100644 --- a/turbo/execution/eth1/forkchoice.go +++ b/turbo/execution/eth1/forkchoice.go @@ -127,19 +127,32 @@ func (e *EthereumExecutionModule) UpdateForkChoice(ctx context.Context, req *exe // So we wait at most the amount specified by req.Timeout before just sending out go e.updateForkChoice(e.bacgroundCtx, blockHash, safeHash, finalizedHash, outcomeCh) - fcuTimer := time.NewTimer(time.Duration(req.Timeout) * time.Millisecond) + + if req.Timeout > 0 { + fcuTimer := time.NewTimer(time.Duration(req.Timeout) * time.Millisecond) + + select { + case <-fcuTimer.C: + e.logger.Debug("treating forkChoiceUpdated as asynchronous as it is taking too long") + return &execution.ForkChoiceReceipt{ + LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{}), + Status: execution.ExecutionStatus_Busy, + }, nil + case outcome := <-outcomeCh: + return outcome.receipt, outcome.err + case <-ctx.Done(): + e.logger.Debug("forkChoiceUpdate cancelled") + return nil, ctx.Err() + } + } select { - case <-fcuTimer.C: - e.logger.Debug("treating forkChoiceUpdated as asynchronous as it is taking too long") - return &execution.ForkChoiceReceipt{ - LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{}), - Status: execution.ExecutionStatus_Busy, - }, nil case outcome := <-outcomeCh: return outcome.receipt, outcome.err + case <-ctx.Done(): + e.logger.Debug("forkChoiceUpdate cancelled") + return nil, ctx.Err() } - } func writeForkChoiceHashes(tx kv.RwTx, blockHash, safeHash, finalizedHash common.Hash) { From 911a910aa9d9664f92e666f2a12f75f0f692fdd7 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Thu, 17 Oct 2024 03:03:55 +0700 Subject: [PATCH 12/13] e3: compatibility with e2 on future blocks rpc (#12329) 
for https://github.com/erigontech/erigon/issues/12323 --------- Co-authored-by: Michelangelo Riccobene --- .github/workflows/qa-rpc-integration-tests.yml | 1 - turbo/rpchelper/helper.go | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index 455590d49bb..c2275ca6c78 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -265,7 +265,6 @@ jobs: ots_getBlockTransactions/test_03.json,\ ots_getBlockTransactions/test_04.json,\ ots_getBlockTransactions/test_05.json,\ - ots_hasCode/test_09.json,\ ots_searchTransactionsAfter/test_14.json,\ ots_searchTransactionsBefore/test_13.json,\ ots_searchTransactionsBefore/test_14.json,\ diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index ae7988829fe..da8b03c490d 100644 --- a/turbo/rpchelper/helper.go +++ b/turbo/rpchelper/helper.go @@ -110,8 +110,8 @@ func _GetBlockNumber(ctx context.Context, requireCanonical bool, blockNrOrHash r if err != nil { return 0, libcommon.Hash{}, false, false, err } - if !ok { - return 0, libcommon.Hash{}, false, false, nil + if !ok { //future blocks must behave as "latest" + return blockNumber, hash, blockNumber == plainStateBlockNumber, true, nil } } else { number, err := br.HeaderNumber(ctx, tx, hash) From 04f6439459e3c92738c3ffd542335eddfe8fb7d6 Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Wed, 16 Oct 2024 22:40:57 +0100 Subject: [PATCH 13/13] polygon/heimdall: optimise producers api at chain tip (#12312) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Calls to the Producers API take ~12ms in the worst case ``` [DBUG] [10-11|20:02:18.270] [bor.heimdall] producers api timing blockNum=62918640 time=12.430836ms increments=399 [DBUG] [10-11|20:02:18.283] [bor.heimdall] producers api timing blockNum=62918640 time=12.440876ms increments=399 ``` The API is called twice at chain tip when processing a new block, adding ~25ms of delay per block, which can easily be shaved to nothing by memoising the results with an LRU cache.
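As a rough sketch of the idea (not the actual Erigon code; the type and function names here are invented for illustration, while the real change lives in the span_block_producers_tracker.go diff below), memoising a per-sprint computation with hashicorp/golang-lru/v2 looks something like this:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

// producerSelection stands in for the expensive-to-compute result.
type producerSelection struct{ signers []string }

type tracker struct {
	// keyed by sprint number, so repeated queries at chain tip hit the cache
	recent *lru.Cache[uint64, producerSelection]
}

func newTracker() *tracker {
	cache, err := lru.New[uint64, producerSelection](1024)
	if err != nil {
		panic(err)
	}
	return &tracker{recent: cache}
}

// producers returns the memoised selection for a sprint and only falls back
// to the expensive recalculation on a cache miss.
func (t *tracker) producers(sprintNum uint64) producerSelection {
	if sel, ok := t.recent.Get(sprintNum); ok {
		return sel // fast path: same sprint queried again at chain tip
	}
	sel := producerSelection{signers: []string{"recomputed"}} // placeholder for the costly work
	t.recent.Add(sprintNum, sel)
	return sel
}

func main() {
	t := newTracker()
	t.producers(399)              // miss: performs the expensive recalculation
	fmt.Println(t.producers(399)) // hit: served straight from the LRU cache
}
```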
After optimisation ``` [DBUG] [10-16|12:37:35.628] [bor.heimdall] producers api timing blockNum=63108752 time=14.48µs increments=1 [DBUG] [10-16|12:37:35.628] [bor.heimdall] producers api timing blockNum=63108752 time=11.54µs increments=0 ``` --- cmd/rpcdaemon/cli/config.go | 13 +-- eth/backend.go | 12 +-- eth/stagedsync/stage_polygon_sync.go | 4 +- polygon/heimdall/reader.go | 19 ++-- polygon/heimdall/service.go | 25 ++--- polygon/heimdall/service_test.go | 8 +- .../heimdall/span_block_producers_tracker.go | 98 +++++++++++++------ .../blockNum_14323520.json | 70 +++++++++++++ .../blockNum_14323584.json | 70 +++++++++++++ 9 files changed, 252 insertions(+), 67 deletions(-) create mode 100644 polygon/heimdall/testdata/mainnet/getSnapshotProposerSequence/blockNum_14323520.json create mode 100644 polygon/heimdall/testdata/mainnet/getSnapshotProposerSequence/blockNum_14323584.json diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index 02de6c062e0..e03bf01637a 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -73,6 +73,7 @@ import ( "github.com/erigontech/erigon/node" "github.com/erigontech/erigon/node/nodecfg" "github.com/erigontech/erigon/polygon/bor" + "github.com/erigontech/erigon/polygon/bor/borcfg" "github.com/erigontech/erigon/polygon/bor/valset" "github.com/erigontech/erigon/polygon/bridge" "github.com/erigontech/erigon/polygon/heimdall" @@ -532,12 +533,12 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger } heimdallConfig := heimdall.ReaderConfig{ - Ctx: ctx, - CalculateSprintNumberFn: cc.Bor.CalculateSprintNumber, - DataDir: cfg.DataDir, - TempDir: cfg.Dirs.Tmp, - Logger: logger, - RoTxLimit: roTxLimit, + Ctx: ctx, + BorConfig: cc.Bor.(*borcfg.BorConfig), + DataDir: cfg.DataDir, + TempDir: cfg.Dirs.Tmp, + Logger: logger, + RoTxLimit: roTxLimit, } heimdallReader, err = heimdall.AssembleReader(heimdallConfig) if err != nil { diff --git a/eth/backend.go b/eth/backend.go index d9d478cf0d6..c6353a4103a 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -562,12 +562,12 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger polygonBridge = bridge.Assemble(bridgeConfig) heimdallConfig := heimdall.ServiceConfig{ - CalculateSprintNumberFn: borConfig.CalculateSprintNumber, - HeimdallURL: config.HeimdallURL, - DataDir: dirs.DataDir, - TempDir: tmpdir, - Logger: logger, - RoTxLimit: roTxLimit, + BorConfig: borConfig, + HeimdallURL: config.HeimdallURL, + DataDir: dirs.DataDir, + TempDir: tmpdir, + Logger: logger, + RoTxLimit: roTxLimit, } heimdallService = heimdall.AssembleService(heimdallConfig) diff --git a/eth/stagedsync/stage_polygon_sync.go b/eth/stagedsync/stage_polygon_sync.go index 6b81d8b70a6..9c9e94ac7d9 100644 --- a/eth/stagedsync/stage_polygon_sync.go +++ b/eth/stagedsync/stage_polygon_sync.go @@ -98,8 +98,8 @@ func NewPolygonSyncStageCfg( txActionStream: txActionStream, } borConfig := chainConfig.Bor.(*borcfg.BorConfig) - heimdallReader := heimdall.NewReader(borConfig.CalculateSprintNumber, heimdallStore, logger) - heimdallService := heimdall.NewService(borConfig.CalculateSprintNumber, heimdallClient, heimdallStore, logger, heimdallReader) + heimdallReader := heimdall.NewReader(borConfig, heimdallStore, logger) + heimdallService := heimdall.NewService(borConfig, heimdallClient, heimdallStore, logger, heimdallReader) bridgeService := bridge.NewBridge(bridgeStore, logger, borConfig, heimdallClient, nil) p2pService := p2p.NewService(maxPeers, logger, sentry, 
statusDataProvider.GetStatusData) checkpointVerifier := polygonsync.VerifyCheckpointHeaders diff --git a/polygon/heimdall/reader.go b/polygon/heimdall/reader.go index 6d164e51951..4c451908bc8 100644 --- a/polygon/heimdall/reader.go +++ b/polygon/heimdall/reader.go @@ -10,6 +10,7 @@ import ( "github.com/erigontech/erigon-lib/gointerfaces" remote "github.com/erigontech/erigon-lib/gointerfaces/remoteproto" "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/polygon/bor/borcfg" "github.com/erigontech/erigon/polygon/bor/valset" ) @@ -20,12 +21,12 @@ type Reader struct { } type ReaderConfig struct { - Ctx context.Context - CalculateSprintNumberFn CalculateSprintNumberFunc - DataDir string - TempDir string - Logger log.Logger - RoTxLimit int64 + Ctx context.Context + BorConfig *borcfg.BorConfig + DataDir string + TempDir string + Logger log.Logger + RoTxLimit int64 } // AssembleReader creates and opens the MDBX store. For use cases where the store is only being read from. Must call Close. @@ -37,14 +38,14 @@ func AssembleReader(config ReaderConfig) (*Reader, error) { return nil, err } - return NewReader(config.CalculateSprintNumberFn, store, config.Logger), nil + return NewReader(config.BorConfig, store, config.Logger), nil } -func NewReader(calculateSprintNumber CalculateSprintNumberFunc, store ServiceStore, logger log.Logger) *Reader { +func NewReader(borConfig *borcfg.BorConfig, store ServiceStore, logger log.Logger) *Reader { return &Reader{ logger: logger, store: store, - spanBlockProducersTracker: newSpanBlockProducersTracker(logger, calculateSprintNumber, store.SpanBlockProducerSelections()), + spanBlockProducersTracker: newSpanBlockProducersTracker(logger, borConfig, store.SpanBlockProducerSelections()), } } diff --git a/polygon/heimdall/service.go b/polygon/heimdall/service.go index 060a6d499dd..e0e6c760e9d 100644 --- a/polygon/heimdall/service.go +++ b/polygon/heimdall/service.go @@ -26,17 +26,18 @@ import ( libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/polygon/bor/borcfg" "github.com/erigontech/erigon/polygon/bor/valset" "github.com/erigontech/erigon/polygon/polygoncommon" ) type ServiceConfig struct { - CalculateSprintNumberFn CalculateSprintNumberFunc - HeimdallURL string - DataDir string - TempDir string - Logger log.Logger - RoTxLimit int64 + BorConfig *borcfg.BorConfig + HeimdallURL string + DataDir string + TempDir string + Logger log.Logger + RoTxLimit int64 } type Service interface { @@ -64,15 +65,15 @@ type service struct { func AssembleService(config ServiceConfig) Service { store := NewMdbxServiceStore(config.Logger, config.DataDir, config.TempDir, config.RoTxLimit) client := NewHeimdallClient(config.HeimdallURL, config.Logger) - reader := NewReader(config.CalculateSprintNumberFn, store, config.Logger) - return NewService(config.CalculateSprintNumberFn, client, store, config.Logger, reader) + reader := NewReader(config.BorConfig, store, config.Logger) + return NewService(config.BorConfig, client, store, config.Logger, reader) } -func NewService(calculateSprintNumberFn CalculateSprintNumberFunc, client HeimdallClient, store ServiceStore, logger log.Logger, reader *Reader) Service { - return newService(calculateSprintNumberFn, client, store, logger, reader) +func NewService(borConfig *borcfg.BorConfig, client HeimdallClient, store ServiceStore, logger log.Logger, reader *Reader) Service { + return newService(borConfig, client, store, logger, reader) } -func 
newService(calculateSprintNumberFn CalculateSprintNumberFunc, client HeimdallClient, store ServiceStore, logger log.Logger, reader *Reader) *service { +func newService(borConfig *borcfg.BorConfig, client HeimdallClient, store ServiceStore, logger log.Logger, reader *Reader) *service { checkpointFetcher := newCheckpointFetcher(client, logger) milestoneFetcher := newMilestoneFetcher(client, logger) spanFetcher := newSpanFetcher(client, logger) @@ -119,7 +120,7 @@ func newService(calculateSprintNumberFn CalculateSprintNumberFunc, client Heimda checkpointScraper: checkpointScraper, milestoneScraper: milestoneScraper, spanScraper: spanScraper, - spanBlockProducersTracker: newSpanBlockProducersTracker(logger, calculateSprintNumberFn, store.SpanBlockProducerSelections()), + spanBlockProducersTracker: newSpanBlockProducersTracker(logger, borConfig, store.SpanBlockProducerSelections()), } } diff --git a/polygon/heimdall/service_test.go b/polygon/heimdall/service_test.go index 376f6eddd80..c7aa8889f3f 100644 --- a/polygon/heimdall/service_test.go +++ b/polygon/heimdall/service_test.go @@ -103,7 +103,9 @@ func TestServiceWithMainnetData(t *testing.T) { 14_000_000, 14_250_000, 14_300_000, - 14_323_456, // span 2239 start + 14_323_456, // span 2239 start (sprint 1 of span 2239) + 14_323_520, // span 2239 start (sprint 2 of span 2239) - to test recent producers lru caching + 14_323_584, // span 2239 start (sprint 3 of span 2239) - to test recent producers lru caching 14_325_000, 14_329_854, 14_329_855, // span 2239 end @@ -161,8 +163,8 @@ func (suite *ServiceTestSuite) SetupSuite() { suite.setupSpans() suite.setupCheckpoints() suite.setupMilestones() - reader := NewReader(borConfig.CalculateSprintNumber, store, logger) - suite.service = newService(borConfig.CalculateSprintNumber, suite.client, store, logger, reader) + reader := NewReader(borConfig, store, logger) + suite.service = newService(borConfig, suite.client, store, logger, reader) err := suite.service.store.Prepare(suite.ctx) require.NoError(suite.T(), err) diff --git a/polygon/heimdall/span_block_producers_tracker.go b/polygon/heimdall/span_block_producers_tracker.go index daa6c65b983..89e6193935a 100644 --- a/polygon/heimdall/span_block_producers_tracker.go +++ b/polygon/heimdall/span_block_producers_tracker.go @@ -23,33 +23,41 @@ import ( "sync/atomic" "time" + lru "github.com/hashicorp/golang-lru/v2" + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/polygon/bor/borcfg" "github.com/erigontech/erigon/polygon/bor/valset" ) -type CalculateSprintNumberFunc func(uint64) uint64 - func newSpanBlockProducersTracker( logger log.Logger, - calculateSprintNumber CalculateSprintNumberFunc, + borConfig *borcfg.BorConfig, store EntityStore[*SpanBlockProducerSelection], ) *spanBlockProducersTracker { + recentSelectionsLru, err := lru.New[uint64, SpanBlockProducerSelection](1024) + if err != nil { + panic(err) + } + return &spanBlockProducersTracker{ - logger: logger, - calculateSprintNumber: calculateSprintNumber, - store: store, - newSpans: make(chan *Span), - idleSignal: make(chan struct{}), + logger: logger, + borConfig: borConfig, + store: store, + recentSelections: recentSelectionsLru, + newSpans: make(chan *Span), + idleSignal: make(chan struct{}), } } type spanBlockProducersTracker struct { - logger log.Logger - calculateSprintNumber CalculateSprintNumberFunc - store EntityStore[*SpanBlockProducerSelection] - newSpans chan *Span - queued atomic.Int32 - idleSignal chan struct{} + logger log.Logger + borConfig *borcfg.BorConfig 
+ store EntityStore[*SpanBlockProducerSelection] + recentSelections *lru.Cache[uint64, SpanBlockProducerSelection] // sprint number -> SpanBlockProducerSelection + newSpans chan *Span + queued atomic.Int32 + idleSignal chan struct{} } func (t *spanBlockProducersTracker) Run(ctx context.Context) error { @@ -145,8 +153,8 @@ func (t *spanBlockProducersTracker) ObserveSpan(ctx context.Context, newSpan *Sp return err } - spanStartSprintNum := t.calculateSprintNumber(lastProducerSelection.StartBlock) - spanEndSprintNum := t.calculateSprintNumber(lastProducerSelection.EndBlock) + spanStartSprintNum := t.borConfig.CalculateSprintNumber(lastProducerSelection.StartBlock) + spanEndSprintNum := t.borConfig.CalculateSprintNumber(lastProducerSelection.EndBlock) increments := int(spanEndSprintNum - spanStartSprintNum) for i := 0; i < increments; i++ { producers = valset.GetUpdatedValidatorSet(producers, producers.Validators, t.logger) @@ -172,36 +180,68 @@ func (t *spanBlockProducersTracker) ObserveSpan(ctx context.Context, newSpan *Sp func (t *spanBlockProducersTracker) Producers(ctx context.Context, blockNum uint64) (*valset.ValidatorSet, error) { startTime := time.Now() + + producers, increments, err := t.producers(ctx, blockNum) + if err != nil { + return nil, err + } + + t.logger.Debug( + heimdallLogPrefix("producers api timing"), + "blockNum", blockNum, + "time", time.Since(startTime), + "increments", increments, + ) + + return producers, nil +} + +func (t *spanBlockProducersTracker) producers(ctx context.Context, blockNum uint64) (*valset.ValidatorSet, int, error) { + currentSprintNum := t.borConfig.CalculateSprintNumber(blockNum) + + // have we previously calculated the producers for the same sprint num (chain tip optimisation) + if selection, ok := t.recentSelections.Get(currentSprintNum); ok { + return selection.Producers.Copy(), 0, nil + } + + // have we previously calculated the producers for the previous sprint num of the same span (chain tip optimisation) spanId := SpanIdAt(blockNum) + var prevSprintNum uint64 + if currentSprintNum > 0 { + prevSprintNum = currentSprintNum - 1 + } + if selection, ok := t.recentSelections.Get(prevSprintNum); ok && spanId == selection.SpanId { + producersCopy := selection.Producers.Copy() + producersCopy.IncrementProposerPriority(1) + selectionCopy := selection + selectionCopy.Producers = producersCopy + t.recentSelections.Add(currentSprintNum, selectionCopy) + return producersCopy, 1, nil + } + + // no recent selection that we can easily use, re-calculate from DB producerSelection, ok, err := t.store.Entity(ctx, uint64(spanId)) if err != nil { - return nil, err + return nil, 0, err } if !ok { - return nil, errors.New("no producers found for block num") + return nil, 0, errors.New("no producers found for block num") } producers := producerSelection.Producers producers.UpdateValidatorMap() err = producers.UpdateTotalVotingPower() if err != nil { - return nil, err + return nil, 0, err } - spanStartSprintNum := t.calculateSprintNumber(producerSelection.StartBlock) - currentSprintNum := t.calculateSprintNumber(blockNum) + spanStartSprintNum := t.borConfig.CalculateSprintNumber(producerSelection.StartBlock) increments := int(currentSprintNum - spanStartSprintNum) for i := 0; i < increments; i++ { producers = valset.GetUpdatedValidatorSet(producers, producers.Validators, t.logger) producers.IncrementProposerPriority(1) } - t.logger.Debug( - heimdallLogPrefix("producers api timing"), - "blockNum", blockNum, - "time", time.Since(startTime), - "increments", increments, 
- ) - - return producers, nil + t.recentSelections.Add(currentSprintNum, *producerSelection) + return producers, increments, nil } diff --git a/polygon/heimdall/testdata/mainnet/getSnapshotProposerSequence/blockNum_14323520.json b/polygon/heimdall/testdata/mainnet/getSnapshotProposerSequence/blockNum_14323520.json new file mode 100644 index 00000000000..5473f1c859b --- /dev/null +++ b/polygon/heimdall/testdata/mainnet/getSnapshotProposerSequence/blockNum_14323520.json @@ -0,0 +1,70 @@ +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "Signers": [ + { + "Signer": "0x72f93a2740e00112d5f2cef404c0aa16fae21fa4", + "Difficulty": 15 + }, + { + "Signer": "0x7b5000af8ab69fd59eb0d4f5762bff57c9c04385", + "Difficulty": 14 + }, + { + "Signer": "0x7c7379531b2aee82e4ca06d4175d13b9cbeafd49", + "Difficulty": 13 + }, + { + "Signer": "0x8e9700392f9246a6c5b32ee3ecef586f156ed683", + "Difficulty": 12 + }, + { + "Signer": "0xb702f1c9154ac9c08da247a8e30ee6f2f3373f41", + "Difficulty": 11 + }, + { + "Signer": "0xc35649ae99be820c7b200a0add09b96d7032d232", + "Difficulty": 10 + }, + { + "Signer": "0xc6869257205e20c2a43cb31345db534aecb49f6e", + "Difficulty": 9 + }, + { + "Signer": "0xddb833b9e0e3f2f521480e7bcb3e676e0737047d", + "Difficulty": 8 + }, + { + "Signer": "0xe4cd4c302befddf3d544301369ae3ed1481652fd", + "Difficulty": 7 + }, + { + "Signer": "0xe77bbfd8ed65720f187efdd109e38d75eaca7385", + "Difficulty": 6 + }, + { + "Signer": "0xe7e2cb8c81c10ff191a73fe266788c9ce62ec754", + "Difficulty": 5 + }, + { + "Signer": "0x02f70172f7f490653665c9bfac0666147c8af1f5", + "Difficulty": 4 + }, + { + "Signer": "0x1ca971963bdb4ba2bf337c90660674acff5beb3f", + "Difficulty": 3 + }, + { + "Signer": "0x448aa1665fe1fae6d1a00a9209ea62d7dcd81a4b", + "Difficulty": 2 + }, + { + "Signer": "0x46a3a41bd932244dd08186e4c19f1a7e48cbcdf4", + "Difficulty": 1 + } + ], + "Diff": 15, + "Author": "0x72f93a2740e00112d5f2cef404c0aa16fae21fa4" + } +} diff --git a/polygon/heimdall/testdata/mainnet/getSnapshotProposerSequence/blockNum_14323584.json b/polygon/heimdall/testdata/mainnet/getSnapshotProposerSequence/blockNum_14323584.json new file mode 100644 index 00000000000..7f36ee87c45 --- /dev/null +++ b/polygon/heimdall/testdata/mainnet/getSnapshotProposerSequence/blockNum_14323584.json @@ -0,0 +1,70 @@ +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "Signers": [ + { + "Signer": "0x7b5000af8ab69fd59eb0d4f5762bff57c9c04385", + "Difficulty": 15 + }, + { + "Signer": "0x7c7379531b2aee82e4ca06d4175d13b9cbeafd49", + "Difficulty": 14 + }, + { + "Signer": "0x8e9700392f9246a6c5b32ee3ecef586f156ed683", + "Difficulty": 13 + }, + { + "Signer": "0xb702f1c9154ac9c08da247a8e30ee6f2f3373f41", + "Difficulty": 12 + }, + { + "Signer": "0xc35649ae99be820c7b200a0add09b96d7032d232", + "Difficulty": 11 + }, + { + "Signer": "0xc6869257205e20c2a43cb31345db534aecb49f6e", + "Difficulty": 10 + }, + { + "Signer": "0xddb833b9e0e3f2f521480e7bcb3e676e0737047d", + "Difficulty": 9 + }, + { + "Signer": "0xe4cd4c302befddf3d544301369ae3ed1481652fd", + "Difficulty": 8 + }, + { + "Signer": "0xe77bbfd8ed65720f187efdd109e38d75eaca7385", + "Difficulty": 7 + }, + { + "Signer": "0xe7e2cb8c81c10ff191a73fe266788c9ce62ec754", + "Difficulty": 6 + }, + { + "Signer": "0x02f70172f7f490653665c9bfac0666147c8af1f5", + "Difficulty": 5 + }, + { + "Signer": "0x1ca971963bdb4ba2bf337c90660674acff5beb3f", + "Difficulty": 4 + }, + { + "Signer": "0x448aa1665fe1fae6d1a00a9209ea62d7dcd81a4b", + "Difficulty": 3 + }, + { + "Signer": "0x46a3a41bd932244dd08186e4c19f1a7e48cbcdf4", + "Difficulty": 2 + }, + { + "Signer": 
"0x72f93a2740e00112d5f2cef404c0aa16fae21fa4", + "Difficulty": 1 + } + ], + "Diff": 15, + "Author": "0x7b5000af8ab69fd59eb0d4f5762bff57c9c04385" + } +}