diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index 5b44c3f021..2a11cff3a0 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -336,14 +336,14 @@ func (beacon *Beacon) verifyHeaders(chain consensus.ChainHeaderReader, headers [
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the beacon protocol. The changes are done inline.
-func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header, waitOnPrepare bool) error {
+func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
// Transition isn't triggered yet, use the legacy rules for preparation.
reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1)
if err != nil {
return err
}
if !reached {
- return beacon.ethone.Prepare(chain, header, waitOnPrepare)
+ return beacon.ethone.Prepare(chain, header)
}
header.Difficulty = beaconDifficulty
return nil
diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go
index 59676edd0b..7d812645d3 100644
--- a/consensus/bor/bor.go
+++ b/consensus/bor/bor.go
@@ -58,7 +58,16 @@ const (
inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory
inmemorySignatures = 4096 // Number of recent block signatures to keep in memory
veblopBlockTimeout = time.Second * 8 // Timeout for new span check. DO NOT CHANGE THIS VALUE.
- minBlockBuildTime = 1 * time.Second // Minimum remaining time before extending the block deadline to avoid empty blocks
+ // minBlockBuildTime is the minimum remaining time before Prepare() extends
+ // the block deadline to avoid producing empty blocks. If time.Until(target)
+ // is less than this value, the target timestamp is pushed forward by one
+ // blockTime period.
+ //
+ // Abort-recovery rebuilds from pipelined SRC are exempt from this push. By the
+ // time speculative execution is discarded, most of the slot may already be
+ // gone; moving the header to the next slot would create avoidable 3-second
+ // blocks on 2-second devnets.
+ minBlockBuildTime = 1 * time.Second
)
// Bor protocol constants.
@@ -1009,7 +1018,7 @@ func (c *Bor) setGiuglianoExtraFields(header *types.Header, parent *types.Header
// Prepare implements consensus.Engine, preparing all the consensus fields of the
// header for running the transactions on top.
-func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, waitOnPrepare bool) error {
+func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
// If the block isn't a checkpoint, cast a random vote (good enough for now)
header.Coinbase = common.Address{}
header.Nonce = types.BlockNonce{}
@@ -1112,8 +1121,6 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, w
return fmt.Errorf("the floor of custom mining block time (%v) is less than the consensus block time: %v < %v", c.blockTime, c.blockTime.Seconds(), c.config.CalculatePeriod(number))
}
- var delay time.Duration
-
if c.blockTime > 0 && c.config.IsRio(header.Number) {
// Only enable custom block time for Rio and later
@@ -1131,10 +1138,8 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, w
actualNewBlockTime := parentActualBlockTime.Add(c.blockTime)
header.Time = uint64(actualNewBlockTime.Unix())
header.ActualTime = actualNewBlockTime
- delay = time.Until(parentActualBlockTime)
} else {
header.Time = parent.Time + CalcProducerDelay(number, succession, c.config)
- delay = time.Until(time.Unix(int64(parent.Time), 0))
}
now := time.Now()
@@ -1145,29 +1150,17 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, w
// Ensure minimum build time so the block has enough time to include transactions.
// The interrupt timer reserves 500ms for state root computation, so without
// sufficient remaining time the block would end up empty.
- if time.Until(header.GetActualTime()) < minBlockBuildTime {
+ //
+ // Abort-recovery rebuilds are different: speculative execution has already
+ // spent most of the slot, so pushing them again would create an avoidable
+ // extra block-time gap. Those late rebuilds should keep their original slot.
+ if !header.AbortRecovery && time.Until(header.GetActualTime()) < minBlockBuildTime {
header.Time = uint64(now.Add(blockTime).Unix())
if c.blockTime > 0 && c.config.IsRio(header.Number) {
header.ActualTime = now.Add(blockTime)
}
}
- // Wait before start the block production if needed (previously this wait was on Seal)
- if c.config.IsGiugliano(header.Number) && waitOnPrepare {
- var successionNumber int
- // if signer is not empty (RPC nodes have empty signer)
- if currentSigner.signer != (common.Address{}) {
- var err error
- successionNumber, err = snap.GetSignerSuccessionNumber(currentSigner.signer)
- if err != nil {
- return err
- }
- if successionNumber == 0 {
- <-time.After(delay)
- }
- }
- }
-
return nil
}
@@ -1361,25 +1354,9 @@ func (c *Bor) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *typ
return nil, nil, 0, err
}
+ // No block rewards in PoA, so the state remains as it is.
start := time.Now()
-
- // No block rewards in PoA, so the state remains as it is.
- // Under delayed SRC, header.Root stores the parent block's actual state root;
- // the goroutine in BlockChain.spawnSRCGoroutine handles this block's root.
- if c.chainConfig.Bor != nil && c.chainConfig.Bor.IsDelayedSRC(header.Number) {
- dsrcReader, ok := chain.(core.DelayedSRCReader)
- if !ok {
- return nil, nil, 0, fmt.Errorf("chain does not implement DelayedSRCReader")
- }
- parentRoot := dsrcReader.GetPostStateRoot(header.ParentHash)
- if parentRoot == (common.Hash{}) {
- return nil, nil, 0, fmt.Errorf("delayed state root unavailable for parent %s", header.ParentHash)
- }
- header.Root = parentRoot
- } else {
- header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
- }
-
+ header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
commitTime := time.Since(start)
// Uncles are dropped
@@ -1404,6 +1381,81 @@ func (c *Bor) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *typ
return block, receipts, commitTime, nil
}
+// FinalizeForPipeline runs the same post-transaction state modifications as
+// FinalizeAndAssemble (state sync, span commits, contract code changes) but
+// does NOT compute IntermediateRoot or assemble the block. It returns the
+// stateSyncData so the caller can pass it to AssembleBlock later after the
+// background SRC goroutine has computed the state root.
+//
+// This is the pipelined SRC equivalent of the first half of FinalizeAndAssemble.
+func (c *Bor) FinalizeForPipeline(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, body *types.Body, receipts []*types.Receipt) ([]*types.StateSyncData, error) {
+ headerNumber := header.Number.Uint64()
+ if body.Withdrawals != nil || header.WithdrawalsHash != nil {
+ return nil, consensus.ErrUnexpectedWithdrawals
+ }
+ if header.RequestsHash != nil {
+ return nil, consensus.ErrUnexpectedRequests
+ }
+
+ var (
+ stateSyncData []*types.StateSyncData
+ err error
+ )
+
+ if IsSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) {
+ cx := statefull.ChainContext{Chain: chain, Bor: c}
+
+ if !c.config.IsRio(header.Number) {
+ if err = c.checkAndCommitSpan(statedb, header, cx); err != nil {
+ log.Error("Error while committing span", "error", err)
+ return nil, err
+ }
+ }
+
+ if c.HeimdallClient != nil {
+ stateSyncData, err = c.CommitStates(statedb, header, cx)
+ if err != nil {
+ log.Error("Error while committing states", "error", err)
+ return nil, err
+ }
+ }
+ }
+
+ if err = c.changeContractCodeIfNeeded(headerNumber, statedb); err != nil {
+ log.Error("Error changing contract code", "error", err)
+ return nil, err
+ }
+
+ return stateSyncData, nil
+}
+
+// AssembleBlock constructs the final block from a pre-computed state root,
+// without calling IntermediateRoot. This is used by pipelined SRC where the
+// state root is computed by a background goroutine.
+//
+// stateSyncData is the state sync data collected during FinalizeForPipeline().
+// If non-empty and the Madhugiri fork is active, a StateSyncTx is appended to the body.
+func (c *Bor) AssembleBlock(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, body *types.Body, receipts []*types.Receipt, stateRoot common.Hash, stateSyncData []*types.StateSyncData) (*types.Block, []*types.Receipt, error) {
+ headerNumber := header.Number.Uint64()
+
+ header.Root = stateRoot
+ header.UncleHash = types.CalcUncleHash(nil)
+
+ if len(stateSyncData) > 0 && c.config != nil && c.config.IsMadhugiri(big.NewInt(int64(headerNumber))) {
+ stateSyncTx := types.NewTx(&types.StateSyncTx{
+ StateSyncData: stateSyncData,
+ })
+ body.Transactions = append(body.Transactions, stateSyncTx)
+ receipts = insertStateSyncTransactionAndCalculateReceipt(stateSyncTx, header, body, statedb, receipts)
+ } else {
+ bc := chain.(core.BorStateSyncer)
+ bc.SetStateSync(stateSyncData)
+ }
+
+ block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil))
+ return block, receipts, nil
+}
+
// Authorize injects a private key into the consensus engine to mint new blocks
// with.
func (c *Bor) Authorize(currentSigner common.Address, signFn SignerFn) {
@@ -1449,12 +1501,11 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, witnes
var delay time.Duration
- // Sweet, the protocol permits us to sign the block, wait for our time
- if c.config.IsGiugliano(header.Number) && successionNumber == 0 {
- delay = 0 // delay was moved to Prepare for giugliano and later
- } else {
- delay = time.Until(header.GetActualTime()) // Wait until we reach header time
- }
+ // Sweet, the protocol permits us to sign the block, wait for our time.
+ // Sequential mining paths build the block body before the slot and rely on
+ // Seal to hold propagation until the target time. The pipeline paths may
+ // already have waited explicitly, in which case this is effectively zero.
+ delay = time.Until(header.GetActualTime())
// wiggle was already accounted for in header.Time, this is just for logging
wiggle := time.Duration(successionNumber) * time.Duration(c.config.CalculateBackupMultiplier(number)) * time.Second
@@ -1470,7 +1521,13 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, witnes
}
// Wait until sealing is terminated or delay timeout.
- log.Info("Waiting for slot to sign and propagate", "number", number, "hash", header.Hash(), "delay-in-sec", uint(delay), "delay", common.PrettyDuration(delay))
+ log.Info(
+ "Waiting for slot to sign and propagate",
+ "number", number,
+ "hash", header.Hash(),
+ "delay-ms", float64(delay)/float64(time.Millisecond),
+ "delay", common.PrettyDuration(delay),
+ )
go func() {
select {
@@ -1483,7 +1540,7 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, witnes
"Sealing out-of-turn",
"number", number,
"hash", header.Hash,
- "wiggle-in-sec", uint(wiggle),
+ "wiggle-ms", float64(wiggle)/float64(time.Millisecond),
"wiggle", common.PrettyDuration(wiggle),
"in-turn-signer", snap.ValidatorSet.GetProposer().Address.Hex(),
)
@@ -1597,38 +1654,22 @@ func (c *Bor) checkAndCommitSpan(
headerNumber := header.Number.Uint64()
tempState := state.Inner().Copy()
- if c.chainConfig.Bor != nil && c.chainConfig.Bor.IsDelayedSRC(header.Number) {
- // Under delayed SRC, skip ResetPrefetcher + StartPrefetcher.
- // The full-node state is at root_{N-2} with a FlatDiff overlay
- // approximating root_{N-1}. ResetPrefetcher clears that overlay,
- // causing GetCurrentSpan to read stale root_{N-2} values — different
- // from what the stateless node sees at root_{N-1}. The mismatch leads
- // to different storage-slot access patterns, so the SRC goroutine
- // captures the wrong trie nodes.
- //
- // StartPrefetcher is also unnecessary: the witness is built by the
- // SRC goroutine, and tempState's reads are captured via
- // CommitSnapshot + TouchAllAddresses below.
- } else {
- tempState.ResetPrefetcher()
- tempState.StartPrefetcher("bor", state.Witness(), nil)
- }
+ tempState.ResetPrefetcher()
+ tempState.StartPrefetcher("bor", state.Witness(), nil)
span, err := c.spanner.GetCurrentSpan(ctx, header.ParentHash, tempState)
if err != nil {
return err
}
- if c.chainConfig.Bor != nil && c.chainConfig.Bor.IsDelayedSRC(header.Number) {
- // Under delayed SRC, use CommitSnapshot instead of IntermediateRoot
- // to capture all accesses without computing a trie root. Touch
- // every address on the main state so they appear in the block's
- // FlatDiff and the SRC goroutine includes their trie paths in
- // the witness.
- tempState.CommitSnapshot(false).TouchAllAddresses(state.Inner())
- } else {
- tempState.IntermediateRoot(false)
- }
+ tempState.IntermediateRoot(false)
+
+ // Propagate addresses accessed during GetCurrentSpan back to the original
+ // state so they appear in the FlatDiff ReadSet. Without this, the pipelined
+ // SRC goroutine's witness won't capture their trie proof nodes (the copy's
+ // reads aren't tracked on the original), causing stateless execution to fail
+ // with missing trie nodes for the validator contract.
+ tempState.PropagateReadsTo(state.Inner())
if c.needToCommitSpan(span, headerNumber) {
return c.FetchAndCommitSpan(ctx, span.Id+1, state, header, chain)
@@ -1765,30 +1806,21 @@ func (c *Bor) CommitStates(
if c.config.IsIndore(header.Number) {
// Fetch the LastStateId from contract via current state instance
tempState := state.Inner().Copy()
- if c.chainConfig.Bor != nil && c.chainConfig.Bor.IsDelayedSRC(header.Number) {
- // See comment in checkAndCommitSpan: under delayed SRC,
- // skip ResetPrefetcher + StartPrefetcher to preserve the
- // FlatDiff overlay and avoid stale root_{N-2} reads.
- } else {
- tempState.ResetPrefetcher()
- tempState.StartPrefetcher("bor", state.Witness(), nil)
- }
+ tempState.ResetPrefetcher()
+ tempState.StartPrefetcher("bor", state.Witness(), nil)
lastStateIDBig, err = c.GenesisContractsClient.LastStateId(tempState, number-1, header.ParentHash)
if err != nil {
return nil, err
}
- if c.chainConfig.Bor != nil && c.chainConfig.Bor.IsDelayedSRC(header.Number) {
- // Under delayed SRC, use CommitSnapshot instead of
- // IntermediateRoot to capture all accesses without computing
- // a trie root. Touch every address on the main state so they
- // appear in the block's FlatDiff and the SRC goroutine
- // includes their trie paths in the witness.
- tempState.CommitSnapshot(false).TouchAllAddresses(state.Inner())
- } else {
- tempState.IntermediateRoot(false)
- }
+ tempState.IntermediateRoot(false)
+
+ // Propagate addresses accessed during LastStateId back to the original
+ // state so they appear in the FlatDiff ReadSet. Without this, the
+ // pipelined SRC goroutine's witness won't capture their trie proof
+ // nodes, causing stateless execution to fail with missing trie nodes.
+ tempState.PropagateReadsTo(state.Inner())
stateSyncDelay := c.config.CalculateStateSyncDelay(number)
to = time.Unix(int64(header.Time-stateSyncDelay), 0)
diff --git a/consensus/bor/bor_test.go b/consensus/bor/bor_test.go
index 4db081af2c..073da7d354 100644
--- a/consensus/bor/bor_test.go
+++ b/consensus/bor/bor_test.go
@@ -740,7 +740,7 @@ func TestCustomBlockTimeValidation(t *testing.T) {
ParentHash: genesis.Hash(),
}
- err := b.Prepare(chain.HeaderChain(), header, false)
+ err := b.Prepare(chain.HeaderChain(), header)
if tc.expectError {
require.Error(t, err, tc.description)
@@ -775,7 +775,7 @@ func TestCustomBlockTimeCalculation(t *testing.T) {
Number: big.NewInt(1),
ParentHash: genesis.Hash(),
}
- err := b.Prepare(chain.HeaderChain(), header1, false)
+ err := b.Prepare(chain.HeaderChain(), header1)
require.NoError(t, err)
require.False(t, header1.ActualTime.IsZero(), "ActualTime should be set")
@@ -802,7 +802,7 @@ func TestCustomBlockTimeCalculation(t *testing.T) {
ParentHash: genesis.Hash(),
}
- err := b.Prepare(chain.HeaderChain(), header, false)
+ err := b.Prepare(chain.HeaderChain(), header)
require.NoError(t, err)
expectedTime := time.Unix(int64(baseTime), 0).Add(3 * time.Second)
@@ -835,7 +835,7 @@ func TestCustomBlockTimeCalculation(t *testing.T) {
ParentHash: parentHash,
}
- err := b.Prepare(chain.HeaderChain(), header, false)
+ err := b.Prepare(chain.HeaderChain(), header)
require.NoError(t, err)
expectedTime := time.Unix(int64(baseTime), 0).Add(4 * time.Second)
@@ -868,7 +868,7 @@ func TestCustomBlockTimeBackwardCompatibility(t *testing.T) {
ParentHash: genesis.Hash(),
}
- err := b.Prepare(chain.HeaderChain(), header, false)
+ err := b.Prepare(chain.HeaderChain(), header)
require.NoError(t, err)
require.True(t, header.ActualTime.IsZero(), "ActualTime should not be set when blockTime is 0")
@@ -903,7 +903,7 @@ func TestCustomBlockTimeClampsToNowAlsoUpdatesActualTime(t *testing.T) {
}
before := time.Now()
- err := b.Prepare(chain.HeaderChain(), header, false)
+ err := b.Prepare(chain.HeaderChain(), header)
after := time.Now()
require.NoError(t, err)
@@ -1035,7 +1035,7 @@ func TestLateBlockTimestampFix(t *testing.T) {
header := &types.Header{Number: big.NewInt(1), ParentHash: chain.HeaderChain().GetHeaderByNumber(0).Hash()}
before := time.Now()
- require.NoError(t, b.Prepare(chain.HeaderChain(), header, false))
+ require.NoError(t, b.Prepare(chain.HeaderChain(), header))
// Should give full 2s build time from now, not from parent
expectedMin := before.Add(2 * time.Second).Unix()
@@ -1052,7 +1052,7 @@ func TestLateBlockTimestampFix(t *testing.T) {
header := &types.Header{Number: big.NewInt(1), ParentHash: chain.HeaderChain().GetHeaderByNumber(0).Hash()}
- require.NoError(t, b.Prepare(chain.HeaderChain(), header, false))
+ require.NoError(t, b.Prepare(chain.HeaderChain(), header))
// Should use parent.Time + period
genesis := chain.HeaderChain().GetHeaderByNumber(0)
@@ -1074,7 +1074,7 @@ func TestLateBlockTimestampFix(t *testing.T) {
header := &types.Header{Number: big.NewInt(1), ParentHash: chain.HeaderChain().GetHeaderByNumber(0).Hash()}
before := time.Now()
- require.NoError(t, b.Prepare(chain.HeaderChain(), header, false))
+ require.NoError(t, b.Prepare(chain.HeaderChain(), header))
expectedMin := before.Add(3 * time.Second).Unix()
require.GreaterOrEqual(t, int64(header.Time), expectedMin)
@@ -1119,7 +1119,7 @@ func TestLateBlockTimestampFix(t *testing.T) {
require.Greater(t, remaining, 500*time.Millisecond, "test setup: remaining should be > 500ms")
require.Less(t, remaining, minBlockBuildTime, "test setup: remaining should be < minBlockBuildTime")
- require.NoError(t, b.Prepare(chain.HeaderChain(), header, false))
+ require.NoError(t, b.Prepare(chain.HeaderChain(), header))
// Prepare should have extended the deadline since remaining < minBlockBuildTime.
// The new ActualTime should be at least blockTime from before Prepare ran.
@@ -1131,6 +1131,42 @@ func TestLateBlockTimestampFix(t *testing.T) {
require.True(t, header.ActualTime.After(expectedMin) || header.ActualTime.Equal(expectedMin),
"header.ActualTime should be at least blockTime from now")
})
+
+ t.Run("abort recovery keeps the original target", func(t *testing.T) {
+ sp := &fakeSpanner{vals: []*valset.Validator{{Address: addr1, VotingPower: 1}}}
+ rioCfg := &params.BorConfig{
+ Sprint: map[string]uint64{"0": 64},
+ Period: map[string]uint64{"0": 2},
+ RioBlock: big.NewInt(0),
+ }
+ blockTime := 2 * time.Second
+
+ parentActualTime := time.Now().Add(-blockTime + 700*time.Millisecond)
+ genesisTime := uint64(parentActualTime.Unix())
+
+ chain, b := newChainAndBorForTest(t, sp, rioCfg, true, addr1, genesisTime)
+ b.blockTime = blockTime
+
+ genesis := chain.HeaderChain().GetHeaderByNumber(0)
+ parentHash := genesis.Hash()
+ b.parentActualTimeCache.Add(parentHash, parentActualTime)
+
+ expectedTargetWithoutExtension := parentActualTime.Add(blockTime)
+ remaining := time.Until(expectedTargetWithoutExtension)
+ require.Greater(t, remaining, 500*time.Millisecond, "test setup: remaining should be > 500ms")
+ require.Less(t, remaining, minBlockBuildTime, "test setup: remaining should be < minBlockBuildTime")
+
+ header := &types.Header{
+ Number: big.NewInt(1),
+ ParentHash: parentHash,
+ AbortRecovery: true,
+ }
+
+ require.NoError(t, b.Prepare(chain.HeaderChain(), header))
+ require.False(t, header.ActualTime.IsZero())
+ require.WithinDuration(t, expectedTargetWithoutExtension, header.ActualTime, 5*time.Millisecond)
+ require.Equal(t, uint64(expectedTargetWithoutExtension.Unix()), header.Time)
+ })
}
// setupFinalizeTest creates a test environment for FinalizeAndAssemble tests
@@ -3019,7 +3055,7 @@ func TestPrepare_NonSprintBlock(t *testing.T) {
UncleHash: uncleHash,
}
- err := b.Prepare(setup.chain.HeaderChain(), h, false)
+ err := b.Prepare(setup.chain.HeaderChain(), h)
require.NoError(t, err)
require.NotNil(t, h.Difficulty)
require.True(t, h.Difficulty.Uint64() > 0)
@@ -3044,7 +3080,7 @@ func TestPrepare_SprintStartBlock(t *testing.T) {
UncleHash: uncleHash,
}
- err := b.Prepare(chain.HeaderChain(), h, false)
+ err := b.Prepare(chain.HeaderChain(), h)
require.NoError(t, err)
// Extra should contain vanity + validator bytes + seal
require.True(t, len(h.Extra) > types.ExtraVanityLength+types.ExtraSealLength)
@@ -3588,7 +3624,7 @@ func TestPrepare_CancunEncoding(t *testing.T) {
UncleHash: uncleHash,
}
- err := b.Prepare(chain.HeaderChain(), h, false)
+ err := b.Prepare(chain.HeaderChain(), h)
require.NoError(t, err)
// Extra should contain vanity + RLP-encoded BlockExtraData + seal
require.True(t, len(h.Extra) > types.ExtraVanityLength+types.ExtraSealLength)
@@ -3600,7 +3636,7 @@ func TestPrepare_CancunEncoding(t *testing.T) {
GasLimit: genesis.GasLimit,
UncleHash: uncleHash,
}
- err = b.Prepare(chain.HeaderChain(), h2, false)
+ err = b.Prepare(chain.HeaderChain(), h2)
require.NoError(t, err)
require.True(t, len(h2.Extra) > types.ExtraVanityLength+types.ExtraSealLength)
}
@@ -3958,7 +3994,7 @@ func TestPrepare_UnknownParent(t *testing.T) {
GasLimit: 8_000_000,
}
- err := b.Prepare(setup.chain.HeaderChain(), h, false)
+ err := b.Prepare(setup.chain.HeaderChain(), h)
require.Error(t, err)
}
func TestSeal_SignError(t *testing.T) {
@@ -4087,7 +4123,7 @@ func TestPrepare_ValidatorsByHashError(t *testing.T) {
// When GetCurrentValidatorsByHash returns nil values (fakeSpanner with empty vals)
sp.vals = nil
- err := b.Prepare(chain, h, false)
+ err := b.Prepare(chain, h)
// Should get errUnknownValidators since GetCurrentValidatorsByHash returns empty/nil
require.Error(t, err)
}
@@ -4381,219 +4417,54 @@ func TestFinalize_CheckAndCommitSpanError(t *testing.T) {
require.Nil(t, result)
}
-// P1 Test: TestBorPrepare_WaitOnPrepareFlag validates the new waitOnPrepare
-// parameter in the Prepare method
-func TestBorPrepare_WaitOnPrepareFlag(t *testing.T) {
+// TestPrepare_PrimaryProducerBuildsWithoutWaiting verifies that Prepare no
+// longer sleeps at the Giugliano boundary, preserving the full tx-building
+// window. The final slot wait now happens in Seal.
+func TestPrepare_PrimaryProducerBuildsWithoutWaiting(t *testing.T) {
t.Parallel()
- // Setup: Create a blockchain and Bor engine
addr := common.HexToAddress("0x1")
sp := &fakeSpanner{vals: []*valset.Validator{{Address: addr, VotingPower: 1}}}
borCfg := &params.BorConfig{
- Sprint: map[string]uint64{"0": 64},
- Period: map[string]uint64{"0": 2},
+ Sprint: map[string]uint64{"0": 64},
+ Period: map[string]uint64{"0": 2},
+ GiuglianoBlock: big.NewInt(0),
}
- chain, b := newChainAndBorForTest(t, sp, borCfg, true, addr, uint64(time.Now().Unix()))
+ genesisTime := uint64(time.Now().Add(3 * time.Second).Unix())
+ chain, b := newChainAndBorForTest(t, sp, borCfg, true, addr, genesisTime)
defer chain.Stop()
genesis := chain.HeaderChain().GetHeaderByNumber(0)
require.NotNil(t, genesis)
- // Test 1: Prepare with waitOnPrepare=false should return quickly
- t.Run("no_wait", func(t *testing.T) {
- testHeader := createTestHeader(genesis, 1, borCfg.Period["0"])
-
- start := time.Now()
- err := b.Prepare(chain, testHeader, false)
- elapsed := time.Since(start)
-
- if err != nil {
- t.Fatalf("Prepare with waitOnPrepare=false failed: %v", err)
- }
-
- // Should complete very quickly (< 100ms) since no waiting
- if elapsed > 100*time.Millisecond {
- t.Logf("Warning: Prepare took %v, expected < 100ms when waitOnPrepare=false", elapsed)
- }
-
- // Verify header is valid
- if testHeader.Time == 0 {
- t.Error("Header time should be set")
- }
-
- t.Logf("Prepare with waitOnPrepare=false completed in %v", elapsed)
- })
-
- // Test 2: Prepare with waitOnPrepare=true should wait for the proper block time
- t.Run("with_wait", func(t *testing.T) {
- // Create a config with Giugliano enabled to activate wait-in-Prepare logic
- borCfgWithBhilai := &params.BorConfig{
- Sprint: map[string]uint64{"0": 64},
- Period: map[string]uint64{"0": 2},
- GiuglianoBlock: big.NewInt(0), // Enable Giugliano from block 0
- }
-
- // Set genesis time 3 seconds in the future to ensure enough wait time
- // even after test setup overhead
- genesisTime := uint64(time.Now().Add(3 * time.Second).Unix())
-
- // Use DevFakeAuthor=true so the signer is authorized and is the primary producer
- chainWithWait, bWithWait := newChainAndBorForTest(t, sp, borCfgWithBhilai, true, addr, genesisTime)
- defer chainWithWait.Stop()
-
- genesisWithWait := chainWithWait.HeaderChain().GetHeaderByNumber(0)
- require.NotNil(t, genesisWithWait)
-
- testHeader := createTestHeader(genesisWithWait, 1, borCfgWithBhilai.Period["0"])
-
- // Calculate expected wait time dynamically based on actual genesis time
- // This accounts for test setup overhead between setting genesis time and calling Prepare
- start := time.Now()
- genesisTimestamp := time.Unix(int64(genesisWithWait.Time), 0)
- expectedDelay := time.Until(genesisTimestamp)
-
- // If genesis time has already passed due to slow test setup, test won't wait
- if expectedDelay < 0 {
- t.Skipf("Test setup took too long (%v), genesis time already passed", time.Since(time.Unix(int64(genesisTime), 0)))
- }
-
- err := bWithWait.Prepare(chainWithWait, testHeader, true)
- elapsed := time.Since(start)
-
- if err != nil {
- t.Fatalf("Prepare with waitOnPrepare=true failed: %v", err)
- }
-
- // With Giugliano enabled, DevFakeAuthor=true (making this node the primary producer),
- // and waitOnPrepare=true, should wait until parent (genesis) time has passed
- // Allow 100ms tolerance for timing precision and scheduling overhead
- minWait := expectedDelay - 100*time.Millisecond
- maxWait := expectedDelay + 200*time.Millisecond // Allow extra time for scheduling
-
- if minWait < 0 {
- minWait = 0
- }
-
- if elapsed < minWait {
- t.Errorf("Prepare waited %v, expected at least %v (calculated from expectedDelay=%v)", elapsed, minWait, expectedDelay)
- }
- if elapsed > maxWait {
- t.Logf("Warning: Prepare took %v, expected around %v (calculated from expectedDelay=%v)", elapsed, expectedDelay, expectedDelay)
- }
-
- // Verify header is valid
- if testHeader.Time == 0 {
- t.Error("Header time should be set")
- }
-
- t.Logf("Prepare with waitOnPrepare=true completed in %v (expected delay was %v)", elapsed, expectedDelay)
- })
-
- // Test 3: Verify both produce compatible headers
- t.Run("compatibility", func(t *testing.T) {
- header1 := createTestHeader(genesis, 3, borCfg.Period["0"])
- header2 := createTestHeader(genesis, 3, borCfg.Period["0"])
-
- err1 := b.Prepare(chain, header1, false)
- err2 := b.Prepare(chain, header2, true)
+ header := createTestHeader(genesis, 1, borCfg.Period["0"])
- if err1 != nil || err2 != nil {
- t.Fatalf("Prepare failed: err1=%v, err2=%v", err1, err2)
- }
-
- // Both should produce valid headers with same block number
- if header1.Number.Cmp(header2.Number) != 0 {
- t.Error("Headers should have same block number")
- }
+ start := time.Now()
+ err := b.Prepare(chain, header)
+ elapsed := time.Since(start)
- t.Logf("Both waitOnPrepare modes produce compatible headers for block %d", header1.Number.Uint64())
- })
-}
-
-// TestPrepare_WaitGate_GiuglianoOnly verifies that the wait-in-Prepare
-// mechanism activates only when IsGiugliano is true.
-func TestPrepare_WaitGate_GiuglianoOnly(t *testing.T) {
- t.Parallel()
-
- addr := common.HexToAddress("0x1")
- sp := &fakeSpanner{vals: []*valset.Validator{{Address: addr, VotingPower: 1}}}
-
- t.Run("before Giugliano – waitOnPrepare=true returns quickly", func(t *testing.T) {
- borCfg := &params.BorConfig{
- Sprint: map[string]uint64{"0": 64},
- Period: map[string]uint64{"0": 2},
- // GiuglianoBlock not set → IsGiugliano always false
- }
- // Set genesis time slightly in the future so there would be a non-trivial delay
- // if the wait were active.
- genesisTime := uint64(time.Now().Add(2 * time.Second).Unix())
- chain, b := newChainAndBorForTest(t, sp, borCfg, true, addr, genesisTime)
- defer chain.Stop()
-
- genesis := chain.HeaderChain().GetHeaderByNumber(0)
- require.NotNil(t, genesis)
-
- header := &types.Header{Number: big.NewInt(1), ParentHash: genesis.Hash()}
-
- start := time.Now()
- err := b.Prepare(chain, header, true)
- elapsed := time.Since(start)
-
- require.NoError(t, err)
- // Without Giugliano the wait block is skipped; should return in < 200 ms
- require.Less(t, elapsed, 200*time.Millisecond,
- "Prepare should not wait when Giugliano is not active")
- })
-
- t.Run("at Giugliano – waitOnPrepare=true waits for primary producer", func(t *testing.T) {
- borCfg := &params.BorConfig{
- Sprint: map[string]uint64{"0": 64},
- Period: map[string]uint64{"0": 2},
- GiuglianoBlock: big.NewInt(0),
- }
- // Genesis 3 s in the future → there will be a measurable wait.
- genesisTime := uint64(time.Now().Add(3 * time.Second).Unix())
- chain, b := newChainAndBorForTest(t, sp, borCfg, true, addr, genesisTime)
- defer chain.Stop()
-
- genesis := chain.HeaderChain().GetHeaderByNumber(0)
- require.NotNil(t, genesis)
-
- // Measure expected delay right before calling Prepare, same pattern as TestBorPrepare_WaitOnPrepareFlag.
- expectedDelay := time.Until(time.Unix(int64(genesis.Time), 0))
- if expectedDelay < 100*time.Millisecond {
- t.Skip("genesis time already passed due to slow setup")
- }
-
- header := &types.Header{Number: big.NewInt(1), ParentHash: genesis.Hash()}
-
- start := time.Now()
- err := b.Prepare(chain, header, true)
- elapsed := time.Since(start)
-
- require.NoError(t, err)
- minWait := expectedDelay - 200*time.Millisecond
- if minWait < 0 {
- minWait = 0
- }
- require.Greater(t, elapsed, minWait,
- "Prepare should wait for primary producer when Giugliano is active")
- })
+ require.NoError(t, err)
+ require.Less(t, elapsed, 200*time.Millisecond,
+ "Prepare should stay fast so tx selection gets the full slot")
+ require.NotZero(t, header.Time, "Prepare should still populate header time")
}
-// TestSeal_PrimaryProducerDelay_GiuglianoBoundary verifies that delay=0 in Seal
-// for the primary producer (succession==0) is gated on IsGiugliano.
+// TestSeal_PrimaryProducerDelay_GiuglianoBoundary verifies that primary
+// producers wait until the block target time in Seal on both sides of the
+// Giugliano boundary. This preserves the tx-building window for sequential
+// paths while keeping propagation aligned with the slot.
func TestSeal_PrimaryProducerDelay_GiuglianoBoundary(t *testing.T) {
t.Parallel()
addr := common.HexToAddress("0x1")
sp := &fakeSpanner{vals: []*valset.Validator{{Address: addr, VotingPower: 1}}}
- now := uint64(time.Now().Unix())
+ now := uint64(time.Now().Unix()) - 100
- makeHeader := func(borCfg *params.BorConfig) (*types.Header, *Bor, *core.BlockChain) {
+ makeBlock := func(borCfg *params.BorConfig) (*types.Block, *Bor, *core.BlockChain, time.Time) {
chain, b := newChainAndBorForTest(t, sp, borCfg, true, addr, now)
genesis := chain.HeaderChain().GetHeaderByNumber(0)
require.NotNil(t, genesis)
+ target := time.Now().Add(350 * time.Millisecond)
h := &types.Header{
Number: big.NewInt(1),
ParentHash: genesis.Hash(),
@@ -4601,55 +4472,47 @@ func TestSeal_PrimaryProducerDelay_GiuglianoBoundary(t *testing.T) {
UncleHash: uncleHash,
Difficulty: big.NewInt(1),
GasLimit: 8_000_000,
+ Time: uint64(target.Unix()),
+ ActualTime: target,
}
- // Set header.Time so GetActualTime() returns something in the past
- h.Time = now - 1
- return h, b, chain
+ body := &types.Body{Transactions: types.Transactions{types.NewTx(&types.LegacyTx{})}}
+ return types.NewBlock(h, body, nil, trie.NewStackTrie(nil)), b, chain, target
}
- t.Run("before Giugliano – primary producer has non-zero delay", func(t *testing.T) {
- borCfg := &params.BorConfig{
- Sprint: map[string]uint64{"0": 64},
- Period: map[string]uint64{"0": 2},
- // GiuglianoBlock not set
- }
- h, b, chain := makeHeader(borCfg)
+ assertWaitsUntilTarget := func(t *testing.T, borCfg *params.BorConfig) {
+ block, b, chain, target := makeBlock(borCfg)
defer chain.Stop()
- snap, err := b.snapshot(chain.HeaderChain(), h, nil, false)
- require.NoError(t, err)
+ b.Authorize(addr, func(accounts.Account, string, []byte) ([]byte, error) {
+ return make([]byte, types.ExtraSealLength), nil
+ })
+
+ results := make(chan *consensus.NewSealedBlockEvent, 1)
+ stop := make(chan struct{})
- successionNumber, err := snap.GetSignerSuccessionNumber(addr)
+ err := b.Seal(chain.HeaderChain(), block, nil, results, stop)
require.NoError(t, err)
- require.Equal(t, 0, successionNumber, "DevFakeAuthor should be primary producer")
-
- // Before Giugliano the delay=0 branch should NOT be taken.
- // The else branch sets delay = time.Until(header.GetActualTime()).
- // Since header.Time is in the past, delay ≤ 0 — but the point is the branch
- // selected is the else, not the delay=0 one.
- isNewHF := b.config.IsGiugliano(h.Number)
- require.False(t, isNewHF, "IsGiugliano should be false before GiuglianoBlock")
- })
- t.Run("at Giugliano – primary producer gets delay=0", func(t *testing.T) {
- borCfg := &params.BorConfig{
- Sprint: map[string]uint64{"0": 64},
- Period: map[string]uint64{"0": 2},
- GiuglianoBlock: big.NewInt(0),
+ select {
+ case result := <-results:
+ require.NotNil(t, result)
+ require.NotNil(t, result.Block)
+ require.False(t, time.Now().Before(target.Add(-50*time.Millisecond)),
+ "seal result arrived before target time %v", target)
+ case <-time.After(5 * time.Second):
+ t.Fatal("timed out waiting for sealed block")
}
- h, b, chain := makeHeader(borCfg)
- defer chain.Stop()
-
- snap, err := b.snapshot(chain.HeaderChain(), h, nil, false)
- require.NoError(t, err)
+ }
- successionNumber, err := snap.GetSignerSuccessionNumber(addr)
- require.NoError(t, err)
- require.Equal(t, 0, successionNumber, "DevFakeAuthor should be primary producer")
+ t.Run("before Giugliano", func(t *testing.T) {
+ borCfg := borConfigWithDelays(64)
+ assertWaitsUntilTarget(t, borCfg)
+ })
- isNewHF := b.config.IsGiugliano(h.Number)
- require.True(t, isNewHF, "IsGiugliano should be true at GiuglianoBlock=0")
- // The Seal function would take the delay=0 branch for this signer/header combination.
+ t.Run("at Giugliano", func(t *testing.T) {
+ borCfg := borConfigWithDelays(64)
+ borCfg.GiuglianoBlock = big.NewInt(0)
+ assertWaitsUntilTarget(t, borCfg)
})
}
@@ -4832,7 +4695,7 @@ func TestSubSecondLateBlockTriggersTimeAdjustment(t *testing.T) {
}
before := time.Now()
- err := b.Prepare(chain.HeaderChain(), header, false)
+ err := b.Prepare(chain.HeaderChain(), header)
require.NoError(t, err)
expectedMin := uint64(before.Add(1 * time.Second).Unix())
@@ -4875,7 +4738,7 @@ func TestSubSecondLateBlockTriggersTimeAdjustment(t *testing.T) {
}
before := time.Now()
- err := b.Prepare(chain.HeaderChain(), header, false)
+ err := b.Prepare(chain.HeaderChain(), header)
require.NoError(t, err)
require.False(t, header.ActualTime.IsZero(),
@@ -5161,7 +5024,7 @@ func TestPrepare_GiuglianoExtraFields_SprintEnd(t *testing.T) {
UncleHash: uncleHash,
}
- err := b.Prepare(chain.HeaderChain(), h, false)
+ err := b.Prepare(chain.HeaderChain(), h)
require.NoError(t, err)
gasTarget, bfcd := h.GetBaseFeeParams(cfg)
@@ -5183,7 +5046,7 @@ func TestPrepare_GiuglianoExtraFields_NonSprint(t *testing.T) {
UncleHash: uncleHash,
}
- err := b.Prepare(chain.HeaderChain(), h, false)
+ err := b.Prepare(chain.HeaderChain(), h)
require.NoError(t, err)
gasTarget, bfcd := h.GetBaseFeeParams(cfg)
@@ -5205,7 +5068,7 @@ func TestPrepare_PreGiugliano_NoExtraFields(t *testing.T) {
UncleHash: uncleHash,
}
- err := b.Prepare(chain.HeaderChain(), h, false)
+ err := b.Prepare(chain.HeaderChain(), h)
require.NoError(t, err)
gasTarget, bfcd := h.GetBaseFeeParams(cfg)
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index 6201a00d76..8bef5de036 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -552,7 +552,7 @@ func (c *Clique) verifySeal(snap *Snapshot, header *types.Header, parents []*typ
// Prepare implements consensus.Engine, preparing all the consensus fields of the
// header for running the transactions on top.
-func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header, waitOnPrepare bool) error {
+func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
// If the block isn't a checkpoint, cast a random vote (good enough for now)
header.Coinbase = common.Address{}
header.Nonce = types.BlockNonce{}
diff --git a/consensus/consensus.go b/consensus/consensus.go
index fcf82150a6..f320192493 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -84,7 +84,7 @@ type Engine interface {
// Prepare initializes the consensus fields of a block header according to the
// rules of a particular engine. The changes are executed inline.
- Prepare(chain ChainHeaderReader, header *types.Header, waitOnPrepare bool) error
+ Prepare(chain ChainHeaderReader, header *types.Header) error
// Finalize runs any post-transaction state modifications (e.g. block rewards
// or process withdrawals) but does not assemble the block.
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index 7c2c0097d3..74be88bbe5 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -496,7 +496,7 @@ var DynamicDifficultyCalculator = makeDifficultyCalculator
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the ethash protocol. The changes are done inline.
-func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header, waitOnPrepare bool) error {
+func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
if parent == nil {
return consensus.ErrUnknownAncestor
diff --git a/core/block_validator.go b/core/block_validator.go
index dd5453db2e..22962695f3 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -128,6 +127,37 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
return nil
}
+// ValidateStateCheap validates the cheap (non-trie) post-state checks: gas used,
+// bloom filter, receipt root, and requests hash. It does NOT compute the state
+// root (IntermediateRoot), which is the expensive operation. Used by the pipelined
+// import path where IntermediateRoot is deferred to a background SRC goroutine.
+func (v *BlockValidator) ValidateStateCheap(block *types.Block, statedb *state.StateDB, res *ProcessResult) error {
+ if res == nil {
+ return errors.New("nil ProcessResult value")
+ }
+ header := block.Header()
+ if block.GasUsed() != res.GasUsed {
+ return fmt.Errorf("%w (remote: %d local: %d)", ErrGasUsedMismatch, block.GasUsed(), res.GasUsed)
+ }
+ rbloom := types.MergeBloom(res.Receipts)
+ if rbloom != header.Bloom {
+ return fmt.Errorf("%w (remote: %x local: %x)", ErrBloomMismatch, header.Bloom, rbloom)
+ }
+ receiptSha := types.DeriveSha(res.Receipts, trie.NewStackTrie(nil))
+ if receiptSha != header.ReceiptHash {
+ return fmt.Errorf("%w (remote: %x local: %x)", ErrReceiptRootMismatch, header.ReceiptHash, receiptSha)
+ }
+ if header.RequestsHash != nil {
+ reqhash := types.CalcRequestsHash(res.Requests)
+ if reqhash != *header.RequestsHash {
+ return fmt.Errorf("%w (remote: %x local: %x)", ErrRequestsHashMismatch, *header.RequestsHash, reqhash)
+ }
+ } else if res.Requests != nil {
+ return errors.New("block has requests before prague fork")
+ }
+ return nil
+}
+
// ValidateState validates the various changes that happen after a state transition,
// such as amount of used gas, the receipt roots and the state root itself.
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, stateless bool) error {
@@ -167,20 +197,6 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
} else if res.Requests != nil {
return errors.New("block has requests before prague fork")
}
- // Under delayed SRC, header.Root = state root of the PARENT block.
- // Verify it matches the persisted delayed root and skip IntermediateRoot —
- // the background goroutine spawned by spawnSRCGoroutine computes root_N.
- if v.config.Bor != nil && v.config.Bor.IsDelayedSRC(header.Number) {
- parentActualRoot := v.bc.GetPostStateRoot(header.ParentHash)
- if parentActualRoot == (common.Hash{}) {
- return fmt.Errorf("delayed state root unavailable for parent %x", header.ParentHash)
- }
- if header.Root != parentActualRoot {
- return fmt.Errorf("invalid delayed state root (header: %x, parent actual: %x)", header.Root, parentActualRoot)
- }
- return nil
- }
-
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
diff --git a/core/blockchain.go b/core/blockchain.go
index 286675d043..9aa4dd461a 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -69,7 +69,7 @@ var (
headSafeBlockGauge = metrics.NewRegisteredGauge("chain/head/safe", nil)
chainInfoGauge = metrics.NewRegisteredGaugeInfo("chain/info", nil)
- chainMgaspsMeter = metrics.NewRegisteredResettingTimer("chain/mgasps", nil) //nolint:unused
+ chainMgaspsMeter = metrics.NewRegisteredResettingTimer("chain/mgasps", nil)
accountReadTimer = metrics.NewRegisteredResettingTimer("chain/account/reads", nil)
accountHashTimer = metrics.NewRegisteredResettingTimer("chain/account/hashes", nil)
@@ -109,10 +109,20 @@ var (
blockImportTimer = metrics.NewRegisteredMeter("chain/imports", nil)
- blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
- blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
- blockCrossValidationTimer = metrics.NewRegisteredResettingTimer("chain/crossvalidation", nil) //nolint:revive,unused
- blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
+ blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+ // blockValidationTimer does NOT fire when pipelined SRC is enabled.
+ // Reason: pipelined import uses ValidateStateCheap (gas + bloom + receipt
+ // root only); the full root match happens later in the SRC goroutine.
+ // Closest pipeline signals: chain/imports/pipelined/collect (caller's wait
+ // on root verification) and chain/imports/pipelined/root_mismatch (must stay zero).
+ blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
+ blockCrossValidationTimer = metrics.NewRegisteredResettingTimer("chain/crossvalidation", nil) //nolint:revive,unused
+ blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
+ // blockWriteTimer does NOT fire when pipelined SRC is enabled.
+ // Reason: pipelined import splits "write" across two code paths — metadata/batch
+ // write in writeBlockAndSetHeadPipelined and async state commit in the SRC
+ // goroutine — so there is no single "write phase" number. Approximate by summing
+ // chain/batch/write + chain/state/commit + chain/{account,storage}/commits.
blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
blockExecutionParallelCounter = metrics.NewRegisteredCounter("chain/execution/parallel", nil)
blockExecutionSerialCounter = metrics.NewRegisteredCounter("chain/execution/serial", nil)
@@ -145,6 +155,95 @@ var (
blockBatchWriteTimer = metrics.NewRegisteredTimer("chain/batch/write", nil) // time to flush the block batch to disk (blockBatch.Write) — spikes indicate DB compaction stalls
stateCommitTimer = metrics.NewRegisteredTimer("chain/state/commit", nil) // time for statedb.CommitWithUpdate — in pathdb mode, spikes indicate diff layer flushes
+ // Pipelined import SRC metrics
+ pipelineImportBlocksCounter = metrics.NewRegisteredCounter("chain/imports/pipelined/blocks", nil)
+ pipelineImportTotalTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/total", nil)
+ pipelineImportSRCTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/src", nil)
+ pipelineImportCollectTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/collect", nil)
+ pipelineImportFallbackCounter = metrics.NewRegisteredCounter("chain/imports/pipelined/fallback", nil)
+ pipelineImportHitCounter = metrics.NewRegisteredCounter("chain/imports/pipelined/hit", nil) // pending matched next block's parent — overlap achieved
+ pipelineImportMissCounter = metrics.NewRegisteredCounter("chain/imports/pipelined/miss", nil) // pending didn't match — flushed (reorg/gap)
+ pipelineImportRootMismatchCounter = metrics.NewRegisteredCounter("chain/imports/pipelined/root_mismatch", nil) // SRC goroutine returned wrong root — safety alarm, must stay zero
+ // Mode gauge — 1 when pipelined SRC import is enabled on this node, 0 otherwise.
+ // Dashboards can use this to distinguish "metric is zero because pipelining is off"
+ // from "metric is zero because the pipelined code path bypassed its emit site".
+ pipelineImportEnabledGauge = metrics.NewRegisteredGauge("chain/imports/pipelined/enabled", nil)
+
+ // Cheap-exec timer for pipelined import. Wraps the synchronous
+ // ProcessBlock call (FlatDiff overlay path). Disambiguates "cheap exec
+ // is itself slow" from "main path waited on prev SRC" — chain/imports/
+ // pipelined/collect covers only the wait, and the parity chain/execution
+ // timer wraps the entire persistPipelinedImport (which includes that wait),
+ // so neither pinpoints the cheap exec on its own.
+ pipelineImportCheapExecTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/cheap_exec", nil)
+ pipelineImportCheapValidationTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/cheap_validation", nil)
+ pipelineImportPostExecTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/post_exec", nil)
+ pipelineImportPrefetchStopTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/prefetch_stop", nil)
+ pipelineImportPrefetchDrainTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/prefetch_stop/drain", nil)
+ pipelineImportPrefetchReportTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/prefetch_stop/report", nil)
+ pipelineImportCommitSnapshotTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/commit_snapshot", nil)
+ pipelineImportStateSyncFeedTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/state_sync_feed", nil)
+ pipelineImportReorgCheckTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/reorg_check", nil)
+ pipelineImportSetFlatDiffTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/set_flatdiff", nil)
+ pipelineImportWriteHeadTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/write_head", nil)
+ pipelineImportSpawnSRCTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/spawn_src", nil)
+ pipelineImportWarmSnapshotCollect = metrics.NewRegisteredTimer("chain/imports/pipelined/warm_snapshot/collect", nil)
+ pipelineImportWarmSnapshotBuild = metrics.NewRegisteredTimer("chain/imports/pipelined/warm_snapshot/build", nil)
+ pipelineImportWarmSnapshotFetchers = metrics.NewRegisteredHistogram("chain/imports/pipelined/warm_snapshot/fetchers", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pipelineImportPrefetchSubfetchers = metrics.NewRegisteredHistogram("chain/imports/pipelined/prefetch_stop/subfetchers", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pipelineImportWarmSnapshotNodes = metrics.NewRegisteredHistogram("chain/imports/pipelined/warm_snapshot/nodes", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pipelineImportWarmSnapshotBytes = metrics.NewRegisteredHistogram("chain/imports/pipelined/warm_snapshot/bytes", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pipelineImportWarmSnapshotAccountNodes = metrics.NewRegisteredHistogram("chain/imports/pipelined/warm_snapshot/account_nodes", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pipelineImportWarmSnapshotStorageNodes = metrics.NewRegisteredHistogram("chain/imports/pipelined/warm_snapshot/storage_nodes", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pipelineImportWarmSnapshotAccountBytes = metrics.NewRegisteredHistogram("chain/imports/pipelined/warm_snapshot/account_bytes", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pipelineImportWarmSnapshotStorageBytes = metrics.NewRegisteredHistogram("chain/imports/pipelined/warm_snapshot/storage_bytes", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+ // Normal import phase timers. These mirror the pipelined phase timers enough
+ // to compare the "Imported new chain segment" elapsed breakdown between
+ // develop-style import and pipelined import.
+ normalImportTotalTimer = metrics.NewRegisteredTimer("chain/imports/normal/total", nil)
+ normalImportProcessTimer = metrics.NewRegisteredTimer("chain/imports/normal/process", nil)
+ normalImportValidationTimer = metrics.NewRegisteredTimer("chain/imports/normal/validation", nil)
+ normalImportReorgCheckTimer = metrics.NewRegisteredTimer("chain/imports/normal/reorg_check", nil)
+ normalImportWriteTimer = metrics.NewRegisteredTimer("chain/imports/normal/write", nil)
+
+ // Auto-collection phase timers. The auto-collection goroutine runs
+ // asynchronously after persistPipelinedImport returns:
+ // WaitForSRC -> verifyImportSRCRoot -> publishImportWitness -> handleImportTrieGC
+ // The main path's collect-wait (chain/imports/pipelined/collect) blocks
+ // until ALL these phases finish, so a sustained main-path wait is not
+ // necessarily a slow SRC compute — it could be slow witness publish or
+ // trie GC. WaitForSRC duration is already covered by chain/imports/
+ // pipelined/src; total covers the whole runImportAutoCollection wall
+ // time so dashboards can verify verify+publish+gc sums to total minus src.
+ pipelineImportAutoCollectTotalTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/auto_collect/total", nil)
+ pipelineImportAutoCollectVerifyTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/auto_collect/verify", nil)
+ pipelineImportAutoCollectPublishTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/auto_collect/publish", nil)
+ pipelineImportAutoCollectGCTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/auto_collect/gc", nil)
+
+ // preloadFlatDiffReads instrumentation.
+ pipelineSRCPreloadTimer = metrics.NewRegisteredTimer("chain/pipelined/src/preload", nil)
+ pipelineSRCPreloadReadAccountsHistogram = metrics.NewRegisteredHistogram("chain/pipelined/src/preload/read_accounts", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pipelineSRCPreloadSlotsHistogram = metrics.NewRegisteredHistogram("chain/pipelined/src/preload/slots", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pipelineSRCPreloadDestructsHistogram = metrics.NewRegisteredHistogram("chain/pipelined/src/preload/destructs", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pipelineSRCPreloadNonexistentHistogram = metrics.NewRegisteredHistogram("chain/pipelined/src/preload/nonexistent", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pipelineSRCPreloadSlotsPerAccountHistogram = metrics.NewRegisteredHistogram("chain/pipelined/src/preload/slots_per_account", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+ // Throughput histograms (mode-agnostic — emitted from both normal and pipelined import paths).
+ gasUsedPerBlockHistogram = metrics.NewRegisteredHistogram("chain/gas_used_per_block", nil, metrics.NewExpDecaySample(1028, 0.015))
+ txsPerBlockHistogram = metrics.NewRegisteredHistogram("chain/txs_per_block", nil, metrics.NewExpDecaySample(1028, 0.015))
+ importSegmentBlocksHistogram = metrics.NewRegisteredHistogram("chain/imports/segment/blocks", nil, metrics.NewExpDecaySample(1028, 0.015))
+ importSegmentElapsedTimer = metrics.NewRegisteredTimer("chain/imports/segment/elapsed", nil)
+ importSegmentGasUsedHistogram = metrics.NewRegisteredHistogram("chain/imports/segment/gas_used", nil, metrics.NewExpDecaySample(1028, 0.015))
+ importSegmentMgaspsHistogram = metrics.NewRegisteredHistogram("chain/imports/segment/mgasps", nil, metrics.NewExpDecaySample(1028, 0.015))
+ // Witness size histogram in bytes. Spikes here directly drive stateless-peer bandwidth cost.
+ witnessSizeBytesHistogram = metrics.NewRegisteredHistogram("chain/witness/size_bytes", nil, metrics.NewExpDecaySample(1028, 0.015))
+ // End-to-end import timer: from block processing start until the witness is
+ // on disk and peer-visible (non-pipelined: end of writeBlockWithState;
+ // pipelined: after WitnessReadyEvent fires in the auto-collection goroutine).
+ // Apples-to-apples A/B metric between modes.
+ witnessReadyEndToEndTimer = metrics.NewRegisteredTimer("chain/imports/witness_ready_end_to_end", nil)
+
errInsertionInterrupted = errors.New("insertion is interrupted")
errChainStopped = errors.New("blockchain is stopped")
errInvalidOldChain = errors.New("invalid old chain")
@@ -159,6 +258,11 @@ const (
receiptsCacheLimit = 1024
txLookupCacheLimit = 1024
+ slowImportBlockThreshold = time.Second
+ slowImportPostExecThreshold = 500 * time.Millisecond
+ slowImportCollectThreshold = 100 * time.Millisecond
+ slowImportSnapshotThreshold = 100 * time.Millisecond
+
// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
//
// Changelog:
@@ -257,6 +361,36 @@ type BlockChainConfig struct {
// MilestoneFetcher returns the latest milestone end block from Heimdall.
MilestoneFetcher func(ctx context.Context) (uint64, error)
+
+ // EnablePipelinedImportSRC enables pipelined state root computation during
+ // block import: overlap SRC(N) with tx execution of block N+1.
+ EnablePipelinedImportSRC bool
+
+ // PipelinedImportSRCLogs enables verbose logging for the import pipeline.
+ PipelinedImportSRCLogs bool
+
+ // PipelinedSRCWarmSnapshot enables a warm-cache handoff from the
+ // execution-side trie prefetcher to the pipelined SRC goroutine. When
+ // true, persistPipelinedImport captures the trie nodes the prefetcher had
+ // loaded into a quiesced WarmSnapshot and passes it to SRC; SRC's
+ // NewTrieOnly reader then consults the snapshot before falling through to
+ // pathdb. Targets the cold-cache restart/catch-up cost where the SRC
+ // goroutine repeats trie reads the prefetcher already performed.
+ //
+ // Default false. NewTrieOnly semantics, witness completeness, and root
+ // determinism are unaffected — the snapshot only short-circuits the
+ // underlying NodeReader fetch; trie walks and prevalueTracer recording
+ // fire identically whether the served node came from the snapshot or
+ // pathdb.
+ PipelinedSRCWarmSnapshot bool
+}
+
+// PipelineImportOpts configures ProcessBlock for pipelined import mode.
+// When non-nil, ProcessBlock opens state at CommittedParentRoot (with optional
+// FlatDiff overlay) and uses ValidateStateCheap instead of full ValidateState.
+type PipelineImportOpts struct {
+ CommittedParentRoot common.Hash // Last committed trie root (grandparent when FlatDiff is set)
+ FlatDiff *state.FlatDiff // Previous block's state overlay (nil for first block in pipeline)
}
// DefaultConfig returns the default config.
@@ -343,7 +477,7 @@ type txLookup struct {
transaction *types.Transaction
}
-// pendingSRCState tracks an in-flight state root computation goroutine.
+// pendingSRCState tracks an in-flight pipelined state root computation goroutine.
// root, witness, and err are written by the goroutine before wg.Done();
// callers block on wg.Wait() and read them afterwards.
type pendingSRCState struct {
@@ -351,10 +485,30 @@ type pendingSRCState struct {
blockNumber uint64
wg sync.WaitGroup
root common.Hash
- witness *stateless.Witness // complete witness for stateless execution of this block
+ witness []byte // RLP-encoded witness built by the SRC goroutine
err error
}
+// pendingImportSRCState stores the state of a block whose SRC goroutine has
+// been spawned. Block metadata is written to DB immediately; the state commit
+// runs in the background. An auto-collection goroutine waits for SRC to finish
+// and immediately writes the witness + handles trie GC, so collection doesn't
+// depend on the arrival of the next block.
+type pendingImportSRCState struct {
+ block *types.Block
+ flatDiff *state.FlatDiff
+ committedRoot common.Hash // last committed trie root when SRC was spawned
+ procTime time.Duration // for gcproc accumulation
+ blockStart time.Time // block processing start — used for chain/imports/witness_ready_end_to_end
+ makeWitness bool // whether the SRC goroutine is producing a witness for this block
+
+ // collectedCh is closed when auto-collection completes (verify root,
+ // write witness, trie GC). Callers block on <-collectedCh.
+ collectedCh chan struct{}
+ collectedRoot common.Hash // verified root (set before closing collectedCh)
+ collectedErr error // non-nil if SRC failed or root mismatch
+}
+
// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
@@ -389,21 +543,11 @@ type BlockChain struct {
chainHeadFeed event.Feed
logsFeed event.Feed
blockProcFeed event.Feed
- witnessFeed event.Feed
+ witnessReadyFeed event.Feed
blockProcCounter int32
scope event.SubscriptionScope
genesisBlock *types.Block
- // lastFlatDiff holds the FlatDiff from the most recently committed block's
- // CommitSnapshot. Under delayed SRC, the miner uses it together with the
- // grandparent's committed root to open a statedb via NewWithFlatBase,
- // allowing block N+1 execution to start before G_N finishes.
- // lastFlatDiffBlockHash is the hash of the block that produced lastFlatDiff,
- // used by insertChain to verify the diff is for the correct parent before seeding.
- lastFlatDiff *state.FlatDiff
- lastFlatDiffBlockHash common.Hash
- lastFlatDiffMu sync.RWMutex
-
// This mutex synchronizes chain write operations.
// Readers don't need to take it, they can just read the database.
chainmu *syncx.ClosableMutex
@@ -452,10 +596,25 @@ type BlockChain struct {
chainSideFeed event.Feed // Side chain data feed (removed from geth but needed in bor)
milestoneFetcher func(ctx context.Context) (uint64, error) // Function to fetch the latest milestone end block from Heimdall.
- // DelayedSRC: concurrent state root calculation.
- // pendingSRC tracks the in-flight state root goroutine for the most recent block.
+ // Pipelined SRC: concurrent state root calculation.
+ // pendingSRC tracks the in-flight SRC goroutine for the most recent block.
pendingSRC *pendingSRCState
pendingSRCMu sync.Mutex
+
+ // pendingImportSRC tracks a block whose SRC goroutine is in-flight during
+ // pipelined import. Persists across insertChain calls.
+ pendingImportSRC *pendingImportSRCState
+ pendingImportSRCMu sync.Mutex
+
+ // lastFlatDiff holds the FlatDiff from the most recently committed block.
+ // The miner uses it together with the grandparent's committed root to open
+ // a StateDB via NewWithFlatBase, allowing block N+1 execution to start
+ // before the SRC goroutine finishes.
+ lastFlatDiff *state.FlatDiff
+ lastFlatDiffBlockNum uint64
+ lastFlatDiffParentRoot common.Hash // committed root that the FlatDiff is based on
+ lastFlatDiffBlockRoot common.Hash // the block's own state root (from header)
+ lastFlatDiffMu sync.RWMutex
}
// NewBlockChain returns a fully initialised block chain using information
@@ -465,6 +624,11 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
if cfg == nil {
cfg = DefaultConfig()
}
+ if cfg.EnablePipelinedImportSRC {
+ pipelineImportEnabledGauge.Update(1)
+ } else {
+ pipelineImportEnabledGauge.Update(0)
+ }
// Open trie database with provided config
enableVerkle, err := EnableVerkleAtGenesis(db, genesis)
@@ -594,16 +758,6 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
}
}
}
- // Delayed SRC crash recovery: if the head block is in the delayed-SRC range
- // and its post-execution state root is missing, re-execute the head block to
- // recover the FlatDiff and spawn the SRC goroutine.
- head = bc.CurrentBlock() // re-read, may have been rewound above
- if bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsDelayedSRC(head.Number) && !bc.cfg.Stateless {
- postRoot := bc.GetPostStateRoot(head.Hash())
- if postRoot == (common.Hash{}) || !bc.HasState(postRoot) {
- bc.recoverDelayedSRC(head)
- }
- }
// Ensure that a previous crash in SetHead doesn't leave extra ancients
//nolint:nestif
if frozen, err := bc.db.ItemAmountInAncient(); err == nil && frozen > 0 {
@@ -745,18 +899,7 @@ func NewParallelBlockChain(db ethdb.Database, genesis *Genesis, engine consensus
return bc, nil
}
-// ProcessBlock executes the transactions in block, validates state, and returns
-// the resulting receipts, logs, gas used, and updated StateDB.
-func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header, witness *stateless.Witness, followupInterrupt *atomic.Bool) (_ types.Receipts, _ []*types.Log, _ uint64, _ *state.StateDB, vtime time.Duration, blockEndErr error) {
- return bc.processBlock(block, parent, nil, witness, followupInterrupt)
-}
-
-// processBlock is the internal implementation of ProcessBlock.
-// When flatDiff is non-nil (delayed SRC path), each statedb is opened at
-// parent.Root and then has flatDiff applied as an in-memory overlay, allowing
-// block N+1's transaction execution to begin concurrently with the background
-// goroutine that commits block N's state root to the path DB.
-func (bc *BlockChain) processBlock(block *types.Block, parent *types.Header, flatDiff *state.FlatDiff, witness *stateless.Witness, followupInterrupt *atomic.Bool) (_ types.Receipts, _ []*types.Log, _ uint64, _ *state.StateDB, vtime time.Duration, blockEndErr error) {
+func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header, witness *stateless.Witness, followupInterrupt *atomic.Bool, pipeOpts *PipelineImportOpts) (_ types.Receipts, _ []*types.Log, _ uint64, _ *state.StateDB, vtime time.Duration, blockEndErr error) {
// Process the block using processor and parallelProcessor at the same time, take the one which finishes first, cancel the other, and return the result
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -781,32 +924,27 @@ func (bc *BlockChain) processBlock(block *types.Block, parent *types.Header, fla
}()
}
- parentRoot := parent.Root
- prefetch, process, err := bc.statedb.ReadersWithCacheStats(parentRoot)
+	// Under pipelined import, parent.Root may not be committed yet (SRC still
+	// running). Use the last committed root for trie reads; the FlatDiff
+	// overlay below makes those reads see the previous block's post-state.
+ readerRoot := pipelineReaderRoot(parent, pipeOpts)
+ prefetch, process, err := bc.statedb.ReadersWithCacheStats(readerRoot)
if err != nil {
return nil, nil, 0, nil, 0, err
}
- throwaway, err := state.NewWithReader(parentRoot, bc.statedb, prefetch)
+ throwaway, err := state.NewWithReader(readerRoot, bc.statedb, prefetch)
if err != nil {
return nil, nil, 0, nil, 0, err
}
- if flatDiff != nil {
- throwaway.SetFlatDiffRef(flatDiff)
- }
- statedb, err := state.NewWithReader(parentRoot, bc.statedb, process)
+ statedb, err := state.NewWithReader(readerRoot, bc.statedb, process)
if err != nil {
return nil, nil, 0, nil, 0, err
}
- if flatDiff != nil {
- statedb.SetFlatDiffRef(flatDiff)
- }
- parallelStatedb, err := state.NewWithReader(parentRoot, bc.statedb, process)
+ parallelStatedb, err := state.NewWithReader(readerRoot, bc.statedb, process)
if err != nil {
return nil, nil, 0, nil, 0, err
}
- if flatDiff != nil {
- parallelStatedb.SetFlatDiffRef(flatDiff)
- }
+ applyFlatDiffOverlayToAll(pipeOpts, throwaway, statedb, parallelStatedb)
// Upload the statistics of reader at the end
defer func() {
@@ -880,7 +1018,7 @@ func (bc *BlockChain) processBlock(block *types.Block, parent *types.Header, fla
blockExecutionParallelTimer.UpdateSince(pstart)
if err == nil {
vstart := time.Now()
- err = bc.validator.ValidateState(block, parallelStatedb, res, false)
+ err = validateStateForPipeline(bc.validator, block, parallelStatedb, res, pipeOpts)
vtime = time.Since(vstart)
}
if res == nil {
@@ -900,7 +1038,7 @@ func (bc *BlockChain) processBlock(block *types.Block, parent *types.Header, fla
blockExecutionSerialTimer.UpdateSince(pstart)
if err == nil {
vstart := time.Now()
- err = bc.validator.ValidateState(block, statedb, res, false)
+ err = validateStateForPipeline(bc.validator, block, statedb, res, pipeOpts)
vtime = time.Since(vstart)
}
if res == nil {
@@ -1085,7 +1223,6 @@ func (bc *BlockChain) loadLastState() error {
if pruning := bc.historyPrunePoint.Load(); pruning != nil {
log.Info("Chain history is pruned", "earliest", pruning.BlockNumber, "hash", pruning.BlockHash)
}
-
return nil
}
@@ -1711,6 +1848,11 @@ func (bc *BlockChain) stopWithoutSaving() {
if bc.stateSizer != nil {
bc.stateSizer.Stop()
}
+ // Flush any pending import SRC before waiting for goroutines.
+ if err := bc.flushPendingImportSRC(); err != nil {
+ log.Error("Failed to flush pending import SRC during shutdown", "err", err)
+ }
+
// Now wait for all chain modifications to end and persistent goroutines to exit.
//
// Note: Close waits for the mutex to become available, i.e. any running chain
@@ -2304,47 +2446,10 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
rawdb.WriteBlock(blockBatch, block)
rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
- // System call appends state-sync logs into state. So, `state.Logs()` contains
- // all logs including system-call logs (state sync logs) while `logs` contains
- // only logs generated by transactions (receipts).
- //
- // That means that state.Logs() can have more logs than receipt logs.
- // In that case, we can safely assume that extra logs are from state sync logs.
- //
- // block logs = receipt logs + state sync logs = `state.Logs()`
- blockLogs := statedb.Logs()
-
- var stateSyncLogs []*types.Log
-
- if len(blockLogs) > 0 {
- // After Madhugiri HF we don't write bor receipts separately
- if !(bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsMadhugiri(block.Number())) && len(blockLogs) > len(logs) {
- sort.SliceStable(blockLogs, func(i, j int) bool {
- return blockLogs[i].Index < blockLogs[j].Index
- })
- stateSyncLogs = blockLogs[len(logs):] // get state-sync logs from `state.Logs()`
-
- // State sync logs don't have tx index, tx hash and other necessary fields
- // DeriveFieldsForBorLogs will fill those fields for websocket subscriptions
- types.DeriveFieldsForBorLogs(stateSyncLogs, block.Hash(), block.NumberU64(), uint(len(receipts)), uint(len(logs)))
-
- // Derive the cumulative gas used from last receipt of this block
- var cumulativeGasUsed uint64
- if len(receipts) > 0 {
- cumulativeGasUsed = receipts[len(receipts)-1].CumulativeGasUsed
- }
-
- // Write bor receipt
- rawdb.WriteBorReceipt(blockBatch, block.Hash(), block.NumberU64(), &types.ReceiptForStorage{
- Status: types.ReceiptStatusSuccessful, // make receipt status successful
- Logs: stateSyncLogs,
- CumulativeGasUsed: cumulativeGasUsed,
- })
-
- // Write bor tx reverse lookup
- rawdb.WriteBorTxLookupEntry(blockBatch, block.Hash(), block.NumberU64())
- }
- }
+ // Bor state-sync logs: system calls append state-sync logs into state, so
+ // state.Logs() may exceed the transaction-produced logs. Pre-Madhugiri we
+ // write a synthetic bor receipt + tx lookup entry for those.
+ stateSyncLogs := bc.writeBorStateSyncLogs(blockBatch, block, receipts, logs, statedb)
rawdb.WritePreimages(blockBatch, statedb.Preimages())
@@ -2366,6 +2471,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
bc.WriteWitness(block.Hash(), witnessBytes)
dbWriteDuration := time.Since(writeStart)
witnessDbWriteTimer.Update(dbWriteDuration)
+ witnessSizeBytesHistogram.Update(int64(len(witnessBytes)))
if encodeDuration > 100*time.Millisecond {
log.Warn("Slow witness encoding", "block", block.NumberU64(), "elapsed", common.PrettyDuration(encodeDuration), "size", common.StorageSize(len(witnessBytes)))
@@ -2469,392 +2575,6 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
return stateSyncLogs, nil
}
-// writeBlockData writes the block data (TD, block body, receipts, preimages,
-// witness) to the database WITHOUT committing trie state. Used by the delayed-SRC
-// path where a background goroutine handles CommitWithUpdate concurrently.
-// Returns state-sync logs (bor-specific logs not covered by receipts) for feed emission.
-func (bc *BlockChain) writeBlockData(block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB) ([]*types.Log, error) {
- ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
- if ptd == nil {
- return nil, consensus.ErrUnknownAncestor
- }
- externTd := new(big.Int).Add(block.Difficulty(), ptd)
-
- blockBatch := bc.db.NewBatch()
- rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
- rawdb.WriteBlock(blockBatch, block)
- rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
-
- var stateSyncLogs []*types.Log
- blockLogs := statedb.Logs()
- if len(blockLogs) > 0 {
- if !(bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsMadhugiri(block.Number())) && len(blockLogs) > len(logs) {
- sort.SliceStable(blockLogs, func(i, j int) bool {
- return blockLogs[i].Index < blockLogs[j].Index
- })
- stateSyncLogs = blockLogs[len(logs):]
- types.DeriveFieldsForBorLogs(stateSyncLogs, block.Hash(), block.NumberU64(), uint(len(receipts)), uint(len(logs)))
-
- var cumulativeGasUsed uint64
- if len(receipts) > 0 {
- cumulativeGasUsed = receipts[len(receipts)-1].CumulativeGasUsed
- }
- rawdb.WriteBorReceipt(blockBatch, block.Hash(), block.NumberU64(), &types.ReceiptForStorage{
- Status: types.ReceiptStatusSuccessful,
- Logs: stateSyncLogs,
- CumulativeGasUsed: cumulativeGasUsed,
- })
- rawdb.WriteBorTxLookupEntry(blockBatch, block.Hash(), block.NumberU64())
- }
- }
-
- rawdb.WritePreimages(blockBatch, statedb.Preimages())
-
- // Under delayed SRC, the witness built during tx execution (via NewWithFlatBase)
- // is incomplete: accounts in the FlatDiff overlay bypass the trie, so their MPT
- // proof nodes are never captured. The complete witness is built by the SRC
- // goroutine (spawnSRCGoroutine) and written there after CommitWithUpdate.
-
- if err := blockBatch.Write(); err != nil {
- log.Crit("Failed to write block into disk", "err", err)
- }
- rawdb.WriteBytecodeSyncLastBlock(bc.db, block.NumberU64())
- return stateSyncLogs, nil
-}
-
-// writeBlockDataAndSetHead is the delayed-SRC analogue of writeBlockAndSetHead:
-// it persists block data without trie state (trie commit is done by the SRC goroutine)
-// and then applies the block as the new chain head.
-func (bc *BlockChain) writeBlockDataAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB, emitHeadEvent bool) (WriteStatus, error) {
- stateSyncLogs, err := bc.writeBlockData(block, receipts, logs, statedb)
- if err != nil {
- return NonStatTy, err
- }
-
- currentBlock := bc.CurrentBlock()
- reorg, err := bc.forker.ReorgNeeded(currentBlock, block.Header())
- if err != nil {
- return NonStatTy, err
- }
-
- var status WriteStatus
- if reorg {
- if block.ParentHash() != currentBlock.Hash() {
- if err = bc.reorg(currentBlock, block.Header()); err != nil {
- return NonStatTy, err
- }
- }
- status = CanonStatTy
- } else {
- status = SideStatTy
- }
-
- if status == CanonStatTy {
- bc.writeHeadBlock(block)
-
- bc.chainFeed.Send(ChainEvent{
- Header: block.Header(),
- Receipts: receipts,
- Transactions: block.Transactions(),
- })
-
- if len(logs) > 0 {
- bc.logsFeed.Send(logs)
- }
- if len(stateSyncLogs) > 0 {
- bc.logsFeed.Send(stateSyncLogs)
- }
- if emitHeadEvent {
- bc.chainHeadFeed.Send(ChainHeadEvent{Header: block.Header()})
- bc.stateSyncMu.RLock()
- for _, data := range bc.GetStateSync() {
- bc.stateSyncFeed.Send(StateSyncEvent{Data: data})
- }
- bc.stateSyncMu.RUnlock()
- }
- } else {
- bc.chainSideFeed.Send(ChainSideEvent{Header: block.Header()})
-
- bc.chain2HeadFeed.Send(Chain2HeadEvent{
- Type: Chain2HeadForkEvent,
- NewChain: []*types.Header{block.Header()},
- })
- }
-
- return status, nil
-}
-
-// recoverDelayedSRC re-executes the head block to recover the FlatDiff
-// and spawn the SRC goroutine after a crash. This is needed because
-// under delayed SRC the background goroutine may not have finished
-// (or its results may not have been journaled) before the crash.
-func (bc *BlockChain) recoverDelayedSRC(head *types.Header) {
- block := bc.GetBlock(head.Hash(), head.Number.Uint64())
- if block == nil {
- log.Error("Delayed SRC recovery: head block not found", "number", head.Number, "hash", head.Hash())
- return
- }
-
- // head.Root = root_{N-1} under delayed SRC; HasState already confirmed it's available.
- statedb, err := bc.StateAt(head.Root)
- if err != nil {
- log.Error("Delayed SRC recovery: failed to open state", "root", head.Root, "err", err)
- return
- }
-
- _, err = bc.processor.Process(block, statedb, bc.cfg.VmConfig, nil, context.Background())
- if err != nil {
- log.Error("Delayed SRC recovery: block re-execution failed", "number", head.Number, "err", err)
- return
- }
-
- flatDiff := statedb.CommitSnapshot(bc.chainConfig.IsEIP158(head.Number))
-
- bc.lastFlatDiffMu.Lock()
- bc.lastFlatDiff = flatDiff
- bc.lastFlatDiffBlockHash = block.Hash()
- bc.lastFlatDiffMu.Unlock()
-
- bc.spawnSRCGoroutine(block, head.Root, flatDiff)
- log.Info("Delayed SRC recovery: re-executed head block", "number", head.Number, "hash", head.Hash())
-}
-
-// GetPostStateRoot returns the actual post-execution state root for the given
-// block. It checks, in order:
-//
-// 1. The in-flight SRC goroutine (blocks until it finishes).
-// 2. The canonical child's header (block[N+1].Root == root_N by protocol invariant).
-// 3. The persisted post-state root key-value store.
-// 4. For pre-fork blocks, header.Root is the block's own post-execution root.
-func (bc *BlockChain) GetPostStateRoot(blockHash common.Hash) common.Hash {
- // 1. Check in-flight goroutine.
- bc.pendingSRCMu.Lock()
- pending := bc.pendingSRC
- bc.pendingSRCMu.Unlock()
-
- if pending != nil && pending.blockHash == blockHash {
- pending.wg.Wait()
- if pending.err != nil {
- log.Error("Delayed SRC goroutine failed", "blockHash", blockHash, "err", pending.err)
- return common.Hash{}
- }
- return pending.root
- }
-
- // 2-4. No in-flight goroutine; resolve from on-chain data.
- header := bc.GetHeaderByHash(blockHash)
- if header == nil {
- return common.Hash{}
- }
- if bc.chainConfig.Bor == nil || !bc.chainConfig.Bor.IsDelayedSRC(header.Number) {
- return header.Root
- }
- child := bc.GetHeaderByNumber(header.Number.Uint64() + 1)
- if child != nil && child.ParentHash == blockHash {
- return child.Root
- }
- return rawdb.ReadPostStateRoot(bc.db, blockHash)
-}
-
-// PostExecutionStateAt returns a StateDB representing the post-execution state
-// of the given block header. Under delayed SRC, if the FlatDiff for this block
-// is still cached (i.e. this is the chain head), it returns a non-blocking
-// overlay state via NewWithFlatBase — matching the miner's approach.
-// Otherwise it falls back to resolving the actual state root (which may block
-// if the background SRC goroutine is still running).
-func (bc *BlockChain) PostExecutionStateAt(header *types.Header) (*state.StateDB, error) {
- // Fast path: if delayed SRC is active and we have the FlatDiff for this
- // block, use it as an overlay on top of header.Root (= root_{N-1}).
- if bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsDelayedSRC(header.Number) {
- bc.lastFlatDiffMu.RLock()
- flatDiff := bc.lastFlatDiff
- flatDiffHash := bc.lastFlatDiffBlockHash
- bc.lastFlatDiffMu.RUnlock()
-
- if flatDiff != nil && flatDiffHash == header.Hash() {
- return state.NewWithFlatBase(header.Root, bc.statedb, flatDiff)
- }
- }
-
- // Slow path: resolve the actual post-execution root.
- // For delayed-SRC blocks this may block on the background goroutine.
- // For pre-fork blocks, GetPostStateRoot returns common.Hash{} and we
- // use header.Root directly.
- root := header.Root
- if r := bc.GetPostStateRoot(header.Hash()); r != (common.Hash{}) {
- root = r
- }
- return bc.StateAt(root)
-}
-
-// expectedPreStateRoot returns the parent header's on-chain Root field.
-// This is what witness.Root() (= Headers[0].Root) should equal — it validates
-// that the witness carries the correct parent header.
-//
-// Note: under delayed SRC, parentHeader.Root = root_{N-2}, not root_{N-1}.
-// The actual pre-state root validation (block.Root() == root_{N-1}) is done
-// separately in writeBlockAndSetHead.
-func (bc *BlockChain) expectedPreStateRoot(block *types.Block) (common.Hash, error) {
- parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
- if parent == nil {
- return common.Hash{}, fmt.Errorf("parent header not found: %s (block %d)", block.ParentHash(), block.NumberU64())
- }
- return parent.Root, nil
-}
-
-// GetDelayedWitnessForBlock returns the stateless witness for block blockHash
-// that was built as a byproduct of the delayed SRC goroutine. It blocks until
-// the goroutine finishes, identical in structure to GetPostStateRoot.
-// Returns nil if the witness was not built (e.g. pre-fork block or goroutine
-// failure) or if the goroutine for blockHash is no longer in flight.
-func (bc *BlockChain) GetDelayedWitnessForBlock(blockHash common.Hash) *stateless.Witness {
- bc.pendingSRCMu.Lock()
- pending := bc.pendingSRC
- bc.pendingSRCMu.Unlock()
-
- if pending != nil && pending.blockHash == blockHash {
- pending.wg.Wait()
- if pending.err != nil {
- return nil
- }
- return pending.witness
- }
- // Witness is not retained after the goroutine is superseded; callers
- // that need it must request it before the next block's goroutine starts.
- return nil
-}
-
-// spawnSRCGoroutine launches a background goroutine that computes the actual
-// state root for block by replaying flatDiff on top of parentRoot.
-// The result is stored in pending.root; pending.wg is decremented when finished.
-// As a byproduct of the MPT hashing, a complete witness for stateless execution
-// of block is built and stored in pending.witness.
-func (bc *BlockChain) spawnSRCGoroutine(block *types.Block, parentRoot common.Hash, flatDiff *state.FlatDiff) {
- pending := &pendingSRCState{
- blockHash: block.Hash(),
- blockNumber: block.NumberU64(),
- }
-
- bc.pendingSRCMu.Lock()
- bc.pendingSRC = pending
- bc.pendingSRCMu.Unlock()
-
- deleteEmptyObjects := bc.chainConfig.IsEIP158(block.Number())
- isCancun := bc.chainConfig.IsCancun(block.Number())
-
- // bc.wg.Go handles Add(1)/Done() for graceful shutdown tracking.
- // pending.wg tracks completion for GetPostStateRoot callers.
- pending.wg.Add(1)
- bc.wg.Go(func() {
- defer pending.wg.Done()
-
- // Create a snapshot-less database so that all account and storage
- // reads go directly through the MPT. This ensures the prevalueTracer
- // on each trie captures every intermediate node, which is later
- // flushed into the witness. Using the snapshot would bypass the trie
- // and leave those proof-path nodes out of the witness.
- // noSnapDB := state.NewDatabase(bc.statedb.TrieDB(), nil)
- tmpDB, err := state.New(parentRoot, bc.statedb)
- if err != nil {
- log.Error("Delayed SRC: failed to open tmpDB", "parentRoot", parentRoot, "err", err)
- pending.err = err
- return
- }
-
- // Attach a witness so that IntermediateRoot captures all root_{N-1}
- // trie nodes as a byproduct of the MPT hashing. parentRoot is the
- // correct pre-state root for stateless execution of block N.
- witness, witnessErr := stateless.NewWitness(block.Header(), bc)
- if witnessErr != nil {
- log.Warn("Delayed SRC: failed to create witness", "block", block.NumberU64(), "err", witnessErr)
- } else {
- // Embed parentRoot as the pre-state root. NewWitness zeroed context.Root;
- // a non-zero value here signals delayed SRC to witness.Root().
- witness.Header().Root = parentRoot
- tmpDB.SetWitness(witness)
- }
-
- // Mark all write mutations as dirty.
- tmpDB.ApplyFlatDiffForCommit(flatDiff)
-
- // Load read-only accounts and storage slots so that the statedb
- // has stateObjects (with originStorage) for every address and slot
- // that was accessed during the original block execution. These reads
- // go through the reader's trie; IntermediateRoot (called by
- // CommitWithUpdate) then re-walks read-only accounts and storage
- // through s.trie / obj.trie to capture proof-path nodes for the
- // witness when no prefetcher is present.
- for _, addr := range flatDiff.ReadSet {
- tmpDB.GetBalance(addr)
- for _, slot := range flatDiff.ReadStorage[addr] {
- tmpDB.GetState(addr, slot)
- }
- }
- // Load read-only storage for mutated accounts (slots in originStorage
- // that aren't in pendingStorage). These reads capture trie nodes that
- // stateless execution needs (e.g., span commit reads validator contract
- // slots it doesn't write).
- for addr := range flatDiff.Accounts {
- for _, slot := range flatDiff.ReadStorage[addr] {
- tmpDB.GetState(addr, slot)
- }
- }
-
- // Pure-destruct accounts (created AND destroyed within block N) are
- // absent from root_{N-1}. SelfDestruct returns early for them, so
- // CommitWithUpdate never traverses their account trie paths. The
- // stateless node still needs these paths for deleteStateObject.
- // Force a read to create stateObjects; IntermediateRoot captures
- // the account trie nodes via the no-prefetcher witness path.
- for addr := range flatDiff.Destructs {
- if _, resurrected := flatDiff.Accounts[addr]; !resurrected {
- tmpDB.GetBalance(addr)
- }
- }
-
- // Non-existent accounts accessed during execution (e.g., by
- // state-sync EVM calls) need proof-of-absence trie nodes in the
- // witness. GetBalance triggers a trie read through the reader;
- // IntermediateRoot (called by CommitWithUpdate) then walks
- // these paths through s.trie to capture the proof nodes.
- for _, addr := range flatDiff.NonExistentReads {
- tmpDB.GetBalance(addr)
- }
-
- root, stateUpdate, err := tmpDB.CommitWithUpdate(block.NumberU64(), deleteEmptyObjects, isCancun)
- if err != nil {
- log.Error("Delayed SRC: CommitWithUpdate failed", "block", block.NumberU64(), "err", err)
- pending.err = err
- return
- }
-
- if bc.stateSizer != nil {
- bc.stateSizer.Notify(stateUpdate)
- }
-
- // Write the complete witness to the database and announce it.
- // This must happen after CommitWithUpdate so that all trie nodes
- // (for both write and read-set accounts) have been accumulated.
- if witness != nil {
- var witBuf bytes.Buffer
- if err := witness.EncodeRLP(&witBuf); err != nil {
- log.Error("Delayed SRC: failed to encode witness", "block", block.NumberU64(), "err", err)
- } else {
- bc.WriteWitness(bc.db, block.Hash(), witBuf.Bytes())
- bc.witnessFeed.Send(WitnessReadyEvent{Block: block, Witness: witness})
- }
- }
-
- // Persist so GetPostStateRoot can find this root on restart
- // even before a child block is imported.
- rawdb.WritePostStateRoot(bc.db, block.Hash(), root)
-
- // Set root and witness before wg.Done() so callers see them.
- pending.root = root
- pending.witness = witness
- })
-}
-
// WriteBlockAndSetHead writes the given block and all associated state to the database,
// and applies the block as the new chain head.
func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
@@ -2869,119 +2589,15 @@ func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types
// writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
// This function expects the chain mutex to be held.
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool, stateless bool) (status WriteStatus, err error) {
- // Under delayed SRC: CommitWithUpdate is deferred — either to a background
- // goroutine (miner/import path) or handled inline (stateless path).
- if bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsDelayedSRC(block.Number()) {
- parentRoot := bc.GetPostStateRoot(block.ParentHash())
- if parentRoot == (common.Hash{}) {
- return NonStatTy, fmt.Errorf("delayed state root unavailable for parent %s", block.ParentHash())
- }
- // Validate: block.Root() must equal the parent's computed post-state root.
- // This mirrors ValidateState (block_validator.go:178) for stateless nodes,
- // where ValidateState returns early (stateless=true skips root checks).
- if block.Root() != parentRoot {
- return NonStatTy, fmt.Errorf("delayed SRC state root mismatch: header.Root=%x, computedParentRoot=%x, block=%d",
- block.Root(), parentRoot, block.NumberU64())
- }
-
- if stateless {
- // Stateless path: the state root is cheap to compute on the
- // witness-backed trie, so there's no need to defer it. Record
- // the cross-root for the next block's validation, then fall
- // through to writeBlockWithState which naturally handles code
- // persistence, witness writing, etc.
- crossRoot := state.IntermediateRoot(bc.chainConfig.IsEIP158(block.Number()))
- pending := &pendingSRCState{
- blockHash: block.Hash(),
- blockNumber: block.NumberU64(),
- root: crossRoot,
- }
- // pending.wg is at zero, so wg.Wait() returns immediately.
- bc.pendingSRCMu.Lock()
- bc.pendingSRC = pending
- bc.pendingSRCMu.Unlock()
- // Persist to DB so the root survives reorgs and restarts.
- rawdb.WritePostStateRoot(bc.db, block.Hash(), crossRoot)
- // Fall through to writeBlockWithState below.
- } else {
- // Full-node path: defer CommitWithUpdate to a background goroutine.
- flatDiff := state.CommitSnapshot(bc.chainConfig.IsEIP158(block.Number()))
- bc.lastFlatDiffMu.Lock()
- bc.lastFlatDiff = flatDiff
- bc.lastFlatDiffBlockHash = block.Hash()
- bc.lastFlatDiffMu.Unlock()
- bc.spawnSRCGoroutine(block, parentRoot, flatDiff)
- return bc.writeBlockDataAndSetHead(block, receipts, logs, state, emitHeadEvent)
- }
- }
-
stateSyncLogs, err := bc.writeBlockWithState(block, receipts, logs, state)
if err != nil {
return NonStatTy, err
}
-
- currentBlock := bc.CurrentBlock()
- reorg, err := bc.forker.ReorgNeeded(currentBlock, block.Header())
+ status, err = bc.resolvePostWriteStatus(block, stateless)
if err != nil {
return NonStatTy, err
}
-
- if reorg {
- // Reorganise the chain if the parent is not the head block
- if block.ParentHash() != currentBlock.Hash() {
- if err = bc.reorg(currentBlock, block.Header()); err != nil {
- if !(stateless && err == errInvalidNewChain) { // fast forward may raise an invalid new chain error, skipping for stateless
- return NonStatTy, err
- }
- }
- }
-
- status = CanonStatTy
- } else {
- status = SideStatTy
- }
-
- // Set new head.
- if status == CanonStatTy {
- bc.writeHeadBlock(block)
-
- bc.chainFeed.Send(ChainEvent{
- Header: block.Header(),
- Receipts: receipts,
- Transactions: block.Transactions(),
- })
-
- if len(logs) > 0 {
- bc.logsFeed.Send(logs)
- }
- // send state sync logs into logs feed
- if len(stateSyncLogs) > 0 {
- bc.logsFeed.Send(stateSyncLogs)
- }
- // In theory, we should fire a ChainHeadEvent when we inject
- // a canonical block, but sometimes we can insert a batch of
- // canonical blocks. Avoid firing too many ChainHeadEvents,
- // we will fire an accumulated ChainHeadEvent and disable fire
- // event here.
- if emitHeadEvent {
- bc.chainHeadFeed.Send(ChainHeadEvent{Header: block.Header()})
- // BOR state sync feed related changes
- bc.stateSyncMu.RLock()
- for _, data := range bc.GetStateSync() {
- bc.stateSyncFeed.Send(StateSyncEvent{Data: data})
- }
- bc.stateSyncMu.RUnlock()
- // BOR
- }
- } else {
- bc.chainSideFeed.Send(ChainSideEvent{Header: block.Header()})
-
- bc.chain2HeadFeed.Send(Chain2HeadEvent{
- Type: Chain2HeadForkEvent,
- NewChain: []*types.Header{block.Header()},
- })
- }
-
+ bc.emitPostWriteEvents(block, receipts, logs, stateSyncLogs, status, emitHeadEvent)
return status, nil
}
@@ -3223,12 +2839,11 @@ func (bc *BlockChain) insertChainStatelessParallel(chain types.Blocks, witnesses
// Validate witness pre-state for this block (if present) before writing
if i < len(witnesses) && witnesses[i] != nil {
- expectedRoot, err := bc.expectedPreStateRoot(block)
- if err != nil {
- stopHeaders()
- return int(processed.Load()), fmt.Errorf("post-import witness validation failed for block %d: %w", block.NumberU64(), err)
+ var headerReader stateless.HeaderReader = bc
+ if witnesses[i].HeaderReader() != nil {
+ headerReader = witnesses[i].HeaderReader()
}
- if err := stateless.ValidateWitnessPreState(witnesses[i], expectedRoot); err != nil {
+ if err := stateless.ValidateWitnessPreState(witnesses[i], headerReader, block.Header()); err != nil {
stopHeaders()
return int(processed.Load()), fmt.Errorf("post-import witness validation failed for block %d: %w", block.NumberU64(), err)
}
@@ -3388,11 +3003,11 @@ func (bc *BlockChain) insertChainStatelessSequential(chain types.Blocks, witness
// End-of-batch witness validation
for i, block := range chain {
if i < len(witnesses) && witnesses[i] != nil {
- expectedRoot, err := bc.expectedPreStateRoot(block)
- if err != nil {
- return int(processed.Load()), fmt.Errorf("post-import witness validation failed for block %d: %w", block.NumberU64(), err)
+ var headerReader stateless.HeaderReader = bc
+ if witnesses[i].HeaderReader() != nil {
+ headerReader = witnesses[i].HeaderReader()
}
- if err := stateless.ValidateWitnessPreState(witnesses[i], expectedRoot); err != nil {
+ if err := stateless.ValidateWitnessPreState(witnesses[i], headerReader, block.Header()); err != nil {
return int(processed.Load()), fmt.Errorf("post-import witness validation failed for block %d: %w", block.NumberU64(), err)
}
}
@@ -3565,25 +3180,6 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool,
// Track the singleton witness from this chain insertion (if any)
var witness *stateless.Witness
- // prevFlatDiff is the FlatDiff extracted from the previous block under delayed SRC.
- // Carrying it across iterations lets block N+1 open state at parent.Root + flatDiff_N
- // immediately, without waiting for the background goroutine to commit root_N.
- //
- // Seed from bc.lastFlatDiff when the first block in this batch is the direct
- // successor of the block that produced lastFlatDiff. This handles the case
- // where block N was processed in a previous insertChain call (or by the miner
- // path) and block N+1 now arrives in a fresh call. Without seeding here,
- // processBlock would open state at parent.Root = root_{N-1} (under delayed SRC)
- // without the flatDiff_N overlay, yielding stale nonces and bad block errors.
- var prevFlatDiff *state.FlatDiff
- if bc.chainConfig.Bor != nil && len(chain) > 0 && bc.chainConfig.Bor.IsDelayedSRC(chain[0].Number()) {
- bc.lastFlatDiffMu.RLock()
- if bc.lastFlatDiffBlockHash == chain[0].ParentHash() {
- prevFlatDiff = bc.lastFlatDiff
- }
- bc.lastFlatDiffMu.RUnlock()
- }
-
// accumulator for canonical blocks
var canonAccum []*types.Block
@@ -3677,22 +3273,16 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool,
parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
}
- isDelayedSRC := bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsDelayedSRC(block.Number())
-
- // Under delayed SRC, parent.Root is the committed trie base (= root_{N-1} for block N).
- // prevFlatDiff, if non-nil, carries block N-1's mutations as an in-memory overlay so
- // block N's transaction execution can begin immediately without waiting for the
- // background goroutine (G_{N-1}) to finish committing root_{N-1} to the path DB.
- // The sync point (ValidateState → GetPostStateRoot) is deferred until
- // AFTER transaction execution completes inside processBlock.
- if !isDelayedSRC {
- prevFlatDiff = nil // reset when leaving the delayed-SRC regime
+ // --- Pipelined import: check for pending SRC from previous block ---
+ pipelineActive := bc.cfg.EnablePipelinedImportSRC && setHead && !bc.cfg.Stateless
+ var pipeOpts *PipelineImportOpts
+ if pipelineActive {
+ pipeOpts = bc.buildPipelineImportOpts(block, parent)
}
- statedb, err := state.New(parent.Root, bc.statedb)
- if err != nil {
- return nil, it.index, err
- }
+	// Note: ProcessBlock opens its own statedbs internally, so none is
+	// created here. (The statedb formerly constructed at this point served
+	// only for activeState tracking, which ProcessBlock's result now covers.)
// If we are past Byzantium, enable prefetching to pull in trie node paths
// while processing transactions. Before Byzantium the prefetcher is mostly
@@ -3707,12 +3297,7 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool,
return nil, it.index, err
}
}
- // Bor: We start the prefetcher in process block function called below
- // and not here as we copy state for block-stm in that function. Also,
- // we don't want to start duplicate prefetchers per block.
- // statedb.StartPrefetcher("chain", witness)
}
- activeState = statedb
var followupInterrupt atomic.Bool
@@ -3723,14 +3308,11 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool,
if witnesses != nil && len(witnesses) > it.processed()-1 && witnesses[it.processed()-1] != nil {
// 1. Validate the witness.
- expectedRoot, err := bc.expectedPreStateRoot(block)
- if err != nil {
- log.Error("Pre-state root unavailable for witness validation", "blockNumber", block.Number(), "blockHash", block.Hash(), "err", err)
- bc.reportBlock(block, &ProcessResult{}, err)
- followupInterrupt.Store(true)
- return nil, it.index, fmt.Errorf("witness validation failed: %w", err)
+ var headerReader stateless.HeaderReader = bc
+ if witnesses[it.processed()-1].HeaderReader() != nil {
+ headerReader = witnesses[it.processed()-1].HeaderReader()
}
- if err := stateless.ValidateWitnessPreState(witnesses[it.processed()-1], expectedRoot); err != nil {
+ if err := stateless.ValidateWitnessPreState(witnesses[it.processed()-1], headerReader, block.Header()); err != nil {
log.Error("Witness validation failed during chain insertion", "blockNumber", block.Number(), "blockHash", block.Hash(), "err", err)
bc.reportBlock(block, &ProcessResult{}, err)
followupInterrupt.Store(true)
@@ -3751,7 +3333,16 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool,
}
}
- receipts, logs, usedGas, statedb, vtime, err := bc.processBlock(block, parent, prevFlatDiff, witness, &followupInterrupt)
+ cheapExecStart := time.Now()
+ receipts, logs, usedGas, statedb, vtime, err := bc.ProcessBlock(block, parent, witness, &followupInterrupt, pipeOpts)
+ cheapExecElapsed := time.Since(cheapExecStart)
+ if pipelineActive {
+ pipelineImportCheapExecTimer.Update(cheapExecElapsed)
+ pipelineImportCheapValidationTimer.Update(vtime)
+ } else {
+ normalImportProcessTimer.Update(cheapExecElapsed)
+ normalImportValidationTimer.Update(vtime)
+ }
bc.statedb.TrieDB().SetReadBackend(nil)
bc.statedb.EnableSnapInReader()
activeState = statedb
@@ -3759,21 +3350,58 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool,
if err != nil {
bc.reportBlock(block, &ProcessResult{Receipts: receipts}, err)
followupInterrupt.Store(true)
+ // Flush any pending import SRC before returning on error. Log any
+ // flush error (e.g., previous block's root mismatch) — the outer
+ // err takes precedence for the caller, but a silent flush failure
+ // would mask real corruption from the prior pipelined block.
+ if pipelineActive {
+ if flushErr := bc.flushPendingImportSRC(); flushErr != nil {
+ log.Error("Pipelined import: flush failed after ProcessBlock error",
+ "block", block.NumberU64(), "flushErr", flushErr, "processErr", err)
+ }
+ }
return nil, it.index, err
}
- // BOR state sync feed related changes
- bc.stateSyncMu.RLock()
- for _, data := range bc.GetStateSync() {
- bc.stateSyncFeed.Send(StateSyncEvent{Data: data})
- }
- bc.stateSyncMu.RUnlock()
- // BOR
- ptime := time.Since(pstart) - vtime - statedb.BorConsensusTime
-
- proctime := time.Since(start) // processing + validation
-
- // Update the metrics touched during block processing and validation
+ // --- Pipelined import: extract FlatDiff, collect previous SRC, write metadata, spawn SRC ---
+ if pipelineActive {
+ adjustBack, err := bc.persistPipelinedImport(block, parent, statedb, receipts, logs, start, cheapExecElapsed, vtime, computeWitness)
+ if err != nil {
+ followupInterrupt.Store(true)
+ idx := it.index
+ if adjustBack {
+ idx--
+ }
+ return nil, idx, err
+ }
+ followupInterrupt.Store(true)
+ stats.processed++
+ stats.usedGas += usedGas
+ lastCanon = block
+ var snapDiffItems, snapBufItems common.StorageSize
+ if bc.snaps != nil {
+ snapDiffItems, snapBufItems = bc.snaps.Size()
+ }
+ trieDiffNodes, trieBufNodes, _ := bc.triedb.Size()
+ stats.report(chain, it.index, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, setHead, false)
+ emitPipelinedImportParityMetrics(statedb, start, pstart, vtime, block)
+ continue
+ }
+
+ // --- Normal (non-pipelined) write path ---
+
+ // BOR state sync feed related changes
+ bc.stateSyncMu.RLock()
+ for _, data := range bc.GetStateSync() {
+ bc.stateSyncFeed.Send(StateSyncEvent{Data: data})
+ }
+ bc.stateSyncMu.RUnlock()
+ // BOR
+ ptime := time.Since(pstart) - vtime - statedb.BorConsensusTime
+
+ proctime := time.Since(start) // processing + validation
+
+ // Update the metrics touched during block processing and validation
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete(in processing)
@@ -3801,7 +3429,10 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool,
// available sometime before) and the block turns out to be invalid (i.e. not
// honouring the milestone or checkpoint). Use the block itself as current block
// so that it's considered as a `past` chain and the validation doesn't get bypassed.
+ reorgCheckStart := time.Now()
isValid, err = bc.forker.ValidateReorg(block.Header(), []*types.Header{block.Header()})
+ reorgCheckElapsed := time.Since(reorgCheckStart)
+ normalImportReorgCheckTimer.Update(reorgCheckElapsed)
if err != nil {
return nil, it.index, err
}
@@ -3810,49 +3441,14 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool,
return nil, it.index, whitelist.ErrMismatch
}
- if isDelayedSRC {
- // ValidateState (inside processBlock) was the sync point: it called
- // GetPostStateRoot(block.ParentHash()) and waited for G_{N-1}.
- // pendingSRC still points to G_{N-1}'s entry; reading from the closed
- // done-channel is instant — no second goroutine barrier here.
- actualParentRoot := bc.GetPostStateRoot(block.ParentHash())
- if actualParentRoot == (common.Hash{}) {
- return nil, it.index, fmt.Errorf("delayed state root unavailable for parent %s", block.ParentHash())
- }
-
- // Extract flat diff cheaply (~1ms, no MPT hashing) and spawn the
- // background goroutine that will compute and persist root_N.
- flatDiff := statedb.CommitSnapshot(bc.chainConfig.IsEIP158(block.Number()))
- bc.spawnSRCGoroutine(block, actualParentRoot, flatDiff)
-
- // Pass the flat diff to the next iteration so it can open state at
- // parent.Root (= root_{N-1}) + flatDiff overlay, starting tx execution
- // concurrently with this goroutine's commitAndFlush.
- prevFlatDiff = flatDiff
-
- // Also update lastFlatDiff so the local miner uses the correct pre-state
- // when building the next block after importing this one from a peer.
- // Without this, a validator that imports a peer block via insertChain
- // keeps a stale lastFlatDiff and mines the next block from the wrong
- // base state (missing all mutations from the imported block).
- bc.lastFlatDiffMu.Lock()
- bc.lastFlatDiff = flatDiff
- bc.lastFlatDiffBlockHash = block.Hash()
- bc.lastFlatDiffMu.Unlock()
-
- if !setHead {
- _, err = bc.writeBlockData(block, receipts, logs, statedb)
- } else {
- status, err = bc.writeBlockDataAndSetHead(block, receipts, logs, statedb, false)
- }
+ if !setHead {
+ // Don't set the head, only insert the block
+ _, err = bc.writeBlockWithState(block, receipts, logs, statedb)
} else {
- if !setHead {
- // Don't set the head, only insert the block
- _, err = bc.writeBlockWithState(block, receipts, logs, statedb)
- } else {
- status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false, false)
- }
+ status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false, false)
}
+ writeElapsed := time.Since(wstart)
+ normalImportWriteTimer.Update(writeElapsed)
followupInterrupt.Store(true)
@@ -3868,7 +3464,18 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool,
witnessCollectionTimer.Update(statedb.WitnessCollection)
blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
- blockInsertTimer.UpdateSince(start)
+ elapsedNormal := time.Since(start)
+ blockInsertTimer.Update(elapsedNormal)
+ normalImportTotalTimer.Update(elapsedNormal)
+ bc.logSlowNormalImport(block, cheapExecElapsed, vtime, reorgCheckElapsed, writeElapsed, elapsedNormal, statedb)
+ gasUsedPerBlockHistogram.Update(int64(block.GasUsed()))
+ txsPerBlockHistogram.Update(int64(len(block.Transactions())))
+ if elapsedNormal > 0 {
+ chainMgaspsMeter.Update(time.Duration(float64(block.GasUsed()) * 1000 / float64(elapsedNormal)))
+ }
+ // Witness has already been written inside writeBlockWithState by this point,
+ // so "witness ready" == "import complete" in the non-pipelined case.
+ witnessReadyEndToEndTimer.Update(elapsedNormal)
// Report the import stats before returning the various results
stats.processed++
@@ -3888,7 +3495,7 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool,
if !setHead {
// After merge we expect few side chains. Simply count
- // all blocks the CL gives us for GC processing time.
+ // all blocks the CL gives us for GC processing time
bc.gcproc += proctime
return witness, it.index, nil // Direct block insertion of a single block
}
@@ -3910,7 +3517,7 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool,
lastCanon = block
- // Only count canonical blocks for GC processing time.
+ // Only count canonical blocks for GC processing time
bc.gcproc += proctime
case SideStatTy:
@@ -3952,10 +3559,10 @@ func (bpr *blockProcessingResult) Witness() *stateless.Witness {
return bpr.witness
}
-// processBlockStateful executes and validates the given block. If there was no error
+// processBlock executes and validates the given block. If there was no error
// it writes the block and associated state to database.
// nolint : unused
-func (bc *BlockChain) processBlockStateful(block *types.Block, statedb *state.StateDB, start time.Time, setHead bool, diskdb ethdb.Database) (_ *blockProcessingResult, blockEndErr error) {
+func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, start time.Time, setHead bool, diskdb ethdb.Database) (_ *blockProcessingResult, blockEndErr error) {
startTime := time.Now()
if bc.logger != nil && bc.logger.OnBlockStart != nil {
td := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
@@ -4017,9 +3624,7 @@ func (bc *BlockChain) processBlockStateful(block *types.Block, statedb *state.St
if err != nil {
return nil, fmt.Errorf("stateless self-validation failed: %v", err)
}
- // Under delayed SRC, block.Root() = parent's state root, not this block's;
- // skip the equality check in that case.
- if (bc.chainConfig.Bor == nil || !bc.chainConfig.Bor.IsDelayedSRC(block.Number())) && crossStateRoot != block.Root() {
+ if crossStateRoot != block.Root() {
return nil, fmt.Errorf("stateless self-validation root mismatch (cross: %x local: %x)", crossStateRoot, block.Root())
}
if crossReceiptRoot != block.ReceiptHash() {
@@ -4537,21 +4142,6 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Header) error
// Release the tx-lookup lock after mutation.
bc.txLookupLock.Unlock()
- // Delayed-SRC cleanup: if the in-flight SRC goroutine is for a dropped block,
- // clear it so GetPostStateRoot falls back to the canonical child-header lookup.
- if bc.chainConfig.Bor != nil {
- bc.pendingSRCMu.Lock()
- if bc.pendingSRC != nil {
- for _, h := range oldChain {
- if bc.pendingSRC.blockHash == h.Hash() {
- bc.pendingSRC = nil
- break
- }
- }
- }
- bc.pendingSRCMu.Unlock()
- }
-
return nil
}
@@ -4797,25 +4387,1135 @@ func (bc *BlockChain) SubscribeChain2HeadEvent(ch chan<- Chain2HeadEvent) event.
return bc.scope.Track(bc.chain2HeadFeed.Subscribe(ch))
}
-// SubscribeWitnessReadyEvent registers a subscription for WitnessReadyEvent,
-// which is fired after the delayed-SRC goroutine finishes and the complete
-// witness has been written to the database.
-func (bc *BlockChain) SubscribeWitnessReadyEvent(ch chan<- WitnessReadyEvent) event.Subscription {
- return bc.scope.Track(bc.witnessFeed.Subscribe(ch))
// WriteBlockAndSetHeadPipelined writes block data (header, body, receipts) to
// the database and sets it as the chain head, WITHOUT committing trie state.
// The state commit is handled separately by the SRC goroutine that already
// called CommitWithUpdate. This avoids the "layer stale" error that occurs
// when two CommitWithUpdate calls diverge from the same parent root.
//
// This is the public variant that acquires the chain mutex itself. It is
// used by the miner pipeline (resultLoop) where the mutex is not already
// held; callers that hold chainmu use writeBlockAndSetHeadPipelined directly.
func (bc *BlockChain) WriteBlockAndSetHeadPipelined(block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB, emitHeadEvent bool, witnessBytes []byte) (WriteStatus, error) {
	// TryLock (not Lock) so that a mutex held for shutdown surfaces as
	// errChainStopped instead of blocking the caller indefinitely.
	if !bc.chainmu.TryLock() {
		return NonStatTy, errChainStopped
	}
	defer bc.chainmu.Unlock()

	return bc.writeBlockAndSetHeadPipelined(block, receipts, logs, statedb, emitHeadEvent, witnessBytes)
}
+
+// writeBlockAndSetHeadPipelined is the internal implementation. It writes block
+// data (header, body, receipts) to the database and sets it as the chain head,
+// WITHOUT committing trie state. The state commit is handled by the SRC goroutine.
+// This function does NOT acquire the chain mutex — the caller must ensure
+// proper synchronization (e.g., called from insertChainWithWitnesses).
+func (bc *BlockChain) writeBlockAndSetHeadPipelined(block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB, emitHeadEvent bool, witnessBytes []byte) (WriteStatus, error) {
+ ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
+ if ptd == nil {
+ return NonStatTy, consensus.ErrUnknownAncestor
+ }
+ stateSyncLogs, err := bc.writePipelinedBlockBatch(block, receipts, logs, statedb, witnessBytes, new(big.Int).Add(block.Difficulty(), ptd))
+ if err != nil {
+ return NonStatTy, err
+ }
+ status, err := bc.resolvePostWriteStatus(block, false)
+ if err != nil {
+ return NonStatTy, err
+ }
+ bc.emitPostWriteEvents(block, receipts, logs, stateSyncLogs, status, emitHeadEvent)
+ return status, nil
+}
+
// writePipelinedBlockBatch assembles one atomic batch with the block, its
// receipts, bor state-sync logs (pre-Madhugiri only), preimages, and total
// difficulty — then flushes it. Returns the stateSyncLogs slice so the
// caller can emit them on the logs feed.
//
// NOTE: two writes here are NOT part of the atomic batch: the witness bytes
// go through bc.WriteWitness directly (before the batch flush), and the
// bytecode-sync marker is written straight to bc.db afterwards. Neither is
// atomic with the block data.
//
// The SRC witness replaces the execution-side witness because FlatDiff
// overlay accounts bypass the trie during speculative execution, so their
// MPT proof nodes are only captured during SRC's CommitWithUpdate.
func (bc *BlockChain) writePipelinedBlockBatch(block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB, witnessBytes []byte, externTd *big.Int) ([]*types.Log, error) {
	blockBatch := bc.db.NewBatch()
	rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
	rawdb.WriteBlock(blockBatch, block)
	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
	stateSyncLogs := bc.writeBorStateSyncLogs(blockBatch, block, receipts, logs, statedb)
	rawdb.WritePreimages(blockBatch, statedb.Preimages())
	if len(witnessBytes) > 0 {
		// Witness is written outside the batch via WriteWitness; only the
		// timing/size metrics are recorded here.
		witWriteStart := time.Now()
		bc.WriteWitness(block.Hash(), witnessBytes)
		witnessDbWriteTimer.UpdateSince(witWriteStart)
		witnessSizeBytesHistogram.Update(int64(len(witnessBytes)))
	}
	batchStart := time.Now()
	// A batch write failure is treated as fatal (Crit): continuing with a
	// half-persisted block would corrupt the chain database.
	if err := blockBatch.Write(); err != nil {
		log.Crit("Failed to write block into disk", "err", err)
	}
	blockBatchWriteTimer.UpdateSince(batchStart)
	rawdb.WriteBytecodeSyncLastBlock(bc.db, block.NumberU64())
	return stateSyncLogs, nil
}
+
+// writeBorStateSyncLogs emits a synthetic bor receipt + tx lookup entry for
+// state-sync logs (logs the node observed from Heimdall but no EVM tx
+// produced). Madhugiri replaces this with native receipt encoding and the
+// legacy path is skipped there. Returns the state-sync logs slice so the
+// caller can forward them on the logs feed.
+func (bc *BlockChain) writeBorStateSyncLogs(batch ethdb.Batch, block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB) []*types.Log {
+ blockLogs := statedb.Logs()
+ if len(blockLogs) == 0 {
+ return nil
+ }
+ if bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsMadhugiri(block.Number()) {
+ return nil
+ }
+ if len(blockLogs) <= len(logs) {
+ return nil
+ }
+ sort.SliceStable(blockLogs, func(i, j int) bool {
+ return blockLogs[i].Index < blockLogs[j].Index
+ })
+ stateSyncLogs := blockLogs[len(logs):]
+ types.DeriveFieldsForBorLogs(stateSyncLogs, block.Hash(), block.NumberU64(), uint(len(receipts)), uint(len(logs)))
+ var cumulativeGasUsed uint64
+ if len(receipts) > 0 {
+ cumulativeGasUsed = receipts[len(receipts)-1].CumulativeGasUsed
+ }
+ rawdb.WriteBorReceipt(batch, block.Hash(), block.NumberU64(), &types.ReceiptForStorage{
+ Status: types.ReceiptStatusSuccessful,
+ Logs: stateSyncLogs,
+ CumulativeGasUsed: cumulativeGasUsed,
+ })
+ rawdb.WriteBorTxLookupEntry(batch, block.Hash(), block.NumberU64())
+ return stateSyncLogs
+}
+
+// resolvePostWriteStatus decides CanonStatTy vs SideStatTy for a freshly
+// written block and performs a reorg when needed. Shared by the standard
+// and pipelined write paths — non-deterministic tie-breaking here would
+// cause consensus splits between nodes. The stateless flag relaxes
+// errInvalidNewChain during fast-forward reorgs for stateless sync.
+func (bc *BlockChain) resolvePostWriteStatus(block *types.Block, stateless bool) (WriteStatus, error) {
+ currentBlock := bc.CurrentBlock()
+ reorg, err := bc.forker.ReorgNeeded(currentBlock, block.Header())
+ if err != nil {
+ return NonStatTy, err
+ }
+ if !reorg {
+ return SideStatTy, nil
+ }
+ if block.ParentHash() != currentBlock.Hash() {
+ if err := bc.reorg(currentBlock, block.Header()); err != nil {
+ if !(stateless && err == errInvalidNewChain) {
+ return NonStatTy, err
+ }
+ }
+ }
+ return CanonStatTy, nil
+}
+
+// emitPostWriteEvents publishes the correct event set for the resolved
+// write status. For CanonStatTy: writeHeadBlock + ChainEvent + (optional)
+// ChainHeadEvent + any state-sync events. For SideStatTy: chainSideFeed +
+// chain2HeadFeed. Shared by the standard and pipelined write paths.
+func (bc *BlockChain) emitPostWriteEvents(block *types.Block, receipts []*types.Receipt, logs, stateSyncLogs []*types.Log, status WriteStatus, emitHeadEvent bool) {
+ if status != CanonStatTy {
+ bc.chainSideFeed.Send(ChainSideEvent{Header: block.Header()})
+ bc.chain2HeadFeed.Send(Chain2HeadEvent{
+ Type: Chain2HeadForkEvent,
+ NewChain: []*types.Header{block.Header()},
+ })
+ return
+ }
+ bc.writeHeadBlock(block)
+ bc.chainFeed.Send(ChainEvent{
+ Header: block.Header(),
+ Receipts: receipts,
+ Transactions: block.Transactions(),
+ })
+ if len(logs) > 0 {
+ bc.logsFeed.Send(logs)
+ }
+ if len(stateSyncLogs) > 0 {
+ bc.logsFeed.Send(stateSyncLogs)
+ }
+ if !emitHeadEvent {
+ return
+ }
+ bc.chainHeadFeed.Send(ChainHeadEvent{Header: block.Header()})
+ bc.stateSyncMu.RLock()
+ for _, data := range bc.GetStateSync() {
+ bc.stateSyncFeed.Send(StateSyncEvent{Data: data})
+ }
+ bc.stateSyncMu.RUnlock()
+}
+
+// --- Pipelined SRC methods ---
+
+// PostExecState returns a StateDB representing the post-execution state
+// of the given block header. Under pipelined SRC, if the FlatDiff for this block
+// is still cached (i.e. this is the chain head), it returns a non-blocking
+// overlay state via NewWithFlatBase. Otherwise it falls back to resolving the
+// actual state root via StateAt.
+//
+// This is used by the txpool and RPC layer to get correct state when the chain
+// head was produced via the pipeline (where the committed trie root may lag
+// behind the actual post-execution state).
+func (bc *BlockChain) PostExecState(header *types.Header) (*state.StateDB, error) {
+ // Fast path: if we have the FlatDiff for this block, use it as an overlay.
+ // Matching by block number (not hash) because the hash may not be final
+ // at the time SetLastFlatDiff is called (Root and seal signature are added later).
+ bc.lastFlatDiffMu.RLock()
+ flatDiff := bc.lastFlatDiff
+ flatDiffBlockNum := bc.lastFlatDiffBlockNum
+ flatDiffParentRoot := bc.lastFlatDiffParentRoot
+ bc.lastFlatDiffMu.RUnlock()
+
+ if flatDiff != nil && flatDiffBlockNum == header.Number.Uint64() {
+ // Open at the parent's committed root (which IS in the trie DB) and
+ // overlay the FlatDiff. We cannot use header.Root because it may not
+ // be committed yet (pipelined import SRC still running).
+ return state.NewWithFlatBase(flatDiffParentRoot, bc.statedb, flatDiff)
+ }
+
+ // Slow path: use the committed state root directly.
+ return bc.StateAt(header.Root)
+}
+
+// SpawnSRCGoroutine launches a background goroutine that computes the actual
+// state root for block by replaying flatDiff on top of parentRoot. When
+// makeWitness is true, the goroutine also completes (or, for legacy call
+// sites, produces) a stateless witness; when false, witness work, FlatDiff
+// read-surface preload, and witness encoding are all skipped — only deferred
+// state-root validation runs. The result is stored in pending.root;
+// pending.wg is decremented when finished.
+//
+// Witness ownership (when makeWitness=true) follows the LINEAR OWNERSHIP
+// INVARIANT documented at runSRCCompute. The import path passes execWitness =
+// the witness already populated by EVM execution (AddCode + AddBlockHash)
+// with allowOwnWitness=false; SRC then completes that single witness with
+// trie-proof nodes during ApplyFlatDiffForCommit + CommitWithUpdate. Call
+// sites with no execution witness in scope set execWitness=nil and
+// allowOwnWitness=true to explicitly permit SRC to create its own witness.
+// warmSnapshotInput is an optional quiesced prefetcher handoff; SRC builds the
+// immutable WarmSnapshot from it inside the goroutine so the import thread does
+// not pay the copy/hash/index cost.
+func (bc *BlockChain) SpawnSRCGoroutine(block *types.Block, parentRoot common.Hash, flatDiff *state.FlatDiff, makeWitness bool, execWitness *stateless.Witness, allowOwnWitness bool, warmSnapshotInput *state.WarmSnapshotInput) {
+ pending := &pendingSRCState{
+ blockHash: block.Hash(),
+ blockNumber: block.NumberU64(),
+ }
+ bc.pendingSRCMu.Lock()
+ bc.pendingSRC = pending
+ bc.pendingSRCMu.Unlock()
+
+ pending.wg.Add(1)
+ bc.wg.Add(1)
+ go bc.runSRCCompute(pending, block, parentRoot, flatDiff, makeWitness, execWitness, allowOwnWitness, warmSnapshotInput)
+}
+
// runSRCCompute is the SRC goroutine body. Opens a trie-only StateDB at the
// committed parent root, replays the FlatDiff, and commits to produce block
// N's state root. When makeWitness is true, also preloads the FlatDiff read
// surface so the witness covers proof-path nodes the speculative execution
// skipped, and encodes + caches the resulting witness. When false, preload
// and witness encoding are skipped — only deferred root validation runs. All
// observable side effects (pending.root, pending.err, pending.witness,
// witness cache) happen here before wg.Done().
//
// LINEAR OWNERSHIP INVARIANT for the execution witness W:
//
//  1. The main thread writes to W during ProcessBlock:
//     - AddCode via statedb.go (GetCode/GetCodeSize on contract calls)
//     - AddBlockHash via vm/instructions.go (BLOCKHASH opcode)
//     - AddState via statedb.go Finalise/IntermediateRoot reads
//  2. The trie prefetcher does not write to W — subfetcher.loop only
//     populates trie-local prevalueTracer state. The synchronous prefetcher
//     stop used by the import handoff has writers-exited semantics
//     (trie_prefetcher.go: <-sf.term gated on loop's `defer close(sf.term)`),
//     which provides the ordering guarantee should that ever change.
//  3. The import path stops the prefetcher synchronously in
//     persistPipelinedImport before this goroutine is spawned. After that
//     stop returns, every subfetcher goroutine has exited.
//  4. The main thread hands W to this goroutine via SpawnSRCGoroutine and
//     never touches W again — it moves on to the next block with a fresh
//     witness.
//  5. This goroutine writes to W during ApplyFlatDiffForCommit +
//     CommitWithUpdate, then encodes via encodeAndCachePendingWitness. The
//     cached bytes are immutable thereafter.
//
// The invariant requires:
//   - No AddState / AddCode / AddBlockHash call site reachable from a
//     prefetcher-spawned goroutine.
//   - No terminate(true) (async) call on the SRC handoff path.
//   - No reuse of W on the main thread after SpawnSRCGoroutine.
func (bc *BlockChain) runSRCCompute(pending *pendingSRCState, block *types.Block, parentRoot common.Hash, flatDiff *state.FlatDiff, makeWitness bool, execWitness *stateless.Witness, allowOwnWitness bool, warmSnapshotInput *state.WarmSnapshotInput) {
	defer bc.wg.Done()
	defer pending.wg.Done()
	// Defers run LIFO: this recover handler executes BEFORE pending.wg.Done
	// above, so a pending.err written here is already in place by the time
	// waiters are released by the WaitGroup.
	defer func() {
		if r := recover(); r != nil {
			log.Error("Pipelined SRC: panic in SRC goroutine", "block", block.NumberU64(), "err", r)
			pending.err = fmt.Errorf("SRC goroutine panicked: %v", r)
		}
	}()

	// Hard-fail when a caller asked for a witness but did not hand one in.
	// allowOwnWitness=true is the explicit opt-in for call sites that want
	// SRC to create its own witness. Without it, the caller's contract is
	// that the published witness is the same object EVM execution populated,
	// preserving execution-time entries such as BLOCKHASH headers. BorWitness
	// encoding intentionally excludes Codes (see core/stateless/encoding.go),
	// so bytecode entries collected on the in-memory witness are not part of
	// the canonical Bor witness wire format.
	if makeWitness && execWitness == nil && !allowOwnWitness {
		pending.err = fmt.Errorf(
			"pipelined SRC witness requested without execution witness: block=%d hash=%s allowOwnWitness=false",
			block.NumberU64(), block.Hash(),
		)
		return
	}

	var warmSnapshot *state.WarmSnapshot
	if warmSnapshotInput != nil {
		buildStart := time.Now()
		warmSnapshot = warmSnapshotInput.Build()
		pipelineImportWarmSnapshotBuild.UpdateSince(buildStart)
		// Drop the handoff maps before SRC starts the trie walk. The final
		// WarmSnapshot owns blob copies; keeping the input alive would retain a
		// second copy of the same warm surface until the goroutine exits.
		warmSnapshotInput = nil
	}
	tmpDB, witness, err := bc.openSRCStateDB(parentRoot, block, makeWitness, execWitness, warmSnapshot)
	if err != nil {
		pending.err = err
		return
	}
	// Replay the speculative execution's mutations onto the trie-backed db.
	tmpDB.ApplyFlatDiffForCommit(flatDiff)

	// Preload + read-surface histograms only fire when the witness is being
	// produced — preloadFlatDiffReads exists solely to populate the witness
	// with proof-path trie nodes, and the histograms describe its work.
	if makeWitness {
		// Measure the preload step's wall-time and the size of its read
		// surface. ReadStorage is iterated directly (not via ReadSet)
		// because it also contains read-only slots on mutated accounts —
		// this answers "how is storage-read load distributed", which is
		// what shapes any future parallelisation. Fires for both import
		// and miner SRC since runSRCCompute is shared.
		readAccounts := len(flatDiff.ReadSet)
		preloadSlots := 0
		for _, slots := range flatDiff.ReadStorage {
			preloadSlots += len(slots)
			pipelineSRCPreloadSlotsPerAccountHistogram.Update(int64(len(slots)))
		}
		pipelineSRCPreloadReadAccountsHistogram.Update(int64(readAccounts))
		pipelineSRCPreloadSlotsHistogram.Update(int64(preloadSlots))
		pipelineSRCPreloadDestructsHistogram.Update(int64(len(flatDiff.Destructs)))
		pipelineSRCPreloadNonexistentHistogram.Update(int64(len(flatDiff.NonExistentReads)))

		preloadStart := time.Now()
		preloadFlatDiffReads(tmpDB, flatDiff)
		pipelineSRCPreloadTimer.UpdateSince(preloadStart)
	}

	// Commit performs the MPT hashing — this is the expensive step whose
	// cost the pipeline moves off the import thread.
	deleteEmptyObjects := bc.chainConfig.IsEIP158(block.Number())
	commitStart := time.Now()
	root, stateUpdate, err := tmpDB.CommitWithUpdate(block.NumberU64(), deleteEmptyObjects, bc.chainConfig.IsCancun(block.Number()))
	stateCommitTimer.UpdateSince(commitStart)
	if err != nil {
		log.Error("Pipelined SRC: CommitWithUpdate failed", "block", block.NumberU64(), "err", err)
		pending.err = err
		return
	}
	emitSRCStateDBMetrics(tmpDB)
	if bc.stateSizer != nil {
		bc.stateSizer.Notify(stateUpdate)
	}
	if makeWitness {
		bc.encodeAndCachePendingWitness(pending, witness, block)
	}
	// Publish the computed root last; waiters read it after pending.wg.
	pending.root = root
}
+
+// openSRCStateDB opens a StateDB at parentRoot for the pipelined SRC goroutine.
+// Reader choice depends on makeWitness:
+//
+// - makeWitness=true: NewTrieOnly. Every read walks the MPT, which is what
+// lets the witness capture proof-path nodes for FlatDiff overlay accounts
+// whose trie nodes weren't touched during speculative execution. Flat
+// readers would short-circuit the trie and leave the witness incomplete.
+// - makeWitness=false: state.New (multi-reader). Pre-state reads performed
+// by ApplyFlatDiffForCommit (origin balance, origin storage, code lookup
+// via getOrNewStateObject) and SelfDestruct can hit a flat reader (pathdb
+// StateReader in path mode, snapshot in hash mode) instead of the MPT.
+// state.New falls back to the trie reader when no flat reader is
+// installed or StateReader errors, so correctness does not depend on the
+// flat reader being present. Root-consistency between readers at an
+// in-memory committed root is validated by the parity tests.
+//
+// Witness ownership when makeWitness=true:
+//
+// - execWitness != nil: caller hands in the witness already populated by
+// EVM execution (AddCode + AddBlockHash + execution-time AddState). SRC
+// reuses it by attaching it to tmpDB so subsequent AddState calls during
+// ApplyFlatDiffForCommit and CommitWithUpdate land in the same object.
+// - execWitness == nil: only legal for call sites that opted in via
+// allowOwnWitness=true at the SpawnSRCGoroutine call site. SRC creates
+// its own witness, which contains only entries collected by the SRC path.
+// Callers that require execution-time witness entries must pass
+// execWitness != nil (enforced at the top of runSRCCompute).
+//
+// BorWitness serialises Headers and State only. Codes collected on the
+// in-memory witness are not part of the canonical Bor witness wire format.
+//
+// CommitWithUpdate walks the MPT for hashing regardless of reader choice, so
+// the state-root computation cost is unaffected; only the pre-state reads
+// avoid cold trie traversals.
+func (bc *BlockChain) openSRCStateDB(parentRoot common.Hash, block *types.Block, makeWitness bool, execWitness *stateless.Witness, warmSnapshot *state.WarmSnapshot) (*state.StateDB, *stateless.Witness, error) {
+ if !makeWitness {
+ // Witness-off path uses the multi-reader (flat reader where
+ // available) and does not need the warm-snapshot handoff: flat
+ // readers already short-circuit pathdb diff-layer walks for hot
+ // state.
+ tmpDB, err := state.New(parentRoot, bc.statedb)
+ if err != nil {
+ log.Error("Pipelined SRC: failed to open tmpDB", "parentRoot", parentRoot, "err", err)
+ return nil, nil, err
+ }
+ return tmpDB, nil, nil
+ }
+ // Witness-on path uses NewTrieOnly so every read walks the MPT and the
+ // witness captures proof-path nodes. When a warm snapshot is supplied,
+ // install a snapshot-aware reader: trie reads consult the snapshot
+ // (hash-verified) before falling through to pathdb. NewTrieOnly
+ // semantics, prevalueTracer recording, and witness completeness are
+ // unaffected — the snapshot only short-circuits the underlying
+ // NodeReader fetch.
+ tmpDB, err := state.NewTrieOnlyWithSnapshot(parentRoot, bc.statedb, warmSnapshot)
+ if err != nil {
+ log.Error("Pipelined SRC: failed to open tmpDB", "parentRoot", parentRoot, "err", err)
+ return nil, nil, err
+ }
+ witness := execWitness
+ if witness == nil {
+ // Miner / legacy fallback only; runSRCCompute already rejected this
+ // branch for the import path via the allowOwnWitness check.
+ newWitness, witnessErr := stateless.NewWitness(block.Header(), bc)
+ if witnessErr != nil {
+ log.Warn("Pipelined SRC: failed to create witness", "block", block.NumberU64(), "err", witnessErr)
+ return tmpDB, nil, nil
+ }
+ witness = newWitness
+ }
+ tmpDB.SetWitness(witness)
+ return tmpDB, witness, nil
+}
+
+// preloadFlatDiffReads replays the FlatDiff's read surface against tmpDB so
+// that every address/slot the speculative execution touched is also read
+// through the trie, pulling the proof-path nodes into the witness even when
+// execution used the flat overlay. The surface covered is:
+//   - ReadSet accounts (plus their ReadStorage slots)
+//   - read-only storage of mutated accounts (ReadStorage)
+//   - pure-destruct accounts (destructed without resurrection)
+//   - non-existent address reads (proof-of-absence)
+func preloadFlatDiffReads(tmpDB *state.StateDB, flatDiff *state.FlatDiff) {
+	for _, account := range flatDiff.ReadSet {
+		tmpDB.GetBalance(account)
+		for _, key := range flatDiff.ReadStorage[account] {
+			tmpDB.GetState(account, key)
+		}
+	}
+	for account := range flatDiff.Accounts {
+		for _, key := range flatDiff.ReadStorage[account] {
+			tmpDB.GetState(account, key)
+		}
+	}
+	for account := range flatDiff.Destructs {
+		_, resurrected := flatDiff.Accounts[account]
+		if !resurrected {
+			tmpDB.GetBalance(account)
+		}
+	}
+	for _, account := range flatDiff.NonExistentReads {
+		tmpDB.GetBalance(account)
+	}
+}
+
+// emitSRCStateDBMetrics publishes the hash/update/commit timers recorded by
+// the trie-only statedb. The metric names match the non-pipelined import path
+// so dashboards keep working whether pipelining is on or off.
+func emitSRCStateDBMetrics(db *state.StateDB) {
+	accountHashTimer.Update(db.AccountHashes)
+	accountUpdateTimer.Update(db.AccountUpdates)
+	accountCommitTimer.Update(db.AccountCommits)
+	storageHashTimer.Update(db.StorageHashes)
+	storageUpdateTimer.Update(db.StorageUpdates)
+	storageCommitTimer.Update(db.StorageCommits)
+	snapshotCommitTimer.Update(db.SnapshotCommits)
+	triedbCommitTimer.Update(db.TrieDBCommits)
+	witnessCollectionTimer.Update(db.WitnessCollection)
+}
+
+// encodeAndCachePendingWitness RLP-encodes the witness (complete only after
+// CommitWithUpdate has run) and publishes the bytes on the pending state and
+// in the witness cache. For imported blocks the hash is already final; for
+// mined blocks the real hash isn't known until Seal() finalises Extra, so the
+// caller retrieves the bytes via WaitForSRC and resultLoop writes them to DB
+// under the sealed hash.
+func (bc *BlockChain) encodeAndCachePendingWitness(pending *pendingSRCState, witness *stateless.Witness, block *types.Block) {
+	if witness == nil {
+		return
+	}
+	encodeStart := time.Now()
+	var encoded bytes.Buffer
+	if encodeErr := witness.EncodeRLP(&encoded); encodeErr != nil {
+		log.Error("Pipelined SRC: failed to encode witness", "block", block.NumberU64(), "err", encodeErr)
+		return
+	}
+	witnessEncodeTimer.UpdateSince(encodeStart)
+	pending.witness = encoded.Bytes()
+	bc.witnessCache.Add(block.Hash(), pending.witness)
+}
+
+// WaitForSRC blocks until the pending SRC goroutine completes, then returns
+// the computed state root together with the RLP-encoded witness bytes. The
+// witness may be nil when witness creation failed or was not applicable. An
+// error is returned when the goroutine failed or no SRC is pending.
+func (bc *BlockChain) WaitForSRC() (common.Hash, []byte, error) {
+	bc.pendingSRCMu.Lock()
+	pending := bc.pendingSRC
+	bc.pendingSRCMu.Unlock()
+	if pending == nil {
+		return common.Hash{}, nil, errors.New("no pending SRC goroutine")
+	}
+	// Fields on pending are safe to read once the goroutine has finished.
+	pending.wg.Wait()
+	if waitErr := pending.err; waitErr != nil {
+		return common.Hash{}, nil, waitErr
+	}
+	return pending.root, pending.witness, nil
+}
+
+// flushPendingImportSRC waits for the pending import SRC's auto-collection
+// goroutine to finish and clears the pending state. The auto-collection
+// goroutine (runImportAutoCollection) owns the real work — root verification,
+// witness publication, and trie GC — so this function only synchronises with
+// it and surfaces its error. Called on shutdown and when an incoming block
+// doesn't follow the pending one (reorg/gap).
+func (bc *BlockChain) flushPendingImportSRC() error {
+	bc.pendingImportSRCMu.Lock()
+	pending := bc.pendingImportSRC
+	bc.pendingImportSRC = nil
+	bc.pendingImportSRCMu.Unlock()
+
+	if pending == nil {
+		return nil
+	}
+
+	// Counted as a pipeline fallback: reaching here means the next block did
+	// not continue the pipeline (shutdown, reorg, or gap).
+	pipelineImportFallbackCounter.Inc(1)
+
+	// Wait for auto-collection to finish (it handles verify, witness, trie GC)
+	<-pending.collectedCh
+	return pending.collectedErr
+}
+
+// collectPendingImportSRC waits for the pending import SRC's auto-collection
+// goroutine to finish and returns the committed root. The actual work (verify
+// root, write witness, trie GC) is done by the auto-collection goroutine
+// spawned alongside the SRC. Unlike flushPendingImportSRC, this does NOT
+// clear pendingImportSRC — the caller replaces it with the next block's state.
+func (bc *BlockChain) collectPendingImportSRC() (common.Hash, error) {
+	bc.pendingImportSRCMu.Lock()
+	pending := bc.pendingImportSRC
+	bc.pendingImportSRCMu.Unlock()
+
+	if pending == nil {
+		return common.Hash{}, errors.New("no pending import SRC")
+	}
+
+	// Wait for auto-collection goroutine to finish
+	<-pending.collectedCh
+
+	if pending.collectedErr != nil {
+		return common.Hash{}, pending.collectedErr
+	}
+	return pending.collectedRoot, nil
+}
+
+// handleImportTrieGC performs trie garbage collection after a pipelined import
+// SRC has committed the state. Replicates writeBlockWithState's GC logic:
+// path scheme needs no explicit GC (pathdb manages its own persistence),
+// archive mode commits every root, and hash-scheme full nodes keep the most
+// recent tries referenced in memory while flushing/dereferencing older ones.
+func (bc *BlockChain) handleImportTrieGC(root common.Hash, blockNum uint64, procTime time.Duration) {
+	bc.gcproc += procTime
+	if bc.triedb.Scheme() == rawdb.PathScheme {
+		// Path scheme persists internally; nothing to GC here.
+		return
+	}
+	if bc.cfg.ArchiveMode {
+		// Archive nodes persist every state root. The commit error is
+		// deliberately dropped (best-effort) — NOTE(review): confirm this
+		// matches writeBlockWithState's handling.
+		_ = bc.triedb.Commit(root, false)
+		return
+	}
+	// Keep the new root referenced for reorg safety and queue it for later
+	// dereference. The negative block number makes older roots pop first
+	// (see dereferenceUpTo, which stops at the first root above `chosen`).
+	bc.triedb.Reference(root, common.Hash{})
+	bc.triegc.Push(root, -int64(blockNum))
+
+	triesInMemory := bc.cfg.GetTriesInMemory()
+	if blockNum <= triesInMemory {
+		// Not enough blocks yet to start evicting anything.
+		return
+	}
+	bc.capTrieIfDirty()
+	chosen := blockNum - triesInMemory
+	bc.maybeFlushChosen(chosen, triesInMemory)
+	bc.dereferenceUpTo(chosen)
+}
+
+// capTrieIfDirty flushes dirty trie nodes to disk when either the dirty-node
+// memory or the preimage cache exceeds its limit. IdealBatchSize is
+// subtracted from the cap target so the flush leaves headroom for further
+// inserts before the next check.
+func (bc *BlockChain) capTrieIfDirty() {
+	_, nodes, imgs := bc.triedb.Size()
+	// TrieDirtyLimit is configured in MiB; preimages get a fixed 4 MiB cap.
+	limit := common.StorageSize(bc.cfg.TrieDirtyLimit) * 1024 * 1024
+	if nodes > limit || imgs > 4*1024*1024 {
+		// NOTE(review): assumes limit > IdealBatchSize (true for any sane
+		// TrieDirtyLimit). The Cap error is deliberately dropped —
+		// best-effort flush, same as the upstream GC path.
+		_ = bc.triedb.Cap(limit - ethdb.IdealBatchSize)
+	}
+}
+
+// maybeFlushChosen commits the state at block `chosen` once the accumulated
+// block-processing time (bc.gcproc) has crossed the configured flush
+// interval. Skips the commit when the chosen header is missing (reorg in
+// progress) and logs when commits are being driven by the flush interval
+// rather than the normal tries-in-memory cadence.
+func (bc *BlockChain) maybeFlushChosen(chosen, triesInMemory uint64) {
+	flushInterval := time.Duration(bc.flushInterval.Load())
+	if bc.gcproc <= flushInterval {
+		// Not enough accumulated processing time yet; keep state in memory.
+		return
+	}
+	header := bc.GetHeaderByNumber(chosen)
+	if header == nil {
+		// A missing header means a reorg is underway; retry on a later
+		// block rather than committing a root we can't resolve.
+		log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
+		return
+	}
+	// Surface when we're committing well before a full triesInMemory window
+	// has passed since the last write, i.e. time pressure (not memory) is
+	// forcing commits.
+	if chosen < bc.lastWrite+triesInMemory && bc.gcproc >= 2*flushInterval {
+		log.Info("State in memory for too long, committing",
+			"time", bc.gcproc, "allowance", flushInterval,
+			"optimum", float64(chosen-bc.lastWrite)/float64(triesInMemory))
+	}
+	// Commit error deliberately dropped: best-effort flush, mirroring the
+	// non-pipelined GC path.
+	_ = bc.triedb.Commit(header.Root, true)
+	bc.lastWrite = chosen
+	bc.gcproc = 0
+}
+
+// dereferenceUpTo releases the GC references held for every queued trie root
+// at or below `chosen`, freeing the memory kept for reorg safety. Roots were
+// queued with negative block numbers, so older roots pop first; the first
+// root newer than `chosen` is pushed back and iteration stops.
+func (bc *BlockChain) dereferenceUpTo(chosen uint64) {
+	for {
+		if bc.triegc.Empty() {
+			return
+		}
+		root, priority := bc.triegc.Pop()
+		if uint64(-priority) > chosen {
+			// Still inside the in-memory window — restore it and stop.
+			bc.triegc.Push(root, priority)
+			return
+		}
+		bc.triedb.Dereference(root)
+	}
+}
+
+// pipelineReaderRoot picks the trie root that state readers should open
+// during pipelined import. The parent's own Root may not be committed yet
+// (its SRC goroutine can still be running), so under pipelining the
+// last-committed root from PipelineImportOpts is used instead. Callers pair
+// this with applyFlatDiffOverlayToAll to observe post-execution state.
+func pipelineReaderRoot(parent *types.Header, pipeOpts *PipelineImportOpts) common.Hash {
+	if pipeOpts == nil {
+		return parent.Root
+	}
+	return pipeOpts.CommittedParentRoot
+}
+
+// applyFlatDiffOverlayToAll attaches the pipelined FlatDiff overlay to each
+// supplied statedb, letting reads observe the previous block's post-execution
+// values before its SRC goroutine has committed the trie. Does nothing when
+// pipelining is off or no overlay is present.
+func applyFlatDiffOverlayToAll(pipeOpts *PipelineImportOpts, dbs ...*state.StateDB) {
+	if pipeOpts == nil {
+		return
+	}
+	diff := pipeOpts.FlatDiff
+	if diff == nil {
+		return
+	}
+	for _, statedb := range dbs {
+		statedb.SetFlatDiffRef(diff)
+	}
+}
+
+// validateStateForPipeline chooses the state validator for the import mode:
+// pipelined imports run the cheap variant (gas + bloom + receipt root only;
+// the full root match happens later in the SRC goroutine), everything else
+// runs the full validator. Centralising the dispatch keeps ProcessBlock's
+// parallel/serial branches symmetric.
+func validateStateForPipeline(validator Validator, block *types.Block, statedb *state.StateDB, res *ProcessResult, pipeOpts *PipelineImportOpts) error {
+	if pipeOpts == nil {
+		return validator.ValidateState(block, statedb, res, false)
+	}
+	return validator.ValidateStateCheap(block, statedb, res)
+}
+
+// pipelinedImportPersistTimings captures the synchronous post-execution phases
+// that are included in the "Imported new chain segment" elapsed time but are
+// not part of ProcessBlock itself. Filled in by persistPipelinedImport and
+// reported by logSlowPipelinedImport.
+type pipelinedImportPersistTimings struct {
+	// Phase durations, in the order the phases run in persistPipelinedImport.
+	prefetchStop time.Duration
+	prefetchDrain time.Duration
+	prefetchReport time.Duration
+	warmCollect time.Duration
+	commitSnapshot time.Duration
+	collect time.Duration
+	stateSyncFeed time.Duration
+	reorgCheck time.Duration
+	setFlatDiff time.Duration
+	writeHead time.Duration
+	spawnSRC time.Duration
+	total time.Duration
+
+	// Warm-snapshot handoff sizes (populated only when the warm-snapshot
+	// path is enabled; zero otherwise).
+	warmSnapshotNodes int
+	warmSnapshotBytes int
+	warmAccountNodes int
+	warmStorageNodes int
+	warmAccountBytes int
+	warmStorageBytes int
+	warmFetchers int
+	prefetchFetchers int
+}
+
+// persistPipelinedImport handles the post-ProcessBlock work for a pipelined
+// imported block: stop the prefetcher (optionally capturing a warm trie-node
+// snapshot for SRC), extract the FlatDiff, collect any still-pending SRC from
+// the previous block, publish the state-sync feed, write block metadata
+// immediately (so the sync protocol sees it), spawn a new SRC goroutine, and
+// start auto-collection. adjustBack=true signals the caller to decrement
+// it.index when returning the error (because the failure belongs to the
+// previously pending block, not the current one).
+func (bc *BlockChain) persistPipelinedImport(block *types.Block, parent *types.Header, statedb *state.StateDB, receipts []*types.Receipt, logs []*types.Log, start time.Time, cheapExec, validation time.Duration, makeWitness bool) (adjustBack bool, err error) {
+	persistStart := time.Now()
+	timings := pipelinedImportPersistTimings{}
+	defer func() {
+		timings.total = time.Since(persistStart)
+		pipelineImportPostExecTimer.Update(timings.total)
+		bc.logSlowPipelinedImport(block, time.Since(start), cheapExec, validation, timings, statedb)
+	}()
+	// The pipelined path doesn't commit this StateDB; the SRC goroutine opens
+	// its own NewTrieOnly tmpDB. Stop the prefetcher *before* CommitSnapshot
+	// so Finalise can't queue more prefetch tasks that we'd then synchronously
+	// wait to drain. Without this, every block in a multi-block batch except
+	// the last leaks its prefetcher (the deferred StopPrefetcher in
+	// insertChainWithWitnesses only fires on the final activeState).
+	//
+	// When PipelinedSRCWarmSnapshot is enabled AND this block produces a
+	// witness, capture the trie nodes the prefetcher had loaded into a
+	// quiesced handoff bundle so SRC can avoid re-reading them from cold
+	// pebble. The bundle is collected AFTER the prefetcher goroutines have
+	// exited (writers-exited drain), so the source tries are quiescent and
+	// safe to read. Queued speculative prefetch tasks may be discarded by
+	// StopAndCollectWarmSnapshot; missing warm nodes are just snapshot misses
+	// and SRC falls through to pathdb. The expensive final WarmSnapshot
+	// copy/hash/index build happens in the SRC goroutine, not on the import
+	// thread. A nil bundle (flag off, witness-off path, no prefetcher, or no
+	// warm nodes) reduces to the pre-snapshot behaviour: SRC's NodeReader is
+	// the plain pathdb chain. The makeWitness gate matters because the
+	// witness-off SRC path uses the multi-reader (with flat reader) which
+	// doesn't consult the snapshot (see openSRCStateDB), so capturing one
+	// would burn collect work for no benefit.
+	var warmSnapshotInput *state.WarmSnapshotInput
+	var snapshotStats state.PrefetcherSnapshotStats
+	warmSnapshotEnabled := makeWitness && bc.cfg.PipelinedSRCWarmSnapshot
+	phaseStart := time.Now()
+	if warmSnapshotEnabled {
+		warmSnapshotInput, snapshotStats = statedb.StopAndCollectWarmSnapshot()
+	} else {
+		statedb.StopPrefetcher()
+	}
+	timings.prefetchStop = time.Since(phaseStart)
+	timings.prefetchDrain = snapshotStats.Drain
+	timings.warmCollect = snapshotStats.Collect
+	timings.prefetchReport = snapshotStats.Report
+	timings.warmFetchers = snapshotStats.LoadedFetchers
+	timings.prefetchFetchers = snapshotStats.Fetchers
+	timings.warmAccountNodes = snapshotStats.AccountNodes
+	timings.warmStorageNodes = snapshotStats.StorageNodes
+	timings.warmAccountBytes = snapshotStats.AccountBytes
+	timings.warmStorageBytes = snapshotStats.StorageBytes
+	timings.warmSnapshotNodes = snapshotStats.AccountNodes + snapshotStats.StorageNodes
+	timings.warmSnapshotBytes = snapshotStats.AccountBytes + snapshotStats.StorageBytes
+	pipelineImportPrefetchStopTimer.Update(timings.prefetchStop)
+	if warmSnapshotEnabled {
+		// All warm-snapshot metrics share one gate (previously split across
+		// multiple identically-conditioned blocks).
+		pipelineImportPrefetchDrainTimer.Update(snapshotStats.Drain)
+		pipelineImportWarmSnapshotCollect.Update(snapshotStats.Collect)
+		pipelineImportPrefetchReportTimer.Update(snapshotStats.Report)
+		pipelineImportPrefetchSubfetchers.Update(int64(snapshotStats.Fetchers))
+		pipelineImportWarmSnapshotFetchers.Update(int64(snapshotStats.LoadedFetchers))
+		pipelineImportWarmSnapshotAccountNodes.Update(int64(snapshotStats.AccountNodes))
+		pipelineImportWarmSnapshotStorageNodes.Update(int64(snapshotStats.StorageNodes))
+		pipelineImportWarmSnapshotAccountBytes.Update(int64(snapshotStats.AccountBytes))
+		pipelineImportWarmSnapshotStorageBytes.Update(int64(snapshotStats.StorageBytes))
+		pipelineImportWarmSnapshotNodes.Update(int64(timings.warmSnapshotNodes))
+		pipelineImportWarmSnapshotBytes.Update(int64(timings.warmSnapshotBytes))
+	}
+	// Capture the execution witness so SRC can complete it. After the
+	// prefetcher stop above, every subfetcher goroutine has exited (sync wait
+	// via <-sf.term). The trie prefetcher does not write to the witness —
+	// subfetcher.loop only populates trie-local prevalueTracer state — and the
+	// stop ordering is the structural guarantee that must be preserved if that
+	// ever changes. See LINEAR OWNERSHIP INVARIANT at runSRCCompute.
+	var execWitness *stateless.Witness
+	if makeWitness {
+		execWitness = statedb.Witness()
+	}
+	phaseStart = time.Now()
+	flatDiff := statedb.CommitSnapshot(bc.chainConfig.IsEIP158(block.Number()))
+	timings.commitSnapshot = time.Since(phaseStart)
+	pipelineImportCommitSnapshotTimer.Update(timings.commitSnapshot)
+
+	committedRoot, collectElapsed, err := bc.collectPrevImportSRCIfAny(block, parent)
+	timings.collect = collectElapsed
+	if err != nil {
+		// Failure belongs to the previously pending block: adjustBack=true.
+		return true, err
+	}
+	phaseStart = time.Now()
+	bc.emitStateSyncFeed()
+	timings.stateSyncFeed = time.Since(phaseStart)
+	pipelineImportStateSyncFeedTimer.Update(timings.stateSyncFeed)
+
+	// Verify the block against the whitelisted milestone/checkpoint. Mirrors
+	// the non-pipelined path's per-block check — guards the race where Heimdall
+	// whitelists a milestone AFTER the upfront check at the start of insertChain
+	// but BEFORE this block is written. The block itself is passed as the
+	// current head so the validation treats it as a `past` chain.
+	phaseStart = time.Now()
+	isValid, err := bc.forker.ValidateReorg(block.Header(), []*types.Header{block.Header()})
+	timings.reorgCheck = time.Since(phaseStart)
+	pipelineImportReorgCheckTimer.Update(timings.reorgCheck)
+	if err != nil {
+		return false, err
+	}
+	if !isValid {
+		return false, whitelist.ErrMismatch
+	}
+
+	// Store FlatDiff BEFORE writing metadata. writeBlockAndSetHeadPipelined
+	// emits ChainEvent which triggers subscribers that read state; FlatDiff
+	// must be available so PostExecState works for those reads.
+	phaseStart = time.Now()
+	bc.SetLastFlatDiff(flatDiff, block.NumberU64(), committedRoot, block.Root())
+	timings.setFlatDiff = time.Since(phaseStart)
+	pipelineImportSetFlatDiffTimer.Update(timings.setFlatDiff)
+	// State commit is deferred to the SRC goroutine. emitHeadEvent=false
+	// because the deferred ChainHeadEvent at end of insertChain handles it.
+	// Timing is recorded once, on both the error and success paths.
+	phaseStart = time.Now()
+	_, writeErr := bc.writeBlockAndSetHeadPipelined(block, receipts, logs, statedb, false, nil)
+	timings.writeHead = time.Since(phaseStart)
+	pipelineImportWriteHeadTimer.Update(timings.writeHead)
+	if writeErr != nil {
+		return false, writeErr
+	}
+
+	tmpBlock := types.NewBlockWithHeader(block.Header()).WithBody(*block.Body())
+	// Import passes execWitness from execution and requires SRC to publish
+	// that same witness object. runSRCCompute hard-fails on a nil witness
+	// when allowOwnWitness=false. warmSnapshotInput may be nil (flag off or
+	// no warm nodes); SRC tolerates that and falls back to the plain pathdb
+	// reader chain.
+	phaseStart = time.Now()
+	bc.SpawnSRCGoroutine(tmpBlock, committedRoot, flatDiff, makeWitness, execWitness, false, warmSnapshotInput)
+	timings.spawnSRC = time.Since(phaseStart)
+	pipelineImportSpawnSRCTimer.Update(timings.spawnSRC)
+	newPending := &pendingImportSRCState{
+		block:         block,
+		flatDiff:      flatDiff,
+		committedRoot: committedRoot,
+		procTime:      time.Since(start),
+		blockStart:    start,
+		makeWitness:   makeWitness,
+		collectedCh:   make(chan struct{}),
+	}
+	bc.pendingImportSRCMu.Lock()
+	bc.pendingImportSRC = newPending
+	bc.pendingImportSRCMu.Unlock()
+	bc.wg.Add(1)
+	go bc.runImportAutoCollection(newPending)
+	if bc.cfg.PipelinedImportSRCLogs {
+		log.Info("Pipelined import: spawned SRC",
+			"block", block.NumberU64(), "committedRoot", committedRoot,
+			"txs", len(block.Transactions()))
+	}
+	return false, nil
+}
+
+// logSlowPipelinedImport emits a warning with a full phase breakdown when any
+// pipelined-import threshold is exceeded: total block time, post-exec time,
+// previous-SRC collect wait, or prefetcher-stop time. The phase durations
+// come from the timings struct filled by persistPipelinedImport; the trailing
+// fields are the statedb's own read/update/hash counters.
+func (bc *BlockChain) logSlowPipelinedImport(block *types.Block, total, cheapExec, validation time.Duration, timings pipelinedImportPersistTimings, statedb *state.StateDB) {
+	// Stay quiet unless at least one threshold trips.
+	if total < slowImportBlockThreshold &&
+		timings.total < slowImportPostExecThreshold &&
+		timings.collect < slowImportCollectThreshold &&
+		timings.prefetchStop < slowImportSnapshotThreshold {
+		return
+	}
+	log.Warn("Slow pipelined import phase",
+		"block", block.NumberU64(),
+		"txs", len(block.Transactions()),
+		"mgas", float64(block.GasUsed())/1_000_000,
+		"total", common.PrettyDuration(total),
+		"cheapExec", common.PrettyDuration(cheapExec),
+		"validation", common.PrettyDuration(validation),
+		"postExec", common.PrettyDuration(timings.total),
+		"prefetchStop", common.PrettyDuration(timings.prefetchStop),
+		"prefetchDrain", common.PrettyDuration(timings.prefetchDrain),
+		"warmCollect", common.PrettyDuration(timings.warmCollect),
+		"prefetchReport", common.PrettyDuration(timings.prefetchReport),
+		"prefetchFetchers", timings.prefetchFetchers,
+		"warmFetchers", timings.warmFetchers,
+		"warmNodes", timings.warmSnapshotNodes,
+		"warmBytes", common.StorageSize(timings.warmSnapshotBytes),
+		"warmAccountNodes", timings.warmAccountNodes,
+		"warmStorageNodes", timings.warmStorageNodes,
+		"warmAccountBytes", common.StorageSize(timings.warmAccountBytes),
+		"warmStorageBytes", common.StorageSize(timings.warmStorageBytes),
+		"commitSnapshot", common.PrettyDuration(timings.commitSnapshot),
+		"collect", common.PrettyDuration(timings.collect),
+		"stateSyncFeed", common.PrettyDuration(timings.stateSyncFeed),
+		"reorgCheck", common.PrettyDuration(timings.reorgCheck),
+		"setFlatDiff", common.PrettyDuration(timings.setFlatDiff),
+		"writeHead", common.PrettyDuration(timings.writeHead),
+		"spawnSRC", common.PrettyDuration(timings.spawnSRC),
+		"accountReads", common.PrettyDuration(statedb.AccountReads),
+		"storageReads", common.PrettyDuration(statedb.StorageReads),
+		"snapshotAccountReads", common.PrettyDuration(statedb.SnapshotAccountReads),
+		"snapshotStorageReads", common.PrettyDuration(statedb.SnapshotStorageReads),
+		"accountUpdates", common.PrettyDuration(statedb.AccountUpdates),
+		"storageUpdates", common.PrettyDuration(statedb.StorageUpdates),
+		"accountHashes", common.PrettyDuration(statedb.AccountHashes),
+		"storageHashes", common.PrettyDuration(statedb.StorageHashes),
+		"witnessCollection", common.PrettyDuration(statedb.WitnessCollection))
+}
+
+// logSlowNormalImport emits a warning with a phase breakdown for the
+// non-pipelined import path when the total block time or the write phase
+// exceeds its threshold. Mirrors logSlowPipelinedImport for the serial path.
+func (bc *BlockChain) logSlowNormalImport(block *types.Block, process, validation, reorgCheck, write, total time.Duration, statedb *state.StateDB) {
+	// Stay quiet unless a threshold trips.
+	if total < slowImportBlockThreshold && write < slowImportPostExecThreshold {
+		return
+	}
+	log.Warn("Slow normal import phase",
+		"block", block.NumberU64(),
+		"txs", len(block.Transactions()),
+		"mgas", float64(block.GasUsed())/1_000_000,
+		"total", common.PrettyDuration(total),
+		"process", common.PrettyDuration(process),
+		"validation", common.PrettyDuration(validation),
+		"reorgCheck", common.PrettyDuration(reorgCheck),
+		"write", common.PrettyDuration(write),
+		"accountReads", common.PrettyDuration(statedb.AccountReads),
+		"storageReads", common.PrettyDuration(statedb.StorageReads),
+		"accountUpdates", common.PrettyDuration(statedb.AccountUpdates),
+		"storageUpdates", common.PrettyDuration(statedb.StorageUpdates),
+		"accountHashes", common.PrettyDuration(statedb.AccountHashes),
+		"storageHashes", common.PrettyDuration(statedb.StorageHashes),
+		"accountCommits", common.PrettyDuration(statedb.AccountCommits),
+		"storageCommits", common.PrettyDuration(statedb.StorageCommits),
+		"snapshotCommits", common.PrettyDuration(statedb.SnapshotCommits),
+		"trieDBCommits", common.PrettyDuration(statedb.TrieDBCommits),
+		"witnessCollection", common.PrettyDuration(statedb.WitnessCollection))
+}
+
+// collectPrevImportSRCIfAny waits on the auto-collection channel of the
+// previous pending SRC (if any) and returns its committed root plus the time
+// spent waiting. With nothing pending (first block of this insertChain call)
+// parent.Root is already the committed root. An error means the failure
+// belongs to the previously pending block — the caller reports it.index - 1.
+func (bc *BlockChain) collectPrevImportSRCIfAny(block *types.Block, parent *types.Header) (common.Hash, time.Duration, error) {
+	bc.pendingImportSRCMu.Lock()
+	pending := bc.pendingImportSRC
+	bc.pendingImportSRCMu.Unlock()
+	if pending == nil {
+		return parent.Root, 0, nil
+	}
+	if bc.cfg.PipelinedImportSRCLogs {
+		log.Info("Pipelined import: collecting previous SRC",
+			"block", block.NumberU64(), "pendingBlock", pending.block.NumberU64())
+	}
+	started := time.Now()
+	committedRoot, err := bc.collectPendingImportSRC()
+	waited := time.Since(started)
+	pipelineImportCollectTimer.Update(waited)
+	return committedRoot, waited, err
+}
+
+// emitStateSyncFeed publishes any queued state-sync events while holding the
+// stateSyncMu read lock (the lock is held across every Send, so subscribers
+// run under it). Kept separate from writeBlockAndSetHeadPipelined so the
+// import path controls exactly when subscribers see the events relative to
+// FlatDiff publication.
+// NOTE(review): persistPipelinedImport calls this BEFORE SetLastFlatDiff —
+// confirm subscribers of this feed don't depend on the PostExecState overlay.
+func (bc *BlockChain) emitStateSyncFeed() {
+	bc.stateSyncMu.RLock()
+	defer bc.stateSyncMu.RUnlock()
+	for _, data := range bc.GetStateSync() {
+		bc.stateSyncFeed.Send(StateSyncEvent{Data: data})
+	}
+}
+
+// buildPipelineImportOpts derives the PipelineImportOpts the next
+// ProcessBlock should use from the current pending SRC state. When the
+// pending block is this block's parent, its FlatDiff can be overlaid (true
+// cross-call overlap); otherwise the stale pending state is flushed
+// (reorg/gap) and the block enters the pipeline fresh against parent.Root.
+func (bc *BlockChain) buildPipelineImportOpts(block *types.Block, parent *types.Header) *PipelineImportOpts {
+	if bc.cfg.PipelinedImportSRCLogs {
+		log.Info("Pipelined import: started processing block",
+			"block", block.NumberU64(), "txs", len(block.Transactions()))
+	}
+	bc.pendingImportSRCMu.Lock()
+	pending := bc.pendingImportSRC
+	bc.pendingImportSRCMu.Unlock()
+	switch {
+	case pending == nil:
+		// Nothing pending — enter the pipeline fresh below.
+	case block.ParentHash() == pending.block.Hash():
+		// Pipeline hit: overlay the parent's FlatDiff on its committed root.
+		pipelineImportHitCounter.Inc(1)
+		return &PipelineImportOpts{
+			CommittedParentRoot: pending.committedRoot,
+			FlatDiff:            pending.flatDiff,
+		}
+	default:
+		// Pending block is not this block's parent (reorg/gap): flush it.
+		pipelineImportMissCounter.Inc(1)
+		if err := bc.flushPendingImportSRC(); err != nil {
+			log.Error("Pipelined import: flush failed on mismatch", "err", err)
+		}
+	}
+	// First block in pipeline — still enter it so the SRC goroutine persists
+	// for the next insertChain call, enabling cross-call overlap.
+	return &PipelineImportOpts{CommittedParentRoot: parent.Root}
+}
+
+// runImportAutoCollection waits for a pending import SRC to finish, verifies
+// the computed state root, writes the witness and emits WitnessReadyEvent,
+// then does trie GC. Any failure is captured on p so flushPendingImportSRC/
+// collectPendingImportSRC can surface it synchronously via p.collectedCh.
+func (bc *BlockChain) runImportAutoCollection(p *pendingImportSRCState) {
+	defer bc.wg.Done()
+	autoCollectStart := time.Now()
+	// Defer order is LIFO: this runs before bc.wg.Done above, matching the
+	// original behaviour where close(p.collectedCh) happens before wg.Done.
+	// The total timer wraps the full goroutine wall time so the main path's
+	// collect-wait can be reconciled against (src + verify + publish + gc).
+	defer func() {
+		pipelineImportAutoCollectTotalTimer.UpdateSince(autoCollectStart)
+		close(p.collectedCh)
+	}()
+	srcStart := time.Now()
+	root, witnessBytes, err := bc.WaitForSRC()
+	pipelineImportSRCTimer.UpdateSince(srcStart)
+	if err != nil {
+		log.Error("Pipelined import: SRC goroutine failed", "block", p.block.NumberU64(), "err", err)
+		p.collectedErr = err
+		return
+	}
+	verifyStart := time.Now()
+	verifyOk := bc.verifyImportSRCRoot(p, root)
+	pipelineImportAutoCollectVerifyTimer.UpdateSince(verifyStart)
+	if !verifyOk {
+		// verifyImportSRCRoot already set p.collectedErr and reverted head.
+		return
+	}
+	p.collectedRoot = root
+	if bc.cfg.PipelinedImportSRCLogs {
+		log.Info("Pipelined import: SRC verified", "block", p.block.NumberU64(), "root", root)
+	}
+	publishStart := time.Now()
+	bc.publishImportWitness(p, witnessBytes)
+	pipelineImportAutoCollectPublishTimer.UpdateSince(publishStart)
+	// blockStart is zero when the caller didn't record an end-to-end start.
+	if !p.blockStart.IsZero() {
+		witnessReadyEndToEndTimer.UpdateSince(p.blockStart)
+	}
+	gcStart := time.Now()
+	bc.handleImportTrieGC(root, p.block.NumberU64(), p.procTime)
+	pipelineImportAutoCollectGCTimer.UpdateSince(gcStart)
+	pipelineImportBlocksCounter.Inc(1)
+}
+
+// verifyImportSRCRoot compares the SRC-computed root with the imported
+// block's root. On mismatch (should never happen — a mismatch means SRC
+// diverged from the block the peer sent), reverts the chain head to the
+// parent and surfaces the error on p. Returns false on mismatch.
+func (bc *BlockChain) verifyImportSRCRoot(p *pendingImportSRCState, root common.Hash) bool {
+	if root == p.block.Root() {
+		return true
+	}
+	pipelineImportRootMismatchCounter.Inc(1)
+	p.collectedErr = fmt.Errorf("pipelined import: root mismatch (expected: %x got: %x) block: %d",
+		p.block.Root(), root, p.block.NumberU64())
+	log.Error("Pipelined import: root mismatch, reverting chain head",
+		"block", p.block.NumberU64(), "expected", p.block.Root(), "got", root)
+	bc.reportBlock(p.block, nil, p.collectedErr)
+	// A nil parent means we can't revert; the error on p still propagates.
+	if parentBlock := bc.GetBlock(p.block.ParentHash(), p.block.NumberU64()-1); parentBlock != nil {
+		// writeHeadBlock requires chainmu. This goroutine runs async of
+		// insertChainWithWitnesses, so we must acquire it explicitly to avoid
+		// racing with a concurrent InsertChain mutating chain head state.
+		// TryLock blocks while the mutex is held but returns false if the
+		// chain is shutting down — skip recovery in that case.
+		if bc.chainmu.TryLock() {
+			bc.writeHeadBlock(parentBlock)
+			bc.chainmu.Unlock()
+		} else {
+			log.Warn("Pipelined import: skipped head revert (chain closing)",
+				"block", p.block.NumberU64())
+		}
+	}
+	return false
+}
+
+// publishImportWitness writes the SRC-computed witness bytes to the witness
+// store, records the size metric, and notifies WIT peers via the
+// witness-ready feed. Empty bytes mean no witness was produced; skip.
+func (bc *BlockChain) publishImportWitness(p *pendingImportSRCState, witnessBytes []byte) {
+	if len(witnessBytes) == 0 {
+		return
+	}
+	blockHash := p.block.Hash()
+	bc.WriteWitness(blockHash, witnessBytes)
+	witnessSizeBytesHistogram.Update(int64(len(witnessBytes)))
+	event := WitnessReadyEvent{
+		BlockHash:   blockHash,
+		BlockNumber: p.block.NumberU64(),
+	}
+	bc.witnessReadyFeed.Send(event)
+}
+
+// emitPipelinedImportParityMetrics emits the read-side, execution,
+// bor-consensus, and throughput timers under the same metric names the
+// non-pipelined path uses, so dashboards work identically regardless of
+// whether the chain is in pipelined mode. Hash/update/commit/stateCommit
+// timers fire from the SRC goroutine's tmpDB in runSRCCompute.
+func emitPipelinedImportParityMetrics(statedb *state.StateDB, start, pstart time.Time, vtime time.Duration, block *types.Block) {
+	// Pure execution time: processing minus validation and bor consensus.
+	ptimePipelined := time.Since(pstart) - vtime - statedb.BorConsensusTime
+	trieReadPipelined := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.SnapshotStorageReads + statedb.StorageReads
+	accountReadTimer.Update(statedb.AccountReads)
+	storageReadTimer.Update(statedb.StorageReads)
+	snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads)
+	snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads)
+	blockExecutionTimer.Update(ptimePipelined - trieReadPipelined)
+	borConsensusTime.Update(statedb.BorConsensusTime)
+	elapsedPipelined := time.Since(start)
+	blockInsertTimer.Update(elapsedPipelined)
+	pipelineImportTotalTimer.Update(elapsedPipelined)
+	gasUsedPerBlockHistogram.Update(int64(block.GasUsed()))
+	txsPerBlockHistogram.Update(int64(len(block.Transactions())))
+	// Guard against a zero elapsed time before computing Mgas/s throughput.
+	if elapsedPipelined > 0 {
+		chainMgaspsMeter.Update(time.Duration(float64(block.GasUsed()) * 1000 / float64(elapsedPipelined)))
+	}
 }
// GetLastFlatDiff returns the FlatDiff captured from the most recently committed
-// block's CommitSnapshot. Under delayed SRC, the miner uses this to open a
-// NewWithFlatBase statedb without waiting for the current SRC goroutine.
+// block. The miner uses this to open a NewWithFlatBase StateDB without waiting
+// for the current SRC goroutine to finish.
func (bc *BlockChain) GetLastFlatDiff() *state.FlatDiff {
bc.lastFlatDiffMu.RLock()
defer bc.lastFlatDiffMu.RUnlock()
return bc.lastFlatDiff
}
-// StateAtWithFlatDiff opens a statedb at baseRoot with flatDiff as an in-memory
-// overlay, equivalent to state.NewWithFlatBase. Used by the miner under delayed
-// SRC to begin executing block N+1 before G_N has finished.
+// SetLastFlatDiff records the FlatDiff together with the block number it
+// belongs to and the surrounding roots. The block number lets PostExecState
+// match the FlatDiff to the right block (hash matching is unreliable because
+// Root and the seal signature are not yet known when FlatDiff is captured).
+func (bc *BlockChain) SetLastFlatDiff(diff *state.FlatDiff, blockNum uint64, parentRoot common.Hash, blockRoot common.Hash) {
+	bc.lastFlatDiffMu.Lock()
+	defer bc.lastFlatDiffMu.Unlock()
+	bc.lastFlatDiff = diff
+	bc.lastFlatDiffBlockNum = blockNum
+	bc.lastFlatDiffParentRoot = parentRoot
+	bc.lastFlatDiffBlockRoot = blockRoot
+}
+
+// StateAtWithFlatDiff opens a StateDB at baseRoot with flatDiff as an in-memory
+// overlay, allowing reads to see the post-state of the block that produced
+// flatDiff without waiting for its state root to be committed to the trie DB.
func (bc *BlockChain) StateAtWithFlatDiff(baseRoot common.Hash, flatDiff *state.FlatDiff) (*state.StateDB, error) {
return state.NewWithFlatBase(baseRoot, bc.statedb, flatDiff)
}
@@ -4829,26 +5529,21 @@ func (bc *BlockChain) ProcessBlockWithWitnesses(block *types.Block, witness *sta
// Validate witness.
// During parallel import, defer pre-state validation to the end of the batch.
if !bc.parallelStatelessImportEnabled.Load() {
- expectedRoot, err := bc.expectedPreStateRoot(block)
- if err != nil {
- log.Error("Pre-state root unavailable for witness validation", "blockNumber", block.Number(), "blockHash", block.Hash(), "err", err)
- return nil, nil, fmt.Errorf("witness validation failed: %w", err)
+ var headerReader stateless.HeaderReader
+ if witness.HeaderReader() != nil {
+ headerReader = witness.HeaderReader()
+ } else {
+ headerReader = bc
}
- if err := stateless.ValidateWitnessPreState(witness, expectedRoot); err != nil {
+ if err := stateless.ValidateWitnessPreState(witness, headerReader, block.Header()); err != nil {
log.Error("Witness validation failed during stateless processing", "blockNumber", block.Number(), "blockHash", block.Hash(), "err", err)
return nil, nil, fmt.Errorf("witness validation failed: %w", err)
}
}
- // Remove the receipt hash so ExecuteStateless can recompute it from scratch.
- // Under delayed SRC, block.Root() carries the pre-state root for this block
- // (the actual post-execution state root of the parent); preserve it so that
- // ExecuteStateless can use it to open the correct pre-execution state.
- // For pre-fork blocks, zero Root too so ExecuteStateless recomputes it.
+ // Remove critical computed fields from the block to force true recalculation
context := block.Header()
- if bc.chainConfig.Bor == nil || !bc.chainConfig.Bor.IsDelayedSRC(block.Number()) {
- context.Root = common.Hash{}
- }
+ context.Root = common.Hash{}
context.ReceiptHash = common.Hash{}
task := types.NewBlockWithHeader(context).WithBody(*block.Body())
@@ -4863,9 +5558,7 @@ func (bc *BlockChain) ProcessBlockWithWitnesses(block *types.Block, witness *sta
log.Error("Stateless self-validation failed", "block", block.Number(), "hash", block.Hash(), "error", err)
return nil, nil, err
}
- // Under delayed SRC, block.Root() = parent's state root, not this block's;
- // skip the equality check in that case.
- if (bc.chainConfig.Bor == nil || !bc.chainConfig.Bor.IsDelayedSRC(block.Number())) && crossStateRoot != block.Root() {
+ if crossStateRoot != block.Root() {
log.Error("Stateless self-validation root mismatch", "block", block.Number(), "hash", block.Hash(), "cross", crossStateRoot, "local", block.Root())
err = fmt.Errorf("%w: remote %x != local %x", ErrStatelessStateRootMismatch, block.Root(), crossStateRoot)
return nil, nil, err
diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go
index 3c097b5909..cae4dcfb02 100644
--- a/core/blockchain_insert.go
+++ b/core/blockchain_insert.go
@@ -53,6 +53,12 @@ func (st *insertStats) report(chain []*types.Block, index int, snapDiffItems, sn
for _, block := range chain[(index+1)-st.processed : index+1] {
txs += len(block.Transactions())
}
+ importSegmentBlocksHistogram.Update(int64(st.processed))
+ importSegmentElapsedTimer.Update(elapsed)
+ importSegmentGasUsedHistogram.Update(int64(st.usedGas))
+ if elapsed > 0 {
+ importSegmentMgaspsHistogram.Update(int64(mgasps))
+ }
end := chain[index]
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index 67e99f6b54..75234d2250 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -21,6 +21,8 @@ import (
"fmt"
"math/big"
+ "time"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
@@ -158,7 +160,9 @@ func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
}
// GetWitness retrieves a witness in RLP encoding from the database by hash,
-// caching it if found.
+// caching it if found. If the witness is not yet available but the pipelined
+// import SRC goroutine is generating it for this block, GetWitness blocks
+// until the SRC completes and the witness is written.
func (bc *BlockChain) GetWitness(hash common.Hash) []byte {
// Short circuit if the witness is already in the cache, retrieve otherwise
if cached, ok := bc.witnessCache.Get(hash); ok {
@@ -166,6 +170,11 @@ func (bc *BlockChain) GetWitness(hash common.Hash) []byte {
}
witness := bc.witnessStore.ReadWitness(hash)
+ if len(witness) == 0 {
+ // Witness not in DB yet — check if the pipelined import SRC goroutine
+ // is currently generating it. If so, wait for it to finish.
+ witness = bc.waitForPipelinedWitness(hash)
+ }
if len(witness) == 0 {
return nil
}
@@ -174,6 +183,72 @@ func (bc *BlockChain) GetWitness(hash common.Hash) []byte {
return witness
}
+// waitForPipelinedWitness waits for a witness that is being generated by
+// the pipelined import SRC goroutine. It handles two cases:
+//
+//  1. The requested block IS the current pendingImportSRC — block on its
+//     collectedCh until the SRC finishes and the witness is written.
+//
+//  2. The requested block is in the current import batch but hasn't been
+//     processed yet (or SRC just completed) — poll the witness cache briefly
+//     since the batch processes blocks rapidly (~2ms each).
+//
+// Returns nil if the witness doesn't appear within the timeout.
+func (bc *BlockChain) waitForPipelinedWitness(hash common.Hash) []byte {
+	// Feature-gated: without pipelined import SRC nothing will ever produce
+	// the witness asynchronously, so there is nothing to wait for.
+	if !bc.cfg.EnablePipelinedImportSRC {
+		return nil
+	}
+	if w, ok := bc.waitForPendingSRCWitness(hash); ok {
+		return w
+	}
+	// The 2s/10ms poll bounds are heuristics sized against the ~2ms per-block
+	// batch rate above; on a miss the caller treats the witness as absent.
+	return bc.pollWitnessCache(hash, 2*time.Second, 10*time.Millisecond)
+}
+
+// waitForPendingSRCWitness returns the witness when hash matches the single
+// in-flight pending SRC block — blocking on collectedCh ensures the witness
+// has been written to cache or store. ok=false means hash is not this block
+// (caller should fall back to polling the cache). When the pending block was
+// imported with makeWitness=false, returns (nil, true) immediately — the
+// witness will never be produced, so neither blocking on collectedCh nor
+// falling through to the 2s cache poll would help.
+func (bc *BlockChain) waitForPendingSRCWitness(hash common.Hash) ([]byte, bool) {
+	// Snapshot the pending pointer under the mutex; the potentially long
+	// channel receive below must happen without holding the lock.
+	bc.pendingImportSRCMu.Lock()
+	pending := bc.pendingImportSRC
+	bc.pendingImportSRCMu.Unlock()
+	if pending == nil || pending.block.Hash() != hash {
+		return nil, false
+	}
+	if !pending.makeWitness {
+		return nil, true
+	}
+	// Wait for the SRC goroutine to signal that the witness has been written.
+	<-pending.collectedCh
+	// Prefer the in-memory cache; fall back to the persistent store.
+	if w, ok := bc.witnessCache.Get(hash); ok {
+		return w, true
+	}
+	return bc.witnessStore.ReadWitness(hash), true
+}
+
+// pollWitnessCache waits up to `timeout` for the witness to land in the
+// in-memory cache, checking every `interval`. Used when the block isn't
+// the current pending SRC (may be in an earlier import batch, or SRC just
+// finished and the cache insert is racing with our read).
+//
+// NOTE(review): the cache is only inspected on ticker fires — a witness that
+// lands within the first interval still waits one tick, and one arriving just
+// as the deadline fires can be missed. Callers treat nil as "not found", so
+// this looks like a latency trade-off rather than a correctness issue; confirm.
+func (bc *BlockChain) pollWitnessCache(hash common.Hash, timeout, interval time.Duration) []byte {
+	deadline := time.NewTimer(timeout)
+	defer deadline.Stop()
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			if w, ok := bc.witnessCache.Get(hash); ok {
+				return w
+			}
+		case <-deadline.C:
+			return nil
+		}
+	}
+}
+
// HasWitness checks if a witness is present in the cache or database.
func (bc *BlockChain) HasWitness(hash common.Hash) bool {
if bc.witnessCache.Contains(hash) {
@@ -182,6 +257,13 @@ func (bc *BlockChain) HasWitness(hash common.Hash) bool {
return bc.witnessStore.HasWitness(hash)
}
+// CacheWitness adds a witness to the in-memory cache without writing to the
+// persistent store. Used by pipelined SRC to make witnesses available to the
+// WIT protocol immediately after broadcast, before the async DB write completes.
+//
+// NOTE(review): cache-only — if the entry is evicted before the async DB write
+// lands, readers fall back to the store/poll path in GetWitness. Presumably
+// the cache is sized so eviction cannot outrun the write; confirm.
+func (bc *BlockChain) CacheWitness(hash common.Hash, witness []byte) {
+	bc.witnessCache.Add(hash, witness)
+}
+
// WriteWitness writes the witness to the witness store and updates the cache.
func (bc *BlockChain) WriteWitness(hash common.Hash, witness []byte) {
bc.witnessStore.WriteWitness(hash, witness)
@@ -466,9 +548,22 @@ func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
}
// HasState checks if state trie is fully present in the database or not.
+// For pipelined import, also returns true if the hash matches a pending
+// import SRC block whose state will be committed momentarily.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.statedb.OpenTrie(hash)
-	return err == nil
+	if err == nil {
+		return true
+	}
+	// Check if the state is being committed by a pipelined import SRC goroutine.
+	// The block metadata is already in DB; the state commit is in-flight.
+	// NOTE(review): if the commit completes between OpenTrie failing and this
+	// read (pending already cleared to nil), we return false for a root that
+	// is now present — acceptable only if callers can retry; confirm.
+	bc.pendingImportSRCMu.Lock()
+	pending := bc.pendingImportSRC
+	bc.pendingImportSRCMu.Unlock()
+	if pending != nil && pending.block.Root() == hash {
+		return true
+	}
+	return false
+}
// HasBlockAndState checks if a block and associated state trie is fully present
@@ -510,6 +605,24 @@ func (bc *BlockChain) State() (*state.StateDB, error) {
// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
+	// Fast path: if this is the latest pipelined import block whose SRC hasn't
+	// committed yet, use FlatDiff overlay. This allows eth_call, eth_estimateGas,
+	// and other state readers to work during the brief window between metadata
+	// write and SRC completion.
+	bc.lastFlatDiffMu.RLock()
+	flatDiff := bc.lastFlatDiff
+	flatDiffBlockRoot := bc.lastFlatDiffBlockRoot
+	flatDiffParentRoot := bc.lastFlatDiffParentRoot
+	bc.lastFlatDiffMu.RUnlock()
+
+	if flatDiff != nil && root == flatDiffBlockRoot {
+		sdb, err := state.NewWithFlatBase(flatDiffParentRoot, bc.statedb, flatDiff)
+		if err != nil {
+			// Overlay open failed — fall through to a plain StateDB at root,
+			// which surfaces the underlying unavailability error to the caller
+			// if the root is genuinely uncommitted.
+			return state.New(root, bc.statedb)
+		}
+		return sdb, nil
+	}
+
	return state.New(root, bc.statedb)
}
@@ -519,19 +632,40 @@ func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
// is for actual transaction processing. This enables independent cache hit/miss tracking
// for both phases of block production.
func (bc *BlockChain) StateAtWithReaders(root common.Hash) (*state.StateDB, *state.StateDB, state.ReaderWithStats, state.ReaderWithStats, error) {
-	prefetchReader, processReader, err := bc.statedb.ReadersWithCacheStats(root)
+	// If the root matches the latest pipelined import block (whose SRC hasn't
+	// committed yet), open readers at the committed parent root and apply the
+	// FlatDiff overlay. This allows the miner to build pending blocks even when
+	// the chain head's state root is not yet committed to the trie DB.
+	readerRoot := root
+	bc.lastFlatDiffMu.RLock()
+	flatDiff := bc.lastFlatDiff
+	flatDiffBlockRoot := bc.lastFlatDiffBlockRoot
+	flatDiffParentRoot := bc.lastFlatDiffParentRoot
+	bc.lastFlatDiffMu.RUnlock()
+
+	if flatDiff != nil && root == flatDiffBlockRoot {
+		readerRoot = flatDiffParentRoot
+	}
+
+	// The (diff, roots) tuple was snapshotted once under the lock above, so
+	// the overlay applied below is always consistent with readerRoot even if
+	// SetLastFlatDiff runs concurrently.
+	prefetchReader, processReader, err := bc.statedb.ReadersWithCacheStats(readerRoot)
	if err != nil {
		return nil, nil, nil, nil, err
	}
-	statedb, err := state.NewWithReader(root, bc.statedb, processReader)
+	statedb, err := state.NewWithReader(readerRoot, bc.statedb, processReader)
	if err != nil {
		return nil, nil, nil, nil, err
	}
-	throwaway, err := state.NewWithReader(root, bc.statedb, prefetchReader)
+	throwaway, err := state.NewWithReader(readerRoot, bc.statedb, prefetchReader)
	if err != nil {
		return nil, nil, nil, nil, err
	}
+	// Apply FlatDiff overlay so the miner sees the latest block's post-state.
+	if flatDiff != nil && root == flatDiffBlockRoot {
+		statedb.SetFlatDiffRef(flatDiff)
+		throwaway.SetFlatDiffRef(flatDiff)
+	}
+
	return statedb, throwaway, prefetchReader, processReader, nil
}
@@ -659,6 +793,12 @@ func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscript
return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}
+// SubscribeWitnessReadyEvent registers a subscription for witness availability
+// events from the pipelined import SRC goroutine.
+// The subscription is tracked by bc.scope alongside the chain's other feeds.
+func (bc *BlockChain) SubscribeWitnessReadyEvent(ch chan<- WitnessReadyEvent) event.Subscription {
+	return bc.scope.Track(bc.witnessReadyFeed.Subscribe(ch))
+}
+
// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
@@ -684,13 +824,6 @@ type BorStateSyncer interface {
SubscribeStateSyncEvent(ch chan<- StateSyncEvent) event.Subscription
}
-// DelayedSRCReader is implemented by BlockChain and allows consensus code to
-// retrieve the actual post-execution state root stored separately under the
-// delayed-SRC protocol (where header.Root holds the parent's state root).
-type DelayedSRCReader interface {
- GetPostStateRoot(blockHash common.Hash) common.Hash
-}
-
// SetStateSync set sync data in state_data
func (bc *BlockChain) SetStateSync(stateData []*types.StateSyncData) {
bc.stateSyncMu.Lock()
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 1a8c73bc0f..809a705a6b 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -27,6 +27,7 @@ import (
"os"
"path"
"reflect"
+ "strings"
"sync"
"sync/atomic"
"testing"
@@ -185,7 +186,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
if err != nil {
return err
}
- receipts, logs, usedGas, statedb, _, err := blockchain.ProcessBlock(block, blockchain.GetBlockByHash(block.ParentHash()).Header(), nil, nil)
+ receipts, logs, usedGas, statedb, _, err := blockchain.ProcessBlock(block, blockchain.GetBlockByHash(block.ParentHash()).Header(), nil, nil, nil)
res := &ProcessResult{
Receipts: receipts,
Logs: logs,
@@ -6427,3 +6428,1608 @@ func TestWriteBlockMetrics(t *testing.T) {
t.Error("stateCommitTimer mean duration should be non-negative")
}
}
+
+// ---------------------------------------------------------------------------
+// Pipelined Import SRC Tests
+// ---------------------------------------------------------------------------
+
+// pipelinedConfig returns a BlockChainConfig with pipelined import SRC enabled.
+// Pipeline logging is also switched on, presumably to aid debugging test
+// failures from output.
+func pipelinedConfig(scheme string) *BlockChainConfig {
+	cfg := DefaultConfig().WithStateScheme(scheme)
+	cfg.EnablePipelinedImportSRC = true
+	cfg.PipelinedImportSRCLogs = true
+	return cfg
+}
+
+// pipelinedConfigWithWarmSnapshot returns the standard pipelined config with
+// the warm-snapshot handoff enabled. Used by snapshot-on-vs-off parity tests.
+// It layers exactly one extra flag on pipelinedConfig, so the two configs
+// differ only in warm-snapshot handling.
+func pipelinedConfigWithWarmSnapshot(scheme string) *BlockChainConfig {
+	cfg := pipelinedConfig(scheme)
+	cfg.PipelinedSRCWarmSnapshot = true
+	return cfg
+}
+
+// TestPipelinedImportSRC_MultipleBlocks generates 10 blocks with transactions and
+// inserts them into two chains — one with pipelined SRC enabled and one without.
+// The state roots of every canonical block must match between both chains.
+func TestPipelinedImportSRC_MultipleBlocks(t *testing.T) {
+	testPipelinedImportSRC_MultipleBlocks(t, rawdb.HashScheme)
+	testPipelinedImportSRC_MultipleBlocks(t, rawdb.PathScheme)
+}
+
+// testPipelinedImportSRC_MultipleBlocks runs the parity check for one state
+// scheme (hash or path).
+func testPipelinedImportSRC_MultipleBlocks(t *testing.T, scheme string) {
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	// Generate 10 blocks with a simple transfer in each.
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 10, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	// Chain with pipeline enabled.
+	pipeChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme))
+	if err != nil {
+		t.Fatalf("failed to create pipeline chain: %v", err)
+	}
+	defer pipeChain.Stop()
+
+	// Reference chain without pipeline.
+	refChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, DefaultConfig().WithStateScheme(scheme))
+	if err != nil {
+		t.Fatalf("failed to create reference chain: %v", err)
+	}
+	defer refChain.Stop()
+
+	if _, err := pipeChain.InsertChain(blocks, false); err != nil {
+		t.Fatalf("pipeline chain: failed to insert blocks: %v", err)
+	}
+	if _, err := refChain.InsertChain(blocks, false); err != nil {
+		t.Fatalf("reference chain: failed to insert blocks: %v", err)
+	}
+
+	// Both chains must agree on head.
+	if pipeChain.CurrentBlock().Number.Uint64() != 10 {
+		t.Fatalf("pipeline chain head = %d, want 10", pipeChain.CurrentBlock().Number.Uint64())
+	}
+	if refChain.CurrentBlock().Number.Uint64() != 10 {
+		t.Fatalf("reference chain head = %d, want 10", refChain.CurrentBlock().Number.Uint64())
+	}
+
+	// All canonical blocks must have matching state roots. Both root and hash
+	// are compared so a mismatch reports the more specific field first.
+	for i := uint64(1); i <= 10; i++ {
+		pipeBlock := pipeChain.GetBlockByNumber(i)
+		refBlock := refChain.GetBlockByNumber(i)
+		if pipeBlock == nil || refBlock == nil {
+			t.Fatalf("block %d: missing on pipeline(%v) or reference(%v)", i, pipeBlock == nil, refBlock == nil)
+		}
+		if pipeBlock.Root() != refBlock.Root() {
+			t.Errorf("block %d: state root mismatch pipeline=%s reference=%s", i, pipeBlock.Root(), refBlock.Root())
+		}
+		if pipeBlock.Hash() != refBlock.Hash() {
+			t.Errorf("block %d: block hash mismatch pipeline=%s reference=%s", i, pipeBlock.Hash(), refBlock.Hash())
+		}
+	}
+}
+
+// TestPipelinedImportSRC_SingleBlock inserts a single block with pipeline enabled
+// and verifies correctness of the state.
+func TestPipelinedImportSRC_SingleBlock(t *testing.T) {
+	testPipelinedImportSRC_SingleBlock(t, rawdb.HashScheme)
+	testPipelinedImportSRC_SingleBlock(t, rawdb.PathScheme)
+}
+
+// testPipelinedImportSRC_SingleBlock runs the single-block case for one state
+// scheme (hash or path).
+func testPipelinedImportSRC_SingleBlock(t *testing.T, scheme string) {
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme))
+	if err != nil {
+		t.Fatalf("failed to create chain: %v", err)
+	}
+	defer chain.Stop()
+
+	if _, err := chain.InsertChain(blocks, false); err != nil {
+		t.Fatalf("failed to insert block: %v", err)
+	}
+
+	if chain.CurrentBlock().Number.Uint64() != 1 {
+		t.Fatalf("head = %d, want 1", chain.CurrentBlock().Number.Uint64())
+	}
+
+	statedb, err := chain.StateAt(blocks[0].Root())
+	if err != nil {
+		t.Fatalf("StateAt failed: %v", err)
+	}
+
+	// Recipient should have received 1000 wei.
+	// (Only non-emptiness is asserted, not the exact transfer value.)
+	bal := statedb.GetBalance(recipient)
+	if bal.IsZero() {
+		t.Error("recipient balance should be non-zero after transfer")
+	}
+}
+
+// TestPipelinedImportSRC_CrossCallPersistence inserts blocks across two separate
+// InsertChain calls with pipelined SRC and verifies that state persists correctly
+// between calls (the pending SRC from the first batch is flushed before the
+// second batch begins).
+func TestPipelinedImportSRC_CrossCallPersistence(t *testing.T) {
+	testPipelinedImportSRC_CrossCallPersistence(t, rawdb.HashScheme)
+	testPipelinedImportSRC_CrossCallPersistence(t, rawdb.PathScheme)
+}
+
+// testPipelinedImportSRC_CrossCallPersistence runs the split-batch scenario
+// for one state scheme (hash or path).
+func testPipelinedImportSRC_CrossCallPersistence(t *testing.T, scheme string) {
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 6, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	// Pipeline chain: split insertion across two calls.
+	pipeChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme))
+	if err != nil {
+		t.Fatalf("failed to create pipeline chain: %v", err)
+	}
+	defer pipeChain.Stop()
+
+	if _, err := pipeChain.InsertChain(blocks[:3], false); err != nil {
+		t.Fatalf("pipeline: first batch insert failed: %v", err)
+	}
+	if _, err := pipeChain.InsertChain(blocks[3:], false); err != nil {
+		t.Fatalf("pipeline: second batch insert failed: %v", err)
+	}
+
+	// Reference chain: single call.
+	refChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, DefaultConfig().WithStateScheme(scheme))
+	if err != nil {
+		t.Fatalf("failed to create reference chain: %v", err)
+	}
+	defer refChain.Stop()
+
+	if _, err := refChain.InsertChain(blocks, false); err != nil {
+		t.Fatalf("reference: insert failed: %v", err)
+	}
+
+	if pipeChain.CurrentBlock().Number.Uint64() != 6 {
+		t.Fatalf("pipeline head = %d, want 6", pipeChain.CurrentBlock().Number.Uint64())
+	}
+
+	// Split insertion must converge to the same state roots as one-shot import.
+	for i := uint64(1); i <= 6; i++ {
+		pipeBlock := pipeChain.GetBlockByNumber(i)
+		refBlock := refChain.GetBlockByNumber(i)
+		if pipeBlock == nil || refBlock == nil {
+			t.Fatalf("block %d missing", i)
+		}
+		if pipeBlock.Root() != refBlock.Root() {
+			t.Errorf("block %d: state root mismatch pipeline=%s reference=%s", i, pipeBlock.Root(), refBlock.Root())
+		}
+	}
+}
+
+// TestPipelinedImportSRC_Reorg inserts a main chain and then a longer fork to
+// trigger a reorg. Verifies that the fork becomes canonical and all state roots
+// are valid after the reorg.
+func TestPipelinedImportSRC_Reorg(t *testing.T) {
+	testPipelinedImportSRC_Reorg(t, rawdb.HashScheme)
+	testPipelinedImportSRC_Reorg(t, rawdb.PathScheme)
+}
+
+// testPipelinedImportSRC_Reorg runs the reorg scenario for one state scheme
+// (hash or path).
+func testPipelinedImportSRC_Reorg(t *testing.T, scheme string) {
+	var (
+		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
+		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
+		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
+		funds   = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec   = &Genesis{
+			Config: params.AllEthashProtocolChanges,
+			Alloc: types.GenesisAlloc{
+				addr1: {Balance: funds},
+				addr2: {Balance: funds},
+			},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	// Main chain: 5 blocks, transfers from addr1.
+	_, mainBlocks, _ := GenerateChainWithGenesis(gspec, engine, 5, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr1), common.HexToAddress("0x1111"), big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key1,
+		)
+		gen.AddTx(tx)
+	})
+
+	// Fork chain: 7 blocks branching from genesis, using addr2 so it creates
+	// different state. Longer chain so it becomes canonical.
+	_, forkBlocks, _ := GenerateChainWithGenesis(gspec, engine, 7, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr2), common.HexToAddress("0x2222"), big.NewInt(2000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key2,
+		)
+		gen.AddTx(tx)
+	})
+
+	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme))
+	if err != nil {
+		t.Fatalf("failed to create chain: %v", err)
+	}
+	defer chain.Stop()
+
+	// Insert main chain.
+	if _, err := chain.InsertChain(mainBlocks, false); err != nil {
+		t.Fatalf("main chain insert failed: %v", err)
+	}
+	if chain.CurrentBlock().Number.Uint64() != 5 {
+		t.Fatalf("after main: head = %d, want 5", chain.CurrentBlock().Number.Uint64())
+	}
+
+	// Insert fork chain — should trigger reorg since it's longer.
+	if _, err := chain.InsertChain(forkBlocks, false); err != nil {
+		t.Fatalf("fork chain insert failed: %v", err)
+	}
+	if chain.CurrentBlock().Number.Uint64() != 7 {
+		t.Fatalf("after fork: head = %d, want 7", chain.CurrentBlock().Number.Uint64())
+	}
+
+	// Verify the fork is now canonical by checking block hashes.
+	for i := uint64(1); i <= 7; i++ {
+		canonical := chain.GetBlockByNumber(i)
+		if canonical == nil {
+			t.Fatalf("missing canonical block %d after reorg", i)
+		}
+		if canonical.Hash() != forkBlocks[i-1].Hash() {
+			t.Errorf("block %d: canonical hash %s != fork hash %s", i, canonical.Hash(), forkBlocks[i-1].Hash())
+		}
+	}
+
+	// Verify state is accessible for the canonical head.
+	statedb, err := chain.StateAt(chain.CurrentBlock().Root)
+	if err != nil {
+		t.Fatalf("StateAt head failed: %v", err)
+	}
+	// addr2 paid 2000 wei (plus gas) in each of the 7 fork blocks; only
+	// non-emptiness of the remaining balance is asserted here.
+	bal := statedb.GetBalance(addr2)
+	if bal.IsZero() {
+		t.Error("addr2 balance should be non-zero")
+	}
+}
+
+// TestPipelinedImportSRC_StateAtDuringPipeline generates blocks that modify
+// account balances and verifies that StateAt returns correct balances for each
+// block's root after pipelined insertion.
+func TestPipelinedImportSRC_StateAtDuringPipeline(t *testing.T) {
+	testPipelinedImportSRC_StateAtDuringPipeline(t, rawdb.HashScheme)
+	testPipelinedImportSRC_StateAtDuringPipeline(t, rawdb.PathScheme)
+}
+
+// testPipelinedImportSRC_StateAtDuringPipeline runs the per-root balance check
+// for one state scheme (hash or path).
+func testPipelinedImportSRC_StateAtDuringPipeline(t *testing.T, scheme string) {
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		txValue   = big.NewInt(10000) // 10000 wei per block
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	numBlocks := 5
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, txValue, params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme))
+	if err != nil {
+		t.Fatalf("failed to create chain: %v", err)
+	}
+	defer chain.Stop()
+
+	if _, err := chain.InsertChain(blocks, false); err != nil {
+		t.Fatalf("failed to insert chain: %v", err)
+	}
+
+	// Verify state at each block root shows monotonically increasing recipient balance.
+	// prevBal is cloned each iteration since GetBalance may return shared state.
+	var prevBal *uint256.Int
+	for i := 0; i < numBlocks; i++ {
+		statedb, err := chain.StateAt(blocks[i].Root())
+		if err != nil {
+			t.Fatalf("block %d: StateAt failed: %v", i+1, err)
+		}
+		bal := statedb.GetBalance(recipient)
+		if bal.IsZero() {
+			t.Errorf("block %d: recipient balance is zero, expected non-zero", i+1)
+		}
+		if prevBal != nil && bal.Cmp(prevBal) <= 0 {
+			t.Errorf("block %d: recipient balance %s should be greater than previous %s", i+1, bal, prevBal)
+		}
+		prevBal = bal.Clone()
+	}
+
+	// Final balance should equal txValue * numBlocks.
+	expectedBal := new(big.Int).Mul(txValue, big.NewInt(int64(numBlocks)))
+	finalState, _ := chain.StateAt(blocks[numBlocks-1].Root())
+	got := finalState.GetBalance(recipient).ToBig()
+	if got.Cmp(expectedBal) != 0 {
+		t.Errorf("final recipient balance: got %s, want %s", got, expectedBal)
+	}
+}
+
+// TestPipelinedImportSRC_ValidateStateCheap verifies that blocks inserted with
+// pipelined SRC pass all cheap validation checks (gas used, bloom filter,
+// receipt root). This is implicitly tested by successful insertion, but this
+// test explicitly verifies no errors by comparing against a reference chain.
+func TestPipelinedImportSRC_ValidateStateCheap(t *testing.T) {
+	testPipelinedImportSRC_ValidateStateCheap(t, rawdb.HashScheme)
+	testPipelinedImportSRC_ValidateStateCheap(t, rawdb.PathScheme)
+}
+
+// testPipelinedImportSRC_ValidateStateCheap runs the cheap-validation parity
+// check for one state scheme (hash or path).
+func testPipelinedImportSRC_ValidateStateCheap(t *testing.T, scheme string) {
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 8, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	// Insert with pipeline — any ValidateStateCheap failure would surface as
+	// an InsertChain error.
+	pipeChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme))
+	if err != nil {
+		t.Fatalf("failed to create pipeline chain: %v", err)
+	}
+	defer pipeChain.Stop()
+
+	n, err := pipeChain.InsertChain(blocks, false)
+	if err != nil {
+		t.Fatalf("pipeline InsertChain failed at block %d: %v", n, err)
+	}
+
+	// Reference chain for comparison.
+	refChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, DefaultConfig().WithStateScheme(scheme))
+	if err != nil {
+		t.Fatalf("failed to create reference chain: %v", err)
+	}
+	defer refChain.Stop()
+
+	if _, err := refChain.InsertChain(blocks, false); err != nil {
+		t.Fatalf("reference InsertChain failed: %v", err)
+	}
+
+	// Verify: every block has matching gas, bloom, receipt root, and state root.
+	for i := uint64(1); i <= 8; i++ {
+		pBlock := pipeChain.GetBlockByNumber(i)
+		rBlock := refChain.GetBlockByNumber(i)
+		if pBlock == nil || rBlock == nil {
+			t.Fatalf("block %d missing", i)
+		}
+		if pBlock.GasUsed() != rBlock.GasUsed() {
+			t.Errorf("block %d: gas used mismatch %d vs %d", i, pBlock.GasUsed(), rBlock.GasUsed())
+		}
+		if pBlock.Bloom() != rBlock.Bloom() {
+			t.Errorf("block %d: bloom filter mismatch", i)
+		}
+		if pBlock.ReceiptHash() != rBlock.ReceiptHash() {
+			t.Errorf("block %d: receipt hash mismatch %s vs %s", i, pBlock.ReceiptHash(), rBlock.ReceiptHash())
+		}
+		if pBlock.Root() != rBlock.Root() {
+			t.Errorf("block %d: state root mismatch %s vs %s", i, pBlock.Root(), rBlock.Root())
+		}
+	}
+}
+
+// TestPipelinedImportMetrics verifies that the pipelined-import metrics and
+// their parity timers actually increment when blocks flow through the
+// pipelined path, and that the mode gauge reflects the enabled config.
+func TestPipelinedImportMetrics(t *testing.T) {
+	metrics.Enable()
+
+	const numBlocks = 5
+
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	// Metric registrations are process-global, so other tests in the same
+	// binary may already have advanced them; record baselines up front and
+	// assert on deltas only.
+	var (
+		blocksBase   = pipelineImportBlocksCounter.Snapshot().Count()
+		hitBase      = pipelineImportHitCounter.Snapshot().Count()
+		mismatchBase = pipelineImportRootMismatchCounter.Snapshot().Count()
+		insertBase   = blockInsertTimer.Snapshot().Count()
+		commitBase   = stateCommitTimer.Snapshot().Count()
+	)
+
+	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(rawdb.HashScheme))
+	if err != nil {
+		t.Fatalf("failed to create pipeline chain: %v", err)
+	}
+	defer chain.Stop()
+
+	if got := pipelineImportEnabledGauge.Snapshot().Value(); got != 1 {
+		t.Errorf("pipelineImportEnabledGauge = %d, want 1 when EnablePipelinedImportSRC=true", got)
+	}
+
+	if _, err := chain.InsertChain(blocks, false); err != nil {
+		t.Fatalf("pipeline InsertChain failed: %v", err)
+	}
+
+	// Drain the trailing pending SRC so per-block counters reflect every block.
+	if err := chain.flushPendingImportSRC(); err != nil {
+		t.Fatalf("flushPendingImportSRC failed: %v", err)
+	}
+
+	if delta := pipelineImportBlocksCounter.Snapshot().Count() - blocksBase; delta != numBlocks {
+		t.Errorf("pipelineImportBlocksCounter delta = %d, want %d", delta, numBlocks)
+	}
+	// First block has no pending predecessor; subsequent blocks should all hit.
+	if delta := pipelineImportHitCounter.Snapshot().Count() - hitBase; delta != numBlocks-1 {
+		t.Errorf("pipelineImportHitCounter delta = %d, want %d", delta, numBlocks-1)
+	}
+	if delta := pipelineImportRootMismatchCounter.Snapshot().Count() - mismatchBase; delta != 0 {
+		t.Errorf("pipelineImportRootMismatchCounter delta = %d, want 0 (safety alarm)", delta)
+	}
+	if delta := blockInsertTimer.Snapshot().Count() - insertBase; delta != numBlocks {
+		t.Errorf("blockInsertTimer (parity) delta = %d, want %d", delta, numBlocks)
+	}
+	if delta := stateCommitTimer.Snapshot().Count() - commitBase; delta != numBlocks {
+		t.Errorf("stateCommitTimer (parity, from SRC goroutine) delta = %d, want %d", delta, numBlocks)
+	}
+}
+
+// TestPipelineImportDisabledGauge verifies the mode gauge reads 0 when the
+// pipeline is not enabled in the chain config.
+func TestPipelineImportDisabledGauge(t *testing.T) {
+ metrics.Enable()
+
+ gspec := &Genesis{
+ Config: params.AllEthashProtocolChanges,
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ }
+ engine := ethash.NewFaker()
+
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, DefaultConfig().WithStateScheme(rawdb.HashScheme))
+ if err != nil {
+ t.Fatalf("failed to create chain: %v", err)
+ }
+ defer chain.Stop()
+
+ if got := pipelineImportEnabledGauge.Snapshot().Value(); got != 0 {
+ t.Errorf("pipelineImportEnabledGauge = %d, want 0 when EnablePipelinedImportSRC=false", got)
+ }
+}
+
+// TestPipelineFlatDiffHitMeters verifies that the FlatDiff overlay meters
+// increment when consecutive blocks under pipelined import touch accounts/slots
+// mutated by the previous block.
+func TestPipelineFlatDiffHitMeters(t *testing.T) {
+	metrics.Enable()
+
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	// Every block transfers from the same `addr` to `recipient` — both addresses
+	// are in the previous block's FlatDiff, so reads in the next block's
+	// execution should hit the overlay.
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 3, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	// The FlatDiff meters live in the state package (unexported); look them up
+	// by name in the global registry rather than exposing accessors.
+	flatAcctMeter, ok := metrics.DefaultRegistry.Get("state/flatdiff/account_hits").(*metrics.Meter)
+	if !ok {
+		t.Fatal("state/flatdiff/account_hits meter not registered")
+	}
+	accountHitsBefore := flatAcctMeter.Snapshot().Count()
+
+	pipeChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(rawdb.HashScheme))
+	if err != nil {
+		t.Fatalf("failed to create pipeline chain: %v", err)
+	}
+	defer pipeChain.Stop()
+
+	if _, err := pipeChain.InsertChain(blocks, false); err != nil {
+		t.Fatalf("pipeline InsertChain failed: %v", err)
+	}
+	// Drain the trailing pending SRC before sampling the meter. A flush
+	// failure would leave the last block's overlay reads unaccounted and
+	// turn the delta assertion below into a confusing secondary failure, so
+	// fail fast here instead of discarding the error (matches the sibling
+	// pipelined-import tests).
+	if err := pipeChain.flushPendingImportSRC(); err != nil {
+		t.Fatalf("flushPendingImportSRC failed: %v", err)
+	}
+
+	if flatAcctMeter.Snapshot().Count()-accountHitsBefore == 0 {
+		t.Error("state/flatdiff/account_hits should have non-zero delta after consecutive-block transfers")
+	}
+	// Storage hits depend on the specific SSTORE pattern; pure balance-transfer
+	// blocks may not hit storage slots. We only assert account-side hits here.
+}
+
+// TestPipelinedImportSRC_MakeWitnessFalse verifies that when the pipelined
+// import path is invoked with makeWitness=false (the producewitnesses=false
+// configuration), the SRC goroutine still computes and validates the state
+// root — but skips witness construction, FlatDiff read-surface preload, and
+// witness encoding/caching entirely.
+func TestPipelinedImportSRC_MakeWitnessFalse(t *testing.T) {
+	metrics.Enable()
+
+	const numBlocks = 5
+
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(rawdb.HashScheme))
+	if err != nil {
+		t.Fatalf("failed to create pipeline chain: %v", err)
+	}
+	defer chain.Stop()
+
+	// Metrics are package-global, so only deltas are meaningful. The preload
+	// metrics must stay flat with the witness off; the commit timer must
+	// still advance once per block.
+	var (
+		timerBase    = pipelineSRCPreloadTimer.Snapshot().Count()
+		slotsBase    = pipelineSRCPreloadSlotsHistogram.Snapshot().Count()
+		accountsBase = pipelineSRCPreloadReadAccountsHistogram.Snapshot().Count()
+		commitBase   = stateCommitTimer.Snapshot().Count()
+	)
+
+	// makeWitness=false is the default for InsertChain.
+	if _, err := chain.InsertChain(blocks, false); err != nil {
+		t.Fatalf("pipeline InsertChain failed: %v", err)
+	}
+	if err := chain.flushPendingImportSRC(); err != nil {
+		t.Fatalf("flushPendingImportSRC failed: %v", err)
+	}
+
+	// Root validation runs even with the witness off — every imported block
+	// must carry the canonical root from GenerateChainWithGenesis.
+	for i := uint64(1); i <= numBlocks; i++ {
+		blk := chain.GetBlockByNumber(i)
+		if blk == nil {
+			t.Fatalf("block %d: missing on pipeline chain", i)
+		}
+		if blk.Root() != blocks[i-1].Root() {
+			t.Errorf("block %d: state root mismatch pipeline=%s expected=%s", i, blk.Root(), blocks[i-1].Root())
+		}
+	}
+
+	// No witness may be produced or persisted — neither cache nor store.
+	// HasWitness covers both surfaces; check each explicitly so a future
+	// refactor that splits the write paths still trips the assertion.
+	for i := uint64(1); i <= numBlocks; i++ {
+		h := chain.GetBlockByNumber(i).Hash()
+		if chain.witnessCache.Contains(h) {
+			t.Errorf("block %d: witnessCache unexpectedly contains witness when makeWitness=false", i)
+		}
+		if chain.witnessStore.HasWitness(h) {
+			t.Errorf("block %d: witnessStore unexpectedly contains witness when makeWitness=false", i)
+		}
+		if chain.HasWitness(h) {
+			t.Errorf("block %d: HasWitness=true when makeWitness=false", i)
+		}
+	}
+
+	// Preload timer + histograms must not have fired — they exist solely to
+	// populate the witness with proof-path nodes.
+	if delta := pipelineSRCPreloadTimer.Snapshot().Count() - timerBase; delta != 0 {
+		t.Errorf("pipelineSRCPreloadTimer delta = %d, want 0 when makeWitness=false", delta)
+	}
+	if delta := pipelineSRCPreloadSlotsHistogram.Snapshot().Count() - slotsBase; delta != 0 {
+		t.Errorf("pipelineSRCPreloadSlotsHistogram delta = %d, want 0 when makeWitness=false", delta)
+	}
+	if delta := pipelineSRCPreloadReadAccountsHistogram.Snapshot().Count() - accountsBase; delta != 0 {
+		t.Errorf("pipelineSRCPreloadReadAccountsHistogram delta = %d, want 0 when makeWitness=false", delta)
+	}
+
+	// stateCommitTimer should still fire once per block — CommitWithUpdate
+	// runs unconditionally, witness or not.
+	if delta := stateCommitTimer.Snapshot().Count() - commitBase; delta != numBlocks {
+		t.Errorf("stateCommitTimer delta = %d, want %d when makeWitness=false (CommitWithUpdate must still run)", delta, numBlocks)
+	}
+}
+
+// TestPipelinedImportSRC_MakeWitnessTrue verifies that when InsertChain is
+// called with makeWitness=true and the pipeline is enabled, the SRC goroutine
+// produces a witness, caches it, and preload metrics fire as expected.
+func TestPipelinedImportSRC_MakeWitnessTrue(t *testing.T) {
+	metrics.Enable()
+
+	const numBlocks = 3
+
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(rawdb.HashScheme))
+	if err != nil {
+		t.Fatalf("failed to create pipeline chain: %v", err)
+	}
+	defer chain.Stop()
+
+	// Baselines for the preload metrics; only deltas are asserted since the
+	// registrations are shared process-wide.
+	var (
+		timerBase    = pipelineSRCPreloadTimer.Snapshot().Count()
+		slotsBase    = pipelineSRCPreloadSlotsHistogram.Snapshot().Count()
+		accountsBase = pipelineSRCPreloadReadAccountsHistogram.Snapshot().Count()
+	)
+
+	if _, err := chain.InsertChain(blocks, true); err != nil {
+		t.Fatalf("pipeline InsertChain (makeWitness=true) failed: %v", err)
+	}
+	if err := chain.flushPendingImportSRC(); err != nil {
+		t.Fatalf("flushPendingImportSRC failed: %v", err)
+	}
+
+	// Witness should be cached for every imported block.
+	for i := uint64(1); i <= numBlocks; i++ {
+		if h := chain.GetBlockByNumber(i).Hash(); !chain.witnessCache.Contains(h) {
+			t.Errorf("block %d: witnessCache missing witness when makeWitness=true", i)
+		}
+	}
+
+	// Preload timer + histograms should fire once per block.
+	if delta := pipelineSRCPreloadTimer.Snapshot().Count() - timerBase; delta != numBlocks {
+		t.Errorf("pipelineSRCPreloadTimer delta = %d, want %d when makeWitness=true", delta, numBlocks)
+	}
+	if delta := pipelineSRCPreloadSlotsHistogram.Snapshot().Count() - slotsBase; delta != numBlocks {
+		t.Errorf("pipelineSRCPreloadSlotsHistogram delta = %d, want %d when makeWitness=true", delta, numBlocks)
+	}
+	if delta := pipelineSRCPreloadReadAccountsHistogram.Snapshot().Count() - accountsBase; delta != numBlocks {
+		t.Errorf("pipelineSRCPreloadReadAccountsHistogram delta = %d, want %d when makeWitness=true", delta, numBlocks)
+	}
+}
+
+// TestPipelinedImportSRC_RootParityWitnessOnVsOff is the consensus-critical
+// parity check for mitigation (2.5): the pipelined SRC goroutine uses
+// state.NewTrieOnly when makeWitness=true and state.New (multi-reader) when
+// makeWitness=false. Both reader paths must produce byte-identical state
+// roots when importing the same blocks — otherwise consensus would split
+// between witness-producing and witness-off nodes on the same network.
+//
+// Two import shapes are exercised:
+//   - shape A: a single InsertChain(blocks) call — batch behaviour
+//   - shape B: two consecutive InsertChain calls — exercises cross-call
+//     pending-SRC reuse (the FlatDiff overlay path between batches)
+//
+// Path scheme is used because state.New only differs from state.NewTrieOnly
+// when a flat reader is actually wired (pathdb StateReader); under hash
+// scheme without a snapshot the multi-reader degenerates to trie-only and
+// the test would not detect a real parity bug.
+func TestPipelinedImportSRC_RootParityWitnessOnVsOff(t *testing.T) {
+	const numBlocks = 8
+	const splitAt = 3 // shape B: insert blocks[:splitAt] then blocks[splitAt:]
+
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	// importParity builds a fresh pipelined chain (path scheme) and feeds it
+	// the given batches with the given witness mode, draining the trailing
+	// pending SRC before returning. Mirrors importInto in the warm-snapshot
+	// parity test so the two consensus-parity tests stay structurally
+	// consistent; previously the setup was triplicated inline.
+	importParity := func(label string, witness bool, batches ...[]*types.Block) *BlockChain {
+		t.Helper()
+		chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(rawdb.PathScheme))
+		if err != nil {
+			t.Fatalf("%s: NewBlockChain: %v", label, err)
+		}
+		t.Cleanup(chain.Stop)
+		for _, batch := range batches {
+			if _, err := chain.InsertChain(batch, witness); err != nil {
+				t.Fatalf("%s: InsertChain: %v", label, err)
+			}
+		}
+		if err := chain.flushPendingImportSRC(); err != nil {
+			t.Fatalf("%s: flushPendingImportSRC: %v", label, err)
+		}
+		return chain
+	}
+
+	// Witness=true path: NewTrieOnly reader, single InsertChain batch.
+	witnessOnChain := importParity("witness-on", true, blocks)
+	// Witness=false path, shape A: single batch with the state.New reader.
+	witnessOffBatch := importParity("witness-off-batch", false, blocks)
+	// Witness=false path, shape B: split insertion exercises cross-call
+	// pending-SRC reuse — the second InsertChain opens with a pending
+	// FlatDiff from the first batch and must produce the same roots as the
+	// batched path.
+	witnessOffSplit := importParity("witness-off-split", false, blocks[:splitAt], blocks[splitAt:])
+
+	// Per-block parity: every chain must agree with the canonical root from
+	// the generator AND with each other.
+	for i := uint64(1); i <= numBlocks; i++ {
+		canonical := blocks[i-1].Root()
+
+		on := witnessOnChain.GetBlockByNumber(i)
+		offBatch := witnessOffBatch.GetBlockByNumber(i)
+		offSplit := witnessOffSplit.GetBlockByNumber(i)
+		if on == nil || offBatch == nil || offSplit == nil {
+			t.Fatalf("block %d: missing on one of the chains (on=%v offBatch=%v offSplit=%v)",
+				i, on == nil, offBatch == nil, offSplit == nil)
+		}
+
+		if on.Root() != canonical {
+			t.Errorf("block %d: witness-on root %s != canonical %s", i, on.Root(), canonical)
+		}
+		if offBatch.Root() != canonical {
+			t.Errorf("block %d: witness-off batch root %s != canonical %s", i, offBatch.Root(), canonical)
+		}
+		if offSplit.Root() != canonical {
+			t.Errorf("block %d: witness-off split root %s != canonical %s", i, offSplit.Root(), canonical)
+		}
+		if on.Root() != offBatch.Root() {
+			t.Errorf("block %d: witness-on vs witness-off-batch root mismatch %s != %s", i, on.Root(), offBatch.Root())
+		}
+		if offBatch.Root() != offSplit.Root() {
+			t.Errorf("block %d: witness-off batch vs split root mismatch %s != %s", i, offBatch.Root(), offSplit.Root())
+		}
+	}
+}
+
+// TestPipelinedImportSRC_WitnessHardFailsWithoutExecWitness verifies that
+// runSRCCompute rejects the configuration where a witness is requested but
+// none is supplied by the caller. The import path always hands the
+// EVM-populated witness through to SRC; only miner/legacy callers may set
+// allowOwnWitness=true to opt into SRC creating its own witness. Spawning
+// the SRC goroutine with makeWitness=true, execWitness=nil, and
+// allowOwnWitness=false must set pending.err.
+func TestPipelinedImportSRC_WitnessHardFailsWithoutExecWitness(t *testing.T) {
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	// A single simple-transfer block is enough: the test only needs a valid
+	// target block and its parent root to hand to SpawnSRCGoroutine.
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(rawdb.HashScheme))
+	if err != nil {
+		t.Fatalf("failed to create chain: %v", err)
+	}
+	defer chain.Stop()
+
+	// First import a block normally so we have a committed parent root and a
+	// FlatDiff to feed the SRC. The import path's makeWitness=false is used so
+	// no witness work runs in the legitimate insertion.
+	if _, err := chain.InsertChain(blocks, false); err != nil {
+		t.Fatalf("InsertChain failed: %v", err)
+	}
+	if err := chain.flushPendingImportSRC(); err != nil {
+		t.Fatalf("flushPendingImportSRC failed: %v", err)
+	}
+
+	// Now manually spawn an SRC goroutine with makeWitness=true,
+	// execWitness=nil, allowOwnWitness=false. Use a dummy FlatDiff to keep
+	// ApplyFlatDiffForCommit from being reached; runSRCCompute should error
+	// out before touching it.
+	// NOTE(review): the trailing nil argument's meaning is not visible from
+	// here — confirm against SpawnSRCGoroutine's signature.
+	parent := chain.GetBlockByNumber(0)
+	target := blocks[0]
+	chain.SpawnSRCGoroutine(target, parent.Root(), &state.FlatDiff{}, true, nil, false, nil)
+
+	// Wait for the goroutine to finish by reaching into the unexported
+	// pending-SRC bookkeeping (test lives in the same package): take the
+	// mutex to grab the pending record, then block on its WaitGroup. The
+	// goroutine sets pending.err and exits.
+	chain.pendingSRCMu.Lock()
+	pending := chain.pendingSRC
+	chain.pendingSRCMu.Unlock()
+	if pending == nil {
+		t.Fatal("expected pendingSRC after SpawnSRCGoroutine")
+	}
+	pending.wg.Wait()
+
+	// The hard-fail contract: pending.err must be non-nil and mention the
+	// missing execution witness.
+	if pending.err == nil {
+		t.Fatal("expected pending.err to be set when execWitness=nil and allowOwnWitness=false; " +
+			"got nil — the hard-fail check is not enforcing the witness contract")
+	}
+	if !strings.Contains(pending.err.Error(), "without execution witness") {
+		t.Errorf("pending.err = %v, want error mentioning 'without execution witness'", pending.err)
+	}
+}
+
+// TestPipelinedImportSRC_WitnessIncludesBlockHashAncestors verifies that
+// BLOCKHASH opcode access during EVM execution is reflected in the witness
+// published by the pipelined SRC path.
+//
+// The pipelined import path runs EVM execution and SRC commit on different
+// goroutines but must publish a single completed witness. AddBlockHash fires
+// during execution (vm/instructions.go::opBlockhash) on the witness attached
+// to the executing StateDB; the published witness must therefore include
+// those Headers entries. BLOCKHASH ancestor coverage is checked through
+// Headers because BorWitness serialises Headers but not Codes; verifiers
+// source bytecode from local storage.
+//
+// Test setup:
+//   - Block 1: regular value transfer, no BLOCKHASH.
+//   - Block 2: calls a contract whose bytecode runs BLOCKHASH(0). At block 2
+//     this triggers Witness.AddBlockHash(0), which extends Headers to include
+//     genesis (parent=block-1 is already in Headers from NewWitness; reaching
+//     back to genesis adds the second entry).
+//
+// Generation requires a real chain context for AddTxWithChain to satisfy the
+// EVM's blockhash lookup — chain_makers.go's plain AddTx uses a fake
+// BlockChain with no headers and crashes on GetHashFn. The test builds a
+// separate ctxChain for generation, then imports all blocks into a fresh
+// pipeChain configured with pipelined SRC.
+func TestPipelinedImportSRC_WitnessIncludesBlockHashAncestors(t *testing.T) {
+	// Bytecode: PUSH1 0x00 ; BLOCKHASH ; POP ; STOP
+	// Reads the hash of block 0 (genesis), discards it. Triggers
+	// witness.AddBlockHash(0) inside vm/instructions.go::opBlockhash on
+	// whichever StateDB owns the witness at execution time.
+	contractCode := []byte{0x60, 0x00, 0x40, 0x50, 0x00}
+	// Arbitrary short hex — HexToAddress left-pads it to a full 20-byte
+	// address.
+	contractAddr := common.HexToAddress("0xb1a5cab1ec0de")
+
+	var (
+		key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr   = crypto.PubkeyToAddress(key.PublicKey)
+		funds  = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec  = &Genesis{
+			Config: params.AllEthashProtocolChanges,
+			Alloc: types.GenesisAlloc{
+				addr:         {Balance: funds},
+				contractAddr: {Code: contractCode, Balance: big.NewInt(0)},
+			},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	// Phase 1: Generate block 1 (no BLOCKHASH yet) using a fresh shared db
+	// that we'll reuse for ctxChain so headers are written exactly once.
+	db := rawdb.NewMemoryDatabase()
+	tdb := triedb.NewDatabase(db, triedb.HashDefaults)
+	genesisBlock := gspec.MustCommit(db, tdb)
+	prefix, _ := GenerateChain(gspec.Config, genesisBlock, engine, db, 1, func(i int, gen *BlockGen) {})
+
+	// Phase 2: Build ctxChain on the same db so BlockGen.AddTxWithChain has
+	// a real HeaderChain to satisfy the BLOCKHASH lookup in block 2.
+	ctxChain, err := NewBlockChain(db, gspec, engine, DefaultConfig().WithStateScheme(rawdb.HashScheme))
+	if err != nil {
+		t.Fatalf("ctxChain: %v", err)
+	}
+	if _, err := ctxChain.InsertChain(prefix, false); err != nil {
+		t.Fatalf("ctxChain: insert prefix: %v", err)
+	}
+
+	// Phase 3: Generate block 2 with a tx that calls the contract → BLOCKHASH(0)
+	// fires during EVM execution, extending the execution witness's Headers.
+	block2Slice, _ := GenerateChain(gspec.Config, prefix[0], engine, db, 1, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), contractAddr, big.NewInt(0), 100_000, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTxWithChain(ctxChain, tx)
+	})
+	// ctxChain exists only to provide header context for generation; stop it
+	// before the real import so it releases the shared db.
+	ctxChain.Stop()
+
+	// Build the full import set in a fresh slice so the append cannot alias
+	// prefix's backing array.
+	allBlocks := append(append([]*types.Block{}, prefix...), block2Slice...)
+
+	// Phase 4: Fresh pipeChain, import everything with witness=true
+	pipeChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(rawdb.HashScheme))
+	if err != nil {
+		t.Fatalf("pipeChain: %v", err)
+	}
+	defer pipeChain.Stop()
+
+	if _, err := pipeChain.InsertChain(allBlocks, true); err != nil {
+		t.Fatalf("InsertChain (makeWitness=true): %v", err)
+	}
+	if err := pipeChain.flushPendingImportSRC(); err != nil {
+		t.Fatalf("flushPendingImportSRC: %v", err)
+	}
+
+	// Phase 5: Decode the published witness for block 2 and verify Headers
+	// extends past parent, which is what AddBlockHash(0) records during
+	// execution.
+	target := block2Slice[0]
+	encoded := pipeChain.GetWitness(target.Hash())
+	if encoded == nil {
+		t.Fatalf("block 2: witness missing from cache")
+	}
+	var w stateless.Witness
+	if err := rlp.DecodeBytes(encoded, &w); err != nil {
+		t.Fatalf("decode witness: %v", err)
+	}
+
+	// AddBlockHash(0) walks back from parent (block 1) to genesis, so the
+	// published witness's Headers slice should have length 2: [block 1, genesis].
+	if len(w.Headers) < 2 {
+		t.Fatalf("Headers length = %d, want >= 2 (parent + genesis from BLOCKHASH(0)); "+
+			"the published witness must include execution-time AddBlockHash entries",
+			len(w.Headers))
+	}
+
+	// Headers[0] is always the direct parent; the genesis entry added by
+	// AddBlockHash may appear anywhere after it, so scan rather than index.
+	parentHeader := prefix[0].Header()
+	if w.Headers[0].Hash() != parentHeader.Hash() {
+		t.Errorf("Headers[0] = %s, want parent (block 1) %s", w.Headers[0].Hash(), parentHeader.Hash())
+	}
+	foundGenesis := false
+	for _, h := range w.Headers {
+		if h.Number.Uint64() == 0 && h.Hash() == genesisBlock.Hash() {
+			foundGenesis = true
+			break
+		}
+	}
+	if !foundGenesis {
+		t.Errorf("Headers does not contain genesis (BLOCKHASH(0) referenced it); Headers=%d entries", len(w.Headers))
+	}
+
+	// Finally, the published witness must still pass canonical pre-state
+	// validation for the target block.
+	if err := stateless.ValidateWitnessPreState(&w, pipeChain, target.Header()); err != nil {
+		t.Errorf("ValidateWitnessPreState failed on the published witness: %v", err)
+	}
+}
+
+// TestPipelinedImportSRC_WitnessEncodeDecodeRoundtrip verifies that pipelined
+// witnesses encode-decode cleanly and pass canonical pre-state validation,
+// covering the basic shared-witness contract: the same Witness object that
+// EVM execution populated must round-trip through RLP and validate.
+func TestPipelinedImportSRC_WitnessEncodeDecodeRoundtrip(t *testing.T) {
+	const numBlocks = 3
+
+	var (
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr      = crypto.PubkeyToAddress(key.PublicKey)
+		recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec     = &Genesis{
+			Config:  params.AllEthashProtocolChanges,
+			Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTx(tx)
+	})
+
+	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(rawdb.HashScheme))
+	if err != nil {
+		t.Fatalf("failed to create pipeline chain: %v", err)
+	}
+	defer chain.Stop()
+
+	if _, err := chain.InsertChain(blocks, true); err != nil {
+		t.Fatalf("InsertChain (makeWitness=true) failed: %v", err)
+	}
+	if err := chain.flushPendingImportSRC(); err != nil {
+		t.Fatalf("flushPendingImportSRC failed: %v", err)
+	}
+
+	// Every imported block must have a cached witness that decodes from RLP,
+	// carries at least the parent header, and passes pre-state validation.
+	for i := uint64(1); i <= numBlocks; i++ {
+		blk := chain.GetBlockByNumber(i)
+		blob := chain.GetWitness(blk.Hash())
+		if blob == nil {
+			t.Fatalf("block %d: witness missing from cache", i)
+		}
+		var wit stateless.Witness
+		if err := rlp.DecodeBytes(blob, &wit); err != nil {
+			t.Fatalf("block %d: decode witness: %v", i, err)
+		}
+		if len(wit.Headers) == 0 {
+			t.Errorf("block %d: decoded witness has no Headers", i)
+		}
+		if err := stateless.ValidateWitnessPreState(&wit, chain, blk.Header()); err != nil {
+			t.Errorf("block %d: ValidateWitnessPreState failed: %v", i, err)
+		}
+	}
+}
+
+// TestPipelinedImportSRC_WarmSnapshotWitnessParity is the consensus-critical
+// parity test for the warm-snapshot handoff. The same chain is imported
+// twice — once with PipelinedSRCWarmSnapshot=false (baseline pathdb-only
+// reader) and once with PipelinedSRCWarmSnapshot=true (snapshot-aware
+// reader). Per-block state roots, decoded witnesses, and the State proof
+// node sets must be identical, AND each published witness must replay
+// statelessly via ProcessBlockWithWitnesses to the same root the chain
+// produced. Any divergence proves the snapshot path silently dropped or
+// substituted proof nodes.
+//
+// Root parity alone is necessary but not sufficient: it proves the commit
+// walk produced the same hash, not that the published witness covers the
+// same proof surface. Stateless replay is the strongest assertion — it
+// reconstructs state from the witness alone (plus the receiver's local
+// codes/headers) and recomputes the post-state root.
+//
+// Runs under both HashScheme and PathScheme; PathScheme is the production
+// target and exercises the pathdb fallthrough path in the snapshot reader.
+func TestPipelinedImportSRC_WarmSnapshotWitnessParity(t *testing.T) {
+	for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
+		testPipelinedImportSRC_WarmSnapshotWitnessParity(t, scheme)
+	}
+}
+
+func testPipelinedImportSRC_WarmSnapshotWitnessParity(t *testing.T, scheme string) {
+	t.Run(scheme, func(t *testing.T) {
+		const numBlocks = 8
+		var (
+			key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+			addr      = crypto.PubkeyToAddress(key.PublicKey)
+			recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+			funds     = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+			gspec     = &Genesis{
+				Config:  params.AllEthashProtocolChanges,
+				Alloc:   types.GenesisAlloc{addr: {Balance: funds}},
+				BaseFee: big.NewInt(params.InitialBaseFee),
+			}
+			signer = types.LatestSigner(gspec.Config)
+			engine = ethash.NewFaker()
+		)
+
+		// Transfer-only chain: one value transfer per block, touching just
+		// the sender and recipient accounts.
+		_, blocks, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, func(i int, gen *BlockGen) {
+			tx, _ := types.SignTx(
+				types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil),
+				signer, key,
+			)
+			gen.AddTx(tx)
+		})
+
+		// importInto imports the shared block list into a fresh chain built
+		// from cfg, flushes pending pipelined SRC work so witnesses are
+		// queryable, and registers shutdown via t.Cleanup.
+		importInto := func(t *testing.T, label string, cfg *BlockChainConfig) *BlockChain {
+			t.Helper()
+			chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, cfg)
+			if err != nil {
+				t.Fatalf("%s: NewBlockChain: %v", label, err)
+			}
+			t.Cleanup(chain.Stop)
+			if _, err := chain.InsertChain(blocks, true); err != nil {
+				t.Fatalf("%s: InsertChain: %v", label, err)
+			}
+			if err := chain.flushPendingImportSRC(); err != nil {
+				t.Fatalf("%s: flushPendingImportSRC: %v", label, err)
+			}
+			return chain
+		}
+
+		chainOff := importInto(t, "snapshot-off", pipelinedConfig(scheme))
+		chainOn := importInto(t, "snapshot-on", pipelinedConfigWithWarmSnapshot(scheme))
+
+		// Compare the two imports block by block: roots, decoded witness
+		// contents, and stateless replayability must all agree.
+		for i := uint64(1); i <= numBlocks; i++ {
+			blkOff := chainOff.GetBlockByNumber(i)
+			blkOn := chainOn.GetBlockByNumber(i)
+			if blkOff == nil || blkOn == nil {
+				t.Fatalf("block %d: missing on one of the chains (off=%v on=%v)", i, blkOff == nil, blkOn == nil)
+			}
+			if blkOff.Root() != blkOn.Root() {
+				t.Errorf("block %d: state root mismatch off=%s on=%s", i, blkOff.Root(), blkOn.Root())
+			}
+
+			encOff := chainOff.GetWitness(blkOff.Hash())
+			encOn := chainOn.GetWitness(blkOn.Hash())
+			if encOff == nil || encOn == nil {
+				t.Fatalf("block %d: witness missing (off=%v on=%v)", i, encOff == nil, encOn == nil)
+			}
+
+			var wOff, wOn stateless.Witness
+			if err := rlp.DecodeBytes(encOff, &wOff); err != nil {
+				t.Fatalf("block %d: decode off witness: %v", i, err)
+			}
+			if err := rlp.DecodeBytes(encOn, &wOn); err != nil {
+				t.Fatalf("block %d: decode on witness: %v", i, err)
+			}
+
+			if err := stateless.ValidateWitnessPreState(&wOff, chainOff, blkOff.Header()); err != nil {
+				t.Errorf("block %d: off witness pre-state validation: %v", i, err)
+			}
+			if err := stateless.ValidateWitnessPreState(&wOn, chainOn, blkOn.Header()); err != nil {
+				t.Errorf("block %d: on witness pre-state validation: %v", i, err)
+			}
+
+			// State proof-node parity: the snapshot path must produce the
+			// same set of trie proof nodes as the no-snapshot path. The
+			// State map's keys are RLP node blobs; equal sets = equal proof
+			// coverage. Both directions are checked so neither a missing
+			// nor an extra node slips through.
+			if len(wOff.State) != len(wOn.State) {
+				t.Errorf("block %d: State size off=%d on=%d", i, len(wOff.State), len(wOn.State))
+			}
+			for k := range wOff.State {
+				if _, ok := wOn.State[k]; !ok {
+					t.Errorf("block %d: snapshot-on witness missing proof node present in baseline (len=%d)", i, len(k))
+					break
+				}
+			}
+			for k := range wOn.State {
+				if _, ok := wOff.State[k]; !ok {
+					t.Errorf("block %d: snapshot-on witness has proof node not in baseline (len=%d)", i, len(k))
+					break
+				}
+			}
+
+			// Headers parity: BLOCKHASH ancestor inclusion must be identical
+			// across snapshot-on/off. (For this transfer-only chain there are
+			// no BLOCKHASH ops; Headers is just [parent].)
+			if len(wOff.Headers) != len(wOn.Headers) {
+				t.Errorf("block %d: Headers length off=%d on=%d", i, len(wOff.Headers), len(wOn.Headers))
+			}
+
+			// Stateless replay parity: each chain's published witness must
+			// reconstruct state and recompute the post-state root via
+			// ProcessBlockWithWitnesses. ExecuteStateless returns an error
+			// if the recomputed root diverges from block.Root() — that's
+			// exactly the assertion we want. SetHeader installs the
+			// context header (the block being replayed) on the decoded
+			// witness, which RLP decode does not preserve. The witness's
+			// HeaderReader (used to resolve BLOCKHASH ancestor lookups) is
+			// also dropped by RLP decode, but ProcessBlockWithWitnesses
+			// falls back to the BlockChain itself when the witness has no
+			// HeaderReader set — so for in-memory test chains nothing
+			// further needs wiring here.
+			wOff.SetHeader(blkOff.Header())
+			wOn.SetHeader(blkOn.Header())
+			if _, _, err := chainOff.ProcessBlockWithWitnesses(blkOff, &wOff); err != nil {
+				t.Errorf("block %d: stateless replay (off chain witness) failed: %v", i, err)
+			}
+			if _, _, err := chainOn.ProcessBlockWithWitnesses(blkOn, &wOn); err != nil {
+				t.Errorf("block %d: stateless replay (on chain witness) failed: %v", i, err)
+			}
+		}
+	})
+}
+
+// TestPipelinedImportSRC_WarmSnapshotPreservesBlockHashAncestors mirrors
+// TestPipelinedImportSRC_WitnessIncludesBlockHashAncestors but with the
+// warm-snapshot handoff enabled. BLOCKHASH ancestor coverage is collected on
+// the execution witness during EVM execution, before SRC starts; the
+// snapshot path only changes how SRC's trie reads are served, not the
+// witness ownership chain. Therefore Headers must still extend to include
+// the BLOCKHASH-referenced ancestor, and the published witness must
+// statelessly replay.
+//
+// Runs under both HashScheme and PathScheme.
+func TestPipelinedImportSRC_WarmSnapshotPreservesBlockHashAncestors(t *testing.T) {
+	// Run the BLOCKHASH-ancestor scenario under both state schemes.
+	for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
+		testPipelinedImportSRC_WarmSnapshotPreservesBlockHashAncestors(t, scheme)
+	}
+}
+
+func testPipelinedImportSRC_WarmSnapshotPreservesBlockHashAncestors(t *testing.T, scheme string) {
+	// Wrap the shared scenario in a subtest named after the scheme so
+	// failures report which state scheme they occurred under.
+	run := func(st *testing.T) { runWarmSnapshotBlockHashTest(st, scheme) }
+	t.Run(scheme, run)
+}
+
+func runWarmSnapshotBlockHashTest(t *testing.T, scheme string) {
+	// Contract bytecode: PUSH1 0x00; BLOCKHASH; POP; STOP — executes
+	// BLOCKHASH(0), pulling the genesis header into the witness.
+	contractCode := []byte{0x60, 0x00, 0x40, 0x50, 0x00}
+	contractAddr := common.HexToAddress("0xb1a5cab1ec0de")
+
+	var (
+		key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr   = crypto.PubkeyToAddress(key.PublicKey)
+		funds  = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+		gspec  = &Genesis{
+			Config: params.AllEthashProtocolChanges,
+			Alloc: types.GenesisAlloc{
+				addr:         {Balance: funds},
+				contractAddr: {Code: contractCode, Balance: big.NewInt(0)},
+			},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		signer = types.LatestSigner(gspec.Config)
+		engine = ethash.NewFaker()
+	)
+
+	// Two-phase generation: block 1 is generated first, then a throwaway
+	// context chain (plain HashScheme config — its scheme is independent of
+	// the scheme under test) imports it so block 2's contract call can be
+	// generated with AddTxWithChain, which presumably needs a live chain to
+	// resolve BLOCKHASH during generation — verify against AddTxWithChain.
+	db := rawdb.NewMemoryDatabase()
+	tdb := triedb.NewDatabase(db, triedb.HashDefaults)
+	genesisBlock := gspec.MustCommit(db, tdb)
+	prefix, _ := GenerateChain(gspec.Config, genesisBlock, engine, db, 1, func(i int, gen *BlockGen) {})
+
+	ctxChain, err := NewBlockChain(db, gspec, engine, DefaultConfig().WithStateScheme(rawdb.HashScheme))
+	if err != nil {
+		t.Fatalf("ctxChain: %v", err)
+	}
+	if _, err := ctxChain.InsertChain(prefix, false); err != nil {
+		t.Fatalf("ctxChain insert prefix: %v", err)
+	}
+	block2Slice, _ := GenerateChain(gspec.Config, prefix[0], engine, db, 1, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(
+			types.NewTransaction(gen.TxNonce(addr), contractAddr, big.NewInt(0), 100_000, gen.header.BaseFee, nil),
+			signer, key,
+		)
+		gen.AddTxWithChain(ctxChain, tx)
+	})
+	ctxChain.Stop()
+
+	allBlocks := append(append([]*types.Block{}, prefix...), block2Slice...)
+
+	// Import both blocks into the chain under test (warm snapshot enabled,
+	// witnesses on) and flush SRC so block 2's witness is available.
+	pipeChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfigWithWarmSnapshot(scheme))
+	if err != nil {
+		t.Fatalf("pipeChain: %v", err)
+	}
+	defer pipeChain.Stop()
+
+	if _, err := pipeChain.InsertChain(allBlocks, true); err != nil {
+		t.Fatalf("InsertChain (snapshot=true, witness=true): %v", err)
+	}
+	if err := pipeChain.flushPendingImportSRC(); err != nil {
+		t.Fatalf("flushPendingImportSRC: %v", err)
+	}
+
+	target := block2Slice[0]
+	encoded := pipeChain.GetWitness(target.Hash())
+	if encoded == nil {
+		t.Fatalf("block 2: witness missing from cache")
+	}
+	var w stateless.Witness
+	if err := rlp.DecodeBytes(encoded, &w); err != nil {
+		t.Fatalf("decode witness: %v", err)
+	}
+	if len(w.Headers) < 2 {
+		t.Fatalf("Headers length = %d, want >= 2 (parent + genesis from BLOCKHASH(0)) — "+
+			"warm-snapshot path must preserve BLOCKHASH ancestor coverage", len(w.Headers))
+	}
+	// Headers[0] is expected to be the direct parent (block 1).
+	parentHeader := prefix[0].Header()
+	if w.Headers[0].Hash() != parentHeader.Hash() {
+		t.Errorf("Headers[0] = %s, want parent %s", w.Headers[0].Hash(), parentHeader.Hash())
+	}
+	// The genesis header must appear somewhere in Headers because the
+	// contract executed BLOCKHASH(0).
+	foundGenesis := false
+	for _, h := range w.Headers {
+		if h.Number.Uint64() == 0 && h.Hash() == genesisBlock.Hash() {
+			foundGenesis = true
+			break
+		}
+	}
+	if !foundGenesis {
+		t.Errorf("Headers does not contain genesis (BLOCKHASH(0) referenced it); Headers=%d entries", len(w.Headers))
+	}
+	if err := stateless.ValidateWitnessPreState(&w, pipeChain, target.Header()); err != nil {
+		t.Errorf("ValidateWitnessPreState failed on snapshot-on witness: %v", err)
+	}
+
+	// Stateless replay: the published witness must reconstruct state
+	// (including the BLOCKHASH ancestor lookup) and recompute the same
+	// post-state root via ExecuteStateless. This is the consumer-side
+	// check that the snapshot path's witness is actually replayable, not
+	// just structurally well-formed.
+	w.SetHeader(target.Header())
+	if _, _, err := pipeChain.ProcessBlockWithWitnesses(target, &w); err != nil {
+		t.Errorf("stateless replay of snapshot-on BLOCKHASH witness failed: %v", err)
+	}
+}
+
+// TestPipelinedImportSRC_WarmSnapshotStorageTrieParity is the storage-trie
+// counterpart to the witness parity test. The snapshot reader is wrapped at
+// the database.NodeReader layer used by both account and storage tries; this
+// test specifically exercises storage-trie reads (SLOAD) and writes (SSTORE)
+// so the storage-owner branch of newSnapshotNodeDatabase / trieReader.Storage
+// is covered. A single contract is deployed with pre-populated storage; each
+// block's transaction loads a previously-set slot and writes a new one,
+// progressively growing the storage trie. The witness must include the
+// storage proof nodes touched by both the SLOAD and the SSTORE update path.
+//
+// Snapshot-off and snapshot-on import the same chain into independent
+// blockchains; per-block roots, decoded witness State sets, and stateless
+// replay must all agree. Runs under both HashScheme and PathScheme.
+func TestPipelinedImportSRC_WarmSnapshotStorageTrieParity(t *testing.T) {
+	// Run the storage-trie parity scenario under both state schemes.
+	for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
+		testPipelinedImportSRC_WarmSnapshotStorageTrieParity(t, scheme)
+	}
+}
+
+func testPipelinedImportSRC_WarmSnapshotStorageTrieParity(t *testing.T, scheme string) {
+	t.Run(scheme, func(t *testing.T) {
+		// Contract program:
+		//   SLOAD(NUMBER - 1) ; POP — read a previously-set slot, forcing
+		//                            a storage-trie pre-state read whose
+		//                            proof nodes must be in the witness
+		//   SSTORE(NUMBER, NUMBER)  — write a new slot, growing the trie
+		//                            (the update walks the proof path)
+		// Stack note: PUSH1 0x01; NUMBER; SUB computes NUMBER-1 (SUB takes
+		// top-of-stack minus next, and NUMBER is pushed last).
+		contractCode := []byte{
+			byte(vm.PUSH1), 0x01,
+			byte(vm.NUMBER),
+			byte(vm.SUB),
+			byte(vm.SLOAD),
+			byte(vm.POP),
+			byte(vm.NUMBER),
+			byte(vm.NUMBER),
+			byte(vm.SSTORE),
+			byte(vm.STOP),
+		}
+		contractAddr := common.HexToAddress("0x5707a6e500000000000000000000000000000001")
+
+		// Pre-populate slots 0..7 in the contract's storage trie so block 1's
+		// SLOAD(0) hits a real entry rather than an empty slot, ensuring the
+		// pre-state read actually walks storage-trie nodes.
+		preStorage := make(map[common.Hash]common.Hash, 8)
+		for i := 0; i < 8; i++ {
+			preStorage[common.BigToHash(big.NewInt(int64(i)))] = common.BigToHash(big.NewInt(int64(i + 1000)))
+		}
+
+		const numBlocks = 6
+		var (
+			key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+			addr   = crypto.PubkeyToAddress(key.PublicKey)
+			funds  = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))
+			gspec  = &Genesis{
+				Config: params.AllEthashProtocolChanges,
+				Alloc: types.GenesisAlloc{
+					addr: {Balance: funds},
+					contractAddr: {
+						Code:    contractCode,
+						Balance: big.NewInt(0),
+						Storage: preStorage,
+					},
+				},
+				BaseFee: big.NewInt(params.InitialBaseFee),
+			}
+			signer = types.LatestSigner(gspec.Config)
+			engine = ethash.NewFaker()
+		)
+
+		// One contract call per block: each call SLOADs the slot written by
+		// the previous block and SSTOREs a fresh one.
+		_, blocks, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, func(i int, gen *BlockGen) {
+			tx, _ := types.SignTx(
+				types.NewTransaction(gen.TxNonce(addr), contractAddr, big.NewInt(0), 100_000, gen.header.BaseFee, nil),
+				signer, key,
+			)
+			gen.AddTx(tx)
+		})
+
+		// importInto imports the shared block list into a fresh chain built
+		// from cfg, flushes pending SRC work, and registers shutdown via
+		// t.Cleanup.
+		importInto := func(t *testing.T, label string, cfg *BlockChainConfig) *BlockChain {
+			t.Helper()
+			chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, cfg)
+			if err != nil {
+				t.Fatalf("%s: NewBlockChain: %v", label, err)
+			}
+			t.Cleanup(chain.Stop)
+			if _, err := chain.InsertChain(blocks, true); err != nil {
+				t.Fatalf("%s: InsertChain: %v", label, err)
+			}
+			if err := chain.flushPendingImportSRC(); err != nil {
+				t.Fatalf("%s: flushPendingImportSRC: %v", label, err)
+			}
+			return chain
+		}
+
+		chainOff := importInto(t, "snapshot-off", pipelinedConfig(scheme))
+		chainOn := importInto(t, "snapshot-on", pipelinedConfigWithWarmSnapshot(scheme))
+
+		for i := uint64(1); i <= numBlocks; i++ {
+			blkOff := chainOff.GetBlockByNumber(i)
+			blkOn := chainOn.GetBlockByNumber(i)
+			if blkOff == nil || blkOn == nil {
+				t.Fatalf("block %d: missing on one of the chains (off=%v on=%v)", i, blkOff == nil, blkOn == nil)
+			}
+			if blkOff.Root() != blkOn.Root() {
+				t.Errorf("block %d: state root mismatch off=%s on=%s", i, blkOff.Root(), blkOn.Root())
+			}
+
+			encOff := chainOff.GetWitness(blkOff.Hash())
+			encOn := chainOn.GetWitness(blkOn.Hash())
+			if encOff == nil || encOn == nil {
+				t.Fatalf("block %d: witness missing (off=%v on=%v)", i, encOff == nil, encOn == nil)
+			}
+
+			var wOff, wOn stateless.Witness
+			if err := rlp.DecodeBytes(encOff, &wOff); err != nil {
+				t.Fatalf("block %d: decode off witness: %v", i, err)
+			}
+			if err := rlp.DecodeBytes(encOn, &wOn); err != nil {
+				t.Fatalf("block %d: decode on witness: %v", i, err)
+			}
+
+			if err := stateless.ValidateWitnessPreState(&wOff, chainOff, blkOff.Header()); err != nil {
+				t.Errorf("block %d: off witness pre-state validation: %v", i, err)
+			}
+			if err := stateless.ValidateWitnessPreState(&wOn, chainOn, blkOn.Header()); err != nil {
+				t.Errorf("block %d: on witness pre-state validation: %v", i, err)
+			}
+
+			// State proof-node parity. The snapshot path must produce the same
+			// set of trie proof nodes as the pathdb-only path. For this test
+			// the State map covers both account-trie and storage-trie nodes.
+			// Both directions are checked so neither a missing nor an extra
+			// node slips through.
+			if len(wOff.State) != len(wOn.State) {
+				t.Errorf("block %d: State size off=%d on=%d", i, len(wOff.State), len(wOn.State))
+			}
+			for k := range wOff.State {
+				if _, ok := wOn.State[k]; !ok {
+					t.Errorf("block %d: snapshot-on witness missing proof node present in baseline (len=%d)", i, len(k))
+					break
+				}
+			}
+			for k := range wOn.State {
+				if _, ok := wOff.State[k]; !ok {
+					t.Errorf("block %d: snapshot-on witness has proof node not in baseline (len=%d)", i, len(k))
+					break
+				}
+			}
+
+			// Stateless replay parity. ExecuteStateless reconstructs the
+			// pre-state from the witness's State proof nodes (including
+			// storage subtries) and recomputes the post-state root. Failure
+			// here indicates the snapshot path served a storage-trie node
+			// whose blob disagrees with what pathdb would have served.
+			wOff.SetHeader(blkOff.Header())
+			wOn.SetHeader(blkOn.Header())
+			if _, _, err := chainOff.ProcessBlockWithWitnesses(blkOff, &wOff); err != nil {
+				t.Errorf("block %d: stateless replay (off chain witness) failed: %v", i, err)
+			}
+			if _, _, err := chainOn.ProcessBlockWithWitnesses(blkOn, &wOn); err != nil {
+				t.Errorf("block %d: stateless replay (on chain witness) failed: %v", i, err)
+			}
+		}
+	})
+}
diff --git a/core/events.go b/core/events.go
index a35ab615cf..19afd30300 100644
--- a/core/events.go
+++ b/core/events.go
@@ -19,6 +19,7 @@ package core
import (
"time"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/types"
)
@@ -36,13 +37,6 @@ type NewMinedBlockEvent struct {
SealedAt time.Time // time when WriteBlockAndSetHead completed, used to measure broadcast latency
}
-// WitnessReadyEvent is posted when a delayed-SRC witness has been fully
-// computed and written to the database, signalling that it can be broadcast.
-type WitnessReadyEvent struct {
- Block *types.Block
- Witness *stateless.Witness
-}
-
// RemovedLogsEvent is posted when a reorg happens
type RemovedLogsEvent struct{ Logs []*types.Log }
@@ -56,6 +50,14 @@ type ChainSideEvent struct {
Header *types.Header
}
+// WitnessReadyEvent is posted when a pipelined import SRC goroutine finishes
+// and writes the witness to the database. The handler uses this to announce
+// witness availability to peers via the WIT protocol.
+type WitnessReadyEvent struct {
+ BlockHash common.Hash
+ BlockNumber uint64
+}
+
type ChainHeadEvent struct {
Header *types.Header
}
diff --git a/core/evm.go b/core/evm.go
index 68b2a52ea2..8939609d3f 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -19,6 +19,7 @@ package core
import (
"math/big"
"sync"
+ "sync/atomic"
"github.com/holiman/uint256"
@@ -155,6 +156,65 @@ func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash
}
}
+// SpeculativeGetHashFn returns a GetHashFunc for pipelined SRC speculative
+// execution of block N+1, at a point where block N's hash is not yet known
+// (SRC(N) is still computing root_N).
+//
+// Resolution is tiered by the requested block number n:
+//   - n == pendingBlockN: lazily resolved through srcDone(), which blocks
+//     until SRC(N) completes and returns hash(block_N); cached after the
+//     first call.
+//   - n == pendingBlockN-1: answered directly from blockN1Header.Hash(),
+//     since block N-1 is fully committed and in the chain DB.
+//   - n < pendingBlockN-1: delegated to GetHashFn anchored at block N-1,
+//     whose cache seeds from blockN1Header.ParentHash = hash(block_{N-2}),
+//     so index 0 yields BLOCKHASH(N-2) as required.
+//   - anything beyond pendingBlockN: the zero hash (future block).
+//
+// srcDone is called at most once and must return hash(block_N) after SRC(N)
+// completes. It may block.
+func SpeculativeGetHashFn(blockN1Header *types.Header, chain ChainContext, pendingBlockN uint64, srcDone func() common.Hash, blockhashNAccessed *atomic.Bool) func(uint64) common.Hash {
+	committedHash := blockN1Header.Hash()                                  // hash(block_{N-1}), already final
+	ancestors := GetHashFn(blockN1Header, chain)                           // serves blocks N-2 and below
+	resolvePending := newPendingBlockNResolver(srcDone, blockhashNAccessed) // serves block N
+	return func(n uint64) common.Hash {
+		if n >= pendingBlockN+1 {
+			return common.Hash{} // future block
+		}
+		if n == pendingBlockN {
+			return resolvePending()
+		}
+		if n == pendingBlockN-1 {
+			return committedHash
+		}
+		return ancestors(n)
+	}
+}
+
+// newPendingBlockNResolver returns a closure that lazily resolves pending
+// block N's hash through srcDone, invoking it at most once and caching the
+// result for subsequent calls. Every invocation sets blockhashNAccessed
+// (when non-nil) so the caller learns the speculative block read
+// BLOCKHASH(N) — the resolved hash is pre-seal (no signature in Extra) and
+// will differ from the final on-chain hash, so the speculative execution
+// must be aborted.
+func newPendingBlockNResolver(srcDone func() common.Hash, blockhashNAccessed *atomic.Bool) func() common.Hash {
+	var (
+		once sync.Once
+		hash common.Hash
+	)
+	return func() common.Hash {
+		// Flag the access before resolving: the mere read of BLOCKHASH(N)
+		// poisons the speculative run regardless of the value returned.
+		if blockhashNAccessed != nil {
+			blockhashNAccessed.Store(true)
+		}
+		// sync.Once gives the same at-most-once + block-concurrent-callers
+		// semantics as the original mutex/bool pair.
+		once.Do(func() { hash = srcDone() })
+		return hash
+	}
+}
+
// CanTransfer checks whether there are enough funds in the address' account to make a transfer.
// This does not take the necessary gas in to account to make the transfer valid.
func CanTransfer(db vm.StateDB, addr common.Address, amount *uint256.Int) bool {
diff --git a/core/evm_speculative_test.go b/core/evm_speculative_test.go
new file mode 100644
index 0000000000..5e59094ba9
--- /dev/null
+++ b/core/evm_speculative_test.go
@@ -0,0 +1,277 @@
+package core
+
+import (
+ "math/big"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+// mockChainContext implements ChainContext for testing SpeculativeGetHashFn.
+// Headers are indexed by number only; hash arguments are ignored except in
+// GetHeaderByHash, which scans linearly (fine for test-sized chains).
+type mockChainContext struct {
+	headers map[uint64]*types.Header
+}
+
+// Config returns a fixed test chain configuration.
+func (m *mockChainContext) Config() *params.ChainConfig {
+	return params.TestChainConfig
+}
+
+// CurrentHeader is not exercised by these tests and returns nil.
+func (m *mockChainContext) CurrentHeader() *types.Header {
+	return nil
+}
+
+// GetHeader looks a header up by number; the hash argument is ignored.
+func (m *mockChainContext) GetHeader(hash common.Hash, number uint64) *types.Header {
+	return m.headers[number]
+}
+
+// GetHeaderByNumber looks a header up by number.
+func (m *mockChainContext) GetHeaderByNumber(number uint64) *types.Header {
+	return m.headers[number]
+}
+
+// GetHeaderByHash scans all headers for a hash match, returning nil when
+// absent. Map iteration order is irrelevant since hashes are unique.
+func (m *mockChainContext) GetHeaderByHash(hash common.Hash) *types.Header {
+	for _, h := range m.headers {
+		if h.Hash() == hash {
+			return h
+		}
+	}
+	return nil
+}
+
+// GetTd returns a constant total difficulty; the value is irrelevant here.
+func (m *mockChainContext) GetTd(hash common.Hash, number uint64) *big.Int {
+	return big.NewInt(1)
+}
+
+// Engine is not exercised by these tests and returns nil.
+func (m *mockChainContext) Engine() consensus.Engine {
+	return nil
+}
+
+// buildChain builds a simple chain of headers from 0 to count-1.
+func buildChain(count int) (*mockChainContext, []*types.Header) {
+ headers := make([]*types.Header, count)
+ chain := &mockChainContext{headers: make(map[uint64]*types.Header)}
+
+ for i := 0; i < count; i++ {
+ h := &types.Header{
+ Number: big.NewInt(int64(i)),
+ ParentHash: common.Hash{},
+ Extra: []byte("test"),
+ }
+ if i > 0 {
+ h.ParentHash = headers[i-1].Hash()
+ }
+ headers[i] = h
+ chain.headers[uint64(i)] = h
+ }
+
+ return chain, headers
+}
+
+func TestSpeculativeGetHashFn_Tier1_LazyResolve(t *testing.T) {
+	chain, headers := buildChain(10)
+
+	// Block N=9 is pending (SRC running), block N-1=8 is committed.
+	blockN1Header := headers[8] // block 8
+	pendingBlockN := uint64(9)
+	expectedBlockNHash := common.HexToHash("0xdeadbeef")
+
+	var srcCalled bool
+	srcDone := func() common.Hash {
+		srcCalled = true
+		return expectedBlockNHash
+	}
+
+	fn := SpeculativeGetHashFn(blockN1Header, chain, pendingBlockN, srcDone, nil)
+
+	// Tier 1: BLOCKHASH(9) should lazy-resolve through srcDone.
+	result := fn(9)
+	if result != expectedBlockNHash {
+		t.Errorf("Tier 1: expected %x, got %x", expectedBlockNHash, result)
+	}
+	if !srcCalled {
+		t.Error("Tier 1: srcDone was not called")
+	}
+
+	// Second call should return the cached value without calling srcDone
+	// again (the resolver's at-most-once contract).
+	srcCalled = false
+	result = fn(9)
+	if result != expectedBlockNHash {
+		t.Errorf("Tier 1 (cached): expected %x, got %x", expectedBlockNHash, result)
+	}
+	// Fix: the original reset srcCalled but never asserted on it, so the
+	// "called at most once" caching contract was untested.
+	if srcCalled {
+		t.Error("Tier 1 (cached): srcDone was called again; expected the cached hash")
+	}
+}
+
+func TestSpeculativeGetHashFn_Tier1_SetsAbortFlag(t *testing.T) {
+	chain, headers := buildChain(10)
+
+	parentHeader := headers[8] // committed block N-1
+	pendingN := uint64(9)      // block N, SRC still running
+	wantHash := common.HexToHash("0xdeadbeef")
+
+	var accessed atomic.Bool
+	fn := SpeculativeGetHashFn(parentHeader, chain, pendingN, func() common.Hash {
+		return wantHash
+	}, &accessed)
+
+	// Resolving BLOCKHASH(N) must both return the SRC-provided hash and
+	// mark the speculative run for abort.
+	if got := fn(9); got != wantHash {
+		t.Errorf("Tier 1: expected %x, got %x", wantHash, got)
+	}
+	if !accessed.Load() {
+		t.Fatal("Tier 1: BLOCKHASH(N) access did not set abort flag")
+	}
+}
+
+func TestSpeculativeGetHashFn_Tier2_ImmediateParent(t *testing.T) {
+	chain, headers := buildChain(10)
+
+	parent := headers[8] // committed block N-1 = 8; pending N = 9
+	wantHash := parent.Hash()
+
+	// Tier 2 must be answered from the committed header alone; reaching
+	// into srcDone would mean the tier dispatch is wrong.
+	srcDone := func() common.Hash {
+		t.Error("srcDone should not be called for Tier 2")
+		return common.Hash{}
+	}
+
+	fn := SpeculativeGetHashFn(parent, chain, uint64(9), srcDone, nil)
+
+	// BLOCKHASH(8) resolves immediately to block 8's hash.
+	if got := fn(8); got != wantHash {
+		t.Errorf("Tier 2: expected %x, got %x", wantHash, got)
+	}
+}
+
+func TestSpeculativeGetHashFn_OlderTiersDoNotSetAbortFlag(t *testing.T) {
+	chain, headers := buildChain(10)
+
+	// Only Tier 1 (BLOCKHASH(N)) may trip the abort flag; older tiers are
+	// served from committed state and must leave it untouched.
+	var accessed atomic.Bool
+	fn := SpeculativeGetHashFn(headers[8], chain, uint64(9), func() common.Hash {
+		t.Fatal("srcDone should not be called for Tier 2/3")
+		return common.Hash{}
+	}, &accessed)
+
+	_ = fn(8) // Tier 2: immediate parent
+	if accessed.Load() {
+		t.Fatal("Tier 2: BLOCKHASH(N-1) incorrectly set abort flag")
+	}
+
+	_ = fn(7) // Tier 3: older ancestor
+	if accessed.Load() {
+		t.Fatal("Tier 3: BLOCKHASH(N-2) incorrectly set abort flag")
+	}
+}
+
+func TestSpeculativeGetHashFn_Tier3_OlderBlocks(t *testing.T) {
+	chain, headers := buildChain(10)
+
+	srcDone := func() common.Hash {
+		t.Error("srcDone should not be called for Tier 3")
+		return common.Hash{}
+	}
+	fn := SpeculativeGetHashFn(headers[8], chain, uint64(9), srcDone, nil)
+
+	// Tier 3 resolves via the ancestor walk anchored at block 8: a shallow
+	// lookup (7), a deeper one (5), and genesis (0) at the far end.
+	for _, n := range []uint64{7, 5, 0} {
+		want := headers[n].Hash()
+		if got := fn(n); got != want {
+			t.Errorf("Tier 3 (block %d): expected %x, got %x", n, want, got)
+		}
+	}
+}
+
+func TestSpeculativeGetHashFn_FutureBlock(t *testing.T) {
+ chain, headers := buildChain(10)
+
+ blockN1Header := headers[8]
+ pendingBlockN := uint64(9)
+
+ srcDone := func() common.Hash {
+ t.Error("srcDone should not be called for future blocks")
+ return common.Hash{}
+ }
+
+ fn := SpeculativeGetHashFn(blockN1Header, chain, pendingBlockN, srcDone, nil)
+
+ // BLOCKHASH(10) — future block, should return zero
+ result := fn(10)
+ if result != (common.Hash{}) {
+ t.Errorf("Future block: expected zero hash, got %x", result)
+ }
+
+ // BLOCKHASH(11) — also future
+ result = fn(11)
+ if result != (common.Hash{}) {
+ t.Errorf("Future block 11: expected zero hash, got %x", result)
+ }
+}
+
+func TestSpeculativeGetHashFn_Tier1_Blocking(t *testing.T) {
+	chain, headers := buildChain(10)
+
+	wantHash := common.HexToHash("0xabcdef")
+
+	// gate keeps srcDone parked until the test releases it, emulating an
+	// SRC(N) computation that is still in flight.
+	var gate sync.WaitGroup
+	gate.Add(1)
+	srcDone := func() common.Hash {
+		gate.Wait() // block until released
+		return wantHash
+	}
+
+	fn := SpeculativeGetHashFn(headers[8], chain, uint64(9), srcDone, nil)
+
+	// Issue BLOCKHASH(9) on a separate goroutine — it must park on srcDone.
+	got := make(chan common.Hash, 1)
+	go func() {
+		got <- fn(9)
+	}()
+
+	// Verify it hasn't resolved yet.
+	select {
+	case <-got:
+		t.Error("BLOCKHASH(9) resolved before srcDone was released")
+	case <-time.After(100 * time.Millisecond):
+		// expected — still blocking
+	}
+
+	// Release srcDone and expect prompt resolution.
+	gate.Done()
+
+	select {
+	case result := <-got:
+		if result != wantHash {
+			t.Errorf("Tier 1 blocking: expected %x, got %x", wantHash, result)
+		}
+	case <-time.After(2 * time.Second):
+		t.Error("BLOCKHASH(9) did not resolve after srcDone was released")
+	}
+}
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index ad49db9251..555c358826 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -339,24 +339,6 @@ func DeleteWitness(db ethdb.KeyValueWriter, blockHash common.Hash) {
}
}
-// WritePostStateRoot stores the post-execution state root for a given block.
-// This persists across reorgs and restarts so that GetPostStateRoot can
-// retrieve the root when no child block exists yet.
-func WritePostStateRoot(db ethdb.KeyValueWriter, blockHash common.Hash, root common.Hash) {
- if err := db.Put(postStateRootKey(blockHash), root.Bytes()); err != nil {
- log.Crit("Failed to store post-state root", "err", err)
- }
-}
-
-// ReadPostStateRoot retrieves the post-execution state root for the given block.
-func ReadPostStateRoot(db ethdb.KeyValueReader, blockHash common.Hash) common.Hash {
- data, err := db.Get(postStateRootKey(blockHash))
- if err != nil || len(data) == 0 {
- return common.Hash{}
- }
- return common.BytesToHash(data)
-}
-
func ReadWitnessPruneCursor(db ethdb.KeyValueReader) *uint64 {
log.Debug("ReadWitnessCursor")
data, err := db.Get(witnessPruneCursorKey())
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index a3113e170a..4f5cb05bf0 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -164,8 +164,6 @@ var (
WitnessPruneCursorKey = []byte("witnessPruneCursorKey")
WitnessPruneHeadKey = []byte("witnessPruneHeadKey")
- PostStateRootPrefix = []byte("post-state-root-") // PostStateRootPrefix + hash -> post-execution state root
-
// BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
BloomBitsIndexPrefix = []byte("iB")
@@ -297,11 +295,6 @@ func witnessSizeKey(hash common.Hash) []byte {
return append(WitnessSizePrefix, hash.Bytes()...)
}
-// postStateRootKey = PostStateRootPrefix + hash
-func postStateRootKey(hash common.Hash) []byte {
- return append(PostStateRootPrefix, hash.Bytes()...)
-}
-
func witnessPruneCursorKey() []byte {
return WitnessPruneCursorKey
}
diff --git a/core/state/database.go b/core/state/database.go
index 53745b86e8..5fbc565617 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -200,6 +200,19 @@ func NewDatabaseForTesting() *CachingDB {
return NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
}
+// TrieOnlyReader returns a state reader that uses only the trie (MPT), skipping
+// flat/snapshot readers. This ensures all account and storage reads walk the trie,
+// which is required for witness building — the witness captures trie nodes during
+// the walk. Without this, flat readers short-circuit the trie and proof paths are
+// never captured.
+func (db *CachingDB) TrieOnlyReader(stateRoot common.Hash) (Reader, error) {
+ tr, err := newTrieReader(stateRoot, db.triedb, db.pointCache)
+ if err != nil {
+ return nil, err
+ }
+ return newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), tr), nil
+}
+
// Reader returns a state reader associated with the specified state root.
func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
var readers []StateReader
diff --git a/core/state/metrics.go b/core/state/metrics.go
index dd4b2e9838..0114bcc87c 100644
--- a/core/state/metrics.go
+++ b/core/state/metrics.go
@@ -29,4 +29,18 @@ var (
storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)
+
+ // FlatDiff overlay hit meters — fire when a state read is satisfied by the
+ // previous block's FlatDiff instead of falling through to the committed trie.
+ // Non-zero rate confirms the pipelined SRC overlay is active on this statedb
+ // (applies to both block import and speculative build paths).
+ //
+ // These also serve as the build-side cache-visibility substitute under
+ // pipelining: the speculative build path uses NewWithFlatBase, which creates
+ // a plain StateDB without the instrumented prefetch/process readers that
+ // populate chain/*/reads/cache/*. Those meters therefore receive no
+ // build-side contribution when pipelining is enabled. Use the flatdiff
+ // meters here for overlay efficiency signals in pipelined build mode.
+ flatDiffAccountHitsMeter = metrics.NewRegisteredMeter("state/flatdiff/account_hits", nil)
+ flatDiffStorageHitsMeter = metrics.NewRegisteredMeter("state/flatdiff/storage_hits", nil)
)
diff --git a/core/state/reader.go b/core/state/reader.go
index b9b5f4b30b..36a87a098d 100644
--- a/core/state/reader.go
+++ b/core/state/reader.go
@@ -234,7 +234,13 @@ func (r *flatReader) Storage(addr common.Address, key common.Hash) (common.Hash,
// trieReader is safe for concurrent read.
type trieReader struct {
root common.Hash // State root which uniquely represent a state
- db *triedb.Database // Database for loading trie
+ db *triedb.Database // Database for loading trie (kept for IsVerkle / Disk access)
+
+ // nodeDB is the database used to construct sub-tries (storage tries) on
+ // demand. It defaults to db, but the snapshot-aware constructor wraps db
+ // so storage trie reads consult a WarmSnapshot before falling through to
+ // pathdb. Keeping it as an interface lets the wrapper remain transparent.
+ nodeDB database.NodeDatabase
// Main trie, resolved in constructor. Note either the Merkle-Patricia-tree
// or Verkle-tree is not safe for concurrent read.
@@ -277,6 +283,37 @@ func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCach
return &trieReader{
root: root,
db: db,
+ nodeDB: db,
+ mainTrie: tr,
+ subRoots: make(map[common.Address]common.Hash),
+ subTries: make(map[common.Address]Trie),
+ }, nil
+}
+
+// newTrieReaderWithSnapshot mirrors newTrieReader but receives a snapshot-aware
+// NodeDatabase so trie reads can consult a WarmSnapshot before falling through
+// to pathdb/pebble. The snapshot is hash-verified by the supplied NodeDatabase;
+// misses or hash mismatches transparently fall through. The trie itself is
+// unchanged (NewStateTrie sees a wrapped NodeDatabase only) — its
+// resolveAndTrack path and prevalueTracer recording fire identically whether
+// the served node came from the snapshot or pathdb, so witness completeness
+// under NewTrieOnly semantics is preserved.
+//
+// Verkle is not supported by this path: pipelined SRC is MPT-only and the
+// snapshot is constructed from MPT trie nodes. Callers that need verkle
+// readers must use newTrieReader.
+func newTrieReaderWithSnapshot(root common.Hash, db *triedb.Database, nodeDB database.NodeDatabase) (*trieReader, error) {
+ if db.IsVerkle() {
+ return nil, errors.New("warm snapshot reader: verkle scheme is not supported")
+ }
+ tr, err := trie.NewStateTrie(trie.StateTrieID(root), nodeDB)
+ if err != nil {
+ return nil, err
+ }
+ return &trieReader{
+ root: root,
+ db: db,
+ nodeDB: nodeDB,
mainTrie: tr,
subRoots: make(map[common.Address]common.Hash),
subTries: make(map[common.Address]Trie),
@@ -342,7 +379,7 @@ func (r *trieReader) Storage(addr common.Address, key common.Hash) (common.Hash,
root = r.subRoots[addr]
}
var err error
- tr, err = trie.NewStateTrie(trie.StorageTrieID(r.root, crypto.Keccak256Hash(addr.Bytes()), root), r.db)
+ tr, err = trie.NewStateTrie(trie.StorageTrieID(r.root, crypto.Keccak256Hash(addr.Bytes()), root), r.nodeDB)
if err != nil {
return common.Hash{}, err
}
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 7ecee2e053..2d95d2f2d5 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -54,6 +54,26 @@ type stateObject struct {
origin *types.StateAccount // Account original data without any change applied, nil means it was not existent
data types.StateAccount // Account data with all mutations applied in the scope of block
+ // prefetchRoot holds the storage root from the committed parent state, used
+ // exclusively for prefetcher interactions during pipelined SRC.
+ //
+ // When an account is loaded from FlatDiff (the previous block's uncommitted
+ // mutations), its origin.Root and data.Root reflect block N's post-state —
+ // but the prefetcher's NodeReader is opened at committedParentRoot (the
+ // grandparent). This creates a (stateRoot, storageRoot) mismatch: the reader
+ // can only resolve trie nodes for the grandparent's storage root, not block
+ // N's. The result is "Unexpected trie node" hash-mismatch errors on every
+ // storage trie root resolution, killing the prefetcher for those accounts.
+ //
+ // prefetchRoot stores the grandparent's storage root — the one consistent
+ // with the prefetcher's reader. It is set only for FlatDiff-sourced accounts;
+ // for accounts loaded from the committed state it stays zero, and
+ // getPrefetchRoot() falls back to data.Root (which is already consistent).
+ //
+ // The committed root is obtained from the flat state reader (in-memory
+ // snapshot), so the cost is effectively zero.
+ prefetchRoot common.Hash
+
// Write caches.
trie Trie // storage trie, which becomes non-nil on first access
code []byte // contract bytecode, which gets set when code is loaded
@@ -122,6 +142,26 @@ func (s *stateObject) touch() {
s.db.journal.touchChange(s.address)
}
+// getPrefetchRoot returns the storage root to use for all prefetcher
+// interactions (prefetch, trie lookup, used). This must be consistent across
+// all calls for a given account so the subfetcher trieID matches.
+//
+// For accounts loaded from FlatDiff (pipelined SRC), the storage root in
+// origin/data reflects block N's post-state, but the prefetcher's NodeReader
+// is at committedParentRoot (the grandparent). Using block N's root would
+// cause a hash mismatch when resolving the storage trie root node. Instead,
+// we return the grandparent's storage root (stored in prefetchRoot), which
+// is consistent with the reader.
+//
+// For accounts loaded from the committed state (normal path), prefetchRoot
+// is zero and we fall back to data.Root, which is already consistent.
+func (s *stateObject) getPrefetchRoot() common.Hash {
+ if s.prefetchRoot != (common.Hash{}) {
+ return s.prefetchRoot
+ }
+ return s.data.Root
+}
+
// getTrie returns the associated storage trie. The trie will be opened if it's
// not loaded previously. An error will be returned if trie can't be loaded.
//
@@ -153,8 +193,10 @@ func (s *stateObject) getPrefetchedTrie() Trie {
if (s.data.Root == types.EmptyRootHash && !s.db.db.TrieDB().IsVerkle()) || s.db.prefetcher == nil {
return nil
}
- // Attempt to retrieve the trie from the prefetcher
- return s.db.prefetcher.trie(s.addrHash, s.data.Root)
+ // Use getPrefetchRoot() so the trieID matches the one used when scheduling
+ // the prefetch. For FlatDiff accounts this is the committed parent's storage
+ // root; for normal accounts it equals data.Root (unchanged behavior).
+ return s.db.prefetcher.trie(s.addrHash, s.getPrefetchRoot())
}
// GetState retrieves a value associated with the given storage key.
@@ -191,6 +233,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
if s.db.flatDiffRef != nil {
if slots, ok := s.db.flatDiffRef.Storage[s.address]; ok {
if value, ok := slots[key]; ok {
+ flatDiffStorageHitsMeter.Mark(1)
s.originStorage[key] = value
return value
}
@@ -218,8 +261,11 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
s.db.StorageReads += time.Since(start)
// Schedule the resolved storage slots for prefetching if it's enabled.
+ // Use getPrefetchRoot() for the storage root so the subfetcher's trieID
+ // is consistent with the prefetcher's NodeReader state root. For FlatDiff
+ // accounts, this is the committed parent's storage root (not block N's).
if s.db.prefetcher != nil && s.data.Root != types.EmptyRootHash {
- if err = s.db.prefetcher.prefetch(s.addrHash, s.origin.Root, s.address, nil, []common.Hash{key}, true); err != nil {
+ if err = s.db.prefetcher.prefetch(s.addrHash, s.getPrefetchRoot(), s.address, nil, []common.Hash{key}, true); err != nil {
log.Error("Failed to prefetch storage slot", "addr", s.address, "key", key, "err", err)
}
}
@@ -280,8 +326,9 @@ func (s *stateObject) finalise() {
// byzantium fork) and entry is necessary to modify the value back.
s.pendingStorage[key] = value
}
+ // Use getPrefetchRoot() for consistency with other prefetcher calls.
if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
- if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, nil, slotsToPrefetch, false); err != nil {
+ if err := s.db.prefetcher.prefetch(s.addrHash, s.getPrefetchRoot(), s.address, nil, slotsToPrefetch, false); err != nil {
log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err)
}
}
@@ -377,8 +424,9 @@ func (s *stateObject) updateTrie() (Trie, error) {
s.db.StorageDeleted.Add(1)
}
+ // Use getPrefetchRoot() so the trieID matches the one used during scheduling.
if s.db.prefetcher != nil {
- s.db.prefetcher.used(s.addrHash, s.data.Root, nil, used)
+ s.db.prefetcher.used(s.addrHash, s.getPrefetchRoot(), nil, used)
}
// When witness building is enabled without a prefetcher, storage reads
// went through the reader (a separate trie with its own PrevalueTracer)
@@ -520,6 +568,7 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
addrHash: s.addrHash,
origin: s.origin,
data: s.data,
+ prefetchRoot: s.prefetchRoot,
code: s.code,
originStorage: s.originStorage.Copy(),
pendingStorage: s.pendingStorage.Copy(),
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 119c1c7bf3..c3a8668020 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -151,7 +151,7 @@ type StateDB struct {
witnessStats *stateless.WitnessStats
// nonExistentReads tracks addresses that were looked up but don't exist
- // in the state trie. Under delayed SRC, these are included in the
+ // in the state trie. Under pipelined SRC, these are included in the
// FlatDiff so the SRC goroutine can walk their trie paths and capture
// proof-of-absence nodes for the witness. Without this, stateless
// execution fails when it tries to prove these accounts don't exist.
@@ -197,6 +197,41 @@ func New(root common.Hash, db Database) (*StateDB, error) {
return NewWithReader(root, db, reader)
}
+// NewTrieOnly creates a new state that uses only the trie reader (no flat/snapshot
+// readers). This forces all account and storage reads to walk the MPT, which is
+// required for witness building — the witness captures trie nodes during the walk.
+// Used by the pipelined SRC goroutine to ensure the witness is complete.
+func NewTrieOnly(root common.Hash, db *CachingDB) (*StateDB, error) {
+ reader, err := db.TrieOnlyReader(root)
+ if err != nil {
+ return nil, err
+ }
+ return NewWithReader(root, db, reader)
+}
+
+// NewTrieOnlyWithSnapshot is the warm-cache variant of NewTrieOnly. Trie reads
+// consult a WarmSnapshot (typically captured from the execution-side trie
+// prefetcher) before falling through to the regular pathdb-backed NodeReader.
+// Hits with matching hash skip diff-layer/disk-layer/pebble work entirely;
+// misses or hash mismatches are served by the underlying reader unchanged.
+// NewTrieOnly semantics are preserved — the trie still walks, prevalueTracer
+// still records, witness is still complete. The snapshot wrapper is installed
+// on the StateDB database itself, not just the initial Reader, so commit-time
+// OpenTrie/OpenStorageTrie calls also use the same warm handoff.
+//
+// A nil or empty (zero-length) snapshot is equivalent to NewTrieOnly.
+func NewTrieOnlyWithSnapshot(root common.Hash, db *CachingDB, snapshot *WarmSnapshot) (*StateDB, error) {
+ if snapshot == nil || snapshot.Len() == 0 {
+ return NewTrieOnly(root, db)
+ }
+ snapshotDB := newSnapshotStateDatabase(db, snapshot)
+ reader, err := snapshotDB.Reader(root)
+ if err != nil {
+ return nil, err
+ }
+ return NewWithReader(root, snapshotDB, reader)
+}
+
// NewWithReader creates a new state for the specified state root. Unlike New,
// this function accepts an additional Reader which is bound to the given root.
func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, error) {
@@ -541,6 +576,79 @@ func (s *StateDB) StopPrefetcher() {
}
}
+// PrefetcherSnapshotStats describes the synchronous phases and warm-node mix
+// observed while stopping and snapshotting a trie prefetcher.
+type PrefetcherSnapshotStats struct {
+ Drain time.Duration
+ Collect time.Duration
+ Report time.Duration
+
+ Fetchers int
+ LoadedFetchers int
+ AccountFetchers int
+ StorageFetchers int
+
+ AccountNodes int
+ StorageNodes int
+ AccountBytes int
+ StorageBytes int
+}
+
+// StopAndCollectWarmSnapshot terminates a running prefetcher synchronously,
+// captures the trie nodes its subfetchers had loaded into a quiesced handoff
+// bundle, then reports stats and clears the prefetcher reference. The returned
+// input is owned by the caller and is safe to pass to another goroutine; the
+// final WarmSnapshot copy/hash/index build is intentionally deferred to
+// WarmSnapshotInput.Build.
+//
+// Step ordering matters and is enforced here:
+//
+// 1. terminateForSnapshot — signal stop, discard queued speculative work,
+// and wait for every subfetcher loop to exit (writers-exited semantics via
+// <-sf.term gated on `defer close(sf.term)`). Already-running trie reads
+// are allowed to finish; unstarted queued reads are not required because
+// missing warm nodes fall through to pathdb in SRC.
+// 2. snapshotWarmNodes — read each subfetcher's trie.Witness() while no
+// writer remains. Quiescent state guarantees the read is race-free.
+// 3. report — emit metrics from the same fetchers (unchanged from
+// StopPrefetcher).
+// 4. nil out s.prefetcher — detach the StateDB from the prefetcher.
+//
+// Returns nil when no prefetcher is installed or no subfetcher loaded any
+// nodes. Callers must tolerate a nil input and treat it as "no warm data; fall
+// through to the underlying reader for every node". The returned stats are
+// populated even when the input is nil, as long as a prefetcher existed.
+func (s *StateDB) StopAndCollectWarmSnapshot() (*WarmSnapshotInput, PrefetcherSnapshotStats) {
+ var stats PrefetcherSnapshotStats
+ if s.prefetcher == nil {
+ return nil, stats
+ }
+ phaseStart := time.Now()
+ s.prefetcher.terminateForSnapshot()
+ stats.Drain = time.Since(phaseStart)
+
+ phaseStart = time.Now()
+ tries, snapshotStats := s.prefetcher.snapshotWarmNodes()
+ stats.Collect = time.Since(phaseStart)
+ stats.Fetchers = snapshotStats.Fetchers
+ stats.LoadedFetchers = snapshotStats.LoadedFetchers
+ stats.AccountFetchers = snapshotStats.AccountFetchers
+ stats.StorageFetchers = snapshotStats.StorageFetchers
+ stats.AccountNodes = snapshotStats.AccountNodes
+ stats.StorageNodes = snapshotStats.StorageNodes
+ stats.AccountBytes = snapshotStats.AccountBytes
+ stats.StorageBytes = snapshotStats.StorageBytes
+
+ phaseStart = time.Now()
+ s.prefetcher.report()
+ stats.Report = time.Since(phaseStart)
+ s.prefetcher = nil
+ if len(tries) == 0 {
+ return nil, stats
+ }
+ return NewWarmSnapshotInput(tries), stats
+}
+
// ResetPrefetcher cleans the prefetcher from a State, commonly used in tempStates to track witness while no impacting block building
// Do also remove mutations previously tracked to just look to the new ones
func (s *StateDB) ResetPrefetcher() {
@@ -1029,14 +1137,40 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
// Check the FlatDiff reference for accounts mutated in the parent block.
if s.flatDiffRef != nil {
if acct, ok := s.flatDiffRef.Accounts[addr]; ok {
+ flatDiffAccountHitsMeter.Mark(1)
acctCopy := acct
obj := newObject(s, addr, &acctCopy)
if code, ok := s.flatDiffRef.Code[common.BytesToHash(acctCopy.CodeHash)]; ok {
obj.code = code
}
+ // Resolve the committed storage root for prefetcher consistency.
+ //
+ // The FlatDiff account's Root is block N's post-state storage root,
+ // but the prefetcher's NodeReader is opened at committedParentRoot
+ // (the grandparent). These are inconsistent — the reader can only
+ // resolve trie nodes for the grandparent's storage root. Without
+ // this, the prefetcher hits "Unexpected trie node" hash mismatches
+ // on every storage trie root resolution for FlatDiff accounts.
+ //
+ // We read the account from the committed state (flat reader, in-
+ // memory snapshot) to get the grandparent's storage root. This is
+ // the root that the prefetcher's reader can actually resolve.
+ if acctCopy.Root != types.EmptyRootHash {
+ if committedAcct, err := s.reader.Account(addr); err == nil && committedAcct != nil {
+ obj.prefetchRoot = committedAcct.Root
+ }
+ // If the account doesn't exist in the committed state (new in
+ // block N), prefetchRoot stays zero and getPrefetchRoot() falls
+ // back to data.Root. The prefetcher will skip it since the trie
+ // didn't exist at committedParentRoot.
+ }
s.setStateObject(obj)
return obj
}
+ // Account not in FlatDiff — check if it was destructed in FlatDiff.
+ if _, ok := s.flatDiffRef.Destructs[addr]; ok {
+ return nil
+ }
}
s.AccountLoaded++
@@ -1057,7 +1191,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
}
// Short circuit if the account is not found
if acct == nil {
- // Track the address so the delayed SRC goroutine can walk
+ // Track the address so the pipelined SRC goroutine can walk
// the trie path and capture proof-of-absence nodes for the
// witness. Without this, stateless execution can't verify
// non-existent accounts.
@@ -1972,7 +2106,7 @@ type FlatDiff struct {
Code map[common.Hash][]byte // newly deployed code
// ReadSet and ReadStorage list accounts and storage slots that were read
- // (but not mutated) during block execution. The delayed SRC goroutine loads
+ // (but not mutated) during block execution. The pipelined SRC goroutine loads
// these from the root_{N-1} trie so their MPT proof nodes are captured in
// the witness for stateless execution.
ReadSet []common.Address
@@ -1992,18 +2126,10 @@ type FlatDiff struct {
// account data — it only forces dst to load the accounts from its own trie.
func (diff *FlatDiff) TouchAllAddresses(dst *StateDB) {
for addr := range diff.Accounts {
- dst.GetBalance(addr)
- if slots, ok := diff.Storage[addr]; ok {
- for slot := range slots {
- dst.GetCommittedState(addr, slot)
- }
- }
+ touchAddressAndStorage(dst, addr, diff.mutatedStorageKeys(addr))
}
for _, addr := range diff.ReadSet {
- dst.GetBalance(addr)
- for _, slot := range diff.ReadStorage[addr] {
- dst.GetCommittedState(addr, slot)
- }
+ touchAddressAndStorage(dst, addr, diff.ReadStorage[addr])
}
for addr := range diff.Destructs {
dst.GetBalance(addr)
@@ -2016,6 +2142,31 @@ func (diff *FlatDiff) TouchAllAddresses(dst *StateDB) {
}
}
+// touchAddressAndStorage calls GetBalance on addr and GetCommittedState on
+// each provided slot so the destination statedb tracks the reads (and the
+// background SRC walks those trie nodes for the witness).
+func touchAddressAndStorage(dst *StateDB, addr common.Address, slots []common.Hash) {
+ dst.GetBalance(addr)
+ for _, slot := range slots {
+ dst.GetCommittedState(addr, slot)
+ }
+}
+
+// mutatedStorageKeys returns the keys of diff.Storage[addr] as a slice —
+// nil when the address has no mutated slots — so TouchAllAddresses can route
+// both mutated and read-only accounts through the same touch helper.
+func (diff *FlatDiff) mutatedStorageKeys(addr common.Address) []common.Hash {
+ slots, ok := diff.Storage[addr]
+ if !ok {
+ return nil
+ }
+ keys := make([]common.Hash, 0, len(slots))
+ for k := range slots {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
// CommitSnapshot finalises the StateDB and returns a FlatDiff capturing all
// mutations without performing any MPT hashing (~1ms). After this call the
// StateDB should no longer be used by the caller.
@@ -2029,90 +2180,107 @@ func (s *StateDB) CommitSnapshot(deleteEmptyObjects bool) *FlatDiff {
Code: make(map[common.Hash][]byte),
ReadStorage: make(map[common.Address][]common.Hash),
}
-
- // Capture self-destructed accounts.
for addr := range s.stateObjectsDestruct {
diff.Destructs[addr] = struct{}{}
}
-
- // Capture mutations for live (non-destructed) accounts.
for addr, op := range s.mutations {
- if op.isDelete() {
- diff.Destructs[addr] = struct{}{}
- continue
- }
- obj, ok := s.stateObjects[addr]
- if !ok {
- continue
- }
- diff.Accounts[addr] = obj.data
-
- // Capture dirty code.
- if obj.dirtyCode {
- diff.Code[common.BytesToHash(obj.CodeHash())] = obj.code
- }
+ s.captureMutation(diff, addr, op)
+ }
+ // Read-only accounts: accessed during execution but not mutated. The
+ // pipelined SRC goroutine loads their root_{N-1} trie nodes into the
+ // witness so stateless nodes can execute against root_{N-1}.
+ for addr, obj := range s.stateObjects {
+ s.captureReadOnlyAccount(diff, addr, obj)
+ }
+ // Non-existent account reads: looked-up addresses that don't exist in
+ // the state trie. The SRC goroutine needs these to walk proof-of-absence
+ // paths and capture trie nodes for the witness.
+ for addr := range s.nonExistentReads {
+ s.captureNonExistentRead(diff, addr)
+ }
+ return diff
+}
- // Capture pending (post-Finalise) storage.
- if len(obj.pendingStorage) > 0 {
- slots := make(map[common.Hash]common.Hash, len(obj.pendingStorage))
- for k, v := range obj.pendingStorage {
- slots[k] = v
- }
- diff.Storage[addr] = slots
- }
- // Capture read-only storage for mutated accounts: storage slots that
- // were read (in originStorage) but not written (not in pendingStorage).
- // Without this, the SRC goroutine won't load these slots' trie nodes
- // into the witness, causing "missing trie node" during stateless replay
- // (e.g., span commits read validator contract slots they don't write).
- if len(obj.originStorage) > 0 {
- var readSlots []common.Hash
- for slot := range obj.originStorage {
- if _, dirty := obj.pendingStorage[slot]; !dirty {
- readSlots = append(readSlots, slot)
- }
- }
- if len(readSlots) > 0 {
- diff.ReadStorage[addr] = readSlots
- }
- }
+// captureMutation records a single mutated/destructed account into the
+// FlatDiff. An explicit delete op is recorded in the Destructs set (the
+// pending destruct set is captured by the caller); live mutations copy both
+// pending and read-only storage so the SRC goroutine can later walk every
+// trie node the block touched.
+func (s *StateDB) captureMutation(diff *FlatDiff, addr common.Address, op *mutation) {
+ if op.isDelete() {
+ diff.Destructs[addr] = struct{}{}
+ return
+ }
+ obj, ok := s.stateObjects[addr]
+ if !ok {
+ return
+ }
+ diff.Accounts[addr] = obj.data
+ if obj.dirtyCode {
+ diff.Code[common.BytesToHash(obj.CodeHash())] = obj.code
}
+ captureObjectStorage(diff, addr, obj)
+}
- // Capture read-only accounts: accessed during execution but not mutated.
- // The delayed SRC goroutine uses these to load their root_{N-1} trie nodes
- // into the witness so stateless nodes can execute against root_{N-1}.
- for addr, obj := range s.stateObjects {
- if _, isMutation := s.mutations[addr]; isMutation {
- continue // already captured above
- }
- if _, isDestruct := s.stateObjectsDestruct[addr]; isDestruct {
- continue // already captured above
+// captureObjectStorage copies pending (post-Finalise) storage mutations and
+// any read-only slots that weren't overwritten. Read-only slots matter
+// because the SRC goroutine needs to load their trie nodes into the witness
+// (e.g., span commits read validator-contract slots they don't write).
+func captureObjectStorage(diff *FlatDiff, addr common.Address, obj *stateObject) {
+ if len(obj.pendingStorage) > 0 {
+ slots := make(map[common.Hash]common.Hash, len(obj.pendingStorage))
+ for k, v := range obj.pendingStorage {
+ slots[k] = v
}
- diff.ReadSet = append(diff.ReadSet, addr)
- if len(obj.originStorage) > 0 {
- slots := make([]common.Hash, 0, len(obj.originStorage))
- for slot := range obj.originStorage {
- slots = append(slots, slot)
- }
- diff.ReadStorage[addr] = slots
+ diff.Storage[addr] = slots
+ }
+ if len(obj.originStorage) == 0 {
+ return
+ }
+ var readSlots []common.Hash
+ for slot := range obj.originStorage {
+ if _, dirty := obj.pendingStorage[slot]; !dirty {
+ readSlots = append(readSlots, slot)
}
}
+ if len(readSlots) > 0 {
+ diff.ReadStorage[addr] = readSlots
+ }
+}
- // Capture non-existent account reads: addresses that were looked up but
- // don't exist in the state trie. The SRC goroutine needs these to walk
- // proof-of-absence paths and capture trie nodes for the witness.
- for addr := range s.nonExistentReads {
- // Skip addresses that ended up existing (e.g., created later in the block).
- if _, isMutation := s.mutations[addr]; isMutation {
- continue
- }
- if _, ok := s.stateObjects[addr]; ok {
- continue
- }
- diff.NonExistentReads = append(diff.NonExistentReads, addr)
+// captureReadOnlyAccount adds an account to ReadSet (and its originStorage
+// to ReadStorage) if it was accessed but neither mutated nor destructed in
+// this block. Mutated/destructed accounts are already handled by
+// captureMutation.
+func (s *StateDB) captureReadOnlyAccount(diff *FlatDiff, addr common.Address, obj *stateObject) {
+ if _, isMutation := s.mutations[addr]; isMutation {
+ return
+ }
+ if _, isDestruct := s.stateObjectsDestruct[addr]; isDestruct {
+ return
+ }
+ diff.ReadSet = append(diff.ReadSet, addr)
+ if len(obj.originStorage) == 0 {
+ return
}
+ slots := make([]common.Hash, 0, len(obj.originStorage))
+ for slot := range obj.originStorage {
+ slots = append(slots, slot)
+ }
+ diff.ReadStorage[addr] = slots
+}
- return diff
+// captureNonExistentRead records proof-of-absence address reads. Skips
+// addresses that ended up existing (e.g., created later in the block) since
+// captureMutation/captureReadOnlyAccount already handled them.
+func (s *StateDB) captureNonExistentRead(diff *FlatDiff, addr common.Address) {
+ if _, isMutation := s.mutations[addr]; isMutation {
+ return
+ }
+ if _, ok := s.stateObjects[addr]; ok {
+ return
+ }
+ diff.NonExistentReads = append(diff.NonExistentReads, addr)
}
// ApplyFlatDiff installs the previous block's mutations as pre-loaded (but not
@@ -2137,32 +2305,31 @@ func (s *StateDB) ApplyFlatDiff(diff *FlatDiff) {
s.stateObjectsDestruct[addr] = newObject(s, addr, nil)
}
}
-
- // Install each mutated account directly, without journal entries.
for addr, acct := range diff.Accounts {
- acctCopy := acct
- obj := newObject(s, addr, &acctCopy)
-
- // Carry newly-deployed code in memory. For pre-existing contracts the
- // code hash is already persisted; stateObject.Code() will fetch it from
- // the DB on first access without needing dirtyCode set.
- if code, ok := diff.Code[common.BytesToHash(acctCopy.CodeHash)]; ok {
- obj.code = code
- // dirtyCode intentionally left false: this code was deployed in the
- // previous block, not this one.
- }
-
- // Load storage as originStorage (the "already committed" baseline).
- // dirtyStorage stays empty, so only slots the current block writes will
- // be captured by CommitSnapshot.
- if slots, ok := diff.Storage[addr]; ok {
- for k, v := range slots {
- obj.originStorage[k] = v
- }
- }
+ s.applyFlatAccountOverlay(diff, addr, acct)
+ }
+}
- s.stateObjects[addr] = obj
+// applyFlatAccountOverlay installs a FlatDiff account into stateObjects as a
+// read-only overlay: no journal entries, no dirty bits. Newly-deployed code
+// is carried in memory because the background goroutine may not have
+// persisted it yet; pre-existing contracts resolve via stateObject.Code().
+// Pending storage from the previous block is loaded as originStorage so
+// CommitSnapshot only re-captures slots that THIS block writes.
+func (s *StateDB) applyFlatAccountOverlay(diff *FlatDiff, addr common.Address, acct types.StateAccount) {
+ acctCopy := acct
+ obj := newObject(s, addr, &acctCopy)
+ if code, ok := diff.Code[common.BytesToHash(acctCopy.CodeHash)]; ok {
+ obj.code = code
+ // dirtyCode intentionally left false: code was deployed in the
+ // previous block, not this one.
+ }
+ if slots, ok := diff.Storage[addr]; ok {
+ for k, v := range slots {
+ obj.originStorage[k] = v
+ }
}
+ s.stateObjects[addr] = obj
}
// ApplyFlatDiffForCommit marks all mutations in diff as dirty via the normal
@@ -2175,53 +2342,50 @@ func (s *StateDB) ApplyFlatDiff(diff *FlatDiff) {
// state root; it is not suitable for execution state objects (it would cause
// mutations to cascade into subsequent FlatDiffs).
func (s *StateDB) ApplyFlatDiffForCommit(diff *FlatDiff) {
- // Handle self-destructs. Pure destructs (not resurrected) are done via
+ // Handle self-destructs. Pure destructs (not resurrected) go through
// SelfDestruct, which loads the original from the trie and marks it for
// deletion. Resurrected accounts (present in both Destructs and Accounts)
- // need the original placed in stateObjectsDestruct manually so the
- // subsequent Set* calls create a fresh object via getOrNewStateObject.
+ // are set up inside applyFlatMutation so the subsequent Set* calls create
+ // a fresh object via getOrNewStateObject.
for addr := range diff.Destructs {
if _, resurrected := diff.Accounts[addr]; resurrected {
- // Handled in the Accounts loop below.
continue
}
s.SelfDestruct(addr)
}
-
for addr, acct := range diff.Accounts {
- // For resurrected accounts, populate stateObjectsDestruct with the
- // original pre-block account (needed by handleDestruction to delete the
- // original storage trie) and remove any cached entry so the Set* calls
- // below create a fresh object via getOrNewStateObject.
- if _, destructed := diff.Destructs[addr]; destructed {
- if _, already := s.stateObjectsDestruct[addr]; !already {
- if prev := s.getStateObject(addr); prev != nil {
- s.stateObjectsDestruct[addr] = prev
- }
- }
- delete(s.stateObjects, addr)
- }
-
- // Apply newly-deployed contract code.
- if code, ok := diff.Code[common.BytesToHash(acct.CodeHash)]; ok {
- s.SetCode(addr, code, tracing.CodeChangeUnspecified)
- }
+ s.applyFlatMutation(diff, addr, acct)
+ }
+}
- // Mark each storage slot dirty. SetState reads the pre-block origin
- // from the storage trie, populating uncommittedStorage so updateTrie
- // correctly writes or deletes each slot (including zero-value deletions).
- if slots, ok := diff.Storage[addr]; ok {
- for k, v := range slots {
- s.SetState(addr, k, v)
+// applyFlatMutation commits one FlatDiff account mutation onto the statedb
+// via the journalled Set* path so Finalise / commit pick it up. Handles
+// resurrection by seeding stateObjectsDestruct with the pre-block original
+// (needed by handleDestruction to delete the original storage trie).
+func (s *StateDB) applyFlatMutation(diff *FlatDiff, addr common.Address, acct types.StateAccount) {
+ if _, destructed := diff.Destructs[addr]; destructed {
+ if _, already := s.stateObjectsDestruct[addr]; !already {
+ if prev := s.getStateObject(addr); prev != nil {
+ s.stateObjectsDestruct[addr] = prev
}
}
-
- // Mark account metadata dirty. Calling Set* ensures the account
- // appears in journal.dirties so Finalise emits a markUpdate, even
- // when only storage or code changed.
- s.SetNonce(addr, acct.Nonce, tracing.NonceChangeUnspecified)
- s.SetBalance(addr, acct.Balance, tracing.BalanceChangeUnspecified)
+ delete(s.stateObjects, addr)
+ }
+ if code, ok := diff.Code[common.BytesToHash(acct.CodeHash)]; ok {
+ s.SetCode(addr, code, tracing.CodeChangeUnspecified)
+ }
+ // SetState reads the pre-block origin from the storage trie, populating
+ // uncommittedStorage so updateTrie correctly writes or deletes each slot
+ // (including zero-value deletions).
+ if slots, ok := diff.Storage[addr]; ok {
+ for k, v := range slots {
+ s.SetState(addr, k, v)
+ }
}
+ // Set* ensures the account appears in journal.dirties so Finalise emits
+ // a markUpdate, even when only storage or code changed.
+ s.SetNonce(addr, acct.Nonce, tracing.NonceChangeUnspecified)
+ s.SetBalance(addr, acct.Balance, tracing.BalanceChangeUnspecified)
}
// NewWithFlatBase creates a StateDB at parentCommittedRoot (the last root
@@ -2229,9 +2393,9 @@ func (s *StateDB) ApplyFlatDiffForCommit(diff *FlatDiff) {
// the post-state of the block that produced flatDiff, without waiting for
// that block's state root to be computed.
//
-// This is used during DelayedSRC block processing: while goroutine G_N is
-// computing root_N from (root_{N-1}, FlatDiff_N), the next block N+1 can
-// already be executed using NewWithFlatBase(root_{N-1}, db, FlatDiff_N).
+// This is used during pipelined SRC: while a background goroutine computes
+// root_N from (root_{N-1}, FlatDiff_N), the next block N+1 can already be
+// executed using NewWithFlatBase(root_{N-1}, db, FlatDiff_N).
func NewWithFlatBase(parentCommittedRoot common.Hash, db Database, flatDiff *FlatDiff) (*StateDB, error) {
sdb, err := New(parentCommittedRoot, db)
if err != nil {
@@ -2248,6 +2412,19 @@ func (s *StateDB) SetFlatDiffRef(diff *FlatDiff) {
s.flatDiffRef = diff
}
+// WasStorageSlotRead reports whether the given address+slot had its committed
+// value read (i.e. is tracked in the object's originStorage) during this
+// block's execution. Used by pipelined SRC to detect whether any transaction
+// read the EIP-2935 history storage slot holding stale speculative data.
+func (s *StateDB) WasStorageSlotRead(addr common.Address, slot common.Hash) bool {
+ obj, exists := s.stateObjects[addr]
+ if !exists {
+ return false
+ }
+ _, accessed := obj.originStorage[slot]
+ return accessed
+}
+
// Prepare handles the preparatory steps for executing a state transition with.
// This method must be invoked before state transition.
//
@@ -2399,3 +2576,20 @@ func (s *StateDB) AccessEvents() *AccessEvents {
func (s *StateDB) Inner() *StateDB {
return s
}
+
+// PropagateReadsTo touches all addresses and storage slots accessed in s on
+// the destination StateDB. This ensures the destination tracks them in its
+// stateObjects (and later in its FlatDiff ReadSet) so the pipelined SRC
+// goroutine captures their trie proof nodes in the witness.
+//
+// Use this when a temporary copy of the state is used for EVM calls (e.g.,
+// CommitStates → LastStateId) and the accessed addresses must be visible
+// in the original state for witness generation.
+func (s *StateDB) PropagateReadsTo(dst *StateDB) {
+ for addr, obj := range s.stateObjects {
+ dst.GetBalance(addr)
+ for slot := range obj.originStorage {
+ dst.GetState(addr, slot)
+ }
+ }
+}
diff --git a/core/state/statedb_pipeline_mutations_test.go b/core/state/statedb_pipeline_mutations_test.go
new file mode 100644
index 0000000000..c27d36f591
--- /dev/null
+++ b/core/state/statedb_pipeline_mutations_test.go
@@ -0,0 +1,1011 @@
+package state
+
+import (
+ "testing"
+
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+)
+
+// The tests in this file exercise pipelined-SRC-authored code paths in
+// statedb.go at fine enough resolution to kill specific mutation-testing
+// survivors. Each assertion targets a distinct branch, call site, or return
+// path reported by diffguard's T1/T2 mutation pass.
+
+// ---- mutatedStorageKeys ----
+
+func TestMutatedStorageKeys_MissingAddr(t *testing.T) {
+ diff := &FlatDiff{Storage: make(map[common.Address]map[common.Hash]common.Hash)}
+ require.Nil(t, diff.mutatedStorageKeys(common.HexToAddress("0x1234")))
+}
+
+func TestMutatedStorageKeys_PresentAddr(t *testing.T) {
+ addr := common.HexToAddress("0x1234")
+ diff := &FlatDiff{
+ Storage: map[common.Address]map[common.Hash]common.Hash{
+ addr: {
+ common.HexToHash("0xaa"): common.HexToHash("0x01"),
+ common.HexToHash("0xbb"): common.HexToHash("0x02"),
+ },
+ },
+ }
+ got := diff.mutatedStorageKeys(addr)
+ require.Len(t, got, 2)
+ seen := map[common.Hash]bool{}
+ for _, k := range got {
+ seen[k] = true
+ }
+ require.True(t, seen[common.HexToHash("0xaa")])
+ require.True(t, seen[common.HexToHash("0xbb")])
+}
+
+// ---- touchAddressAndStorage / TouchAllAddresses ----
+
+func TestTouchAddressAndStorage_LoadsBalanceWithNoSlots(t *testing.T) {
+	// Kills: removal of dst.GetBalance(addr) when slots slice is empty. Without
+	// it, a FlatDiff that names an account but no slots would leave dst untracked.
+	// NOTE(review): "0xtouch1" is not valid hex — common.HexToAddress silently decodes it to the zero address; confirm that is intended.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xtouch1")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(99), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{addr: {}},
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ ReadStorage: make(map[common.Address][]common.Hash),
+ }
+
+ dst, err := New(root, db)
+ require.NoError(t, err)
+ diff.TouchAllAddresses(dst)
+
+ _, loaded := dst.stateObjects[addr]
+ require.True(t, loaded, "TouchAllAddresses must load addr even when the slot list is empty")
+}
+
+func TestTouchAddressAndStorage_LoadsEachSlot(t *testing.T) {
+ // Kills: removal of dst.GetCommittedState(addr, slot) inside the loop.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xtouch2")
+ slot1 := common.HexToHash("0xa1")
+ slot2 := common.HexToHash("0xa2")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(1), 0)
+ sdb.SetState(addr, slot1, common.HexToHash("0xb1"))
+ sdb.SetState(addr, slot2, common.HexToHash("0xb2"))
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{addr: {}},
+ Storage: map[common.Address]map[common.Hash]common.Hash{
+ addr: {slot1: {}, slot2: {}},
+ },
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ ReadStorage: make(map[common.Address][]common.Hash),
+ }
+
+ dst, err := New(root, db)
+ require.NoError(t, err)
+ diff.TouchAllAddresses(dst)
+
+ obj := dst.getStateObject(addr)
+ require.NotNil(t, obj)
+ _, s1 := obj.originStorage[slot1]
+ _, s2 := obj.originStorage[slot2]
+ require.True(t, s1, "slot1 must be tracked in dst.originStorage")
+ require.True(t, s2, "slot2 must be tracked in dst.originStorage")
+}
+
+func TestTouchAllAddresses_ReadSetSlotsLoaded(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xtouch3")
+ slot := common.HexToHash("0xcafe")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(42), 0)
+ sdb.SetState(addr, slot, common.HexToHash("0xbeef"))
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ diff := &FlatDiff{
+ Accounts: make(map[common.Address]types.StateAccount),
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ ReadSet: []common.Address{addr},
+ ReadStorage: map[common.Address][]common.Hash{addr: {slot}},
+ }
+
+ dst, err := New(root, db)
+ require.NoError(t, err)
+ diff.TouchAllAddresses(dst)
+
+ obj := dst.getStateObject(addr)
+ require.NotNil(t, obj)
+ _, loaded := obj.originStorage[slot]
+ require.True(t, loaded, "ReadSet slot must be tracked in dst.originStorage")
+}
+
+func TestTouchAllAddresses_DestructsLoadBalance(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xtouch4")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(5), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ diff := &FlatDiff{
+ Accounts: make(map[common.Address]types.StateAccount),
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: map[common.Address]struct{}{addr: {}},
+ Code: make(map[common.Hash][]byte),
+ }
+
+ dst, err := New(root, db)
+ require.NoError(t, err)
+ diff.TouchAllAddresses(dst)
+
+ _, ok := dst.stateObjects[addr]
+ require.True(t, ok, "destruct entry must cause dst to load addr via GetBalance")
+}
+
+func TestTouchAllAddresses_NonExistentReadsRegistered(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ missing := common.HexToAddress("0xtouch5-missing")
+ diff := &FlatDiff{
+ Accounts: make(map[common.Address]types.StateAccount),
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ NonExistentReads: []common.Address{missing},
+ }
+
+ dst, err := New(root, db)
+ require.NoError(t, err)
+ diff.TouchAllAddresses(dst)
+
+ _, ok := dst.nonExistentReads[missing]
+ require.True(t, ok, "NonExistentReads addr must be tracked via GetBalance")
+}
+
+// ---- captureMutation (via CommitSnapshot) ----
+
+func TestCommitSnapshot_DestructedAccountExcludedFromAccounts(t *testing.T) {
+ // Kills: removal of `if op.isDelete()` early return. Without it, a destructed
+ // account flows into diff.Accounts alongside Destructs.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xcap1")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(100), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+ sdb2.SelfDestruct(addr)
+ diff := sdb2.CommitSnapshot(false)
+
+ require.Contains(t, diff.Destructs, addr)
+ require.NotContains(t, diff.Accounts, addr, "destructed addr must not also appear in Accounts")
+}
+
+func TestCommitSnapshot_DirtyCodeCaptured(t *testing.T) {
+ // Kills: removal of the dirtyCode branch that populates diff.Code.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xcap2")
+ code := []byte{0x60, 0x01, 0x60, 0x00, 0xf3}
+ sdb.CreateAccount(addr)
+ sdb.SetCode(addr, code, 0)
+
+ diff := sdb.CommitSnapshot(false)
+
+ codeHash := common.BytesToHash(crypto.Keccak256(code))
+ got, ok := diff.Code[codeHash]
+ require.True(t, ok, "dirty code must populate diff.Code")
+ require.Equal(t, code, got)
+}
+
+func TestCaptureMutation_OrphanMutationIsSkipped(t *testing.T) {
+ // Kills: removal of the `if !ok { return }` guard at line 2119. Without the
+ // guard, the following `diff.Accounts[addr] = obj.data` dereferences a nil
+ // stateObject and panics.
+ //
+ // Normal execution never produces an orphan mutation (every Set* path that
+ // records a mutation also installs a stateObject), so we construct the
+ // state by hand.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xorphan")
+ sdb.mutations[addr] = &mutation{typ: update}
+
+ require.NotPanics(t, func() { _ = sdb.CommitSnapshot(false) })
+ diff := sdb.CommitSnapshot(false)
+ require.NotContains(t, diff.Accounts, addr,
+ "orphan mutation (no stateObject) must not produce a diff.Accounts entry")
+}
+
+func TestCommitSnapshot_NoCodeWithoutDirtyFlag(t *testing.T) {
+ // Complements the previous test: verifies diff.Code stays empty when no code
+ // is deployed, so the dirtyCode branch is genuinely guarded.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xcap3")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(1), 0)
+
+ diff := sdb.CommitSnapshot(false)
+ require.Empty(t, diff.Code)
+}
+
+// ---- captureObjectStorage ----
+
+func TestCaptureObjectStorage_NoPendingLeavesStorageEmpty(t *testing.T) {
+ // Kills: `len(pendingStorage) > 0` → `>= 0` (always true). Under mutation,
+ // an empty pendingStorage would still add an empty map to diff.Storage[addr].
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xcos1")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(7), 0)
+
+ diff := sdb.CommitSnapshot(false)
+
+ require.Contains(t, diff.Accounts, addr)
+ _, hasStorage := diff.Storage[addr]
+ require.False(t, hasStorage, "addr with no pending writes must not have a Storage entry (even empty)")
+}
+
+func TestCaptureObjectStorage_SplitsPendingAndRead(t *testing.T) {
+ // Kills:
+ // - removal of readSlots append (line 2146)
+ // - removal of `if len(readSlots) > 0 { ... ReadStorage[addr] = readSlots }` (line 2150)
+ // - inversion of the `len(originStorage) == 0` guard (line 2141)
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xcos2")
+ readSlot := common.HexToHash("0xa1")
+ writeSlot := common.HexToHash("0xa2")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(1), 0)
+ sdb.SetState(addr, readSlot, common.HexToHash("0xb1"))
+ sdb.SetState(addr, writeSlot, common.HexToHash("0xb2"))
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+ // Read-only access to readSlot (loads origin only).
+ _ = sdb2.GetState(addr, readSlot)
+ // Write writeSlot so the account is mutated.
+ sdb2.SetState(addr, writeSlot, common.HexToHash("0xb3"))
+
+ diff := sdb2.CommitSnapshot(false)
+
+ slots := diff.Storage[addr]
+ require.Contains(t, slots, writeSlot, "writeSlot must be in Storage")
+ require.NotContains(t, slots, readSlot, "readSlot must NOT be in Storage")
+
+ reads := diff.ReadStorage[addr]
+ require.Contains(t, reads, readSlot, "readSlot must be in ReadStorage")
+ require.NotContains(t, reads, writeSlot, "writeSlot must NOT be in ReadStorage")
+}
+
+// ---- captureReadOnlyAccount ----
+
+func TestCaptureReadOnlyAccount_AddsToReadSet(t *testing.T) {
+ // Kills: removal of the `len(originStorage) == 0` early-return guard
+ // (line 2167). Without it, diff.ReadStorage[addr] would be populated with
+ // an empty slice for read-only accounts that didn't touch any slots.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xro1")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(55), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+ _ = sdb2.GetBalance(addr)
+
+ diff := sdb2.CommitSnapshot(false)
+
+ require.Contains(t, diff.ReadSet, addr)
+ require.NotContains(t, diff.Accounts, addr, "read-only access must not populate Accounts")
+ require.NotContains(t, diff.ReadStorage, addr,
+ "read-only addr without slot accesses must not have a ReadStorage entry")
+}
+
+func TestCaptureReadOnlyAccount_SkipMutated(t *testing.T) {
+ // Kills: removal of `if isMutation { return }` guard (line 2160).
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xro2")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(77), 0)
+
+ diff := sdb.CommitSnapshot(false)
+
+ require.Contains(t, diff.Accounts, addr)
+ require.NotContains(t, diff.ReadSet, addr, "mutated addr must not appear in ReadSet")
+}
+
+func TestCaptureReadOnlyAccount_SkipDestructed(t *testing.T) {
+ // Kills: removal of `if isDestruct { return }` guard (line 2163).
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xro3")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(88), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+ sdb2.SelfDestruct(addr)
+
+ diff := sdb2.CommitSnapshot(false)
+
+ require.Contains(t, diff.Destructs, addr)
+ require.NotContains(t, diff.ReadSet, addr, "destructed addr must not appear in ReadSet")
+}
+
+func TestCaptureReadOnlyAccount_PopulatesReadStorage(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xro4")
+ slot := common.HexToHash("0xdead")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(1), 0)
+ sdb.SetState(addr, slot, common.HexToHash("0xcafe"))
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+ _ = sdb2.GetBalance(addr)
+ _ = sdb2.GetState(addr, slot)
+
+ diff := sdb2.CommitSnapshot(false)
+ require.Contains(t, diff.ReadSet, addr)
+ require.Contains(t, diff.ReadStorage[addr], slot)
+}
+
+// ---- captureNonExistentRead ----
+
+func TestCaptureNonExistentRead_AddsMissingAddr(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ missing := common.HexToAddress("0xnr1")
+ _ = sdb.GetBalance(missing)
+
+ diff := sdb.CommitSnapshot(false)
+ require.Contains(t, diff.NonExistentReads, missing)
+}
+
+func TestCaptureNonExistentRead_SkipMutated(t *testing.T) {
+ // Kills: removal of `if isMutation { return }` guard (line 2181). An address
+ // that was looked up (missing) and then created must not appear in
+ // NonExistentReads.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xnr2")
+
+ _ = sdb.GetBalance(addr)
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(1), 0)
+
+ diff := sdb.CommitSnapshot(false)
+
+ require.Contains(t, diff.Accounts, addr)
+ require.NotContains(t, diff.NonExistentReads, addr, "created addr must not leak into NonExistentReads")
+}
+
+func TestCaptureNonExistentRead_SkipOrphanMutation(t *testing.T) {
+ // Kills: removal of the `if isMutation { return }` guard at line 2181.
+	// Normal flow can't exercise this because, when an addr is in mutations,
+ // it's also in stateObjects, so the 2184 guard catches it anyway — making
+ // 2181 observationally equivalent. We distinguish them with an orphan
+ // mutation: mutations[addr] set but stateObjects[addr] absent. Under the
+ // 2181 mutation, execution falls through to the 2184 check (which also
+ // passes since stateObjects is empty) and appends to NonExistentReads.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xnr_orphan")
+ sdb.mutations[addr] = &mutation{typ: update}
+ if sdb.nonExistentReads == nil {
+ sdb.nonExistentReads = make(map[common.Address]struct{})
+ }
+ sdb.nonExistentReads[addr] = struct{}{}
+
+ diff := sdb.CommitSnapshot(false)
+ require.NotContains(t, diff.NonExistentReads, addr,
+ "orphan mutation in nonExistentReads must be filtered by the isMutation guard")
+}
+
+func TestCaptureNonExistentRead_SkipExistingStateObject(t *testing.T) {
+ // Kills: removal of `if _, ok := stateObjects[addr]; ok { return }` guard
+ // (line 2184). Normally unreachable — force the state by seeding
+ // nonExistentReads directly for an addr that already has a stateObject.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xnr3")
+
+	// GetBalance would not load the missing addr into stateObjects (no state
+	// object is created for a non-existent account), so instead create and
+	// finalise to ensure it's in stateObjects, then manually inject the
+	// non-existent read. Without finalise it stays in mutations (previous test).
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(1), 0)
+ sdb.Finalise(false)
+ // Drop the mutation so captureNonExistentRead's `isMutation` guard is not hit.
+ delete(sdb.mutations, addr)
+ // Inject non-existent read for an addr that now lives in stateObjects.
+ if sdb.nonExistentReads == nil {
+ sdb.nonExistentReads = make(map[common.Address]struct{})
+ }
+ sdb.nonExistentReads[addr] = struct{}{}
+
+ diff := sdb.CommitSnapshot(false)
+
+ require.NotContains(t, diff.NonExistentReads, addr,
+ "addr present in stateObjects must be excluded from NonExistentReads")
+}
+
+// ---- ApplyFlatDiff ----
+
+func TestApplyFlatDiff_DestructPopulatesDestructMap(t *testing.T) {
+ // Kills: removal of `if !already { s.stateObjectsDestruct[addr] = newObject(...) }` (line 2208).
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xafd1")
+ diff := &FlatDiff{
+ Accounts: make(map[common.Address]types.StateAccount),
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: map[common.Address]struct{}{addr: {}},
+ Code: make(map[common.Hash][]byte),
+ }
+ sdb.ApplyFlatDiff(diff)
+ _, ok := sdb.stateObjectsDestruct[addr]
+ require.True(t, ok, "ApplyFlatDiff must register destruct in stateObjectsDestruct")
+}
+
+func TestApplyFlatDiff_PreservesExistingDestructEntry(t *testing.T) {
+ // Kills: inversion of the `!already` guard (line 2208). If the guard flips,
+ // the pre-existing entry would be overwritten by a blank newObject.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xafd2")
+
+ sentinel := newObject(sdb, addr, &types.StateAccount{Nonce: 999})
+ sdb.stateObjectsDestruct[addr] = sentinel
+
+ diff := &FlatDiff{
+ Accounts: make(map[common.Address]types.StateAccount),
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: map[common.Address]struct{}{addr: {}},
+ Code: make(map[common.Hash][]byte),
+ }
+ sdb.ApplyFlatDiff(diff)
+
+ require.Same(t, sentinel, sdb.stateObjectsDestruct[addr],
+ "pre-existing destruct entry must be preserved")
+}
+
+func TestApplyFlatDiff_InstallsAccountInStateObjects(t *testing.T) {
+ // Covers applyFlatAccountOverlay: newObject + stateObjects[addr] = obj.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xafo1")
+ acct := types.StateAccount{
+ Nonce: 7,
+ Balance: uint256.NewInt(123),
+ Root: types.EmptyRootHash,
+ CodeHash: types.EmptyCodeHash.Bytes(),
+ }
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{addr: acct},
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ }
+ sdb.ApplyFlatDiff(diff)
+
+ obj, ok := sdb.stateObjects[addr]
+ require.True(t, ok, "overlayed account must be in stateObjects")
+ require.Equal(t, uint64(7), obj.data.Nonce)
+ require.Equal(t, uint256.NewInt(123), obj.data.Balance)
+}
+
+func TestApplyFlatDiff_InstallsCodeOverlay(t *testing.T) {
+ // Covers applyFlatAccountOverlay code branch.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xafo2")
+ code := []byte{0x60, 0x11, 0x60, 0x00, 0xf3}
+ codeHash := common.BytesToHash(crypto.Keccak256(code))
+ acct := types.StateAccount{
+ Nonce: 1,
+ Balance: uint256.NewInt(0),
+ Root: types.EmptyRootHash,
+ CodeHash: codeHash.Bytes(),
+ }
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{addr: acct},
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: make(map[common.Address]struct{}),
+ Code: map[common.Hash][]byte{codeHash: code},
+ }
+ sdb.ApplyFlatDiff(diff)
+
+ obj, ok := sdb.stateObjects[addr]
+ require.True(t, ok)
+ require.Equal(t, code, obj.code, "FlatDiff code must be carried in obj.code")
+ require.False(t, obj.dirtyCode, "overlayed code must NOT be marked dirty")
+}
+
+func TestApplyFlatDiff_InstallsStorageOverlay(t *testing.T) {
+ // Covers applyFlatAccountOverlay storage branch.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xafo3")
+ slot := common.HexToHash("0xa1")
+ value := common.HexToHash("0xb1")
+ acct := types.StateAccount{
+ Nonce: 1,
+ Balance: uint256.NewInt(0),
+ Root: types.EmptyRootHash,
+ CodeHash: types.EmptyCodeHash.Bytes(),
+ }
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{addr: acct},
+ Storage: map[common.Address]map[common.Hash]common.Hash{
+ addr: {slot: value},
+ },
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ }
+ sdb.ApplyFlatDiff(diff)
+
+ obj, ok := sdb.stateObjects[addr]
+ require.True(t, ok)
+ got, loaded := obj.originStorage[slot]
+ require.True(t, loaded, "FlatDiff slot must populate originStorage")
+ require.Equal(t, value, got)
+}
+
+func TestNewWithFlatBase_SuccessInstallsFlatDiffRef(t *testing.T) {
+ // Covers the success path and the `if flatDiff != nil` branch.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ diff := &FlatDiff{
+ Accounts: make(map[common.Address]types.StateAccount),
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ }
+ overlay, err := NewWithFlatBase(root, db, diff)
+ require.NoError(t, err)
+ require.NotNil(t, overlay)
+ require.Same(t, diff, overlay.flatDiffRef, "flatDiffRef must reference the supplied diff")
+}
+
+func TestNewWithFlatBase_NilFlatDiffLeavesRefNil(t *testing.T) {
+ // Covers the `if flatDiff != nil` guard (negative case).
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ overlay, err := NewWithFlatBase(root, db, nil)
+ require.NoError(t, err)
+ require.NotNil(t, overlay)
+ require.Nil(t, overlay.flatDiffRef, "nil FlatDiff must not overwrite flatDiffRef")
+}
+
+func TestApplyFlatMutation_StorageWritesApplied(t *testing.T) {
+ // Covers applyFlatMutation's storage loop (SetState per slot).
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xafm_storage")
+ sdb.CreateAccount(addr)
+ sdb.SetNonce(addr, 1, 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+
+ slot := common.HexToHash("0xabc")
+ value := common.HexToHash("0xdef")
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{
+ addr: {
+ Nonce: 1,
+ Balance: uint256.NewInt(0),
+ Root: types.EmptyRootHash,
+ CodeHash: types.EmptyCodeHash.Bytes(),
+ },
+ },
+ Storage: map[common.Address]map[common.Hash]common.Hash{
+ addr: {slot: value},
+ },
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ }
+ sdb2.ApplyFlatDiffForCommit(diff)
+
+ require.Equal(t, value, sdb2.GetState(addr, slot), "applyFlatMutation must SetState for each storage slot")
+}
+
+// ---- ApplyFlatDiffForCommit / applyFlatMutation ----
+
+func TestApplyFlatDiffForCommit_PureDestructTriggersSelfDestruct(t *testing.T) {
+ // Kills: removal of `s.SelfDestruct(addr)` call (line 2258).
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xafc1")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(42), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+
+ diff := &FlatDiff{
+ Accounts: make(map[common.Address]types.StateAccount),
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: map[common.Address]struct{}{addr: {}},
+ Code: make(map[common.Hash][]byte),
+ }
+ sdb2.ApplyFlatDiffForCommit(diff)
+
+ obj := sdb2.getStateObject(addr)
+ require.NotNil(t, obj)
+ require.True(t, obj.selfDestructed, "SelfDestruct must have been invoked")
+}
+
+func TestApplyFlatDiffForCommit_ResurrectionSkipsSelfDestruct(t *testing.T) {
+ // Kills:
+ // - removal of `if resurrected { continue }` (line 2255). Without the
+ // skip, SelfDestruct(addr) runs before applyFlatMutation, which zeros
+ // the balance on the pre-block object — the same object that
+ // applyFlatMutation later snapshots into stateObjectsDestruct. Asserting
+ // that the snapshot still carries the ORIGINAL (non-zero) balance
+ // distinguishes the two paths.
+ // - removal of `s.SelfDestruct(addr)` call on the non-resurrected path is
+ // covered by TestApplyFlatDiffForCommit_PureDestructTriggersSelfDestruct.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xafc2")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(5), 0)
+ sdb.SetNonce(addr, 1, 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{
+ addr: {
+ Nonce: 100,
+ Balance: uint256.NewInt(0),
+ Root: types.EmptyRootHash,
+ CodeHash: types.EmptyCodeHash.Bytes(),
+ },
+ },
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: map[common.Address]struct{}{addr: {}},
+ Code: make(map[common.Hash][]byte),
+ }
+ sdb2.ApplyFlatDiffForCommit(diff)
+
+ require.Equal(t, uint64(100), sdb2.GetNonce(addr), "resurrected account must adopt new nonce")
+
+ prev, destructed := sdb2.stateObjectsDestruct[addr]
+ require.True(t, destructed, "pre-block object must be recorded in stateObjectsDestruct")
+ require.Equal(t, uint64(1), prev.data.Nonce, "stateObjectsDestruct must snapshot PRE-block nonce")
+ require.Equal(t, uint256.NewInt(5), prev.data.Balance,
+ "stateObjectsDestruct must carry PRE-block balance; if SelfDestruct ran it would be zero")
+ require.False(t, prev.selfDestructed,
+ "prev must NOT be marked self-destructed — that would indicate SelfDestruct was called")
+}
+
+func TestApplyFlatMutation_SetNonceCalled(t *testing.T) {
+ // Kills: removal of `s.SetNonce(...)` (line 2291).
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xafm1")
+ sdb.CreateAccount(addr)
+ sdb.SetNonce(addr, 1, 0)
+ sdb.SetBalance(addr, uint256.NewInt(10), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{
+ addr: {
+ Nonce: 50,
+ Balance: uint256.NewInt(10),
+ Root: types.EmptyRootHash,
+ CodeHash: types.EmptyCodeHash.Bytes(),
+ },
+ },
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ }
+ sdb2.ApplyFlatDiffForCommit(diff)
+
+ require.Equal(t, uint64(50), sdb2.GetNonce(addr))
+}
+
+func TestApplyFlatMutation_SetBalanceCalled(t *testing.T) {
+ // Kills: removal of `s.SetBalance(...)` (line 2292).
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xafm2")
+ sdb.CreateAccount(addr)
+ sdb.SetNonce(addr, 1, 0)
+ sdb.SetBalance(addr, uint256.NewInt(10), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{
+ addr: {
+ Nonce: 1,
+ Balance: uint256.NewInt(9999),
+ Root: types.EmptyRootHash,
+ CodeHash: types.EmptyCodeHash.Bytes(),
+ },
+ },
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ }
+ sdb2.ApplyFlatDiffForCommit(diff)
+
+ require.Equal(t, uint256.NewInt(9999), sdb2.GetBalance(addr))
+}
+
+func TestApplyFlatMutation_DestructBranchDeletesStateObject(t *testing.T) {
+ // Kills:
+ // - removal of `if destructed` branch entirely (line 2270)
+ // - removal of `if !already` inner guard (line 2271)
+ // - inversion of `prev != nil` check (line 2272)
+ // - removal of `delete(s.stateObjects, addr)` (line 2276)
+ //
+ // We distinguish these by asserting:
+ // - stateObjectsDestruct[addr] contains the PRE-block nonce (7), not the new one.
+ // - the post-commit GetNonce is the NEW value (99), which is only possible if
+ // the old stateObjects entry was deleted so SetNonce created a fresh object.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xafm3")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(500), 0)
+ sdb.SetNonce(addr, 7, 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+ // Pre-load so stateObjects contains the pre-block object.
+ require.Equal(t, uint64(7), sdb2.GetNonce(addr))
+ _, had := sdb2.stateObjects[addr]
+ require.True(t, had)
+
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{
+ addr: {
+ Nonce: 99,
+ Balance: uint256.NewInt(1),
+ Root: types.EmptyRootHash,
+ CodeHash: types.EmptyCodeHash.Bytes(),
+ },
+ },
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: map[common.Address]struct{}{addr: {}},
+ Code: make(map[common.Hash][]byte),
+ }
+ sdb2.ApplyFlatDiffForCommit(diff)
+
+ prev, destructed := sdb2.stateObjectsDestruct[addr]
+ require.True(t, destructed, "stateObjectsDestruct must contain addr")
+ require.Equal(t, uint64(7), prev.data.Nonce,
+ "stateObjectsDestruct must hold pre-block nonce; mutating delete(stateObjects) leaves the same pointer here and corrupts this to 99")
+
+ require.Equal(t, uint64(99), sdb2.GetNonce(addr))
+ require.Equal(t, uint256.NewInt(1), sdb2.GetBalance(addr))
+}
+
+func TestApplyFlatMutation_WithCodeCallsSetCode(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xafm4")
+ sdb.CreateAccount(addr)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+
+ code := []byte{0x60, 0x02, 0x60, 0x00, 0xf3}
+ codeHash := common.BytesToHash(crypto.Keccak256(code))
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{
+ addr: {
+ Nonce: 1,
+ Balance: uint256.NewInt(0),
+ Root: types.EmptyRootHash,
+ CodeHash: codeHash.Bytes(),
+ },
+ },
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: make(map[common.Address]struct{}),
+ Code: map[common.Hash][]byte{codeHash: code},
+ }
+ sdb2.ApplyFlatDiffForCommit(diff)
+
+ require.Equal(t, code, sdb2.GetCode(addr))
+ require.Equal(t, codeHash, sdb2.GetCodeHash(addr))
+}
+
+// ---- NewWithFlatBase ----
+
+func TestNewWithFlatBase_PropagatesReaderError(t *testing.T) {
+ // Kills: replace-return-value mutation on `return nil, err` (line 2306).
+ db := NewDatabaseForTesting()
+ // Root is not present in the DB, so the underlying New() returns an error
+ // when opening the trie reader.
+ badRoot := common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
+ sdb, err := NewWithFlatBase(badRoot, db, nil)
+ require.Error(t, err, "bad root must surface the reader error")
+ require.Nil(t, sdb)
+}
+
+// ---- WasStorageSlotRead ----
+
+func TestWasStorageSlotRead_AddrNotLoadedReturnsFalse(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ missing := common.HexToAddress("0xws1")
+ require.False(t, sdb.WasStorageSlotRead(missing, common.HexToHash("0x01")))
+}
+
+func TestWasStorageSlotRead_SlotNotReadReturnsFalse(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xws2")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(1), 0)
+ require.False(t, sdb.WasStorageSlotRead(addr, common.HexToHash("0x01")))
+}
+
+// ---- PropagateReadsTo ----
+
+func TestPropagateReadsTo_LoadsAddrIntoDst(t *testing.T) {
+ // Kills: removal of `dst.GetBalance(addr)` call (line 2494). Assertion
+ // inspects dst.stateObjects directly BEFORE issuing any read that would
+ // incidentally populate it.
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xprop1")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(1), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ src, err := New(root, db)
+ require.NoError(t, err)
+ dst, err := New(root, db)
+ require.NoError(t, err)
+
+ src.GetBalance(addr)
+ src.PropagateReadsTo(dst)
+
+ _, ok := dst.stateObjects[addr]
+ require.True(t, ok, "PropagateReadsTo must populate dst.stateObjects via GetBalance")
+}
+
+func TestPropagateReadsTo_LoadsStorageSlotsIntoDst(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ addr := common.HexToAddress("0xprop2")
+ slot := common.HexToHash("0xcaffe")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(1), 0)
+ sdb.SetState(addr, slot, common.HexToHash("0xbadc0de"))
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ src, err := New(root, db)
+ require.NoError(t, err)
+ dst, err := New(root, db)
+ require.NoError(t, err)
+
+ src.GetBalance(addr)
+ src.GetState(addr, slot)
+ src.PropagateReadsTo(dst)
+
+ dstObj, ok := dst.stateObjects[addr]
+ require.True(t, ok)
+ _, slotLoaded := dstObj.originStorage[slot]
+ require.True(t, slotLoaded, "origin slot must propagate to dst")
+}
diff --git a/core/state/statedb_pipeline_test.go b/core/state/statedb_pipeline_test.go
new file mode 100644
index 0000000000..4b800ab76b
--- /dev/null
+++ b/core/state/statedb_pipeline_test.go
@@ -0,0 +1,697 @@
+package state
+
+import (
+ "testing"
+
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/stateless"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
+)
+
+func TestWasStorageSlotRead(t *testing.T) {
+ db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
+ sdb, _ := New(types.EmptyRootHash, db)
+
+ addr := common.HexToAddress("0x1234")
+ slot := common.HexToHash("0xabcd")
+
+ // Slot not read yet
+ if sdb.WasStorageSlotRead(addr, slot) {
+ t.Error("slot should not be marked as read before any access")
+ }
+
+ // Create an account and read its storage
+ sdb.CreateAccount(addr)
+ sdb.SetNonce(addr, 1, 0)
+ sdb.Finalise(false)
+
+ // Read the slot
+ sdb.GetState(addr, slot)
+
+ // Now it should be marked as read
+ if !sdb.WasStorageSlotRead(addr, slot) {
+ t.Error("slot should be marked as read after GetState")
+ }
+
+ // A different slot should not be marked
+ otherSlot := common.HexToHash("0x5678")
+ if sdb.WasStorageSlotRead(addr, otherSlot) {
+ t.Error("other slot should not be marked as read")
+ }
+
+ // A different address should not be marked
+ otherAddr := common.HexToAddress("0x5678")
+ if sdb.WasStorageSlotRead(otherAddr, slot) {
+ t.Error("other address should not be marked as read")
+ }
+}
+
+func TestFlatDiffOverlay_ReadThrough(t *testing.T) {
+ // Create a base state with an account
+ db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
+ sdb, _ := New(types.EmptyRootHash, db)
+
+ baseAddr := common.HexToAddress("0xbase")
+ sdb.CreateAccount(baseAddr)
+ sdb.SetNonce(baseAddr, 1, 0)
+ sdb.SetBalance(baseAddr, uint256.NewInt(100), 0)
+ root, _, _ := sdb.CommitWithUpdate(0, false, false)
+
+ // Create a FlatDiff with a new account
+ overlayAddr := common.HexToAddress("0xoverlay")
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{
+ overlayAddr: {
+ Nonce: 42,
+ Balance: uint256.NewInt(200),
+ Root: types.EmptyRootHash,
+ CodeHash: types.EmptyCodeHash.Bytes(),
+ },
+ },
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ ReadStorage: make(map[common.Address][]common.Hash),
+ NonExistentReads: nil,
+ }
+
+ // Create StateDB with FlatDiff overlay
+ overlayDB, err := NewWithFlatBase(root, db, diff)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Should see the overlay account
+ if overlayDB.GetNonce(overlayAddr) != 42 {
+ t.Errorf("expected nonce 42 for overlay addr, got %d", overlayDB.GetNonce(overlayAddr))
+ }
+
+ // Should still see the base account
+ if overlayDB.GetNonce(baseAddr) != 1 {
+ t.Errorf("expected nonce 1 for base addr, got %d", overlayDB.GetNonce(baseAddr))
+ }
+}
+
+func TestCommitSnapshot_CapturesWrites(t *testing.T) {
+ db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
+ sdb, _ := New(types.EmptyRootHash, db)
+
+ addr := common.HexToAddress("0x1234")
+ sdb.CreateAccount(addr)
+ sdb.SetNonce(addr, 10, 0)
+ sdb.SetBalance(addr, uint256.NewInt(500), 0)
+
+ slot := common.HexToHash("0xaaaa")
+ sdb.SetState(addr, slot, common.HexToHash("0xbbbb"))
+
+ diff := sdb.CommitSnapshot(false)
+
+ // Verify account is captured
+ acct, ok := diff.Accounts[addr]
+ if !ok {
+ t.Fatal("account not captured in FlatDiff")
+ }
+ if acct.Nonce != 10 {
+ t.Errorf("expected nonce 10, got %d", acct.Nonce)
+ }
+
+ // Verify storage is captured
+ slots, ok := diff.Storage[addr]
+ if !ok {
+ t.Fatal("storage not captured in FlatDiff")
+ }
+ if slots[slot] != common.HexToHash("0xbbbb") {
+ t.Errorf("expected slot value 0xbbbb, got %x", slots[slot])
+ }
+}
+
+func TestFlatDiffOverlay_DestructedAccountReturnsNil(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xdead01")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(999), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ // FlatDiff marks account as destructed but does NOT add it to Accounts.
+ diff := &FlatDiff{
+ Accounts: make(map[common.Address]types.StateAccount),
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: map[common.Address]struct{}{addr: {}},
+ Code: make(map[common.Hash][]byte),
+ }
+
+ overlayDB, err := NewWithFlatBase(root, db, diff)
+ require.NoError(t, err)
+
+ require.False(t, overlayDB.Exist(addr), "destructed account should not exist")
+ require.True(t, overlayDB.GetBalance(addr).IsZero(), "destructed account balance should be zero")
+}
+
+func TestFlatDiffOverlay_DestructAndResurrect(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xdead02")
+ sdb.CreateAccount(addr)
+ sdb.SetNonce(addr, 5, 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ // FlatDiff has addr in BOTH Destructs and Accounts (destruct + resurrect with new nonce).
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{
+ addr: {
+ Nonce: 10,
+ Balance: uint256.NewInt(0),
+ Root: types.EmptyRootHash,
+ CodeHash: types.EmptyCodeHash.Bytes(),
+ },
+ },
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: map[common.Address]struct{}{addr: {}},
+ Code: make(map[common.Hash][]byte),
+ }
+
+ overlayDB, err := NewWithFlatBase(root, db, diff)
+ require.NoError(t, err)
+
+ // The account should be resurrected with the new nonce from FlatDiff.Accounts.
+ require.Equal(t, uint64(10), overlayDB.GetNonce(addr))
+}
+
+func TestTrieOnlyReader_SkipsFlatReaders(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xacc001")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(42), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ // Create StateDB via NewTrieOnly — reads go through trie, not flat/snapshot.
+ trieDB, err := NewTrieOnly(root, db)
+ require.NoError(t, err)
+
+ // Verify trie reader returns correct data.
+ require.Equal(t, uint256.NewInt(42), trieDB.GetBalance(addr))
+
+ // Attach a witness and modify the account via a fresh trie-only StateDB.
+ // After IntermediateRoot, the witness should capture trie nodes (non-empty
+ // State map). With flat readers the trie is never walked, so the witness
+ // would remain empty.
+ trieDB2, err := NewTrieOnly(root, db)
+ require.NoError(t, err)
+
+ witness := &stateless.Witness{
+ Headers: []*types.Header{{}},
+ Codes: make(map[string]struct{}),
+ State: make(map[string]struct{}),
+ }
+ trieDB2.SetWitness(witness)
+
+ // Modify the account so that IntermediateRoot walks the trie and collects
+ // witness nodes from the account trie.
+ trieDB2.SetBalance(addr, uint256.NewInt(99), 0)
+ trieDB2.IntermediateRoot(false)
+
+ require.NotEmpty(t, witness.State, "witness should capture trie nodes when using trie-only reader")
+}
+
+func TestNewTrieOnly_ReadsCorrectData(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr1 := common.HexToAddress("0xacc101")
+ addr2 := common.HexToAddress("0xacc102")
+ addr3 := common.HexToAddress("0xacc103")
+
+ sdb.CreateAccount(addr1)
+ sdb.SetBalance(addr1, uint256.NewInt(100), 0)
+ sdb.SetNonce(addr1, 1, 0)
+
+ sdb.CreateAccount(addr2)
+ sdb.SetBalance(addr2, uint256.NewInt(200), 0)
+ sdb.SetNonce(addr2, 5, 0)
+ sdb.SetCode(addr2, []byte{0x60, 0x00, 0x60, 0x00}, 0)
+
+ sdb.CreateAccount(addr3)
+ sdb.SetBalance(addr3, uint256.NewInt(300), 0)
+ slot := common.HexToHash("0xaa01")
+ sdb.SetState(addr3, slot, common.HexToHash("0xbb01"))
+
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ // Create via NewTrieOnly and verify all data.
+ trieDB, err := NewTrieOnly(root, db)
+ require.NoError(t, err)
+
+ require.Equal(t, uint256.NewInt(100), trieDB.GetBalance(addr1))
+ require.Equal(t, uint64(1), trieDB.GetNonce(addr1))
+
+ require.Equal(t, uint256.NewInt(200), trieDB.GetBalance(addr2))
+ require.Equal(t, uint64(5), trieDB.GetNonce(addr2))
+ require.Equal(t, crypto.Keccak256Hash([]byte{0x60, 0x00, 0x60, 0x00}), trieDB.GetCodeHash(addr2))
+ require.Equal(t, []byte{0x60, 0x00, 0x60, 0x00}, trieDB.GetCode(addr2))
+
+ require.Equal(t, uint256.NewInt(300), trieDB.GetBalance(addr3))
+ require.Equal(t, common.HexToHash("0xbb01"), trieDB.GetState(addr3, slot))
+}
+
+func TestPropagateReadsTo_AccountsAndStorage(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr1 := common.HexToAddress("0xaa0001")
+ addr2 := common.HexToAddress("0xaa0002")
+ slot1 := common.HexToHash("0xcc0001")
+ slot2 := common.HexToHash("0xcc0002")
+
+ sdb.CreateAccount(addr1)
+ sdb.SetBalance(addr1, uint256.NewInt(111), 0)
+ sdb.SetState(addr1, slot1, common.HexToHash("0xdd0001"))
+ sdb.SetState(addr1, slot2, common.HexToHash("0xdd0002"))
+
+ sdb.CreateAccount(addr2)
+ sdb.SetBalance(addr2, uint256.NewInt(222), 0)
+
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ // Create src and dst StateDBs at the same root.
+ src, err := New(root, db)
+ require.NoError(t, err)
+ dst, err := New(root, db)
+ require.NoError(t, err)
+
+ // Read accounts and storage on src.
+ src.GetBalance(addr1)
+ src.GetBalance(addr2)
+ src.GetState(addr1, slot1)
+ src.GetState(addr1, slot2)
+
+ // Propagate reads from src to dst.
+ src.PropagateReadsTo(dst)
+
+ // dst should now have the accounts and storage in its stateObjects
+ // (populated by PropagateReadsTo calling GetBalance/GetState on dst).
+ require.Equal(t, uint256.NewInt(111), dst.GetBalance(addr1))
+ require.Equal(t, uint256.NewInt(222), dst.GetBalance(addr2))
+ require.Equal(t, common.HexToHash("0xdd0001"), dst.GetState(addr1, slot1))
+ require.Equal(t, common.HexToHash("0xdd0002"), dst.GetState(addr1, slot2))
+}
+
+func TestCommitSnapshot_CapturesDestructs(t *testing.T) {
+ db := NewDatabaseForTesting()
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xdestruct01")
+ sdb.CreateAccount(addr)
+ sdb.SetBalance(addr, uint256.NewInt(500), 0)
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ // Create a new StateDB at the committed root and self-destruct the account.
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+
+ sdb2.SelfDestruct(addr)
+ diff := sdb2.CommitSnapshot(false)
+
+ _, destructed := diff.Destructs[addr]
+ require.True(t, destructed, "self-destructed account should appear in diff.Destructs")
+}
+
+// TestPrefetchRoot_FlatDiffAccountUsesCommittedRoot verifies that accounts
+// loaded from FlatDiff get their prefetchRoot set to the committed parent's
+// storage root, not the FlatDiff's storage root. This is critical for
+// pipelined SRC: the prefetcher's NodeReader is opened at the committed
+// parent root (grandparent), so it can only resolve trie nodes for that
+// state's storage root. Using FlatDiff's root (block N's post-state) would
+// cause "Unexpected trie node" hash mismatches.
+func TestPrefetchRoot_FlatDiffAccountUsesCommittedRoot(t *testing.T) {
+ db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
+
+ // --- Set up a committed state with a contract that has storage ---
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xcontract")
+ sdb.CreateAccount(addr)
+ sdb.SetNonce(addr, 1, 0)
+ sdb.SetState(addr, common.HexToHash("0x01"), common.HexToHash("0xaa"))
+ sdb.Finalise(false)
+
+ committedRoot, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ // Read back the committed account to get its storage root.
+ committedSDB, err := New(committedRoot, db)
+ require.NoError(t, err)
+ committedObj := committedSDB.getStateObject(addr)
+ require.NotNil(t, committedObj)
+ committedStorageRoot := committedObj.data.Root
+ require.NotEqual(t, types.EmptyRootHash, committedStorageRoot, "committed account should have non-empty storage root")
+
+ // --- Simulate block N: modify the contract's storage and extract FlatDiff ---
+ sdb2, err := New(committedRoot, db)
+ require.NoError(t, err)
+ sdb2.SetState(addr, common.HexToHash("0x02"), common.HexToHash("0xbb")) // new slot
+ sdb2.Finalise(false)
+ diff := sdb2.CommitSnapshot(false)
+
+	// Capture the account's storage root exactly as recorded in the FlatDiff.
+ flatDiffAcct, ok := diff.Accounts[addr]
+ require.True(t, ok, "contract should be in FlatDiff")
+ flatDiffStorageRoot := flatDiffAcct.Root
+ // The FlatDiff root is the account's root BEFORE IntermediateRoot (i.e.,
+ // CommitSnapshot doesn't hash — it captures the current data.Root). So it
+ // equals the committed root here. But the key point is that getPrefetchRoot
+ // returns the committed root regardless.
+
+ // --- Create a pipelined StateDB with FlatDiff overlay ---
+ overlayDB, err := NewWithFlatBase(committedRoot, db, diff)
+ require.NoError(t, err)
+
+ // Load the account from FlatDiff
+ obj := overlayDB.getStateObject(addr)
+ require.NotNil(t, obj)
+
+ // Verify origin/data roots come from FlatDiff
+ require.Equal(t, flatDiffStorageRoot, obj.data.Root, "data.Root should be from FlatDiff")
+
+ // Verify prefetchRoot was set to the committed storage root
+ require.Equal(t, committedStorageRoot, obj.prefetchRoot, "prefetchRoot should be the committed parent's storage root")
+
+ // Verify getPrefetchRoot returns the committed root (not data.Root)
+ require.Equal(t, committedStorageRoot, obj.getPrefetchRoot(), "getPrefetchRoot should return the committed storage root")
+}
+
+// TestPrefetchRoot_NormalAccountFallsBackToDataRoot verifies that accounts
+// loaded from the committed state (not FlatDiff) have prefetchRoot=zero,
+// and getPrefetchRoot falls back to data.Root.
+func TestPrefetchRoot_NormalAccountFallsBackToDataRoot(t *testing.T) {
+ db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
+
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xnormal")
+ sdb.CreateAccount(addr)
+ sdb.SetNonce(addr, 1, 0)
+ sdb.SetState(addr, common.HexToHash("0x01"), common.HexToHash("0xaa"))
+ sdb.Finalise(false)
+
+ root, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ // Load the account normally (no FlatDiff)
+ sdb2, err := New(root, db)
+ require.NoError(t, err)
+
+ obj := sdb2.getStateObject(addr)
+ require.NotNil(t, obj)
+
+ // prefetchRoot should be zero (not set for non-FlatDiff accounts)
+ require.Equal(t, common.Hash{}, obj.prefetchRoot, "prefetchRoot should be zero for non-FlatDiff accounts")
+
+ // getPrefetchRoot should fall back to data.Root
+ require.Equal(t, obj.data.Root, obj.getPrefetchRoot(), "getPrefetchRoot should fall back to data.Root")
+}
+
+// TestPrefetchRoot_NewAccountInFlatDiff verifies that an account created in
+// block N (exists in FlatDiff but not in committed state) gets prefetchRoot=zero
+// since there's nothing to prefetch at the committed parent root.
+func TestPrefetchRoot_NewAccountInFlatDiff(t *testing.T) {
+ db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
+
+ // Commit an empty state
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+ committedRoot, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+ // FlatDiff with a new account that doesn't exist in committed state
+ newAddr := common.HexToAddress("0xnew")
+ diff := &FlatDiff{
+ Accounts: map[common.Address]types.StateAccount{
+ newAddr: {
+ Nonce: 1,
+ Balance: uint256.NewInt(100),
+ Root: crypto.Keccak256Hash([]byte("fake-storage-root")), // non-empty root
+ CodeHash: types.EmptyCodeHash.Bytes(),
+ },
+ },
+ Storage: make(map[common.Address]map[common.Hash]common.Hash),
+ Destructs: make(map[common.Address]struct{}),
+ Code: make(map[common.Hash][]byte),
+ ReadStorage: make(map[common.Address][]common.Hash),
+ NonExistentReads: nil,
+ }
+
+ overlayDB, err := NewWithFlatBase(committedRoot, db, diff)
+ require.NoError(t, err)
+
+ obj := overlayDB.getStateObject(newAddr)
+ require.NotNil(t, obj)
+
+ // Account is new (not in committed state), so prefetchRoot should be zero
+ require.Equal(t, common.Hash{}, obj.prefetchRoot, "prefetchRoot should be zero for new accounts not in committed state")
+
+ // getPrefetchRoot falls back to data.Root
+ require.Equal(t, obj.data.Root, obj.getPrefetchRoot(), "getPrefetchRoot should fall back to data.Root for new accounts")
+}
+
+// TestPrefetchRoot_DeepCopyPreserves verifies that stateObject.deepCopy
+// preserves the prefetchRoot field, which is important for StateDB.Copy()
+// used by the block-level prefetcher.
+func TestPrefetchRoot_DeepCopyPreserves(t *testing.T) {
+ db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
+
+ sdb, err := New(types.EmptyRootHash, db)
+ require.NoError(t, err)
+
+ addr := common.HexToAddress("0xcopy")
+ sdb.CreateAccount(addr)
+ sdb.SetNonce(addr, 1, 0)
+ sdb.SetState(addr, common.HexToHash("0x01"), common.HexToHash("0xaa"))
+ sdb.Finalise(false)
+
+ committedRoot, _, err := sdb.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+
+	// Simulate a FlatDiff produced by modifying the contract's storage in block N.
+ sdb2, err := New(committedRoot, db)
+ require.NoError(t, err)
+ sdb2.SetState(addr, common.HexToHash("0x02"), common.HexToHash("0xbb"))
+ sdb2.Finalise(false)
+ diff := sdb2.CommitSnapshot(false)
+
+ // Create overlay StateDB and load account
+ overlayDB, err := NewWithFlatBase(committedRoot, db, diff)
+ require.NoError(t, err)
+ obj := overlayDB.getStateObject(addr)
+ require.NotNil(t, obj)
+ require.NotEqual(t, common.Hash{}, obj.prefetchRoot)
+
+ // Copy the StateDB and verify prefetchRoot is preserved
+ copiedDB := overlayDB.Copy()
+ copiedObj := copiedDB.getStateObject(addr)
+ require.NotNil(t, copiedObj)
+ require.Equal(t, obj.prefetchRoot, copiedObj.prefetchRoot, "deepCopy should preserve prefetchRoot")
+ require.Equal(t, obj.getPrefetchRoot(), copiedObj.getPrefetchRoot(), "getPrefetchRoot should match after deepCopy")
+}
+
+// TestPipelinedSRC_RootParity_NewVsTrieOnly is a consensus-critical parity
+// check for mitigation (2.5): when the SRC goroutine runs without producing a
+// witness, openSRCStateDB uses state.New (multi-reader, flat-reader-eligible)
+// instead of state.NewTrieOnly. Both reader paths must produce byte-identical
+// state roots from the same FlatDiff applied at the same parent root —
+// otherwise consensus would split between witness-producing and witness-off
+// nodes.
+//
+// The FlatDiff exercises every shape that touches origin reads:
+// - balance/nonce mutation on existing account
+// - storage zero-write (slot deletion) and fresh storage write
+// - pure self-destruct (no resurrection)
+// - self-destruct followed by resurrection with new state
+// - code deploy on a new account
+// - read-only access to an existing account (flatDiff.ReadSet)
+// - non-existent address probe (flatDiff.NonExistentReads)
+//
+// Uses path scheme so state.New actually wires a flat reader (pathdb
+// StateReader), making the trie-only vs multi-reader distinction
+// observable. Under hash scheme without a snapshot the multi-reader
+// degenerates to trie-only and the test would be trivially true.
+func TestPipelinedSRC_RootParity_NewVsTrieOnly(t *testing.T) {
+ disk := rawdb.NewMemoryDatabase()
+ defer disk.Close()
+ tdb := triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults})
+ defer tdb.Close()
+ sdb := NewDatabase(tdb, nil)
+
+ addrMutate := common.HexToAddress("0xa1") // existing → balance/nonce mutation
+ addrZeroSlot := common.HexToAddress("0xa2") // existing storage → zero a slot, write a fresh slot
+ addrPureDest := common.HexToAddress("0xa3") // existing → pure self-destruct
+ addrResurrect := common.HexToAddress("0xa4") // existing → destruct + resurrect with new state
+ addrReadOnly := common.HexToAddress("0xa5") // existing → read-only access only
+ addrCodeNew := common.HexToAddress("0xa6") // new → balance + code deploy
+ addrNonExist := common.HexToAddress("0xa7") // never exists → probed only
+
+ slotZeroed := common.HexToHash("0x01")
+ slotFresh := common.HexToHash("0x02")
+ slotResurrectOld := common.HexToHash("0x03")
+ slotResurrectNew := common.HexToHash("0x04")
+ slotReadOnly := common.HexToHash("0x05")
+
+ // --- Build initial committed state ---
+ initial, err := New(types.EmptyRootHash, sdb)
+ require.NoError(t, err)
+
+ initial.CreateAccount(addrMutate)
+ initial.SetBalance(addrMutate, uint256.NewInt(100), 0)
+ initial.SetNonce(addrMutate, 1, 0)
+
+ initial.CreateAccount(addrZeroSlot)
+ initial.SetBalance(addrZeroSlot, uint256.NewInt(50), 0)
+ initial.SetState(addrZeroSlot, slotZeroed, common.HexToHash("0xbeef"))
+
+ initial.CreateAccount(addrPureDest)
+ initial.SetBalance(addrPureDest, uint256.NewInt(200), 0)
+
+ initial.CreateAccount(addrResurrect)
+ initial.SetBalance(addrResurrect, uint256.NewInt(300), 0)
+ initial.SetNonce(addrResurrect, 5, 0)
+ initial.SetCode(addrResurrect, []byte{0x60, 0x01}, 0)
+ initial.SetState(addrResurrect, slotResurrectOld, common.HexToHash("0xbbbb"))
+
+ initial.CreateAccount(addrReadOnly)
+ initial.SetBalance(addrReadOnly, uint256.NewInt(400), 0)
+ initial.SetState(addrReadOnly, slotReadOnly, common.HexToHash("0xdddd"))
+
+ parentRoot, _, err := initial.CommitWithUpdate(0, false, false)
+ require.NoError(t, err)
+ require.NoError(t, tdb.Commit(parentRoot, false))
+
+ // Confirm the multi-reader path will actually wire a flat reader at
+ // parentRoot. state.New silently falls back to trie-only if
+ // triedb.StateReader errors, which would let the parity assertion below
+ // pass without exercising the mitigation. Asserting StateReader resolves
+ // makes the test fail loudly if path-mode setup ever regresses.
+ if _, err := tdb.StateReader(parentRoot); err != nil {
+ t.Fatalf("path-scheme StateReader unavailable at parentRoot — "+
+ "multi-reader would silently fall back to trie-only, defeating the parity test: %v", err)
+ }
+
+ // --- Simulate block N execution at parentRoot to produce a FlatDiff ---
+ exec, err := New(parentRoot, sdb)
+ require.NoError(t, err)
+
+ exec.SetBalance(addrMutate, uint256.NewInt(150), 0)
+ exec.SetNonce(addrMutate, 2, 0)
+
+ exec.SetState(addrZeroSlot, slotZeroed, common.Hash{})
+ exec.SetState(addrZeroSlot, slotFresh, common.HexToHash("0x1234"))
+
+ exec.SelfDestruct(addrPureDest)
+
+ // Destruct in one tx, resurrect in the next. Finalise between the two so
+ // the destructed object lands in stateObjectsDestruct before the new
+ // CreateAccount replaces stateObjects[addr]; without this, the new object
+ // overwrites the destruct trail and CommitSnapshot would emit only an
+ // Accounts entry, not the destruct+resurrect shape we want to exercise.
+ exec.SelfDestruct(addrResurrect)
+ exec.Finalise(false)
+ exec.CreateAccount(addrResurrect)
+ exec.SetBalance(addrResurrect, uint256.NewInt(999), 0)
+ exec.SetNonce(addrResurrect, 1, 0)
+ exec.SetCode(addrResurrect, []byte{0x60, 0x02}, 0)
+ exec.SetState(addrResurrect, slotResurrectNew, common.HexToHash("0xffff"))
+
+ exec.CreateAccount(addrCodeNew)
+ exec.SetBalance(addrCodeNew, uint256.NewInt(77), 0)
+ exec.SetCode(addrCodeNew, []byte{0x60, 0x03}, 0)
+
+ exec.GetBalance(addrReadOnly)
+ exec.GetState(addrReadOnly, slotReadOnly)
+
+ exec.GetBalance(addrNonExist)
+
+ flatDiff := exec.CommitSnapshot(false)
+
+ // Sanity: FlatDiff captured every shape we exercise.
+ require.Contains(t, flatDiff.Accounts, addrMutate)
+ require.Contains(t, flatDiff.Accounts, addrZeroSlot)
+ require.Contains(t, flatDiff.Destructs, addrPureDest)
+ require.Contains(t, flatDiff.Destructs, addrResurrect)
+ require.Contains(t, flatDiff.Accounts, addrResurrect)
+ require.Contains(t, flatDiff.Accounts, addrCodeNew)
+ zeroedSlots, ok := flatDiff.Storage[addrZeroSlot]
+ require.True(t, ok)
+ require.Equal(t, common.Hash{}, zeroedSlots[slotZeroed])
+ require.Equal(t, common.HexToHash("0x1234"), zeroedSlots[slotFresh])
+
+ // --- Path A: NewTrieOnly (witness-producing path) ---
+ trieOnlyDB, err := NewTrieOnly(parentRoot, sdb)
+ require.NoError(t, err)
+ trieOnlyDB.ApplyFlatDiffForCommit(flatDiff)
+ rootTrieOnly := trieOnlyDB.IntermediateRoot(false)
+
+ // --- Path B: state.New (witness-off multi-reader path) ---
+ multiDB, err := New(parentRoot, sdb)
+ require.NoError(t, err)
+ multiDB.ApplyFlatDiffForCommit(flatDiff)
+ rootMulti := multiDB.IntermediateRoot(false)
+
+ // --- Parity assertion: byte-identical state roots ---
+ require.Equal(t, rootTrieOnly, rootMulti,
+ "state root must be byte-identical between NewTrieOnly and state.New paths — "+
+ "any divergence is a consensus-splitting bug between witness-producing and witness-off nodes")
+
+ // Cross-check against a third path: directly executing the same mutations
+ // on a fresh StateDB at parentRoot (no FlatDiff replay). This catches any
+ // hypothetical bug where ApplyFlatDiffForCommit produces a root that
+ // differs from the original execution.
+ direct, err := New(parentRoot, sdb)
+ require.NoError(t, err)
+ direct.SetBalance(addrMutate, uint256.NewInt(150), 0)
+ direct.SetNonce(addrMutate, 2, 0)
+ direct.SetState(addrZeroSlot, slotZeroed, common.Hash{})
+ direct.SetState(addrZeroSlot, slotFresh, common.HexToHash("0x1234"))
+ direct.SelfDestruct(addrPureDest)
+ direct.SelfDestruct(addrResurrect)
+ // Mirror the exec-path transaction boundary: Finalise after SelfDestruct
+ // so the destructed object is recorded in stateObjectsDestruct before the
+ // resurrection. Without this, the cross-check would diverge from the
+ // FlatDiff path because the FlatDiff captured a destruct+resurrect shape
+ // that only exists when there is a Finalise between the two operations.
+ direct.Finalise(false)
+ direct.CreateAccount(addrResurrect)
+ direct.SetBalance(addrResurrect, uint256.NewInt(999), 0)
+ direct.SetNonce(addrResurrect, 1, 0)
+ direct.SetCode(addrResurrect, []byte{0x60, 0x02}, 0)
+ direct.SetState(addrResurrect, slotResurrectNew, common.HexToHash("0xffff"))
+ direct.CreateAccount(addrCodeNew)
+ direct.SetBalance(addrCodeNew, uint256.NewInt(77), 0)
+ direct.SetCode(addrCodeNew, []byte{0x60, 0x03}, 0)
+ rootDirect := direct.IntermediateRoot(false)
+ require.Equal(t, rootDirect, rootTrieOnly,
+ "FlatDiff replay path must produce the same root as direct execution")
+}
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index b469a17e1d..40b8b996e9 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -35,6 +35,30 @@ var (
errTerminated = errors.New("fetcher is already terminated")
)
+type prefetchStopMode uint8
+
+const (
+ // prefetchStopDrain preserves the historical shutdown behaviour: once stop
+ // is requested, subfetchers finish queued work before exiting.
+ prefetchStopDrain prefetchStopMode = iota
+
+ // prefetchStopSnapshotFast is used by the pipelined SRC warm-snapshot
+ // handoff. Queued speculative work is discarded; already-running trie reads
+ // are allowed to finish, then the caller snapshots whatever nodes are
+ // available.
+ prefetchStopSnapshotFast
+)
+
+const (
+ // subfetcherAccountPrefetchChunk and subfetcherStoragePrefetchChunk bound
+ // how long snapshot-fast shutdown can be stuck behind an already-started
+ // prefetch batch. Full-drain shutdown still processes every chunk; the
+ // snapshot-fast path may exit between chunks and treat uncached nodes as SRC
+ // warm-snapshot misses.
+ subfetcherAccountPrefetchChunk = 64
+ subfetcherStoragePrefetchChunk = 128
+)
+
// triePrefetcher is an active prefetcher, which receives accounts or storage
// items and does trie-loading of them. The goal is to get as much useful content
// into the caches as possible.
@@ -101,10 +125,71 @@ func newTriePrefetcher(db Database, root common.Hash, namespace string, noreads
}
}
-// terminate iterates over all the subfetchers and issues a termination request
-// to all of them. Depending on the async parameter, the method will either block
-// until all subfetchers spin down, or return immediately.
+// snapshotWarmNodes collects the trie nodes accumulated by every subfetcher
+// into a list of (owner, path -> blob) maps. It MUST be called only after a
+// synchronous termination has returned — once subfetcher goroutines have exited
+// their loops, their tries are quiescent and trie.Witness() can be read safely.
+// The caller (StopAndCollectWarmSnapshot) sequences this between the
+// snapshot-fast stop and report so the prefetcher's lifecycle remains intact.
+//
+// Returns nil when called on a nil receiver, an already-closed prefetcher
+// without subfetchers, or when no subfetcher loaded any nodes — callers must
+// tolerate a nil result. The stats return describes the subfetcher count and
+// warm-node mix observed while collecting the maps.
+func (p *triePrefetcher) snapshotWarmNodes() ([]TrieWarmNodes, PrefetcherSnapshotStats) {
+ var stats PrefetcherSnapshotStats
+ if p == nil {
+ return nil, stats
+ }
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ stats.Fetchers = len(p.fetchers)
+ if len(p.fetchers) == 0 {
+ return nil, stats
+ }
+ out := make([]TrieWarmNodes, 0, len(p.fetchers))
+ for _, fetcher := range p.fetchers {
+ nodes := fetcher.warmNodes()
+ if len(nodes) == 0 {
+ continue
+ }
+ stats.LoadedFetchers++
+ var bytes int
+ for _, blob := range nodes {
+ bytes += len(blob)
+ }
+ if fetcher.owner == (common.Hash{}) {
+ stats.AccountFetchers++
+ stats.AccountNodes += len(nodes)
+ stats.AccountBytes += bytes
+ } else {
+ stats.StorageFetchers++
+ stats.StorageNodes += len(nodes)
+ stats.StorageBytes += bytes
+ }
+ out = append(out, TrieWarmNodes{Owner: fetcher.owner, Nodes: nodes})
+ }
+ return out, stats
+}
+
+// terminate iterates over all the subfetchers and issues a full-drain
+// termination request to all of them. Depending on the async parameter, the
+// method will either block until all subfetchers spin down, or return
+// immediately.
func (p *triePrefetcher) terminate(async bool) {
+ p.terminateWithMode(async, prefetchStopDrain)
+}
+
+// terminateForSnapshot terminates the prefetcher for the pipelined SRC
+// warm-snapshot handoff. It discards queued speculative work and waits only for
+// subfetcher goroutines to exit, so the caller can safely snapshot the nodes
+// that were already loaded.
+func (p *triePrefetcher) terminateForSnapshot() {
+ p.terminateWithMode(false, prefetchStopSnapshotFast)
+}
+
+func (p *triePrefetcher) terminateWithMode(async bool, mode prefetchStopMode) {
p.lock.Lock() // Lock for writing
defer p.lock.Unlock() // Ensure the lock is released after the function
@@ -116,7 +201,7 @@ func (p *triePrefetcher) terminate(async bool) {
}
// Terminate all sub-fetchers, sync or async, depending on the request
for _, fetcher := range p.fetchers {
- fetcher.terminate(async)
+ fetcher.terminateWithMode(async, mode)
}
close(p.term)
}
@@ -265,8 +350,9 @@ type subfetcher struct {
addr common.Address // Address of the account that the trie belongs to
trie Trie // Trie being populated with nodes
- tasks []*subfetcherTask // Items queued up for retrieval
- lock sync.Mutex // Lock protecting the task queue
+ tasks []*subfetcherTask // Items queued up for retrieval
+ stopMode prefetchStopMode // Stop behaviour, guarded by lock
+ lock sync.Mutex // Lock protecting the task queue and stop mode
wake chan struct{} // Wake channel if a new task is scheduled
stop chan struct{} // Channel to interrupt processing
@@ -324,10 +410,18 @@ func (sf *subfetcher) schedule(addrs []common.Address, slots []common.Hash, read
select {
case <-sf.term:
return errTerminated
+ case <-sf.stop:
+ return errTerminated
default:
}
// Append the tasks to the current queue
sf.lock.Lock()
+ select {
+ case <-sf.stop:
+ sf.lock.Unlock()
+ return errTerminated
+ default:
+ }
for _, addr := range addrs {
sf.tasks = append(sf.tasks, &subfetcherTask{read: read, addr: &addr})
}
@@ -376,9 +470,20 @@ func (sf *subfetcher) peek() Trie {
}
// terminate requests the subfetcher to stop accepting new tasks and spin down
-// as soon as everything is loaded. Depending on the async parameter, the method
+// once queued work is drained. Depending on the async parameter, the method
// will either block until all disk loads finish or return immediately.
func (sf *subfetcher) terminate(async bool) {
+ sf.terminateWithMode(async, prefetchStopDrain)
+}
+
+func (sf *subfetcher) terminateWithMode(async bool, mode prefetchStopMode) {
+ sf.lock.Lock()
+ if mode == prefetchStopSnapshotFast {
+ sf.stopMode = mode
+ sf.tasks = nil
+ }
+ sf.lock.Unlock()
+
select {
case <-sf.stop:
default:
@@ -390,6 +495,61 @@ func (sf *subfetcher) terminate(async bool) {
<-sf.term
}
+func (sf *subfetcher) discardOnStop() bool {
+ sf.lock.Lock()
+ defer sf.lock.Unlock()
+
+ return sf.stopMode == prefetchStopSnapshotFast
+}
+
+// warmNodes returns the (path -> blob) map of trie nodes this subfetcher has
+// loaded into its trie. It must be called only after the subfetcher's loop has
+// exited — synchronous termination provides that ordering by waiting on
+// <-sf.term. Returns nil if the trie was never opened (openTrie failed) or has
+// not loaded any nodes.
+func (sf *subfetcher) warmNodes() map[string][]byte {
+ if sf == nil || sf.trie == nil {
+ return nil
+ }
+ return sf.trie.Witness()
+}
+
+func (sf *subfetcher) prefetchAccounts(addresses []common.Address) bool {
+ for start := 0; start < len(addresses); start += subfetcherAccountPrefetchChunk {
+ if sf.discardOnStop() {
+ return false
+ }
+ end := start + subfetcherAccountPrefetchChunk
+ if end > len(addresses) {
+ end = len(addresses)
+ }
+ startTime := time.Now()
+ if err := sf.trie.PrefetchAccount(addresses[start:end]); err != nil {
+ log.Error("Failed to prefetch accounts", "err", err)
+ }
+ sf.fetchTime += time.Since(startTime)
+ }
+ return true
+}
+
+func (sf *subfetcher) prefetchStorage(slots [][]byte) bool {
+ for start := 0; start < len(slots); start += subfetcherStoragePrefetchChunk {
+ if sf.discardOnStop() {
+ return false
+ }
+ end := start + subfetcherStoragePrefetchChunk
+ if end > len(slots) {
+ end = len(slots)
+ }
+ startTime := time.Now()
+ if err := sf.trie.PrefetchStorage(sf.addr, slots[start:end]); err != nil {
+ log.Error("Failed to prefetch storage", "err", err)
+ }
+ sf.fetchTime += time.Since(startTime)
+ }
+ return true
+}
+
// openTrie resolves the target trie from database for prefetching.
func (sf *subfetcher) openTrie() error {
// Open the verkle tree if the sub-fetcher is in verkle mode. Note, there is
@@ -438,7 +598,11 @@ func (sf *subfetcher) loop() {
sf.lock.Lock()
tasks := sf.tasks
sf.tasks = nil
+ discard := sf.stopMode == prefetchStopSnapshotFast
sf.lock.Unlock()
+ if discard {
+ return
+ }
var (
addresses []common.Address
@@ -495,26 +659,26 @@ func (sf *subfetcher) loop() {
slots = append(slots, key.Bytes())
}
}
- if len(addresses) != 0 {
- start := time.Now()
- if err := sf.trie.PrefetchAccount(addresses); err != nil {
- log.Error("Failed to prefetch accounts", "err", err)
- }
- sf.fetchTime += time.Since(start)
+ if sf.discardOnStop() {
+ return
}
- if len(slots) != 0 {
- start := time.Now()
- if err := sf.trie.PrefetchStorage(sf.addr, slots); err != nil {
- log.Error("Failed to prefetch storage", "err", err)
- }
- sf.fetchTime += time.Since(start)
+ if len(addresses) != 0 && !sf.prefetchAccounts(addresses) {
+ return
+ }
+ if len(slots) != 0 && !sf.prefetchStorage(slots) {
+ return
}
case <-sf.stop:
- // Termination is requested, abort if no more tasks are pending. If
- // there are some, exhaust them first.
+ // Termination is requested. Snapshot-fast mode discards speculative
+ // queued tasks; full-drain mode keeps the historical behaviour and
+ // exhausts queued tasks before exiting.
sf.lock.Lock()
- done := sf.tasks == nil
+ done := len(sf.tasks) == 0
+ if sf.stopMode == prefetchStopSnapshotFast {
+ sf.tasks = nil
+ done = true
+ }
sf.lock.Unlock()
if done {
diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go
index 12b8d2ee0d..5ca9ec4662 100644
--- a/core/state/trie_prefetcher_test.go
+++ b/core/state/trie_prefetcher_test.go
@@ -71,6 +71,330 @@ func TestUseAfterTerminate(t *testing.T) {
}
}
+func TestSubfetcherSnapshotFastTerminateDiscardsQueuedTasks(t *testing.T) {
+ db := filledStateDB()
+ slot := common.HexToHash("aaa")
+ sf := &subfetcher{
+ db: db.db,
+ state: db.originalRoot,
+ root: db.originalRoot,
+ wake: make(chan struct{}, 1),
+ stop: make(chan struct{}),
+ term: make(chan struct{}),
+ seenReadAddr: make(map[common.Address]struct{}),
+ seenWriteAddr: make(map[common.Address]struct{}),
+ seenReadSlot: make(map[common.Hash]struct{}),
+ seenWriteSlot: make(map[common.Hash]struct{}),
+ tasks: []*subfetcherTask{{slot: &slot}},
+ }
+ sf.terminateWithMode(true, prefetchStopSnapshotFast)
+
+ if got := len(sf.tasks); got != 0 {
+ t.Fatalf("snapshot-fast terminate left %d queued tasks, want 0", got)
+ }
+ select {
+ case <-sf.stop:
+ default:
+ t.Fatalf("snapshot-fast terminate did not close stop channel")
+ }
+ if err := sf.schedule(nil, []common.Hash{slot}, false); err != errTerminated {
+ t.Fatalf("schedule after snapshot-fast terminate error = %v, want %v", err, errTerminated)
+ }
+}
+
+func TestSubfetcherDrainTerminateKeepsQueuedTasks(t *testing.T) {
+ db := filledStateDB()
+ slot := common.HexToHash("aaa")
+ sf := &subfetcher{
+ db: db.db,
+ state: db.originalRoot,
+ root: db.originalRoot,
+ wake: make(chan struct{}, 1),
+ stop: make(chan struct{}),
+ term: make(chan struct{}),
+ seenReadAddr: make(map[common.Address]struct{}),
+ seenWriteAddr: make(map[common.Address]struct{}),
+ seenReadSlot: make(map[common.Hash]struct{}),
+ seenWriteSlot: make(map[common.Hash]struct{}),
+ tasks: []*subfetcherTask{{slot: &slot}},
+ }
+ sf.terminateWithMode(true, prefetchStopDrain)
+
+ if got := len(sf.tasks); got != 1 {
+ t.Fatalf("full-drain terminate changed queued task count to %d, want 1", got)
+ }
+}
+
+func TestTriePrefetcherSnapshotFastTerminateSkipsQueuedWork(t *testing.T) {
+ db := NewDatabaseForTesting()
+ tr := newBlockingPrefetchTrie()
+ t.Cleanup(tr.releaseBlockedPrefetch)
+ prefetcher := newTriePrefetcher(&blockingPrefetchDB{
+ Database: db,
+ triedb: db.TrieDB(),
+ trie: tr,
+ }, common.Hash{}, "snapshot-fast-terminate", false)
+ addr1 := common.HexToAddress("0x1")
+ addr2 := common.HexToAddress("0x2")
+
+ if err := prefetcher.prefetch(common.Hash{}, common.Hash{}, common.Address{}, []common.Address{addr1}, nil, false); err != nil {
+ t.Fatalf("first prefetch failed: %v", err)
+ }
+ select {
+ case <-tr.started:
+ case <-time.After(time.Second):
+ t.Fatalf("first prefetch did not start")
+ }
+ if err := prefetcher.prefetch(common.Hash{}, common.Hash{}, common.Address{}, []common.Address{addr2}, nil, false); err != nil {
+ t.Fatalf("second prefetch failed: %v", err)
+ }
+
+ done := make(chan struct{})
+ go func() {
+ prefetcher.terminateForSnapshot()
+ close(done)
+ }()
+ select {
+ case <-done:
+ t.Fatalf("snapshot-fast terminate returned before in-flight prefetch completed")
+ case <-time.After(20 * time.Millisecond):
+ }
+ tr.releaseBlockedPrefetch()
+ select {
+ case <-done:
+ case <-time.After(time.Second):
+ t.Fatalf("snapshot-fast terminate did not return after in-flight prefetch completed")
+ }
+
+ if calls, items := tr.accountStats(); calls != 1 || items != 1 {
+ t.Fatalf("executed account prefetch calls/items = %d/%d, want 1/1", calls, items)
+ }
+ if err := prefetcher.prefetch(common.Hash{}, common.Hash{}, common.Address{}, []common.Address{addr2}, nil, false); err != errTerminated {
+ t.Fatalf("prefetch after snapshot-fast terminate error = %v, want %v", err, errTerminated)
+ }
+}
+
+func TestTriePrefetcherSnapshotFastTerminateStopsBetweenAccountChunks(t *testing.T) {
+ db := NewDatabaseForTesting()
+ tr := newBlockingPrefetchTrie()
+ t.Cleanup(tr.releaseBlockedPrefetch)
+ prefetcher := newTriePrefetcher(&blockingPrefetchDB{
+ Database: db,
+ triedb: db.TrieDB(),
+ trie: tr,
+ }, common.Hash{}, "snapshot-fast-account-chunks", false)
+
+ addrs := make([]common.Address, subfetcherAccountPrefetchChunk+1)
+ for i := range addrs {
+ addrs[i] = common.BigToAddress(big.NewInt(int64(i + 1)))
+ }
+ if err := prefetcher.prefetch(common.Hash{}, common.Hash{}, common.Address{}, addrs, nil, false); err != nil {
+ t.Fatalf("prefetch failed: %v", err)
+ }
+ select {
+ case <-tr.started:
+ case <-time.After(time.Second):
+ t.Fatalf("first account prefetch chunk did not start")
+ }
+
+ done := make(chan struct{})
+ go func() {
+ prefetcher.terminateForSnapshot()
+ close(done)
+ }()
+ select {
+ case <-done:
+ t.Fatalf("snapshot-fast terminate returned before in-flight account chunk completed")
+ case <-time.After(20 * time.Millisecond):
+ }
+ tr.releaseBlockedPrefetch()
+ select {
+ case <-done:
+ case <-time.After(time.Second):
+ t.Fatalf("snapshot-fast terminate did not return after in-flight account chunk completed")
+ }
+
+ if calls, items := tr.accountStats(); calls != 1 || items != subfetcherAccountPrefetchChunk {
+ t.Fatalf("executed account prefetch calls/items = %d/%d, want 1/%d", calls, items, subfetcherAccountPrefetchChunk)
+ }
+}
+
+func TestTriePrefetcherSnapshotFastTerminateStopsBetweenStorageChunks(t *testing.T) {
+ db := NewDatabaseForTesting()
+ tr := newBlockingPrefetchTrie()
+ t.Cleanup(tr.releaseBlockedPrefetch)
+ prefetcher := newTriePrefetcher(&blockingPrefetchDB{
+ Database: db,
+ triedb: db.TrieDB(),
+ trie: tr,
+ }, common.Hash{}, "snapshot-fast-storage-chunks", false)
+
+ slots := make([]common.Hash, subfetcherStoragePrefetchChunk+1)
+ for i := range slots {
+ slots[i] = common.BigToHash(big.NewInt(int64(i + 1)))
+ }
+ owner := common.Hash{0x01}
+ addr := common.HexToAddress("0x1")
+ if err := prefetcher.prefetch(owner, common.Hash{}, addr, nil, slots, false); err != nil {
+ t.Fatalf("prefetch failed: %v", err)
+ }
+ select {
+ case <-tr.started:
+ case <-time.After(time.Second):
+ t.Fatalf("first storage prefetch chunk did not start")
+ }
+
+ done := make(chan struct{})
+ go func() {
+ prefetcher.terminateForSnapshot()
+ close(done)
+ }()
+ select {
+ case <-done:
+ t.Fatalf("snapshot-fast terminate returned before in-flight storage chunk completed")
+ case <-time.After(20 * time.Millisecond):
+ }
+ tr.releaseBlockedPrefetch()
+ select {
+ case <-done:
+ case <-time.After(time.Second):
+ t.Fatalf("snapshot-fast terminate did not return after in-flight storage chunk completed")
+ }
+
+ if calls, items := tr.storageStats(); calls != 1 || items != subfetcherStoragePrefetchChunk {
+ t.Fatalf("executed storage prefetch calls/items = %d/%d, want 1/%d", calls, items, subfetcherStoragePrefetchChunk)
+ }
+}
+
+func TestTriePrefetcherDrainTerminateCompletesAccountChunks(t *testing.T) {
+ db := NewDatabaseForTesting()
+ tr := newBlockingPrefetchTrie()
+ t.Cleanup(tr.releaseBlockedPrefetch)
+ prefetcher := newTriePrefetcher(&blockingPrefetchDB{
+ Database: db,
+ triedb: db.TrieDB(),
+ trie: tr,
+ }, common.Hash{}, "drain-account-chunks", false)
+
+ addrs := make([]common.Address, subfetcherAccountPrefetchChunk+1)
+ for i := range addrs {
+ addrs[i] = common.BigToAddress(big.NewInt(int64(i + 1)))
+ }
+ if err := prefetcher.prefetch(common.Hash{}, common.Hash{}, common.Address{}, addrs, nil, false); err != nil {
+ t.Fatalf("prefetch failed: %v", err)
+ }
+ select {
+ case <-tr.started:
+ case <-time.After(time.Second):
+ t.Fatalf("first account prefetch chunk did not start")
+ }
+
+ done := make(chan struct{})
+ go func() {
+ prefetcher.terminate(false)
+ close(done)
+ }()
+ select {
+ case <-done:
+ t.Fatalf("full-drain terminate returned before in-flight account chunk completed")
+ case <-time.After(20 * time.Millisecond):
+ }
+ tr.releaseBlockedPrefetch()
+ select {
+ case <-done:
+ case <-time.After(time.Second):
+ t.Fatalf("full-drain terminate did not return after account chunks completed")
+ }
+
+ if calls, items := tr.accountStats(); calls != 2 || items != subfetcherAccountPrefetchChunk+1 {
+ t.Fatalf("executed account prefetch calls/items = %d/%d, want 2/%d", calls, items, subfetcherAccountPrefetchChunk+1)
+ }
+}
+
+type blockingPrefetchDB struct {
+ Database
+ triedb *triedb.Database
+ trie *blockingPrefetchTrie
+}
+
+func (db *blockingPrefetchDB) OpenTrie(common.Hash) (Trie, error) {
+ return db.trie, nil
+}
+
+func (db *blockingPrefetchDB) OpenStorageTrie(common.Hash, common.Address, common.Hash, Trie) (Trie, error) {
+ return db.trie, nil
+}
+
+func (db *blockingPrefetchDB) TrieDB() *triedb.Database {
+ return db.triedb
+}
+
+type blockingPrefetchTrie struct {
+ Trie
+
+ started chan struct{}
+ release chan struct{}
+ once sync.Once
+ releaseOnce sync.Once
+
+ lock sync.Mutex
+ accountCalls int
+ accountItems int
+ storageCalls int
+ storageItems int
+}
+
+func newBlockingPrefetchTrie() *blockingPrefetchTrie {
+ return &blockingPrefetchTrie{
+ started: make(chan struct{}),
+ release: make(chan struct{}),
+ }
+}
+
+func (t *blockingPrefetchTrie) PrefetchAccount(addrs []common.Address) error {
+ t.lock.Lock()
+ t.accountCalls++
+ t.accountItems += len(addrs)
+ first := t.accountCalls == 1
+ t.lock.Unlock()
+ if first {
+ t.once.Do(func() { close(t.started) })
+ <-t.release
+ }
+ return nil
+}
+
+func (t *blockingPrefetchTrie) releaseBlockedPrefetch() {
+ t.releaseOnce.Do(func() { close(t.release) })
+}
+
+func (t *blockingPrefetchTrie) PrefetchStorage(_ common.Address, keys [][]byte) error {
+ t.lock.Lock()
+ t.storageCalls++
+ t.storageItems += len(keys)
+ first := t.storageCalls == 1
+ t.lock.Unlock()
+ if first {
+ t.once.Do(func() { close(t.started) })
+ <-t.release
+ }
+ return nil
+}
+
+func (t *blockingPrefetchTrie) accountStats() (int, int) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ return t.accountCalls, t.accountItems
+}
+
+func (t *blockingPrefetchTrie) storageStats() (int, int) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ return t.storageCalls, t.storageItems
+}
+
func TestVerklePrefetcher(t *testing.T) {
disk := rawdb.NewMemoryDatabase()
db := triedb.NewDatabase(disk, triedb.VerkleDefaults)
diff --git a/core/state/warm_snapshot.go b/core/state/warm_snapshot.go
new file mode 100644
index 0000000000..2dea716d4d
--- /dev/null
+++ b/core/state/warm_snapshot.go
@@ -0,0 +1,369 @@
+// Copyright 2026 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb/database"
+)
+
+var (
+ // These meters are intentionally emitted from snapshotNodeReader.Node so
+ // hit/miss attribution includes every trie-node fetch SRC attempts. If this
+ // shows up in CPU profiles, batch these counts per block and emit them once
+ // from the import handoff instead.
+ warmSnapshotAccountHitMeter = metrics.NewRegisteredMeter("chain/imports/pipelined/warm_snapshot/account/hit", nil)
+ warmSnapshotAccountMissMeter = metrics.NewRegisteredMeter("chain/imports/pipelined/warm_snapshot/account/miss", nil)
+ warmSnapshotStorageHitMeter = metrics.NewRegisteredMeter("chain/imports/pipelined/warm_snapshot/storage/hit", nil)
+ warmSnapshotStorageMissMeter = metrics.NewRegisteredMeter("chain/imports/pipelined/warm_snapshot/storage/miss", nil)
+)
+
+// WarmSnapshot is an immutable, hash-verified copy of trie nodes loaded by the
+// execution-side prefetcher. It is constructed in the pipelined SRC goroutine
+// from a quiesced WarmSnapshotInput so SRC's NewTrieOnly reader can
+// short-circuit pathdb/pebble lookups for nodes the main thread already loaded.
+//
+// The snapshot is read-only after construction. Concurrent readers are safe
+// because a populated map is never mutated post-construction; the SRC handoff
+// in persistPipelinedImport provides the happens-before edge.
+type WarmSnapshot struct {
+ // nodes is keyed by (owner, path, hash). Across blocks the same
+ // (owner, path) can resolve to different node hashes as the trie
+ // shape evolves; keying by hash too keeps every distinct warm node
+ // retrievable rather than overwriting earlier entries with later
+ // ones for the same path. The hash check on lookup remains the
+ // authoritative correctness gate; this keying just preserves hits
+ // the prefetcher actually observed.
+ nodes map[warmKey][]byte
+}
+
+// warmKey identifies a trie node by its containing trie's owner (zero for
+// the account trie, account hash for storage tries), its path within the
+// trie, and the node hash itself. The hash field disambiguates entries that
+// share owner+path across different blocks/states.
+type warmKey struct {
+ owner common.Hash
+ path string // string-keyed for direct map use; the bytes are MPT path nibbles
+ hash common.Hash
+}
+
+// NewWarmSnapshot constructs a snapshot from per-trie node maps already
+// extracted from a quiesced prefetcher. Each entry in tries supplies a trie
+// owner and a (path -> blob) map (typically the result of trie.Witness()).
+// Blobs are copied; the snapshot does not retain references into the source
+// maps. Empty input produces a non-nil empty snapshot — Lookup on an empty
+// snapshot is a fast miss.
+//
+// Source maps are keyed by path only; this constructor computes each blob's
+// hash and uses (owner, path, hash) as the snapshot key. If the source
+// somehow contains two distinct blobs at the same (owner, path) — possible
+// when the prefetcher loaded the same path at different roots within a
+// single block — both are retained.
+func NewWarmSnapshot(tries []TrieWarmNodes) *WarmSnapshot {
+ total := 0
+ for i := range tries {
+ total += len(tries[i].Nodes)
+ }
+ s := &WarmSnapshot{nodes: make(map[warmKey][]byte, total)}
+ for i := range tries {
+ owner := tries[i].Owner
+ for path, blob := range tries[i].Nodes {
+ if len(blob) == 0 {
+ continue
+ }
+ cp := make([]byte, len(blob))
+ copy(cp, blob)
+ s.nodes[warmKey{owner: owner, path: path, hash: crypto.Keccak256Hash(cp)}] = cp
+ }
+ }
+ return s
+}
+
+// TrieWarmNodes carries one trie's contribution to a WarmSnapshot. The Owner
+// is the trie's identifying hash (zero for the account trie, the account hash
+// for a storage trie). Nodes maps trie-path to RLP-encoded node blob.
+type TrieWarmNodes struct {
+ Owner common.Hash
+ Nodes map[string][]byte
+}
+
+// WarmSnapshotInput is the quiesced handoff from the execution-side
+// prefetcher to SRC. It contains cloned path->blob maps returned by
+// Trie.Witness() after all subfetcher goroutines have exited. The maps are
+// read-only after construction and may be passed to another goroutine.
+//
+// Build constructs the final immutable, hash-indexed WarmSnapshot. Keeping
+// this as a separate step lets the import thread stop and detach the
+// prefetcher quickly while SRC pays the copy/hash/index cost in the background.
+type WarmSnapshotInput struct {
+ tries []TrieWarmNodes
+}
+
+// NewWarmSnapshotInput wraps quiesced trie-node maps for later WarmSnapshot
+// construction. It does not copy blobs or compute hashes; callers must only
+// pass maps that will not be mutated after this point.
+func NewWarmSnapshotInput(tries []TrieWarmNodes) *WarmSnapshotInput {
+ if len(tries) == 0 {
+ return nil
+ }
+ return &WarmSnapshotInput{tries: tries}
+}
+
+// Build constructs the immutable WarmSnapshot from the input. The returned
+// snapshot owns copies of all node blobs and does not alias the input maps.
+func (in *WarmSnapshotInput) Build() *WarmSnapshot {
+ if in == nil || len(in.tries) == 0 {
+ return nil
+ }
+ return NewWarmSnapshot(in.tries)
+}
+
+// Len returns the number of nodes in the snapshot. Useful for tests and
+// metrics; safe to call on a nil snapshot.
+func (s *WarmSnapshot) Len() int {
+ if s == nil {
+ return 0
+ }
+ return len(s.nodes)
+}
+
+// SizeBytes returns the total retained trie-node blob bytes. It intentionally
+// excludes map/key overhead; use it as a stable payload-size signal rather than
+// a precise heap-size estimate.
+func (s *WarmSnapshot) SizeBytes() int {
+ if s == nil {
+ return 0
+ }
+ var size int
+ for _, blob := range s.nodes {
+ size += len(blob)
+ }
+ return size
+}
+
+// Lookup returns the cached trie-node blob for (owner, path, expectedHash) if
+// present. A miss returns (nil, false) and the caller is expected to fall
+// through to the underlying NodeReader.
+//
+// The map is keyed by the (owner, path, hash) triple, so a present entry
+// already has a verified hash by construction (see NewWarmSnapshot); the
+// expectedHash supplied by the caller becomes part of the lookup key
+// itself, which means a request for a different hash at the same
+// (owner, path) is a structural miss — no stored entry can satisfy it
+// regardless of contents. This is the linearisation point that prevents a
+// stale snapshot entry from satisfying a different state's read.
+func (s *WarmSnapshot) Lookup(owner common.Hash, path []byte, expectedHash common.Hash) ([]byte, bool) {
+ if s == nil || len(s.nodes) == 0 {
+ return nil, false
+ }
+ blob, ok := s.nodes[warmKey{owner: owner, path: string(path), hash: expectedHash}]
+ if !ok {
+ return nil, false
+ }
+ return blob, true
+}
+
+// snapshotStateDatabase wraps CachingDB for a single SRC StateDB so every trie
+// opening path can consult the same WarmSnapshot. The plain snapshot reader
+// wrapper is enough for StateDB.reader reads, but CommitWithUpdate also opens
+// tries through StateDB.db.OpenTrie/OpenStorageTrie. If those methods keep
+// using the unwrapped CachingDB, the commit and witness-collection walks miss
+// the warm handoff entirely.
+//
+// The wrapper preserves NewTrieOnly semantics: account and storage reads still
+// walk MPT tries, and trie resolveAndTrack still records proof nodes. The only
+// change is the NodeDatabase behind those tries: snapshot hits return an
+// already-loaded RLP node blob, while misses fall through to the underlying
+// triedb/pathdb chain.
+type snapshotStateDatabase struct {
+ *CachingDB
+
+ nodeDB database.NodeDatabase
+ snapshot *WarmSnapshot
+}
+
+func newSnapshotStateDatabase(inner *CachingDB, snapshot *WarmSnapshot) *snapshotStateDatabase {
+ return &snapshotStateDatabase{
+ CachingDB: inner,
+ nodeDB: newSnapshotNodeDatabase(inner.triedb, snapshot),
+ snapshot: snapshot,
+ }
+}
+
+// Reader intentionally returns a trie-only snapshot-aware reader, not
+// CachingDB.Reader's multi-reader. This wrapper is meant for short-lived SRC
+// StateDB instances that are discarded after CommitWithUpdate; do not reuse it
+// for long-lived StateDBs that expect flat/snapshot reader semantics after
+// commit-time reader refreshes.
+func (db *snapshotStateDatabase) Reader(stateRoot common.Hash) (Reader, error) {
+ tr, err := newTrieReaderWithSnapshot(stateRoot, db.triedb, db.nodeDB)
+ if err != nil {
+ return nil, err
+ }
+ return newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), tr), nil
+}
+
+func (db *snapshotStateDatabase) OpenTrie(root common.Hash) (Trie, error) {
+ if db.triedb.IsVerkle() {
+ return db.CachingDB.OpenTrie(root)
+ }
+ tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.nodeDB)
+ if err != nil {
+ return nil, err
+ }
+ return tr, nil
+}
+
+func (db *snapshotStateDatabase) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) {
+ if db.triedb.IsVerkle() {
+ return self, nil
+ }
+ tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.nodeDB)
+ if err != nil {
+ return nil, err
+ }
+ return tr, nil
+}
+
+// preimageForwarder is the interface trie.NewStateTrie checks at construction
+// time to decide whether the trie has a backing preimage store (see
+// trie/secure_trie.go's preimageStore type assertion). Mirroring it here
+// lets snapshotNodeDatabase satisfy the same shape so wrapped tries don't
+// silently lose preimage support.
+//
+// We declare it locally rather than importing trie's unexported preimageStore
+// because: (a) trie's interface is unexported, and (b) Go's structural typing
+// makes a type-equivalent local declaration sufficient — the type assertion
+// inside trie.NewStateTrie will succeed against any value that has these
+// three methods.
+type preimageForwarder interface {
+ Preimage(hash common.Hash) []byte
+ InsertPreimage(preimages map[common.Hash][]byte)
+ PreimageEnabled() bool
+}
+
+// snapshotNodeDatabase wraps a database.NodeDatabase so that NodeReaders
+// returned from it consult a WarmSnapshot before falling through to the
+// underlying reader. It is the boundary at which the SRC goroutine's trie
+// reads bypass pathdb/pebble for warm nodes.
+//
+// trie.Reader.Node(path, hash) calls the wrapped NodeReader once per node
+// fetch, supplying owner via its internal field and path/hash from the
+// caller. The wrapper consults the snapshot using exactly that triple. On
+// miss or hash mismatch, the underlying NodeReader is invoked unchanged, so
+// trie.Trie.resolveAndTrack and the trie's prevalueTracer record the served
+// node regardless of whether it came from the snapshot or pathdb. Witness
+// completeness under NewTrieOnly semantics is therefore preserved.
+//
+// snapshotNodeDatabase also forwards preimage methods (Preimage,
+// InsertPreimage, PreimageEnabled) when the inner database supports them.
+// trie.NewStateTrie type-asserts the supplied NodeDatabase to detect a
+// preimage store; without forwarding, wrapped tries would silently lose
+// preimage recording even though the underlying *triedb.Database supports it.
+type snapshotNodeDatabase struct {
+ inner database.NodeDatabase
+ snapshot *WarmSnapshot
+
+ // preimages is the inner database's preimage interface, captured at
+ // construction iff the inner database implements it. Nil when the
+ // underlying database does not record preimages, which preserves the
+ // "preimages disabled" branch in trie.NewStateTrie's type assertion.
+ preimages preimageForwarder
+}
+
+// newSnapshotNodeDatabase wraps inner with the given snapshot. If snapshot is
+// nil or empty, returns inner unchanged so callers can pass through without
+// allocating a wrapper.
+func newSnapshotNodeDatabase(inner database.NodeDatabase, snapshot *WarmSnapshot) database.NodeDatabase {
+ if snapshot == nil || snapshot.Len() == 0 {
+ return inner
+ }
+ wrapped := &snapshotNodeDatabase{inner: inner, snapshot: snapshot}
+ if pi, ok := inner.(preimageForwarder); ok {
+ wrapped.preimages = pi
+ }
+ return wrapped
+}
+
+func (db *snapshotNodeDatabase) NodeReader(stateRoot common.Hash) (database.NodeReader, error) {
+ r, err := db.inner.NodeReader(stateRoot)
+ if err != nil {
+ return nil, err
+ }
+ return &snapshotNodeReader{inner: r, snapshot: db.snapshot}, nil
+}
+
+// Preimage forwards to the inner preimage store. Trie callers reach this via
+// the preimageStore type assertion inside trie.NewStateTrie; if the inner
+// database had no preimage support, this method returns nil (the trie's
+// PreimageEnabled() check below returns false, so the trie won't install us
+// as its preimage store and these methods will not be invoked).
+func (db *snapshotNodeDatabase) Preimage(hash common.Hash) []byte {
+ if db.preimages == nil {
+ return nil
+ }
+ return db.preimages.Preimage(hash)
+}
+
+// InsertPreimage forwards a preimage batch to the inner store. Called by
+// trie.StateTrie.Commit when secKeyCache is non-empty and PreimageEnabled
+// returned true at construction time.
+func (db *snapshotNodeDatabase) InsertPreimage(preimages map[common.Hash][]byte) {
+ if db.preimages == nil {
+ return
+ }
+ db.preimages.InsertPreimage(preimages)
+}
+
+// PreimageEnabled reports whether the underlying database has preimage
+// recording enabled. Returning false makes trie.NewStateTrie skip the
+// preimage-store linkage for the wrapped trie — same outcome as if the
+// caller had passed an unwrapped *triedb.Database with preimages off.
+func (db *snapshotNodeDatabase) PreimageEnabled() bool {
+ if db.preimages == nil {
+ return false
+ }
+ return db.preimages.PreimageEnabled()
+}
+
+// snapshotNodeReader is the per-state-root NodeReader that consults the
+// snapshot first. Hits avoid pathdb diff-layer walks and pebble I/O entirely;
+// misses fall through to the underlying reader without modification.
+type snapshotNodeReader struct {
+ inner database.NodeReader
+ snapshot *WarmSnapshot
+}
+
+func (r *snapshotNodeReader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
+ if blob, ok := r.snapshot.Lookup(owner, path, hash); ok {
+ if owner == (common.Hash{}) {
+ warmSnapshotAccountHitMeter.Mark(1)
+ } else {
+ warmSnapshotStorageHitMeter.Mark(1)
+ }
+ return blob, nil
+ }
+ if owner == (common.Hash{}) {
+ warmSnapshotAccountMissMeter.Mark(1)
+ } else {
+ warmSnapshotStorageMissMeter.Mark(1)
+ }
+ return r.inner.Node(owner, path, hash)
+}
diff --git a/core/state/warm_snapshot_test.go b/core/state/warm_snapshot_test.go
new file mode 100644
index 0000000000..5e6f79c22d
--- /dev/null
+++ b/core/state/warm_snapshot_test.go
@@ -0,0 +1,280 @@
+// Copyright 2026 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/triedb/database"
+)
+
+// stubNodeReader records every call made to it and returns a configured blob
+// per (owner, path) pair. It lets us assert exactly when the snapshot wrapper
+// falls through to the underlying NodeReader.
+type stubNodeReader struct {
+ calls []stubCall
+ nodes map[stubKey]stubNode
+}
+
+type stubCall struct {
+ owner common.Hash
+ path []byte
+ hash common.Hash
+}
+
+type stubKey struct {
+ owner common.Hash
+ path string
+}
+
+type stubNode struct {
+ blob []byte
+ err error
+}
+
+func newStubNodeReader() *stubNodeReader {
+ return &stubNodeReader{nodes: make(map[stubKey]stubNode)}
+}
+
+func (s *stubNodeReader) set(owner common.Hash, path []byte, blob []byte) {
+ s.nodes[stubKey{owner: owner, path: string(path)}] = stubNode{blob: blob}
+}
+
+func (s *stubNodeReader) setError(owner common.Hash, path []byte, err error) {
+ s.nodes[stubKey{owner: owner, path: string(path)}] = stubNode{err: err}
+}
+
+func (s *stubNodeReader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
+ s.calls = append(s.calls, stubCall{owner: owner, path: append([]byte(nil), path...), hash: hash})
+ n, ok := s.nodes[stubKey{owner: owner, path: string(path)}]
+ if !ok {
+ return nil, errors.New("stub: not found")
+ }
+ if n.err != nil {
+ return nil, n.err
+ }
+ return n.blob, nil
+}
+
+// stubNodeDB satisfies database.NodeDatabase by returning a fixed
+// stubNodeReader regardless of state root. It exists only to drive
+// snapshotNodeDatabase in tests.
+type stubNodeDB struct {
+ reader *stubNodeReader
+}
+
+func (s *stubNodeDB) NodeReader(stateRoot common.Hash) (database.NodeReader, error) {
+ return s.reader, nil
+}
+
+// TestWarmSnapshot_HashMismatchFallsThrough is the consensus-critical safety
+// test for the snapshot reader. The snapshot is keyed by (owner, path, hash);
+// the caller-supplied expectedHash participates in the lookup key, so a
+// request for a different hash at the same (owner, path) is a structural
+// miss and the reader must fall through to the authoritative pathdb-backed
+// reader rather than serve a blob whose hash does not match what the caller
+// expects.
+//
+// Failing this test means a stale snapshot entry could satisfy a current
+// trie read with the wrong blob — a silent state-corruption / consensus
+// risk. The (owner, path, hash) keying is the structural guarantee that
+// prevents that, and the underlying-reader fallthrough is its observable
+// consequence.
+func TestWarmSnapshot_HashMismatchFallsThrough(t *testing.T) {
+ owner := common.HexToHash("0x01")
+ path := []byte{0xab, 0xcd}
+
+ correctBlob := []byte("trie-node-correct")
+ staleBlob := []byte("trie-node-stale-from-old-state")
+
+ correctHash := crypto.Keccak256Hash(correctBlob)
+ staleHash := crypto.Keccak256Hash(staleBlob)
+ require.NotEqual(t, correctHash, staleHash, "test setup: stale and correct blobs must hash differently")
+
+ // Snapshot contains the stale blob keyed by (owner, path).
+ snap := NewWarmSnapshot([]TrieWarmNodes{{
+ Owner: owner,
+ Nodes: map[string][]byte{string(path): staleBlob},
+ }})
+ require.Equal(t, 1, snap.Len())
+
+ // Underlying reader has the correct blob.
+ underlying := newStubNodeReader()
+ underlying.set(owner, path, correctBlob)
+
+ wrappedDB := newSnapshotNodeDatabase(&stubNodeDB{reader: underlying}, snap)
+ reader, err := wrappedDB.NodeReader(common.Hash{}) // root is irrelevant for stub
+ require.NoError(t, err)
+
+ // Caller asks for the CORRECT hash. The snapshot's only entry is keyed
+ // by (owner, path, staleHash), so a lookup with (owner, path,
+ // correctHash) is a structural miss and the wrapper must fall through
+ // to the underlying reader.
+ got, err := reader.Node(owner, path, correctHash)
+ require.NoError(t, err)
+ require.Equal(t, correctBlob, got, "must serve from underlying reader, not the stale snapshot blob")
+ require.Len(t, underlying.calls, 1, "underlying reader must be invoked exactly once on hash-mismatch fallthrough")
+
+ // Sanity: when the caller asks for staleHash, the lookup key matches
+ // the stored entry exactly and the snapshot serves without consulting
+ // the underlying reader. This shows the hash component of the key is
+ // what distinguishes hit from miss at the same (owner, path).
+ got, err = reader.Node(owner, path, staleHash)
+ require.NoError(t, err)
+ require.Equal(t, staleBlob, got, "snapshot must serve when expectedHash matches stored hash")
+ require.Len(t, underlying.calls, 1, "underlying reader must NOT be invoked on a snapshot hit")
+}
+
+// TestWarmSnapshot_NilAndEmpty exercises the no-op paths: a nil snapshot or
+// an empty snapshot must always return a miss and let every read fall through
+// to the underlying reader. Constructing the snapshot wrapper should be free
+// in those cases.
+func TestWarmSnapshot_NilAndEmpty(t *testing.T) {
+ owner := common.HexToHash("0x02")
+ path := []byte{0x10}
+ blob := []byte("real-node")
+ hash := crypto.Keccak256Hash(blob)
+
+ underlying := newStubNodeReader()
+ underlying.set(owner, path, blob)
+
+ innerDB := &stubNodeDB{reader: underlying}
+
+ // Nil snapshot: wrapper short-circuits, returns inner DB unchanged.
+ require.Same(t, innerDB, newSnapshotNodeDatabase(innerDB, nil))
+
+ // Empty snapshot: same short-circuit.
+ empty := NewWarmSnapshot(nil)
+ require.Equal(t, 0, empty.Len())
+ require.Same(t, innerDB, newSnapshotNodeDatabase(innerDB, empty))
+
+ // Direct Lookup on nil snapshot is a miss.
+ var nilSnap *WarmSnapshot
+ _, ok := nilSnap.Lookup(owner, path, hash)
+ require.False(t, ok)
+
+ // Direct Lookup on empty snapshot is a miss.
+ _, ok = empty.Lookup(owner, path, hash)
+ require.False(t, ok)
+}
+
+// TestNewTrieOnlyWithSnapshotInstallsStateDBWrapper verifies that the snapshot
+// handoff is installed on StateDB.db, not only on the initial Reader. Commit
+// paths call StateDB.db.OpenTrie/OpenStorageTrie directly; if db remained the
+// plain CachingDB those calls would bypass WarmSnapshot.
+func TestNewTrieOnlyWithSnapshotInstallsStateDBWrapper(t *testing.T) {
+ cdb := NewDatabaseForTesting()
+ snap := NewWarmSnapshot([]TrieWarmNodes{{
+ Owner: common.Hash{},
+ Nodes: map[string][]byte{"warm": []byte("warm-node")},
+ }})
+ require.Equal(t, 1, snap.Len())
+
+ sdb, err := NewTrieOnlyWithSnapshot(types.EmptyRootHash, cdb, snap)
+ require.NoError(t, err)
+ _, ok := sdb.db.(*snapshotStateDatabase)
+ require.True(t, ok, "StateDB.db must be snapshot-aware so commit-time trie opens use the warm handoff")
+
+ sdb, err = NewTrieOnlyWithSnapshot(types.EmptyRootHash, cdb, nil)
+ require.NoError(t, err)
+ _, ok = sdb.db.(*snapshotStateDatabase)
+ require.False(t, ok, "nil snapshot must preserve the plain trie-only database path")
+}
+
+// TestWarmSnapshot_RetainsDistinctHashesAtSamePath verifies that two warm
+// nodes with the same (owner, path) but different hashes are both retained.
+// Across blocks, root churn means the same trie position can resolve to
+// different node hashes; if the snapshot collapsed entries at the (owner,
+// path) level, an entry with a different hash would silently overwrite an
+// earlier one and the SRC would miss when its expected hash matched the
+// dropped entry. Triple keying by (owner, path, hash) prevents that
+// loss-of-hits scenario.
+func TestWarmSnapshot_RetainsDistinctHashesAtSamePath(t *testing.T) {
+ owner := common.HexToHash("0xa1")
+ path := []byte{0x42}
+
+ blobA := []byte("trie-node-A")
+ blobB := []byte("trie-node-B")
+ hashA := crypto.Keccak256Hash(blobA)
+ hashB := crypto.Keccak256Hash(blobB)
+ require.NotEqual(t, hashA, hashB)
+
+ // Same (owner, path), two different blobs (different hashes).
+ // Source map keyed by path only, so we have to materialise both as
+ // separate TrieWarmNodes entries: each call to NewWarmSnapshot inserts
+ // every (owner, path, hash) it sees — duplicates only collapse if the
+ // triple is identical, not just (owner, path).
+ snap := NewWarmSnapshot([]TrieWarmNodes{
+ {Owner: owner, Nodes: map[string][]byte{string(path): blobA}},
+ {Owner: owner, Nodes: map[string][]byte{string(path): blobB}},
+ })
+ require.Equal(t, 2, snap.Len(), "both entries must be retained when hashes differ at same (owner, path)")
+
+ gotA, ok := snap.Lookup(owner, path, hashA)
+ require.True(t, ok, "lookup with hashA must hit blobA")
+ require.Equal(t, blobA, gotA)
+
+ gotB, ok := snap.Lookup(owner, path, hashB)
+ require.True(t, ok, "lookup with hashB must hit blobB")
+ require.Equal(t, blobB, gotB)
+
+ // A third hash that nobody supplied: structural miss.
+ other := crypto.Keccak256Hash([]byte("never-seen"))
+ _, ok = snap.Lookup(owner, path, other)
+ require.False(t, ok)
+}
+
+// TestWarmSnapshot_OwnerScoped verifies that the (owner, path) keying
+// distinguishes account-trie nodes from storage-trie nodes that may share a
+// path. Without owner scoping a storage-trie lookup could be satisfied by an
+// account-trie node at the same path, which would still pass the hash check
+// only if the blobs collided — but the keying-level isolation is the
+// structural guarantee.
+func TestWarmSnapshot_OwnerScoped(t *testing.T) {
+ accountOwner := common.Hash{}
+ storageOwner := common.HexToHash("0xfeedface")
+ path := []byte{0x07}
+
+ accountBlob := []byte("account-trie-node")
+ storageBlob := []byte("storage-trie-node-different-content")
+
+ snap := NewWarmSnapshot([]TrieWarmNodes{
+ {Owner: accountOwner, Nodes: map[string][]byte{string(path): accountBlob}},
+ {Owner: storageOwner, Nodes: map[string][]byte{string(path): storageBlob}},
+ })
+ require.Equal(t, 2, snap.Len())
+
+ // Account-trie lookup serves the account blob.
+ got, ok := snap.Lookup(accountOwner, path, crypto.Keccak256Hash(accountBlob))
+ require.True(t, ok)
+ require.Equal(t, accountBlob, got)
+
+ // Storage-trie lookup at the same path serves the storage blob.
+ got, ok = snap.Lookup(storageOwner, path, crypto.Keccak256Hash(storageBlob))
+ require.True(t, ok)
+ require.Equal(t, storageBlob, got)
+
+ // Cross-owner lookup with the wrong-owner-but-matching-path is a miss.
+ _, ok = snap.Lookup(storageOwner, path, crypto.Keccak256Hash(accountBlob))
+ require.False(t, ok, "must not serve account blob to storage owner even when that blob's hash matches expectedHash")
+}
diff --git a/core/stateless.go b/core/stateless.go
index 3a85b7fcc1..c00e9b3f3e 100644
--- a/core/stateless.go
+++ b/core/stateless.go
@@ -43,27 +43,17 @@ import (
//
// TODO(karalabe): Would be nice to resolve both issues above somehow and move it.
func ExecuteStateless(config *params.ChainConfig, vmconfig vm.Config, block *types.Block, witness *stateless.Witness, author *common.Address, consensus consensus.Engine, diskdb ethdb.Database) (common.Hash, common.Hash, *state.StateDB, *ProcessResult, error) {
- var preStateRoot common.Hash
- if config.Bor != nil && config.Bor.IsDelayedSRC(block.Number()) {
- // Under delayed SRC, block.Root() carries the pre-state root for this block
- // (the actual post-execution state root of the parent, placed there by the
- // block producer). Use it directly; do NOT treat it as a faulty value.
- preStateRoot = block.Root()
- } else {
- // Sanity check: the caller should have zeroed Root and ReceiptHash so that
- // we can compute them from scratch via the witness.
- if block.Root() != (common.Hash{}) {
- log.Error("stateless runner received state root it's expected to calculate (faulty consensus client)", "block", block.Number())
- }
- if block.ReceiptHash() != (common.Hash{}) {
- log.Error("stateless runner received receipt root it's expected to calculate (faulty consensus client)", "block", block.Number())
- }
- preStateRoot = witness.Root()
+ // Sanity check if the supplied block accidentally contains a set root or
+ // receipt hash. If so, be very loud, but still continue.
+ if block.Root() != (common.Hash{}) {
+ log.Error("stateless runner received state root it's expected to calculate (faulty consensus client)", "block", block.Number())
+ }
+ if block.ReceiptHash() != (common.Hash{}) {
+ log.Error("stateless runner received receipt root it's expected to calculate (faulty consensus client)", "block", block.Number())
}
-
// Create and populate the state database to serve as the stateless backend
memdb := witness.MakeHashDB(diskdb)
- db, err := state.New(preStateRoot, state.NewDatabase(triedb.NewDatabase(memdb, triedb.HashDefaults), nil))
+ db, err := state.New(witness.Root(), state.NewDatabase(triedb.NewDatabase(memdb, triedb.HashDefaults), nil))
if err != nil {
return common.Hash{}, common.Hash{}, nil, nil, err
}
diff --git a/core/stateless/encoding.go b/core/stateless/encoding.go
index e955b9c962..a7ffb043c0 100644
--- a/core/stateless/encoding.go
+++ b/core/stateless/encoding.go
@@ -25,8 +25,9 @@ import (
)
// BorWitness is the canonical 3-field RLP encoding used for network
-// transmission in Bor. The State field carries all proof data — both
-// contract bytecodes and MPT state trie nodes — as a flat list of byte slices.
+// transmission in Bor. The State field carries MPT trie proof nodes as a flat
+// list of byte slices. Contract bytecodes are not part of the BorWitness wire
+// format; verifiers read bytecode from local storage via CodeRoutingDB.
type BorWitness struct {
Context *types.Header
Headers []*types.Header
diff --git a/core/stateless/witness.go b/core/stateless/witness.go
index 33a755dda2..175c9694b3 100644
--- a/core/stateless/witness.go
+++ b/core/stateless/witness.go
@@ -36,28 +36,54 @@ type HeaderReader interface {
}
// ValidateWitnessPreState validates that the witness pre-state root matches
-// expectedPreStateRoot (the parent block's actual post-execution state root).
-//
-// Under delayed SRC, the pre-state root is stored in contextHeader.Root
-// (set by spawnSRCGoroutine). Under normal operation, it is witness.Root()
-// (= Headers[0].Root = parent header's Root field).
-func ValidateWitnessPreState(witness *Witness, expectedPreStateRoot common.Hash) error {
+// the parent block's state root. The expectedBlock header is the block being
+// imported — the witness context must match it (ParentHash and Number) to
+// prevent a malicious peer from substituting a witness for a different block.
+func ValidateWitnessPreState(witness *Witness, headerReader HeaderReader, expectedBlock *types.Header) error {
if witness == nil {
return fmt.Errorf("witness is nil")
}
+
+ // Check if witness has any headers.
if len(witness.Headers) == 0 {
return fmt.Errorf("witness has no headers")
}
+
+ // Get the witness context header (the block this witness is for).
contextHeader := witness.Header()
if contextHeader == nil {
return fmt.Errorf("witness context header is nil")
}
- // Normal path: witness.Root() (= parent header's Root) must match expected.
- if witness.Root() != expectedPreStateRoot {
- return fmt.Errorf("witness pre-state root mismatch: witness=%x, expected=%x, blockNumber=%d",
- witness.Root(), expectedPreStateRoot, contextHeader.Number.Uint64())
+ // Verify the witness is for the expected block — a malicious peer could
+ // craft a witness with a different ParentHash to bypass the pre-state check.
+ if expectedBlock != nil {
+ if contextHeader.ParentHash != expectedBlock.ParentHash {
+ return fmt.Errorf("witness ParentHash mismatch: witness=%x, expected=%x, blockNumber=%d",
+ contextHeader.ParentHash, expectedBlock.ParentHash, expectedBlock.Number.Uint64())
+ }
+ if contextHeader.Number.Uint64() != expectedBlock.Number.Uint64() {
+ return fmt.Errorf("witness block number mismatch: witness=%d, expected=%d",
+ contextHeader.Number.Uint64(), expectedBlock.Number.Uint64())
+ }
+ }
+
+ // Get the parent block header from the chain.
+ parentHeader := headerReader.GetHeader(contextHeader.ParentHash, contextHeader.Number.Uint64()-1)
+ if parentHeader == nil {
+ return fmt.Errorf("parent block header not found: parentHash=%x, parentNumber=%d",
+ contextHeader.ParentHash, contextHeader.Number.Uint64()-1)
+ }
+
+ // Get witness pre-state root (from first header which should be parent).
+ witnessPreStateRoot := witness.Root()
+
+ // Compare with actual parent block's state root.
+ if witnessPreStateRoot != parentHeader.Root {
+ return fmt.Errorf("witness pre-state root mismatch: witness=%x, parent=%x, blockNumber=%d",
+ witnessPreStateRoot, parentHeader.Root, contextHeader.Number.Uint64())
}
+
return nil
}
@@ -86,14 +112,14 @@ func NewWitness(context *types.Header, chain HeaderReader) (*Witness, error) {
}
headers = append(headers, parent)
}
- // Gut out the root and receipt hash: these are what stateless execution
- // computes. A non-zero Root signals delayed SRC (the pre-state root is
- // embedded there by the caller after NewWitness returns).
+ // Create the witness with a copy of the context header to prevent
+ // callers from mutating the header after witness creation.
+ // Note: Root and ReceiptHash are NOT zeroed here — they are zeroed at the
+ // point of stateless execution (ProcessBlockWithWitnesses) where they are
+ // recomputed. Zeroing here would break the witness manager's hash matching
+ // (handleBroadcast uses witness.Header().Hash() to look up pending blocks).
ctx := types.CopyHeader(context)
- ctx.Root = common.Hash{}
- ctx.ReceiptHash = common.Hash{}
- // Create the witness with a reconstructed gutted out block
return &Witness{
context: ctx,
Headers: headers,
@@ -156,13 +182,7 @@ func (w *Witness) Copy() *Witness {
return cpy
}
-// Root returns the pre-state root for executing this block's transactions.
-// This is always Headers[0].Root, i.e. the parent block's post-execution state
-// root (the trustless pre-state anchor included in every witness).
-//
-// Under delayed SRC the correct pre-state root lives in the block header itself
-// (block[N].Header.Root = root_{N-1}); callers that have the block available
-// should use block.Root() directly rather than this method.
+// Root returns the pre-state root from the first header.
//
// Note, this method will panic in case of a bad witness (but RLP decoding will
// sanitize it and fail before that).
diff --git a/core/stateless/witness_test.go b/core/stateless/witness_test.go
index 86c07a4ce0..4d83da46a5 100644
--- a/core/stateless/witness_test.go
+++ b/core/stateless/witness_test.go
@@ -26,8 +26,13 @@ func TestValidateWitnessPreState_Success(t *testing.T) {
contextHeader := &types.Header{
Number: big.NewInt(100),
ParentHash: parentHash,
+ Root: common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"),
}
+ // Set up mock header reader.
+ mockReader := NewMockHeaderReader()
+ mockReader.AddHeader(parentHeader)
+
// Create witness with matching pre-state root.
witness := &Witness{
context: contextHeader,
@@ -36,8 +41,8 @@ func TestValidateWitnessPreState_Success(t *testing.T) {
State: make(map[string]struct{}),
}
- // Test validation - should succeed (witness.Root() == parentStateRoot).
- err := ValidateWitnessPreState(witness, parentStateRoot)
+ // Test validation - should succeed.
+ err := ValidateWitnessPreState(witness, mockReader, nil)
if err != nil {
t.Errorf("Expected validation to succeed, but got error: %v", err)
}
@@ -60,6 +65,7 @@ func TestValidateWitnessPreState_StateMismatch(t *testing.T) {
contextHeader := &types.Header{
Number: big.NewInt(100),
ParentHash: parentHash,
+ Root: common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"),
}
// Create witness header with mismatched state root.
@@ -69,6 +75,10 @@ func TestValidateWitnessPreState_StateMismatch(t *testing.T) {
Root: mismatchedStateRoot, // Different from actual parent.
}
+ // Set up mock header reader.
+ mockReader := NewMockHeaderReader()
+ mockReader.AddHeader(parentHeader)
+
// Create witness with mismatched pre-state root.
witness := &Witness{
context: contextHeader,
@@ -77,8 +87,8 @@ func TestValidateWitnessPreState_StateMismatch(t *testing.T) {
State: make(map[string]struct{}),
}
- // Test validation - should fail (witness.Root() = mismatchedStateRoot != parentStateRoot).
- err := ValidateWitnessPreState(witness, parentStateRoot)
+ // Test validation - should fail.
+ err := ValidateWitnessPreState(witness, mockReader, nil)
if err == nil {
t.Error("Expected validation to fail due to state root mismatch, but it succeeded")
}
@@ -92,11 +102,11 @@ func TestValidateWitnessPreState_StateMismatch(t *testing.T) {
}
func TestValidateWitnessPreState_EdgeCases(t *testing.T) {
- dummyRoot := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")
+ mockReader := NewMockHeaderReader()
// Test case 1: Nil witness.
t.Run("NilWitness", func(t *testing.T) {
- err := ValidateWitnessPreState(nil, dummyRoot)
+ err := ValidateWitnessPreState(nil, mockReader, nil)
if err == nil {
t.Error("Expected validation to fail for nil witness")
}
@@ -114,7 +124,7 @@ func TestValidateWitnessPreState_EdgeCases(t *testing.T) {
State: make(map[string]struct{}),
}
- err := ValidateWitnessPreState(witness, dummyRoot)
+ err := ValidateWitnessPreState(witness, mockReader, nil)
if err == nil {
t.Error("Expected validation to fail for witness with no headers")
}
@@ -130,14 +140,14 @@ func TestValidateWitnessPreState_EdgeCases(t *testing.T) {
Headers: []*types.Header{
{
Number: big.NewInt(99),
- Root: dummyRoot,
+ Root: common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"),
},
},
Codes: make(map[string]struct{}),
State: make(map[string]struct{}),
}
- err := ValidateWitnessPreState(witness, dummyRoot)
+ err := ValidateWitnessPreState(witness, mockReader, nil)
if err == nil {
t.Error("Expected validation to fail for witness with nil context header")
}
@@ -146,31 +156,33 @@ func TestValidateWitnessPreState_EdgeCases(t *testing.T) {
}
})
- // Test case 4: Mismatch with expected root.
- t.Run("Mismatch", func(t *testing.T) {
- wrongRoot := common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
+ // Test case 4: Parent header not found.
+ t.Run("ParentNotFound", func(t *testing.T) {
+ contextHeader := &types.Header{
+ Number: big.NewInt(100),
+ ParentHash: common.HexToHash("0xnonexistent1234567890abcdef1234567890abcdef1234567890abcdef123456"),
+ Root: common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"),
+ }
witness := &Witness{
- context: &types.Header{
- Number: big.NewInt(100),
- ParentHash: common.HexToHash("0xabc"),
- },
+ context: contextHeader,
Headers: []*types.Header{
{
Number: big.NewInt(99),
- Root: wrongRoot, // witness.Root() will be wrongRoot
+ Root: common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"),
},
},
Codes: make(map[string]struct{}),
State: make(map[string]struct{}),
}
- err := ValidateWitnessPreState(witness, dummyRoot)
+ // Don't add parent header to mock reader - it won't be found.
+ err := ValidateWitnessPreState(witness, mockReader, nil)
if err == nil {
- t.Error("Expected validation to fail when witness root doesn't match expected")
+ t.Error("Expected validation to fail when parent header is not found")
}
- expectedError := "witness pre-state root mismatch"
+ expectedError := "parent block header not found"
if err != nil && len(err.Error()) > len(expectedError) {
if err.Error()[:len(expectedError)] != expectedError {
t.Errorf("Expected error message to start with '%s', but got: %v", expectedError, err)
@@ -190,6 +202,7 @@ func TestValidateWitnessPreState_MultipleHeaders(t *testing.T) {
Root: grandParentStateRoot,
}
+ // Use the actual hash of the grandparent header.
grandParentHash := grandParentHeader.Hash()
parentHeader := &types.Header{
@@ -198,13 +211,20 @@ func TestValidateWitnessPreState_MultipleHeaders(t *testing.T) {
Root: parentStateRoot,
}
+ // Use the actual hash of the parent header.
parentHash := parentHeader.Hash()
contextHeader := &types.Header{
Number: big.NewInt(100),
ParentHash: parentHash,
+ Root: common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"),
}
+ // Set up mock header reader.
+ mockReader := NewMockHeaderReader()
+ mockReader.AddHeader(parentHeader)
+ mockReader.AddHeader(grandParentHeader)
+
// Create witness with multiple headers (parent should be first).
witness := &Witness{
context: contextHeader,
@@ -213,54 +233,13 @@ func TestValidateWitnessPreState_MultipleHeaders(t *testing.T) {
State: make(map[string]struct{}),
}
- // Test validation - should succeed (witness.Root() = parentStateRoot).
- err := ValidateWitnessPreState(witness, parentStateRoot)
+ // Test validation - should succeed (only first header matters for validation).
+ err := ValidateWitnessPreState(witness, mockReader, nil)
if err != nil {
t.Errorf("Expected validation to succeed with multiple headers, but got error: %v", err)
}
}
-func TestValidateWitnessPreState_DelayedSRC(t *testing.T) {
- // Under delayed SRC, witness.Root() = Headers[0].Root = parent header's
- // on-chain Root (= root_{N-2}). The caller passes parentHeader.Root as
- // expectedPreStateRoot. The actual pre-state root (root_{N-1}) is validated
- // separately in writeBlockAndSetHead.
- parentOnChainRoot := common.HexToHash("0xbbbb") // root_{N-2}
-
- t.Run("Match", func(t *testing.T) {
- witness := &Witness{
- context: &types.Header{
- Number: big.NewInt(100),
- Root: common.HexToHash("0xaaaa"), // root_{N-1}, irrelevant here
- },
- Headers: []*types.Header{{Number: big.NewInt(99), Root: parentOnChainRoot}},
- Codes: make(map[string]struct{}),
- State: make(map[string]struct{}),
- }
- err := ValidateWitnessPreState(witness, parentOnChainRoot)
- if err != nil {
- t.Errorf("Expected delayed SRC validation to succeed, got: %v", err)
- }
- })
-
- t.Run("Mismatch", func(t *testing.T) {
- wrongExpected := common.HexToHash("0xcccc")
- witness := &Witness{
- context: &types.Header{
- Number: big.NewInt(100),
- Root: common.HexToHash("0xaaaa"),
- },
- Headers: []*types.Header{{Number: big.NewInt(99), Root: parentOnChainRoot}},
- Codes: make(map[string]struct{}),
- State: make(map[string]struct{}),
- }
- err := ValidateWitnessPreState(witness, wrongExpected)
- if err == nil {
- t.Error("Expected delayed SRC validation to fail on mismatch")
- }
- })
-}
-
// TestConsensusWithOriginalPeer tests consensus calculation including original peer
func TestConsensusWithOriginalPeer(t *testing.T) {
t.Run("Case1_OriginalPeer3_RandomPeers2and3_ShouldChoose3", func(t *testing.T) {
@@ -995,3 +974,93 @@ func TestCalculatePageThreshold(t *testing.T) {
}
})
}
+
+// makeValidatePreStateFixture builds a consistent witness + mockReader + context
+// header where the witness pre-state matches the chain's parent block. Callers
+// can mutate the returned expectedBlock to test the anti-malicious-peer guards.
+func makeValidatePreStateFixture() (*Witness, HeaderReader, *types.Header) {
+ parentStateRoot := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
+
+ parentHeader := &types.Header{
+ Number: big.NewInt(99),
+ ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
+ Root: parentStateRoot,
+ }
+ parentHash := parentHeader.Hash()
+
+ contextHeader := &types.Header{
+ Number: big.NewInt(100),
+ ParentHash: parentHash,
+ Root: common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"),
+ }
+
+ mockReader := NewMockHeaderReader()
+ mockReader.AddHeader(parentHeader)
+
+ witness := &Witness{
+ context: contextHeader,
+ Headers: []*types.Header{parentHeader},
+ Codes: make(map[string]struct{}),
+ State: make(map[string]struct{}),
+ }
+
+ // expectedBlock mirrors the context header — tests mutate individual fields
+ // (ParentHash / Number) to exercise the ValidateWitnessPreState guards.
+ expectedBlock := &types.Header{
+ Number: big.NewInt(100),
+ ParentHash: parentHash,
+ }
+ return witness, mockReader, expectedBlock
+}
+
+// TestValidateWitnessPreState_ExpectedBlockMatches exercises the non-nil
+// expectedBlock branch with matching ParentHash and Number so the full
+// function runs to completion.
+func TestValidateWitnessPreState_ExpectedBlockMatches(t *testing.T) {
+ witness, reader, expectedBlock := makeValidatePreStateFixture()
+ if err := ValidateWitnessPreState(witness, reader, expectedBlock); err != nil {
+ t.Errorf("expected validation to succeed, got %v", err)
+ }
+}
+
+// TestValidateWitnessPreState_ExpectedBlockParentHashMismatch rejects a witness
+// whose context ParentHash disagrees with the expected block — defends against
+// a malicious peer substituting a witness for a different fork.
+func TestValidateWitnessPreState_ExpectedBlockParentHashMismatch(t *testing.T) {
+ witness, reader, expectedBlock := makeValidatePreStateFixture()
+ expectedBlock.ParentHash = common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
+
+ err := ValidateWitnessPreState(witness, reader, expectedBlock)
+ if err == nil {
+ t.Fatal("expected ParentHash mismatch error, got nil")
+ }
+ if !containsSubstring(err.Error(), "witness ParentHash mismatch") {
+ t.Errorf("expected ParentHash mismatch error, got: %v", err)
+ }
+}
+
+// TestValidateWitnessPreState_ExpectedBlockNumberMismatch rejects a witness
+// whose context Number disagrees with the expected block — defends against a
+// malicious peer substituting a witness for a different block at the same
+// ParentHash (e.g., after a reorg).
+func TestValidateWitnessPreState_ExpectedBlockNumberMismatch(t *testing.T) {
+ witness, reader, expectedBlock := makeValidatePreStateFixture()
+ expectedBlock.Number = big.NewInt(999) // ParentHash still matches
+
+ err := ValidateWitnessPreState(witness, reader, expectedBlock)
+ if err == nil {
+ t.Fatal("expected Number mismatch error, got nil")
+ }
+ if !containsSubstring(err.Error(), "witness block number mismatch") {
+ t.Errorf("expected Number mismatch error, got: %v", err)
+ }
+}
+
+func containsSubstring(s, sub string) bool {
+ for i := 0; i+len(sub) <= len(s); i++ {
+ if s[i:i+len(sub)] == sub {
+ return true
+ }
+ }
+ return false
+}
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
index ec5ec55bb3..3be0b89068 100644
--- a/core/txpool/blobpool/blobpool.go
+++ b/core/txpool/blobpool/blobpool.go
@@ -405,7 +405,7 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser
// Initialize the state with head block, or fallback to empty one in
// case the head state is not available (might occur when node is not
// fully synced).
- state, err := p.chain.PostExecutionStateAt(head)
+ state, err := p.chain.PostExecState(head)
if err != nil {
state, err = p.chain.StateAt(types.EmptyRootHash)
}
@@ -843,7 +843,7 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
resettimeHist.Update(time.Since(start).Nanoseconds())
}(time.Now())
- statedb, err := p.chain.PostExecutionStateAt(newHead)
+ statedb, err := p.chain.PostExecState(newHead)
if err != nil {
log.Error("Failed to reset blobpool state", "err", err)
return
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index 8922c64a81..adf86f5510 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -202,7 +202,7 @@ func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
return bc.statedb, nil
}
-func (bc *testBlockChain) PostExecutionStateAt(*types.Header) (*state.StateDB, error) {
+func (bc *testBlockChain) PostExecState(header *types.Header) (*state.StateDB, error) {
return bc.statedb, nil
}
diff --git a/core/txpool/blobpool/interface.go b/core/txpool/blobpool/interface.go
index b1d7b69969..abb3c63a9b 100644
--- a/core/txpool/blobpool/interface.go
+++ b/core/txpool/blobpool/interface.go
@@ -42,9 +42,8 @@ type BlockChain interface {
// StateAt returns a state database for a given root hash (generally the head).
StateAt(root common.Hash) (*state.StateDB, error)
- // PostExecutionStateAt returns a StateDB representing the post-execution
- // state of the given block header. Under delayed SRC, uses a non-blocking
- // FlatDiff overlay when available; otherwise falls back to resolving the
- // actual state root (which may block).
- PostExecutionStateAt(header *types.Header) (*state.StateDB, error)
+ // PostExecState returns a StateDB representing the post-execution
+ // state of the given block header. Under pipelined SRC, uses a non-blocking
+ // FlatDiff overlay when available; otherwise falls back to StateAt.
+ PostExecState(header *types.Header) (*state.StateDB, error)
}
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
index 63d3721b6b..355e476714 100644
--- a/core/txpool/legacypool/legacypool.go
+++ b/core/txpool/legacypool/legacypool.go
@@ -172,11 +172,10 @@ type BlockChain interface {
// StateAt returns a state database for a given root hash (generally the head).
StateAt(root common.Hash) (*state.StateDB, error)
- // PostExecutionStateAt returns a StateDB representing the post-execution
- // state of the given block header. Under delayed SRC, uses a non-blocking
- // FlatDiff overlay when available; otherwise falls back to resolving the
- // actual state root (which may block).
- PostExecutionStateAt(header *types.Header) (*state.StateDB, error)
+ // PostExecState returns a StateDB representing the post-execution
+ // state of the given block header. Under pipelined SRC, uses a non-blocking
+ // FlatDiff overlay when available; otherwise falls back to StateAt.
+ PostExecState(header *types.Header) (*state.StateDB, error)
}
// Config are the configuration parameters of the transaction pool.
@@ -408,7 +407,7 @@ func (pool *LegacyPool) Init(gasTip uint64, head *types.Header, reserver txpool.
// Initialize the state with head block, or fallback to empty one in
// case the head state is not available (might occur when node is not
// fully synced).
- statedb, err := pool.chain.PostExecutionStateAt(head)
+ statedb, err := pool.chain.PostExecState(head)
if err != nil {
statedb, err = pool.chain.StateAt(types.EmptyRootHash)
}
@@ -1787,7 +1786,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
if newHead == nil {
newHead = pool.chain.CurrentBlock() // Special case during testing
}
- statedb, err := pool.chain.PostExecutionStateAt(newHead)
+ statedb, err := pool.chain.PostExecState(newHead)
if err != nil {
log.Error("Failed to reset txpool state", "err", err)
return
@@ -1804,6 +1803,42 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
pool.addTxs(reinject, false)
}
+// SetSpeculativeState updates the pool's internal state to reflect a new
+// block that hasn't been written to the chain yet. This is used by pipelined
+// SRC: after block N's transactions are executed but before block N is sealed,
+// the miner calls this to update the txpool so that speculative execution of
+// block N+1 gets correct pending transactions (with block N's nonces/balances).
+//
+// Unlike the full reset() path, this does NOT walk the chain for included/
+// discarded transactions (the block isn't in the chain DB). It only:
+// 1. Updates currentState and pendingNonces from the provided statedb
+// 2. Sets currentHead to the new header
+// 3. Demotes transactions with stale nonces
+// 4. Promotes newly executable transactions
+func (pool *LegacyPool) SetSpeculativeState(newHead *types.Header, statedb *state.StateDB) {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ pool.currentHead.Store(newHead)
+ pool.currentState = statedb
+ pool.pendingNonces = newNoncer(statedb)
+
+ // Demote transactions that are no longer valid with the new nonces
+ pool.demoteUnexecutables()
+
+ // Promote transactions that are now executable
+ promoted := pool.promoteExecutables(nil)
+
+ // Fire events for promoted transactions
+ if len(promoted) > 0 {
+ var txs []*types.Transaction
+ for _, tx := range promoted {
+ txs = append(txs, tx)
+ }
+ pool.txFeed.Send(core.NewTxsEvent{Txs: txs})
+ }
+}
+
// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go
index d12759eb88..36c804d806 100644
--- a/core/txpool/legacypool/legacypool_test.go
+++ b/core/txpool/legacypool/legacypool_test.go
@@ -117,7 +117,7 @@ func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
return bc.statedb, nil
}
-func (bc *testBlockChain) PostExecutionStateAt(*types.Header) (*state.StateDB, error) {
+func (bc *testBlockChain) PostExecState(header *types.Header) (*state.StateDB, error) {
return bc.statedb, nil
}
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index 7a6f81c99e..2869a3c28d 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -57,11 +57,10 @@ type BlockChain interface {
// StateAt returns a state database for a given root hash (generally the head).
StateAt(root common.Hash) (*state.StateDB, error)
- // PostExecutionStateAt returns a StateDB representing the post-execution
- // state of the given block header. Under delayed SRC, uses a non-blocking
- // FlatDiff overlay when available; otherwise falls back to resolving the
- // actual state root (which may block).
- PostExecutionStateAt(header *types.Header) (*state.StateDB, error)
+ // PostExecState returns a StateDB representing the post-execution
+ // state of the given block header. Under pipelined SRC, uses a non-blocking
+ // FlatDiff overlay when available; otherwise falls back to StateAt.
+ PostExecState(header *types.Header) (*state.StateDB, error)
}
// TxPool is an aggregator for various transaction specific pools, collectively
@@ -94,7 +93,7 @@ func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) {
// Initialize the state with head block, or fallback to empty one in
// case the head state is not available (might occur when node is not
// fully synced).
- statedb, err := chain.PostExecutionStateAt(head)
+ statedb, err := chain.PostExecState(head)
if err != nil {
statedb, err = chain.StateAt(types.EmptyRootHash)
}
@@ -199,7 +198,7 @@ func (p *TxPool) loop(head *types.Header) {
case resetBusy <- struct{}{}:
// Updates the statedb with the new chain head. The head state may be
// unavailable if the initial state sync has not yet completed.
- if statedb, err := p.chain.PostExecutionStateAt(newHead); err != nil {
+ if statedb, err := p.chain.PostExecState(newHead); err != nil {
log.Error("Failed to reset txpool state", "err", err)
} else {
p.stateLock.Lock()
@@ -559,3 +558,28 @@ func (p *TxPool) Clear() {
subpool.Clear()
}
}
+
+// SpeculativeSetter is implemented by subpools that support speculative
+// state updates for pipelined SRC. This avoids import cycles between txpool
+// and legacypool packages.
+type SpeculativeSetter interface {
+	SetSpeculativeState(newHead *types.Header, statedb *state.StateDB)
+}
+
+// SetSpeculativeState updates the txpool's state to reflect a block that
+// hasn't been written to the chain yet. This is used by pipelined SRC so that
+// speculative execution of block N+1 gets correct pending transactions
+// (reflecting block N's post-execution nonces and balances via FlatDiff overlay).
+func (p *TxPool) SetSpeculativeState(newHead *types.Header, statedb *state.StateDB) {
+	// Update the aggregator's state
+	p.stateLock.Lock()
+	p.state = statedb
+	p.stateLock.Unlock()
+
+	// Update subpools that support speculative state.
+	// NOTE(review): unlike head resets in loop(), this path does not
+	// serialize on resetBusy — confirm a concurrent reset cannot interleave
+	// with the speculative update.
+	for _, subpool := range p.subpools {
+		if ss, ok := subpool.(SpeculativeSetter); ok {
+			ss.SetSpeculativeState(newHead, statedb)
+		}
+	}
+}
diff --git a/core/types.go b/core/types.go
index 43f1f87897..f9f4b691e3 100644
--- a/core/types.go
+++ b/core/types.go
@@ -35,6 +35,11 @@ type Validator interface {
// ValidateState validates the given statedb and optionally the process result.
ValidateState(block *types.Block, state *state.StateDB, res *ProcessResult, stateless bool) error
+
+ // ValidateStateCheap validates cheap post-state checks (gas, bloom, receipt root,
+ // requests) without computing the expensive IntermediateRoot. Used by the
+ // pipelined import path where IntermediateRoot is deferred to an SRC goroutine.
+ ValidateStateCheap(block *types.Block, state *state.StateDB, res *ProcessResult) error
}
// Prefetcher is an interface for pre-caching transaction signatures and state.
diff --git a/core/types/block.go b/core/types/block.go
index 03bcc6c703..06af72191c 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -102,6 +102,10 @@ type Header struct {
// ActualTime is the actual time of the block. It is internally used by the miner.
ActualTime time.Time `json:"-" rlp:"-"`
+ // AbortRecovery marks a miner-local rebuild after speculative execution was
+ // discarded. It is not encoded and is only used for build-time heuristics.
+ AbortRecovery bool `json:"-" rlp:"-"`
+
// BaseFee was added by EIP-1559 and is ignored in legacy headers.
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index d07a6e2a05..ddb01122a0 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -536,7 +536,6 @@ func TestReinforceMultiClientPreCompilesTest(t *testing.T) {
"IsMadhugiriPro",
"IsLisovo",
"IsLisovoPro",
- "IsDelayedSRC",
}
if len(actual) != len(expected) {
diff --git a/docs/cli/default_config.toml b/docs/cli/default_config.toml
index 022a277ef1..cb3197af50 100644
--- a/docs/cli/default_config.toml
+++ b/docs/cli/default_config.toml
@@ -105,6 +105,8 @@ devfakeauthor = false
base-fee-change-denominator = 0
prefetch = false
prefetch-gaslimit-percent = 100
+ pipelined-src = true
+ pipelined-src-logs = true
[jsonrpc]
ipcdisable = false
@@ -263,3 +265,7 @@ devfakeauthor = false
enable-preconfs = false
enable-private-tx = false
bp-rpc-endpoints = []
+
+[pipeline]
+ enable-import-src = true
+ import-src-logs = true
diff --git a/docs/cli/server.md b/docs/cli/server.md
index a8e4a3d595..bc62316a52 100644
--- a/docs/cli/server.md
+++ b/docs/cli/server.md
@@ -324,6 +324,12 @@ The ```bor server``` command runs the Bor client.
- ```v5disc```: Enables the V5 discovery mechanism (default: true)
+### Pipeline Options
+
+- ```pipeline.enable-import-src```: Enable pipelined state root computation during block import: overlap SRC(N) with block N+1 tx execution (default: true)
+
+- ```pipeline.import-src-logs```: Enable verbose logging for pipelined import SRC (default: true)
+
### Sealer Options
- ```allow-gas-tip-override```: Allows block producers to override the mining gas tip (default: false)
@@ -354,6 +360,10 @@ The ```bor server``` command runs the Bor client.
- ```miner.interruptcommit```: Interrupt block commit when block creation time is passed (default: true)
+- ```miner.pipelined-src```: Enable pipelined state root computation: overlap SRC(N) with block N+1 tx execution (default: true)
+
+- ```miner.pipelined-src-logs```: Enable verbose logging for pipelined SRC (spawned SRC, SRC completed, block sealed, etc.) (default: true)
+
- ```miner.prefetch```: Enable transaction prefetching from the pool during block building (default: false)
- ```miner.prefetch.gaslimit.percent```: Gas limit percentage for prefetching (e.g., 100 = 100%, 110 = 110%) (default: 100)
diff --git a/eth/api_backend.go b/eth/api_backend.go
index c068fa1e3b..19b16fb70b 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -288,9 +288,12 @@ func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.B
return nil, nil, errors.New("header not found")
}
- stateDb, err := b.stateAtHeader(header)
+ stateDb, err := b.eth.BlockChain().StateAt(header.Root)
if err != nil {
- return nil, nil, err
+ stateDb, err = b.eth.BlockChain().HistoricState(header.Root)
+ if err != nil {
+ return nil, nil, err
+ }
}
return stateDb, header, nil
}
@@ -314,9 +317,12 @@ func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockN
return nil, nil, errors.New("hash is not currently canonical")
}
- stateDb, err := b.stateAtHeader(header)
+ stateDb, err := b.eth.BlockChain().StateAt(header.Root)
if err != nil {
- return nil, nil, err
+ stateDb, err = b.eth.BlockChain().HistoricState(header.Root)
+ if err != nil {
+ return nil, nil, err
+ }
}
return stateDb, header, nil
}
@@ -324,21 +330,6 @@ func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockN
return nil, nil, errors.New("invalid arguments; neither block nor hash specified")
}
-// stateAtHeader returns the state database for the given header, correctly
-// resolving the state root under delayed SRC where header.Root stores the
-// parent's state root rather than this block's post-execution root.
-func (b *EthAPIBackend) stateAtHeader(header *types.Header) (*state.StateDB, error) {
- bc := b.eth.BlockChain()
- stateDb, err := bc.PostExecutionStateAt(header)
- if err != nil {
- stateDb, err = bc.HistoricState(header.Root)
- if err != nil {
- return nil, err
- }
- }
- return stateDb, nil
-}
-
func (b *EthAPIBackend) HistoryPruningCutoff() uint64 {
bn, _ := b.eth.blockchain.HistoryPruningCutoff()
return bn
diff --git a/eth/api_debug.go b/eth/api_debug.go
index 7bcf149e07..4cd120a721 100644
--- a/eth/api_debug.go
+++ b/eth/api_debug.go
@@ -506,7 +506,7 @@ func (api *DebugAPI) ExecutionWitness(bn rpc.BlockNumber) (*stateless.ExtWitness
}
parentBlock := bc.GetBlockByHash(block.ParentHash())
- _, _, _, statedb, _, err := bc.ProcessBlock(parentBlock, block.Header(), nil, nil)
+ _, _, _, statedb, _, err := bc.ProcessBlock(parentBlock, block.Header(), nil, nil, nil)
if err != nil {
return nil, err
}
@@ -527,7 +527,7 @@ func (api *DebugAPI) ExecutionWitnessByHash(hash common.Hash) (*stateless.ExtWit
}
parentBlock := bc.GetBlockByHash(block.ParentHash())
- _, _, _, statedb, _, err := bc.ProcessBlock(parentBlock, block.Header(), nil, nil)
+ _, _, _, statedb, _, err := bc.ProcessBlock(parentBlock, block.Header(), nil, nil, nil)
if err != nil {
return nil, err
}
diff --git a/eth/backend.go b/eth/backend.go
index fbd3f98eb3..5b45339764 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -291,7 +291,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
EnableWitnessStats: config.EnableWitnessStats,
EnableEVMSwitchDispatch: config.EnableEVMSwitchDispatch,
},
- Stateless: config.SyncMode == downloader.StatelessSync,
+ EnablePipelinedImportSRC: config.EnablePipelinedImportSRC,
+ PipelinedImportSRCLogs: config.PipelinedImportSRCLogs,
+ PipelinedSRCWarmSnapshot: config.PipelinedSRCWarmSnapshot,
+ Stateless: config.SyncMode == downloader.StatelessSync,
// Enables file journaling for the trie database. The journal files will be stored
// within the data directory. The corresponding paths will be either:
// - DATADIR/triedb/merkle.journal
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index a1d42d7b57..4c9420b90a 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -132,6 +132,17 @@ type Config struct {
NoPruning bool // Whether to disable pruning and flush everything to disk
NoPrefetch bool // Whether to disable prefetching and only load state on demand
+ // Pipelined import SRC: overlap SRC(N) with tx execution of block N+1 during import
+ EnablePipelinedImportSRC bool
+ PipelinedImportSRCLogs bool
+
+ // PipelinedSRCWarmSnapshot enables warm-cache handoff from the
+ // execution-side trie prefetcher to the pipelined SRC goroutine. Trie
+ // reads in SRC consult a hash-verified snapshot before falling through
+ // to pathdb. Targets cold-cache restart/catch-up CPU. NewTrieOnly
+ // semantics, witness completeness, and root determinism are unaffected.
+ PipelinedSRCWarmSnapshot bool
+
// Deprecated: use 'TransactionHistory' instead.
TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
diff --git a/eth/filters/api_test.go b/eth/filters/api_test.go
index 5b07fc663a..7278d0a694 100644
--- a/eth/filters/api_test.go
+++ b/eth/filters/api_test.go
@@ -219,3 +219,74 @@ func TestUnmarshalJSONNewFilterArgs(t *testing.T) {
t.Fatalf("expected 0 topics, got %d topics", len(test7.Topics[2]))
}
}
+
+// TestResolveBlockNumForRangeCheck exercises each branch of the sentinel-to-height
+// resolver so that mutations on the sentinel handling and fall-through can be
+// detected by tests rather than only through full FilterAPI integration tests.
+func TestResolveBlockNumForRangeCheck(t *testing.T) {
+ t.Parallel()
+ const head uint64 = 200
+
+ tests := []struct {
+ name string
+ n int64
+ want uint64
+ }{
+ {"concrete_zero", 0, 0},
+ {"concrete_positive", 42, 42},
+ {"concrete_at_head", 200, 200},
+ {"earliest_sentinel", rpc.EarliestBlockNumber.Int64(), 0},
+ {"latest_sentinel", rpc.LatestBlockNumber.Int64(), head},
+ {"pending_sentinel", rpc.PendingBlockNumber.Int64(), head},
+ {"safe_sentinel", rpc.SafeBlockNumber.Int64(), head},
+ {"finalized_sentinel", rpc.FinalizedBlockNumber.Int64(), head},
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ if got := resolveBlockNumForRangeCheck(tc.n, head); got != tc.want {
+ t.Errorf("resolveBlockNumForRangeCheck(%d, %d) = %d; want %d", tc.n, head, got, tc.want)
+ }
+ })
+ }
+}
+
+// TestCheckBlockRangeLimit covers the range-limit DoS guard at the unit-function
+// level, including the boundary condition (span == limit) and the `- -> +`
+// operator mutation (a mutant that replaces subtraction with addition would
+// flag ranges whose sum exceeds the limit even though the span is small).
+func TestCheckBlockRangeLimit(t *testing.T) {
+ t.Parallel()
+ const head uint64 = 200
+
+ tests := []struct {
+ name string
+ begin int64
+ end int64
+ limit uint64
+ wantErr bool
+ }{
+ {"limit_zero_disabled", 0, 1000, 0, false},
+ {"span_below_limit", 0, 50, 100, false},
+ {"span_at_limit", 0, 100, 100, false},
+ {"span_above_limit", 0, 101, 100, true},
+ {"end_before_begin_no_false_positive", 50, 40, 100, false},
+ {"sum_exceeds_limit_but_span_small", 50, 55, 10, false},
+ {"earliest_to_head_exceeds_limit", rpc.EarliestBlockNumber.Int64(), rpc.LatestBlockNumber.Int64(), 100, true},
+ {"earliest_to_head_at_limit", rpc.EarliestBlockNumber.Int64(), rpc.LatestBlockNumber.Int64(), 200, false},
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ err := checkBlockRangeLimit(tc.begin, tc.end, head, tc.limit)
+ if tc.wantErr && err == nil {
+ t.Errorf("checkBlockRangeLimit(%d, %d, %d, %d) = nil; want error", tc.begin, tc.end, head, tc.limit)
+ }
+ if !tc.wantErr && err != nil {
+ t.Errorf("checkBlockRangeLimit(%d, %d, %d, %d) = %v; want nil", tc.begin, tc.end, head, tc.limit, err)
+ }
+ })
+ }
+}
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 07497c6805..c48bbcbbb3 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -589,6 +589,14 @@ func TestInvalidGetRangeLogsRequest(t *testing.T) {
if _, err := api.GetLogs(t.Context(), FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}); err != errInvalidBlockRange {
t.Errorf("Expected Logs for invalid range return error, but got: %v", err)
}
+
+ // GetBorBlockLogs has the same range guard (fromBlock > toBlock → error) and
+ // must reject it independently — the previous branch only covered GetLogs.
+ // This is reached before the backend.CurrentHeader() call, so the empty-DB
+ // fixture above is sufficient.
+ if _, err := api.GetBorBlockLogs(t.Context(), FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}); err != errInvalidBlockRange {
+ t.Errorf("Expected GetBorBlockLogs for invalid range to return errInvalidBlockRange, but got: %v", err)
+ }
}
// TestExceedLogQueryLimit tests getLogs with too many addresses or topics
diff --git a/eth/handler.go b/eth/handler.go
index 5a0f625bc4..2f4024d903 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -164,16 +164,15 @@ type handler struct {
// privateTxGetter to check if a transaction needs to be treated as private or not
privateTxGetter relay.PrivateTxGetter
- eventMux *event.TypeMux
- txsCh chan core.NewTxsEvent
- txsSub event.Subscription
- stuckTxsCh chan core.StuckTxsEvent
- stuckTxsSub event.Subscription
- minedBlockSub *event.TypeMuxSubscription
- blockRange *blockRangeState
-
+ eventMux *event.TypeMux
+ txsCh chan core.NewTxsEvent
+ txsSub event.Subscription
+ stuckTxsCh chan core.StuckTxsEvent
+ stuckTxsSub event.Subscription
+ minedBlockSub *event.TypeMuxSubscription
witnessReadyCh chan core.WitnessReadyEvent
witnessReadySub event.Subscription
+ blockRange *blockRangeState
requiredBlocks map[uint64]common.Hash
@@ -622,11 +621,11 @@ func (h *handler) Start(maxPeers int) {
h.minedBlockSub = h.eventMux.Subscribe(core.NewMinedBlockEvent{})
go h.minedBroadcastLoop()
- // broadcast delayed-SRC witnesses once the SRC goroutine completes
- h.wg.Add(1)
+ // announce witnesses from pipelined import SRC
h.witnessReadyCh = make(chan core.WitnessReadyEvent, 10)
h.witnessReadySub = h.chain.SubscribeWitnessReadyEvent(h.witnessReadyCh)
- go h.witnessBroadcastLoop()
+ h.wg.Add(1)
+ go h.witnessReadyBroadcastLoop()
h.wg.Add(1)
go h.chainSync.loop()
@@ -649,7 +648,9 @@ func (h *handler) Stop() {
h.stuckTxsSub.Unsubscribe() // quits stuckTxBroadcastLoop
}
h.minedBlockSub.Unsubscribe()
- h.witnessReadySub.Unsubscribe() // quits witnessBroadcastLoop
+ if h.witnessReadySub != nil {
+ h.witnessReadySub.Unsubscribe()
+ }
h.blockRange.stop()
// Quit chainSync and txsync64.
@@ -728,8 +729,9 @@ func (h *handler) BroadcastBlock(block *types.Block, witness *stateless.Witness,
return
}
- // Otherwise if the block is indeed in out own chain, announce it
- if h.chain.HasBlock(hash, block.NumberU64()) {
+ // Otherwise, announce the block if it is already written locally or if the
+ // witness is cached and the block is in-flight on the local write path.
+ if h.chain.HasBlock(hash, block.NumberU64()) || h.chain.HasWitness(hash) {
for _, peer := range peers {
peer.AsyncSendNewBlockHash(block)
}
@@ -840,26 +842,63 @@ func (h *handler) minedBroadcastLoop() {
log.Info("[block tracker] Broadcasting mined block", "number", ev.Block.NumberU64(), "hash", ev.Block.Hash(), "blockTime", ev.Block.Time(), "now", now.Unix(), "delay", delay, "delayInMs", delayInMs, "sealToBroadcast", common.PrettyDuration(sealToBcast))
}
loopStart := time.Now()
- h.BroadcastBlock(ev.Block, ev.Witness, true) // First propagate block to peers
- h.BroadcastBlock(ev.Block, ev.Witness, false) // Only then announce to the rest
+ h.BroadcastBlock(ev.Block, ev.Witness, true) // First propagate block to peers
+ go h.announceMinedBlock(ev.Block, ev.Witness)
broadcastLoopTimer.Update(time.Since(loopStart))
}
}
}
-// witnessBroadcastLoop announces delayed-SRC witnesses to peers once the
-// background SRC goroutine has finished computing them. Analogous to block
-// propagation: we send a hash announcement and let peers fetch on demand.
-func (h *handler) witnessBroadcastLoop() {
+// announceMinedBlock announces a locally mined block after it becomes visible
+// through the local chain reader.
+//
+// The pipelined inline path broadcasts before its async DB write completes, so
+// announcing immediately can race with HasBlock() and silently skip the hash
+// announcement to non-propagation peers. Wait briefly for the write to land,
+// then announce. If the block still isn't visible but the witness is cached,
+// fall back to the witness-gated path so stateless peers can still progress.
+func (h *handler) announceMinedBlock(block *types.Block, witness *stateless.Witness) {
+	const (
+		pollInterval = 10 * time.Millisecond
+		maxWait      = 500 * time.Millisecond
+	)
+
+	hash := block.Hash()
+	number := block.NumberU64()
+	deadline := time.NewTimer(maxWait)
+	defer deadline.Stop()
+	ticker := time.NewTicker(pollInterval)
+	defer ticker.Stop()
+
+	// Check visibility before blocking on the ticker so an already-written
+	// block is announced without waiting a full poll interval.
+	for {
+		if h.chain.HasBlock(hash, number) {
+			h.BroadcastBlock(block, witness, false)
+			return
+		}
+		select {
+		case <-ticker.C:
+		case <-deadline.C:
+			// Write never became visible within maxWait: announce only if
+			// the cached witness can vouch for the block (see BroadcastBlock).
+			if h.chain.HasWitness(hash) {
+				h.BroadcastBlock(block, witness, false)
+			} else {
+				log.Debug("Skipping mined block announce before local write became visible", "hash", hash, "number", number)
+			}
+			return
+		}
+	}
+}
+
+// witnessReadyBroadcastLoop announces witness availability from the pipelined
+// import SRC goroutine. Without this, the stateless node would have to poll
+// for witnesses with 10-second retry intervals.
+func (h *handler) witnessReadyBroadcastLoop() {
defer h.wg.Done()
for {
select {
case ev := <-h.witnessReadyCh:
- hash := ev.Block.Hash()
- number := ev.Block.NumberU64()
- for _, peer := range h.peers.peersWithoutWitness(hash) {
- peer.Peer.AsyncSendNewWitnessHash(hash, number)
+ for _, peer := range h.peers.peersWithoutWitness(ev.BlockHash) {
+ peer.Peer.AsyncSendNewWitnessHash(ev.BlockHash, ev.BlockNumber)
}
case <-h.witnessReadySub.Err():
return
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index a016d906ac..7745f1e3a5 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -45,9 +45,10 @@ import (
// testEthHandler is a mock event handler to listen for inbound network requests
// on the `eth` protocol and convert them into a more easily testable form.
type testEthHandler struct {
- blockBroadcasts event.Feed
- txAnnounces event.Feed
- txBroadcasts event.Feed
+ blockBroadcasts event.Feed
+ blockAnnouncements event.Feed
+ txAnnounces event.Feed
+ txBroadcasts event.Feed
}
func (h *testEthHandler) Chain() *core.BlockChain { panic("no backing chain") }
@@ -62,6 +63,11 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
h.blockBroadcasts.Send(packet.Block)
return nil
+ case *eth.NewBlockHashesPacket:
+ hashes, _ := packet.Unpack()
+ h.blockAnnouncements.Send(hashes)
+ return nil
+
case *eth.NewPooledTransactionHashesPacket:
h.txAnnounces.Send(packet.Hashes)
return nil
@@ -705,6 +711,70 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
}
}
+// TestMinedBroadcastAnnouncesWithCachedWitnessBeforeWrite verifies that a
+// locally mined block whose chain write has not yet landed (HasBlock is
+// false) is still hash-announced to peers once its witness is cached —
+// exercising the HasWitness fallback on the mined-block announce path.
+func TestMinedBroadcastAnnouncesWithCachedWitnessBeforeWrite(t *testing.T) {
+	t.Parallel()
+
+	source := newTestHandlerWithBlocks(1)
+	defer source.close()
+
+	sinks := make([]*testEthHandler, 2)
+	for i := range sinks {
+		sinks[i] = new(testEthHandler)
+	}
+
+	for i, sink := range sinks {
+		sourcePipe, sinkPipe := p2p.MsgPipe()
+		defer sourcePipe.Close()
+		defer sinkPipe.Close()
+
+		sourcePeer := eth.NewPeer(eth.ETH68, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
+		sinkPeer := eth.NewPeer(eth.ETH68, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
+		defer sourcePeer.Close()
+		defer sinkPeer.Close()
+
+		go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {
+			return eth.Handle((*ethHandler)(source.handler), peer)
+		})
+		if err := sinkPeer.Handshake(1, source.chain, eth.BlockRangeUpdatePacket{}); err != nil {
+			t.Fatalf("failed to run protocol handshake: %v", err)
+		}
+		go eth.Handle(sink, sinkPeer)
+	}
+
+	announceCh := make(chan []common.Hash, len(sinks))
+	for i := range sinks {
+		sub := sinks[i].blockAnnouncements.Subscribe(announceCh)
+		defer sub.Unsubscribe()
+	}
+
+	// Build a child block that is deliberately NOT inserted into the chain,
+	// so only the cached witness can make it announceable.
+	parent := source.chain.CurrentBlock()
+	block := types.NewBlockWithHeader(&types.Header{
+		ParentHash: parent.Hash(),
+		Number:     new(big.Int).Add(parent.Number, common.Big1),
+		Time:       parent.Time + 1,
+		Difficulty: big.NewInt(1),
+		GasLimit:   parent.GasLimit,
+	})
+	require.False(t, source.chain.HasBlock(block.Hash(), block.NumberU64()))
+
+	source.chain.CacheWitness(block.Hash(), []byte{0x1})
+
+	// Give the peer connections time to finish registering before posting
+	// the mined-block event.
+	time.Sleep(100 * time.Millisecond)
+	source.handler.eventMux.Post(core.NewMinedBlockEvent{Block: block})
+
+	timeout := time.After(2 * time.Second)
+	for {
+		select {
+		case hashes := <-announceCh:
+			if len(hashes) == 1 && hashes[0] == block.Hash() {
+				return
+			}
+		case <-timeout:
+			t.Fatalf("timed out waiting for block hash announcement for block %s", block.Hash())
+		}
+	}
+}
+
// Tests that a propagated malformed block (uncles or transactions don't match
// with the hashes in the header) gets discarded and not broadcast forward.
func TestBroadcastMalformedBlock69(t *testing.T) {
diff --git a/eth/handler_wit.go b/eth/handler_wit.go
index 2c4e19d471..e5e6f20e3d 100644
--- a/eth/handler_wit.go
+++ b/eth/handler_wit.go
@@ -112,6 +112,40 @@ func (h *witHandler) handleWitnessHashesAnnounce(peer *wit.Peer, hashes []common
+// resolveWitnessSizes builds the per-hash size map for a GetWitness request.
+// First checks rawdb for persisted witness sizes. For hashes missing from
+// rawdb but whose block header exists, falls back to GetWitness (which
+// consults the cache and waits for pipelined SRC) so peers get a response
+// for witnesses still being generated by the SRC goroutine.
+//
+// The header existence check is critical: without it, a peer can DoS the
+// handler by requesting sizes for non-existent block hashes, forcing a
+// 2-second wait per hash in GetWitness.
+//
+// Returns both the size map and any witnesses fetched during the size
+// lookup so the caller can reuse them instead of re-fetching.
+func (h *witHandler) resolveWitnessSizes(seen map[common.Hash]struct{}) (map[common.Hash]uint64, map[common.Hash][]byte) {
+	sizes := make(map[common.Hash]uint64, len(seen))
+	prefetched := make(map[common.Hash][]byte, len(seen))
+	for hash := range seen {
+		if size := rawdb.ReadWitnessSize(h.Chain().DB(), hash); size != nil {
+			sizes[hash] = *size
+			continue
+		}
+		if h.Chain().GetHeaderByHash(hash) == nil {
+			sizes[hash] = 0
+			continue
+		}
+		if w := h.Chain().GetWitness(hash); len(w) > 0 {
+			sizes[hash] = uint64(len(w))
+			prefetched[hash] = w
+		} else {
+			sizes[hash] = 0
+		}
+	}
+	return sizes, prefetched
+}
+
// handleGetWitness retrieves witnesses for the requested block hashes and returns them as raw RLP data.
// It now returns the data and error, rather than sending the reply directly.
// The returned data is [][]byte, as rlp.RawValue is essentially []byte.
func (h *witHandler) handleGetWitness(peer *wit.Peer, req *wit.GetWitnessPacket) (wit.WitnessPacketResponse, error) {
log.Debug("handleGetWitness processing request", "peer", peer.ID(), "reqID", req.RequestId, "witnessPages", len(req.WitnessPages))
// list different witnesses to query
@@ -120,16 +154,7 @@ func (h *witHandler) handleGetWitness(peer *wit.Peer, req *wit.GetWitnessPacket)
seen[witnessPage.Hash] = struct{}{}
}
- // witness sizes query
- witnessSize := make(map[common.Hash]uint64, len(seen))
- for witnessBlockHash := range seen {
- size := rawdb.ReadWitnessSize(h.Chain().DB(), witnessBlockHash)
- if size == nil {
- witnessSize[witnessBlockHash] = 0
- } else {
- witnessSize[witnessBlockHash] = *size
- }
- }
+ witnessSize, prefetchedWitnesses := h.resolveWitnessSizes(seen)
// query witnesses by demand
var response wit.WitnessPacketResponse
@@ -150,6 +175,11 @@ func (h *witHandler) handleGetWitness(peer *wit.Peer, req *wit.GetWitnessPacket)
var witnessBytes []byte
if cachedRLPBytes, exists := witnessCache[witnessPage.Hash]; exists {
witnessBytes = cachedRLPBytes
+ } else if prefetched, exists := prefetchedWitnesses[witnessPage.Hash]; exists {
+ // Use the witness already fetched during the size check (avoids double wait)
+ witnessBytes = prefetched
+ witnessCache[witnessPage.Hash] = prefetched
+ totalCached += len(prefetched)
} else {
// Use GetWitness to benefit from the blockchain's witness cache
queriedBytes := h.Chain().GetWitness(witnessPage.Hash)
diff --git a/eth/peer.go b/eth/peer.go
index 3612db28a8..a2838439ab 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -744,47 +744,56 @@ func (p *ethPeer) doWitnessRequest(
<-witReqSem
return err
}
-
witReqsWg.Add(1)
-
- go func() {
- var witRes *wit.Response
- select {
- case witRes = <-witResCh:
- case <-cancel:
- witReqsWg.Done()
- <-witReqSem
- return
- }
-
- // Unblock the wit dispatcher now that we've received the response.
- // Select with cancel to avoid blocking if Done is unbuffered and
- // the dispatcher has already exited.
- if witRes != nil && witRes.Done != nil {
- select {
- case witRes.Done <- nil:
- case <-cancel:
- witReqsWg.Done()
- <-witReqSem
- return
- }
- }
-
- select {
- case witReqResCh <- &witReqRes{Request: request, Response: witRes}:
- case <-cancel:
- witReqsWg.Done()
- <-witReqSem
- }
- }()
+ go awaitWitnessResponse(request, witResCh, witReqResCh, witReqsWg, witReqSem, cancel)
mapsMu.Lock()
*witReqs = append(*witReqs, witReq)
-
if page >= witTotalRequest[hash] {
witTotalRequest[hash]++
}
mapsMu.Unlock()
-
return nil
}
+
+// awaitWitnessResponse is the body of the per-request goroutine spawned for
+// each outstanding witness fetch. It blocks until the peer answers (or
+// cancel fires), unblocks the wit dispatcher via witRes.Done, and hands the
+// result to witReqResCh. Whenever cancel wins one of the three waits, the
+// waitgroup and semaphore are released here so the caller cannot wedge;
+// after a successful delivery that release belongs to whoever consumes
+// witReqResCh.
+func awaitWitnessResponse(
+	request []wit.WitnessPageRequest,
+	witResCh <-chan *wit.Response,
+	witReqResCh chan *witReqRes,
+	witReqsWg *sync.WaitGroup,
+	witReqSem chan int,
+	cancel <-chan struct{},
+) {
+	abort := func() {
+		witReqsWg.Done()
+		<-witReqSem
+	}
+	var res *wit.Response
+	select {
+	case res = <-witResCh:
+	case <-cancel:
+		abort()
+		return
+	}
+	// Let the wit dispatcher move on now that the response arrived. Done may
+	// be unbuffered and the dispatcher may already have exited, so the send
+	// is always paired with cancel.
+	if res != nil && res.Done != nil {
+		select {
+		case res.Done <- nil:
+		case <-cancel:
+			abort()
+			return
+		}
+	}
+	select {
+	case witReqResCh <- &witReqRes{Request: request, Response: res}:
+	case <-cancel:
+		abort()
+	}
+}
diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go
index bc4e168cde..2b6a740a87 100644
--- a/internal/cli/server/config.go
+++ b/internal/cli/server/config.go
@@ -175,6 +175,9 @@ type Config struct {
// Relay has transaction relay related settings
Relay *RelayConfig `hcl:"relay,block" toml:"relay,block"`
+
+ // Pipeline has pipelined SRC settings for block import
+ Pipeline *PipelineConfig `hcl:"pipeline,block" toml:"pipeline,block"`
}
type HistoryConfig struct {
@@ -449,6 +452,12 @@ type SealerConfig struct {
// PrefetchGasLimitPercent is the gas limit percentage for prefetching (e.g., 100 = 100%, 110 = 110%)
PrefetchGasLimitPercent uint64 `hcl:"prefetch-gaslimit-percent,optional" toml:"prefetch-gaslimit-percent,optional"`
+
+ // EnablePipelinedSRC enables pipelined state root computation: overlap SRC(N) with block N+1 tx execution
+ EnablePipelinedSRC bool `hcl:"pipelined-src,optional" toml:"pipelined-src,optional"`
+
+ // PipelinedSRCLogs enables verbose logging for pipelined SRC
+ PipelinedSRCLogs bool `hcl:"pipelined-src-logs,optional" toml:"pipelined-src-logs,optional"`
}
type JsonRPCConfig struct {
@@ -807,6 +816,21 @@ type RelayConfig struct {
BlockProducerRpcEndpoints []string `hcl:"bp-rpc-endpoints,optional" toml:"bp-rpc-endpoints,optional"`
}
+// PipelineConfig has settings for pipelined state root computation during block import.
+// Defaults (see DefaultConfig): EnableImportSRC and ImportSRCLogs are true,
+// WarmSnapshot is false.
+type PipelineConfig struct {
+	// EnableImportSRC enables pipelined state root computation during block import:
+	// overlap SRC(N) with tx execution of block N+1
+	EnableImportSRC bool `hcl:"enable-import-src,optional" toml:"enable-import-src,optional"`
+
+	// ImportSRCLogs enables verbose logging for pipelined import SRC
+	ImportSRCLogs bool `hcl:"import-src-logs,optional" toml:"import-src-logs,optional"`
+
+	// WarmSnapshot enables warm-cache handoff from the execution-side trie
+	// prefetcher to the pipelined SRC goroutine. Targets cold-cache
+	// restart/catch-up CPU; no effect on correctness or witness completeness.
+	WarmSnapshot bool `hcl:"warm-snapshot,optional" toml:"warm-snapshot,optional"`
+}
+
func DefaultConfig() *Config {
return &Config{
Chain: "mainnet",
@@ -906,6 +930,8 @@ func DefaultConfig() *Config {
PrefetchGasLimitPercent: 100,
TargetGasPercentage: 0, // Initialize to 0, will be set from CLI or remain 0 (meaning use default)
BaseFeeChangeDenominator: 0, // Initialize to 0, will be set from CLI or remain 0 (meaning use default)
+ EnablePipelinedSRC: true,
+ PipelinedSRCLogs: true,
},
Gpo: &GpoConfig{
Blocks: 20,
@@ -1067,6 +1093,10 @@ func DefaultConfig() *Config {
EnablePrivateTx: false,
BlockProducerRpcEndpoints: []string{},
},
+ Pipeline: &PipelineConfig{
+ EnableImportSRC: true,
+ ImportSRCLogs: true,
+ },
}
}
@@ -1277,6 +1307,8 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (*
n.Miner.BlockTime = c.Sealer.BlockTime
n.Miner.EnablePrefetch = c.Sealer.EnablePrefetch
n.Miner.PrefetchGasLimitPercent = c.Sealer.PrefetchGasLimitPercent
+ n.Miner.EnablePipelinedSRC = c.Sealer.EnablePipelinedSRC
+ n.Miner.PipelinedSRCLogs = c.Sealer.PipelinedSRCLogs
// Validate prefetch gas limit percentage
if c.Sealer.EnablePrefetch && c.Sealer.PrefetchGasLimitPercent > 150 {
@@ -1548,6 +1580,9 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (*
n.TrieDirtyCache = calcPerc(c.Cache.PercGc)
n.NoPrefetch = c.Cache.NoPrefetch
n.Preimages = c.Cache.Preimages
+ n.EnablePipelinedImportSRC = c.Pipeline.EnableImportSRC
+ n.PipelinedImportSRCLogs = c.Pipeline.ImportSRCLogs
+ n.PipelinedSRCWarmSnapshot = c.Pipeline.WarmSnapshot
// Note that even the values set by `history.transactions` will be written in the old flag until it's removed.
n.TransactionHistory = c.Cache.TxLookupLimit
n.TrieTimeout = c.Cache.TrieTimeout
diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go
index 270d9f6bde..192ec47cd9 100644
--- a/internal/cli/server/flags.go
+++ b/internal/cli/server/flags.go
@@ -438,6 +438,20 @@ func (c *Command) Flags(config *Config) *flagset.Flagset {
Default: c.cliConfig.Sealer.PrefetchGasLimitPercent,
Group: "Sealer",
})
+ f.BoolFlag(&flagset.BoolFlag{
+ Name: "miner.pipelined-src",
+ Usage: "Enable pipelined state root computation: overlap SRC(N) with block N+1 tx execution",
+ Value: &c.cliConfig.Sealer.EnablePipelinedSRC,
+ Default: c.cliConfig.Sealer.EnablePipelinedSRC,
+ Group: "Sealer",
+ })
+ f.BoolFlag(&flagset.BoolFlag{
+ Name: "miner.pipelined-src-logs",
+ Usage: "Enable verbose logging for pipelined SRC (spawned SRC, SRC completed, block sealed, etc.)",
+ Value: &c.cliConfig.Sealer.PipelinedSRCLogs,
+ Default: c.cliConfig.Sealer.PipelinedSRCLogs,
+ Group: "Sealer",
+ })
f.BoolFlag(&flagset.BoolFlag{
Name: "miner.enableDynamicGasLimit",
Usage: "Enable dynamic gas limit adjustment based on base fee",
@@ -640,6 +654,27 @@ func (c *Command) Flags(config *Config) *flagset.Flagset {
Default: c.cliConfig.Cache.TxLookupLimit,
Group: "Cache",
})
+ f.BoolFlag(&flagset.BoolFlag{
+ Name: "pipeline.enable-import-src",
+ Usage: "Enable pipelined state root computation during block import: overlap SRC(N) with block N+1 tx execution",
+ Value: &c.cliConfig.Pipeline.EnableImportSRC,
+ Default: c.cliConfig.Pipeline.EnableImportSRC,
+ Group: "Pipeline",
+ })
+ f.BoolFlag(&flagset.BoolFlag{
+ Name: "pipeline.import-src-logs",
+ Usage: "Enable verbose logging for pipelined import SRC",
+ Value: &c.cliConfig.Pipeline.ImportSRCLogs,
+ Default: c.cliConfig.Pipeline.ImportSRCLogs,
+ Group: "Pipeline",
+ })
+ f.BoolFlag(&flagset.BoolFlag{
+ Name: "pipeline.warm-snapshot",
+ Usage: "Enable warm-cache handoff from execution prefetcher to pipelined SRC (default false)",
+ Value: &c.cliConfig.Pipeline.WarmSnapshot,
+ Default: c.cliConfig.Pipeline.WarmSnapshot,
+ Group: "Pipeline",
+ })
f.IntFlag(&flagset.IntFlag{
Name: "fdlimit",
Usage: "Raise the open file descriptor resource limit (default = system fd limit)",
diff --git a/miner/fake_miner.go b/miner/fake_miner.go
index 4954e7952a..d0ee912aa4 100644
--- a/miner/fake_miner.go
+++ b/miner/fake_miner.go
@@ -261,7 +261,7 @@ func (bc *testBlockChainBor) StateAt(common.Hash) (*state.StateDB, error) {
return bc.statedb, nil
}
-func (bc *testBlockChainBor) PostExecutionStateAt(*types.Header) (*state.StateDB, error) {
+func (bc *testBlockChainBor) PostExecState(header *types.Header) (*state.StateDB, error) {
return bc.statedb, nil
}
diff --git a/miner/miner.go b/miner/miner.go
index a919717cc2..09aabdd8fc 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -67,6 +67,8 @@ type Config struct {
PendingFeeRecipient common.Address `toml:"-"` // Address for pending block rewards.
EnablePrefetch bool // Enable transaction prefetching from pool during block building
PrefetchGasLimitPercent uint64 // Gas limit percentage for prefetching (e.g., 100 = 100%, 110 = 110%)
+ EnablePipelinedSRC bool // Enable pipelined state root computation: overlap SRC(N) with block N+1 tx execution
+ PipelinedSRCLogs bool // Enable verbose logging for pipelined SRC (spawned SRC, SRC completed, block sealed, etc.)
}
// DefaultConfig contains default settings for miner.
diff --git a/miner/pipeline.go b/miner/pipeline.go
new file mode 100644
index 0000000000..864909b497
--- /dev/null
+++ b/miner/pipeline.go
@@ -0,0 +1,1192 @@
+package miner
+
+import (
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "math/big"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/bor"
+ "github.com/ethereum/go-ethereum/consensus/misc/eip1559"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+// Pipelined SRC metrics
+var (
+	pipelineSpeculativeBlocksCounter    = metrics.NewRegisteredCounter("worker/pipelineSpeculativeBlocks", nil)
+	pipelineSpeculativeAbortsCounter    = metrics.NewRegisteredCounter("worker/pipelineSpeculativeAborts", nil)
+	pipelineEIP2935AbortsCounter        = metrics.NewRegisteredCounter("worker/pipelineEIP2935Aborts", nil)
+	pipelineSRCTimer                    = metrics.NewRegisteredTimer("worker/pipelineSRCTime", nil)
+	pipelineFlatDiffExtractTimer        = metrics.NewRegisteredTimer("worker/pipelineFlatDiffExtractTime", nil)
+	pipelineSpeculativeCommittedCounter = metrics.NewRegisteredCounter("worker/pipelineSpeculativeCommitted", nil) // speculative block broadcast as the real next block — success signal
+	pipelineSRCWaitTimer                = metrics.NewRegisteredTimer("worker/pipelineSRCWait", nil)                // time blocked on WaitForSRC (ideally near-zero — means SRC finished before the caller arrived)
+	pipelineSealDurationTimer           = metrics.NewRegisteredTimer("worker/pipelineSealDuration", nil)           // engine.Seal latency in the inline path
+	// Per-cause abort counters — each increments alongside the aggregate pipelineSpeculativeAbortsCounter.
+	pipelineAbortBlockhashCounter = metrics.NewRegisteredCounter("worker/pipelineSpeculativeAborts/blockhash", nil) // BLOCKHASH(N) was read during speculative N+1
+	pipelineAbortSRCFailedCounter = metrics.NewRegisteredCounter("worker/pipelineSpeculativeAborts/src_failed", nil) // WaitForSRC returned an error
+	pipelineAbortFallbackCounter  = metrics.NewRegisteredCounter("worker/pipelineSpeculativeAborts/fallback", nil)  // fallbackToSequential entered
+	// Announce earliness histogram (ms). Positive = announced before header.Time (PIP-66 working). Negative = announced late.
+	pipelineAnnounceEarlinessMs = metrics.NewRegisteredHistogram("worker/pipelineAnnounceEarlinessMs", nil, metrics.NewExpDecaySample(1028, 0.015))
+	// Mode gauge — 1 when pipelined SRC block-building is enabled on this node, 0 otherwise.
+	// Pair with chain/imports/pipelined/enabled on dashboards to distinguish "metric is
+	// zero because pipelining is off" from "metric is zero because the code path bypassed it".
+	pipelineBuildEnabledGauge = metrics.NewRegisteredGauge("worker/pipeline/enabled", nil)
+)
+
+// speculativeEmptyRefillLead is how long before the slot boundary
+// fillSpeculativeTransactions takes its late txpool re-snapshot. It must
+// stay small enough that the refill can finish before header.Time —
+// NOTE(review): 300ms looks tuned empirically; confirm against slot timing.
+const speculativeEmptyRefillLead = 300 * time.Millisecond
+
+// Refill speculative blocks that are still less than 75% full after the first
+// txpool snapshot. This catches the common case where the early snapshot grabs
+// a small trickle of txs, but the load ramps up before the slot boundary.
+const speculativeLowFillRemainingGasDivisor = 4
+
+// speculativeWorkReq is sent to mainLoop's speculative work channel
+// when block N's execution is done and we want to speculatively start N+1.
+// Producer: commitPipelined (via buildSpeculativeReq); consumer:
+// commitSpeculativeWork.
+type speculativeWorkReq struct {
+	parentHeader  *types.Header          // block N's header (complete except Root)
+	flatDiff      *state.FlatDiff        // block N's state mutations
+	parentRoot    common.Hash            // root_{N-1} (last committed trie root)
+	blockNEnv     *environment           // block N's execution environment (for assembly later)
+	stateSyncData []*types.StateSyncData // from FinalizeForPipeline
+}
+
+// placeholderParentHash derives a deterministic stand-in ParentHash for
+// speculative headers built while block N is still unsealed. The sha256
+// domain prefix keeps the result from colliding with any real block hash.
+func placeholderParentHash(blockNumber uint64) common.Hash {
+	numBytes := new(big.Int).SetUint64(blockNumber).Bytes()
+	preimage := make([]byte, 0, len("pipelined-src-placeholder:")+len(numBytes))
+	preimage = append(preimage, "pipelined-src-placeholder:"...)
+	preimage = append(preimage, numBytes...)
+	return sha256.Sum256(preimage)
+}
+
+// isPipelineEligible reports whether the block after currentBlockNumber may
+// be built through the pipelined-SRC path. It refuses when the feature is
+// disabled, the chain is not a bor chain with sprints configured, the worker
+// is stopped or syncing, or the next block is pre-Rio.
+func (w *worker) isPipelineEligible(currentBlockNumber uint64) bool {
+	switch {
+	case !w.config.EnablePipelinedSRC:
+		return false
+	case w.chainConfig.Bor == nil, len(w.chainConfig.Bor.Sprint) == 0:
+		return false
+	case !w.IsRunning() || w.syncing.Load():
+		return false
+	}
+	// Pre-Rio: the speculative chain reader provides block N's unsigned
+	// header, so snapshot()'s ecrecover() sees an all-zero Extra seal
+	// (Seal() hasn't run yet) and speculative Prepare always fails with
+	// "recovery failed", making the pipeline useless. Skip it entirely.
+	return w.chainConfig.Bor.IsRio(new(big.Int).SetUint64(currentBlockNumber + 1))
+}
+
+// commitPipelined is the pipelined version of commit(). Instead of calling
+// FinalizeAndAssemble (which blocks on IntermediateRoot), it:
+//  1. Calls FinalizeForPipeline (state sync, span commits — no IntermediateRoot)
+//  2. Extracts FlatDiff
+//  3. Sends a speculativeWorkReq to start N+1 execution
+//  4. Returns immediately — the SRC goroutine is spawned by commitSpeculativeWork
+//     after confirming the speculative Prepare() succeeds. This avoids a trie DB
+//     race between the SRC goroutine and the fallback path's inline commit.
+//
+// The start parameter is currently unused here — NOTE(review): presumably
+// kept for signature parity with commit(); confirm before removing.
+func (w *worker) commitPipelined(env *environment, start time.Time) error {
+	if !w.IsRunning() {
+		return nil
+	}
+
+	// Work on a copy so the caller's environment is left untouched.
+	env = env.copy()
+
+	borEngine, ok := w.engine.(*bor.Bor)
+	if !ok {
+		// Soft failure: pipelining simply isn't possible with this engine.
+		log.Error("Pipelined SRC: engine is not Bor")
+		return nil
+	}
+
+	// Phase 1: Finalize (state sync, span commits) without IntermediateRoot.
+	stateSyncData, err := borEngine.FinalizeForPipeline(w.chain, env.header, env.state, &types.Body{
+		Transactions: env.txs,
+	}, env.receipts)
+	if err != nil {
+		log.Error("Pipelined SRC: FinalizeForPipeline failed", "err", err)
+		return err
+	}
+
+	// Phase 2: Extract FlatDiff, record mode-visible side-effects, build the
+	// speculative work request. The SRC goroutine is NOT spawned here —
+	// commitSpeculativeWork spawns it after confirming Prepare() succeeds.
+	req, ok := w.buildSpeculativeReq(env, stateSyncData)
+	if !ok {
+		return nil
+	}
+
+	// Phase 3: Hand off to mainLoop's speculative path. On shutdown the
+	// request is deliberately dropped rather than blocking forever.
+	select {
+	case w.speculativeWorkCh <- req:
+	case <-w.exitCh:
+	}
+	return nil
+}
+
+// buildSpeculativeReq extracts block N's FlatDiff, resolves the committed
+// parent root, and composes the speculativeWorkReq for block N+1.
+// Returns ok=false only when the parent header cannot be located (treated as
+// a soft failure — the caller skips pipelining rather than returning an error,
+// matching the pre-refactor behavior).
+func (w *worker) buildSpeculativeReq(env *environment, stateSyncData []*types.StateSyncData) (*speculativeWorkReq, bool) {
+	// NOTE(review): the IsEIP158 flag presumably controls empty-account
+	// deletion in the snapshot commit, mirroring Finalize — confirm against
+	// CommitSnapshot.
+	flatDiffStart := time.Now()
+	flatDiff := env.state.CommitSnapshot(w.chainConfig.IsEIP158(env.header.Number))
+	pipelineFlatDiffExtractTimer.Update(time.Since(flatDiffStart))
+
+	parent := w.chain.GetHeader(env.header.ParentHash, env.header.Number.Uint64()-1)
+	if parent == nil {
+		log.Error("Pipelined SRC: parent not found", "parentHash", env.header.ParentHash)
+		return nil, false
+	}
+
+	w.chain.SetLastFlatDiff(flatDiff, env.header.Number.Uint64(), parent.Root, common.Hash{})
+	// Counts block N as "entering the pipeline." If Prepare() fails and
+	// fallbackToSequential produces the block inline, this counter is slightly
+	// inflated — block was produced sequentially, not speculatively.
+	pipelineSpeculativeBlocksCounter.Inc(1)
+
+	return &speculativeWorkReq{
+		parentHeader:  env.header,
+		flatDiff:      flatDiff,
+		parentRoot:    parent.Root,
+		blockNEnv:     env,
+		stateSyncData: stateSyncData,
+	}, true
+}
+
+// spawnSRCForFinalBlock spawns the background SRC goroutine and publishes
+// the FlatDiff for the last block of a pipeline run, when spawnSRC is set
+// (used by sealBlockViaTaskCh). With spawnSRC false it is a no-op.
+func (w *worker) spawnSRCForFinalBlock(finalHeader *types.Header, rootN common.Hash, flatDiff *state.FlatDiff, spawnSRC bool) {
+	if !spawnSRC {
+		return
+	}
+	headerOnlyBlock := types.NewBlockWithHeader(finalHeader)
+	// The miner pipeline always produces witnesses today: allowOwnWitness=true
+	// lets SRC build its own witness when the caller hands in none. The warm
+	// snapshot argument is nil because this path never captures the
+	// prefetcher snapshot — SRC falls back to the plain pathdb reader chain.
+	w.chain.SpawnSRCGoroutine(headerOnlyBlock, rootN, flatDiff, true, nil, true, nil)
+	w.chain.SetLastFlatDiff(flatDiff, finalHeader.Number.Uint64(), rootN, common.Hash{})
+}
+
+// shouldLateRefillSpeculativeBlock reports whether a speculative block is
+// worth one more txpool snapshot shortly before the slot boundary: it is
+// when the block is empty, lacks a gas pool, or is still less than ~75%
+// full (remaining gas above GasLimit/speculativeLowFillRemainingGasDivisor).
+func shouldLateRefillSpeculativeBlock(env *environment) bool {
+	// Short-circuit order matters: the gasPool nil-check must run before
+	// gasPool.Gas() is touched.
+	return len(env.txs) == 0 ||
+		env.gasPool == nil ||
+		env.gasPool.Gas() > env.header.GasLimit/speculativeLowFillRemainingGasDivisor
+}
+
+// fillSpeculativeTransactions snapshots the txpool once immediately and, if
+// the speculative block is still underfilled, sleeps until
+// speculativeEmptyRefillLead before the slot boundary and snapshots once
+// more. The returned duration counts only time spent filling — the wait for
+// the refill window is deliberately excluded.
+func (w *worker) fillSpeculativeTransactions(env *environment, interrupt *atomic.Int32) time.Duration {
+	firstStart := time.Now()
+	err := w.fillTransactions(interrupt, env)
+	total := time.Since(firstStart)
+
+	if err != nil || !shouldLateRefillSpeculativeBlock(env) {
+		return total
+	}
+
+	// Only refill when there is enough runway left before header time.
+	lead := time.Until(env.header.GetActualTime()) - speculativeEmptyRefillLead
+	if lead <= 0 {
+		return total
+	}
+
+	wait := time.NewTimer(lead)
+	defer wait.Stop()
+
+	select {
+	case <-wait.C:
+	case <-w.exitCh:
+		return total
+	}
+
+	refillStart := time.Now()
+	_ = w.fillTransactions(interrupt, env)
+	return total + time.Since(refillStart)
+}
+
+// commitSpeculativeWork handles a speculativeWorkReq: executes block N+1
+// speculatively using the FlatDiff overlay, then waits for SRC(N) to complete,
+// assembles block N, and sends it for sealing. Then it finalizes N+1 and
+// seals it as well.
+//
+// Returns true when mainLoop should requeue normal work after this function
+// returns. This is needed for:
+//   - Abort (EIP-2935/BLOCKHASH): the speculative block was discarded, so the
+//     block slot must be rebuilt sequentially.
+//   - Normal pipeline exit: the last block was sent to sealBlockViaTaskCh, and
+//     there is a race where ChainHeadEvent may arrive at newWorkLoop before
+//     pendingWorkBlock is cleared, causing the event to be skipped.
+//
+// Returns false when the pipeline fell back to sequential (fallbackToSequential
+// already sealed block N via taskCh → resultLoop → ChainHeadEvent). Retrying
+// work in this case creates a tight loop that keeps restarting Seal() with
+// fresh timestamps, preventing any block from ever being sealed.
+func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) (shouldRetry bool, abortRecovery bool) {
+	// Default: retry commitWork after this function returns. Fallback paths
+	// set shouldRetry = false because they already sealed block N via taskCh
+	// (resultLoop handles it).
+	shouldRetry = true
+	// Ensure pendingWorkBlock is cleared on every exit path.
+	defer w.pendingWorkBlock.Store(0)
+
+	s := newSpecSession(w, req)
+	if !s.setupInitial() {
+		return false, false
+	}
+	// The fill goroutine CLOSES initialFillDone, so this deferred receive is
+	// safe even after the explicit receive below — receives on a closed
+	// channel never block. It only registers after setupInitial succeeds,
+	// i.e. when the channel actually exists.
+	defer func() { <-s.initialFillDone }()
+
+	if !s.waitForSRCAndSealBlockN() {
+		return s.exitDuringBlockN, false
+	}
+	<-s.initialFillDone
+
+	for {
+		switch s.runOneIteration() {
+		case iterContinue:
+			continue
+		case iterBreakAbort:
+			abortRecovery = true
+		case iterExitEarly:
+			return false, false
+		}
+		// iterBreak (and iterBreakAbort after setting the flag) falls
+		// through here and ends the loop.
+		break
+	}
+	if s.prevDBWriteDone != nil {
+		<-s.prevDBWriteDone
+	}
+	return shouldRetry, abortRecovery
+}
+
+// iterResult enumerates how a single pipeline iteration ends. Note that
+// iterBreak has no explicit case in commitSpeculativeWork's switch — it
+// falls through to the loop-terminating break.
+type iterResult int
+
+const (
+	iterContinue   iterResult = iota // shifted to the next block; keep looping
+	iterBreak                        // normal exit (error, last block sealed via taskCh)
+	iterBreakAbort                   // speculative block was discarded (abortRecovery=true)
+	iterExitEarly                    // w.exitCh fired mid-iteration; caller returns shouldRetry=false
+)
+
+// runOneIteration finalizes the current speculative block, prepares the next
+// one, seals the current block, and shifts state. Each return value tells
+// commitSpeculativeWork how to proceed; see iterResult.
+//
+// NOTE(review): checkCurrentAbort, drainPrevDBWrite, finalizeCurrent,
+// prepareNextIteration, sealCurrentAndAdvance and shiftToNext are defined
+// elsewhere in this file — their contracts are inferred from usage here.
+func (s *specSession) runOneIteration() iterResult {
+	if s.checkCurrentAbort() {
+		return iterBreakAbort
+	}
+	s.drainPrevDBWrite()
+
+	finalSpecHeader, flatDiff, stateSyncData, ok := s.finalizeCurrent()
+	if !ok {
+		return iterBreak
+	}
+	// Last block in pipeline (eligibility failed) → seal via taskCh so
+	// resultLoop emits ChainHeadEvent and normal production resumes.
+	if !s.w.isPipelineEligible(s.nextBlockNumber) || !s.w.IsRunning() {
+		s.w.sealBlockViaTaskCh(s.borEngine, finalSpecHeader, s.specState, s.specEnv.txs,
+			s.specEnv.receipts, stateSyncData, s.rootN, flatDiff, true, s.curBuildStart)
+		return iterBreak
+	}
+
+	next, cont := s.prepareNextIteration(finalSpecHeader, flatDiff, stateSyncData)
+	if !cont {
+		return iterBreak
+	}
+	// exitEarly takes precedence over ok: a shutdown mid-seal must not be
+	// reported as an ordinary failure.
+	sealed, exitEarly, ok := s.sealCurrentAndAdvance(finalSpecHeader, stateSyncData, next)
+	if exitEarly {
+		return iterExitEarly
+	}
+	if !ok {
+		return iterBreak
+	}
+	s.shiftToNext(sealed, next)
+	return iterContinue
+}
+
+// specSession holds the rotating per-invocation state of commitSpeculativeWork.
+// It exists so the orchestrator can decompose the 600-line original into
+// focused methods that share state through the receiver — avoiding 15-parameter
+// helper signatures. Fields are mutated through shiftToNext() as each
+// speculative block is sealed. A session lives for exactly one
+// commitSpeculativeWork call and is not safe for concurrent use except where
+// noted on individual fields.
+type specSession struct {
+	w         *worker
+	req       *speculativeWorkReq
+	borEngine *bor.Bor
+
+	blockNHeader    *types.Header
+	blockNNumber    uint64
+	nextBlockNumber uint64
+
+	// Current speculative block state (rotates each iteration).
+	specHeader        *types.Header
+	specState         *state.StateDB
+	specEnv           *environment
+	coinbase          common.Address
+	blockhashAccessed *atomic.Bool // set true if speculative block read BLOCKHASH(N); shared with the EVM via SpeculativeGetHashFn
+	eip2935Abort      bool         // set by initial-fill goroutine (for first iteration); only safe to read after <-initialFillDone
+	curBuildStart     time.Time    // wall clock when this block's fill began
+
+	// Updated as blocks are sealed.
+	realBlockNHash   common.Hash
+	rootN            common.Hash
+	lastSealedHeader *types.Header
+
+	// Iteration coordination.
+	initialFillDone  chan struct{} // closed by the initial fill goroutine
+	prevDBWriteDone  chan struct{}
+	exitDuringBlockN bool
+}
+
+// specNextIteration bundles everything prepareNextIteration allocates for the
+// next speculative block, so sealCurrentAndAdvance/shiftToNext can consume it
+// without 10-parameter helper signatures. It is a plain data carrier; the
+// fill goroutine's writes (fillElapsed, eip2935AbortPtr) are only safe to
+// read after <-fillDone.
+type specNextIteration struct {
+	specHeaderNext    *types.Header
+	specStateNext     *state.StateDB
+	specEnvNext       *environment
+	coinbaseNext      common.Address
+	blockhashAccessed *atomic.Bool // *atomic.Bool for the next block
+	eip2935AbortPtr   *bool        // set true by fill goroutine if EIP-2935 slot read
+	nextBuildStart    time.Time
+	fillDone          chan struct{}
+	fillElapsed       *time.Duration // pointer so goroutine writes are visible after <-fillDone
+	srcSpawnTime      time.Time
+}
+
+// newSpecSession seeds a specSession from the speculative work request:
+// block N's header, the derived block numbers, and the build start time.
+func newSpecSession(w *worker, req *speculativeWorkReq) *specSession {
+	n := req.parentHeader.Number.Uint64()
+	session := &specSession{
+		w:               w,
+		req:             req,
+		blockNHeader:    req.parentHeader,
+		blockNNumber:    n,
+		nextBlockNumber: n + 1,
+		curBuildStart:   time.Now(),
+	}
+	return session
+}
+
+// setupInitial builds the first speculative environment (N+1), runs Prepare,
+// spawns SRC for block N, and starts the initial fill goroutine. Returns
+// false if Prepare fails or state cannot be opened — in both cases the
+// function has already called fallbackToSequential and the caller should
+// return shouldRetry=false.
+func (s *specSession) setupInitial() bool {
+	log.Debug("Pipelined SRC: starting speculative execution", "speculativeBlock", s.nextBlockNumber, "parent", s.blockNNumber)
+
+	borEngine, ok := s.w.engine.(*bor.Bor)
+	if !ok {
+		log.Error("Pipelined SRC: engine is not Bor")
+		return false
+	}
+	s.borEngine = borEngine
+
+	specReader, specContext, specHeader, coinbase := s.buildInitialSpecHeader()
+	if err := s.w.engine.Prepare(specReader, specHeader); err != nil {
+		log.Warn("Pipelined SRC: speculative Prepare failed, falling back", "err", err)
+		s.w.fallbackToSequential(s.req)
+		return false
+	}
+	s.specHeader = specHeader
+	s.coinbase = coinbase
+
+	// Prepare() succeeded — spawn the background SRC goroutine for block N.
+	// Done AFTER Prepare to avoid a trie DB race with fallbackToSequential's
+	// inline FinalizeAndAssemble on the same parent root.
+	tmpBlock := types.NewBlockWithHeader(s.req.parentHeader)
+	// Miner pipeline always produces witnesses for now. allowOwnWitness=true
+	// explicitly permits SRC to create its own witness when no execution
+	// witness is handed in by the caller. nil warmSnapshot — the miner-side
+	// path does not currently capture the prefetcher snapshot; SRC falls
+	// back to the plain pathdb reader chain.
+	s.w.chain.SpawnSRCGoroutine(tmpBlock, s.req.parentRoot, s.req.flatDiff, true, nil, true, nil)
+
+	specState, err := s.w.chain.StateAtWithFlatDiff(s.req.parentRoot, s.req.flatDiff)
+	if err != nil {
+		log.Error("Pipelined SRC: failed to open speculative state", "err", err)
+		// Drain the SRC goroutine spawned above before falling back —
+		// fallbackToSequential commits inline against the same trie.
+		s.w.chain.WaitForSRC() //nolint:errcheck
+		s.w.fallbackToSequential(s.req)
+		return false
+	}
+	specState.StartPrefetcher("miner-speculative", nil, nil)
+	s.specState = specState
+
+	blockN1Header := s.w.chain.GetHeader(s.blockNHeader.ParentHash, s.blockNNumber-1)
+	if blockN1Header == nil {
+		log.Error("Pipelined SRC: grandparent header not found")
+		// Same drain-before-fallback ordering as the StateAtWithFlatDiff
+		// failure path above.
+		s.w.chain.WaitForSRC() //nolint:errcheck
+		s.w.fallbackToSequential(s.req)
+		return false
+	}
+
+	var blockhashAccessed atomic.Bool
+	s.blockhashAccessed = &blockhashAccessed
+	s.specEnv = s.buildSpecEnv(specHeader, specState, coinbase, specContext, blockN1Header, s.blockNNumber, s.newBlockNHashResolver())
+	s.resetTxPoolState(s.blockNHeader, s.req.parentRoot, s.req.flatDiff)
+	s.startInitialFillGoroutine()
+	return true
+}
+
+// buildInitialSpecHeader constructs the header for speculative execution of
+// block N+1 while block N is still being sealed. It intentionally does NOT
+// reuse makeHeader because the inputs diverge fundamentally: the parent is a
+// placeholder hash (block N not sealed yet), the timestamp is deterministic
+// (blockN.Time + bor period — no genParams / user input), and the gas limit
+// uses config.GasCeil directly (no dynamic base-fee adjustment). Note that
+// engine.Prepare is not run inside this helper either — the caller
+// (setupInitial) invokes it immediately afterwards against the returned
+// speculative chain reader and falls back to sequential production if it
+// fails. The overlap with makeHeader is limited to coinbase resolution —
+// unified via resolveCoinbase so both headers pick the same address and
+// don't diverge on state root.
+func (s *specSession) buildInitialSpecHeader() (*speculativeChainReader, *speculativeChainContext, *types.Header, common.Address) {
+	placeholder := placeholderParentHash(s.blockNNumber)
+	specReader := newSpeculativeChainReader(s.w.chain, s.blockNHeader, placeholder)
+	specContext := newSpeculativeChainContext(specReader, s.w.engine)
+	coinbase := s.w.resolveCoinbase(s.nextBlockNumber, s.w.etherbase())
+	specHeader := &types.Header{
+		ParentHash: placeholder,
+		Number:     new(big.Int).SetUint64(s.nextBlockNumber),
+		GasLimit:   core.CalcGasLimit(s.blockNHeader.GasLimit, s.w.config.GasCeil),
+		Time:       s.blockNHeader.Time + s.w.chainConfig.Bor.CalculatePeriod(s.nextBlockNumber),
+		Coinbase:   coinbase,
+	}
+	if s.w.chainConfig.IsLondon(specHeader.Number) {
+		specHeader.BaseFee = eip1559.CalcBaseFee(s.w.chainConfig, s.blockNHeader)
+	}
+	return specReader, specContext, specHeader, coinbase
+}
+
+// resolveCoinbase matches the importer's NewEVMBlockContext(header, chain, nil)
+// logic: post-Rio uses BorConfig.CalculateCoinbase, otherwise the caller-provided
+// fallback (genParams.coinbase for makeHeader, etherbase for the speculative
+// path). Unifying this ensures the speculative header and the later real header
+// resolve coinbase identically — a mismatch would cause a state root divergence.
+func (w *worker) resolveCoinbase(blockNumber uint64, fallback common.Address) common.Address {
+ var coinbase common.Address
+ if w.chainConfig.Bor != nil && w.chainConfig.Bor.IsRio(new(big.Int).SetUint64(blockNumber)) {
+ coinbase = common.HexToAddress(w.chainConfig.Bor.CalculateCoinbase(blockNumber))
+ }
+ if coinbase == (common.Address{}) {
+ coinbase = fallback
+ }
+ return coinbase
+}
+
+// newBlockNHashResolver returns a lazy resolver for block N's signed hash used
+// by SpeculativeGetHashFn. Block N's hash isn't known until SRC completes
+// because it depends on the state root — if a speculative tx calls BLOCKHASH(N)
+// we wait for SRC, compute the pre-seal hash, and the hashAccessed flag on the
+// outer speculative block triggers a discard (pre-seal hash ≠ final on-chain).
+// The resolver is safe for concurrent callers (mutex-guarded) and memoizes
+// only on success.
+func (s *specSession) newBlockNHashResolver() func() common.Hash {
+	var (
+		hash     common.Hash
+		resolved bool
+		mu       sync.Mutex
+	)
+	blockNHeader := s.blockNHeader
+	return func() common.Hash {
+		mu.Lock()
+		defer mu.Unlock()
+		if resolved {
+			return hash
+		}
+		root, _, err := s.w.chain.WaitForSRC()
+		if err != nil {
+			log.Error("Pipelined SRC: SRC failed during BLOCKHASH resolution", "err", err)
+			// resolved stays false on purpose: a later call retries
+			// WaitForSRC instead of caching the failure. The zero hash is
+			// the BLOCKHASH "unknown" sentinel.
+			return common.Hash{}
+		}
+		finalHeader := types.CopyHeader(blockNHeader)
+		finalHeader.Root = root
+		finalHeader.UncleHash = types.CalcUncleHash(nil)
+		hash = finalHeader.Hash()
+		resolved = true
+		return hash
+	}
+}
+
+// buildSpecEnv assembles the *environment used for speculative transaction
+// execution. Used by both the initial setup and each loop iteration's
+// next-block preparation.
+//
+// The EVM context's GetHash is replaced with SpeculativeGetHashFn so that a
+// BLOCKHASH touching the still-unsealed block resolves lazily via srcDone
+// and flips s.blockhashAccessed, which later forces the speculative block
+// to be discarded.
+//
+// The state parameter is named stateDB (not "state") so it does not shadow
+// the imported state package inside the body.
+func (s *specSession) buildSpecEnv(header *types.Header, stateDB *state.StateDB, coinbase common.Address, specContext *speculativeChainContext, grandparent *types.Header, grandparentNumber uint64, srcDone func() common.Hash) *environment {
+	specGetHash := core.SpeculativeGetHashFn(grandparent, specContext, grandparentNumber, srcDone, s.blockhashAccessed)
+	evmContext := core.NewEVMBlockContext(header, specContext, &coinbase)
+	evmContext.GetHash = specGetHash
+	env := &environment{
+		signer:         types.MakeSigner(s.w.chainConfig, header.Number, header.Time),
+		state:          stateDB,
+		size:           uint64(header.Size()),
+		coinbase:       coinbase,
+		buildInterrupt: newBuildInterruptState(),
+		header:         header,
+		evm:            vm.NewEVM(evmContext, stateDB, s.w.chainConfig, vm.Config{}),
+	}
+	env.evm.SetInterrupt(env.buildInterrupt.timeoutFlag())
+	// Zero value anyway; set explicitly to document that the speculative env
+	// starts with no committed transactions.
+	env.tcount = 0
+	return env
+}
+
+// resetTxPoolState publishes a fresh speculative state to the txpool so tx
+// selection sees the new block's post-parent view (nonces, balances).
+func (s *specSession) resetTxPoolState(parent *types.Header, parentRoot common.Hash, flatDiff *state.FlatDiff) {
+	poolState, err := s.w.chain.StateAtWithFlatDiff(parentRoot, flatDiff)
+	if err != nil {
+		// Non-fatal: the txpool keeps its previous view until the next reset.
+		log.Error("Pipelined SRC: failed to create txpool speculative state", "err", err)
+		return
+	}
+	s.w.eth.TxPool().SetSpeculativeState(parent, poolState)
+}
+
+// startInitialFillGoroutine kicks off the speculative tx fill for N+1 and
+// the EIP-2935 abort check. The goroutine closes initialFillDone when done;
+// s.eip2935Abort is only safe to read after <-s.initialFillDone.
+func (s *specSession) startInitialFillGoroutine() {
+	s.initialFillDone = make(chan struct{})
+	go func() {
+		defer close(s.initialFillDone)
+		// Deadline timer flips the env's interrupt flag so the fill stops
+		// at the block's target time.
+		stop := createInterruptTimer(s.specHeader.Number.Uint64(), s.specHeader.GetActualTime(), s.specEnv.buildInterrupt, true)
+		var interrupt atomic.Int32
+		s.w.fillSpeculativeTransactions(s.specEnv, &interrupt)
+		stop()
+		// Final discard log is emitted in the main loop so each aborted block is logged once.
+		if s.w.chainConfig.IsPrague(s.specHeader.Number) {
+			// If a tx read the EIP-2935 history slot for block N, the value it
+			// saw may differ once block N's real hash lands — flag for discard.
+			dangerousSlot := common.BigToHash(new(big.Int).SetUint64(s.blockNNumber % params.HistoryServeWindow))
+			if s.specState.WasStorageSlotRead(params.HistoryStorageAddress, dangerousSlot) {
+				s.eip2935Abort = true
+				pipelineEIP2935AbortsCounter.Inc(1)
+			}
+		}
+	}()
+}
+
+// waitForSRCAndSealBlockN waits for block N's SRC goroutine to complete,
+// assembles block N with the real root, submits it via taskCh, and waits for
+// resultLoop to persist it. Returns false on any failure; sets
+// exitDuringBlockN when the failure was w.exitCh.
+func (s *specSession) waitForSRCAndSealBlockN() bool {
+	srcStart := time.Now()
+	root, witnessN, err := s.w.chain.WaitForSRC()
+	srcWaitN := time.Since(srcStart)
+	pipelineSRCTimer.Update(srcWaitN)
+	pipelineSRCWaitTimer.Update(srcWaitN)
+	if err != nil {
+		log.Error("Pipelined SRC: SRC(N) failed", "block", s.blockNNumber, "err", err)
+		pipelineSpeculativeAbortsCounter.Inc(1)
+		pipelineAbortSRCFailedCounter.Inc(1)
+		return false
+	}
+	// Swap the provisional root for the real one computed by SRC.
+	finalHeaderN := types.CopyHeader(s.blockNHeader)
+	finalHeaderN.Root = root
+	blockN, receiptsN, err := s.borEngine.AssembleBlock(s.w.chain, finalHeaderN, s.req.blockNEnv.state, &types.Body{
+		Transactions: s.req.blockNEnv.txs,
+	}, s.req.blockNEnv.receipts, root, s.req.stateSyncData)
+	if err != nil {
+		log.Error("Pipelined SRC: AssembleBlock(N) failed", "err", err)
+		return false
+	}
+	// Block N uses the pipelined write path to avoid a double CommitWithUpdate
+	// from the SRC goroutine and writeBlockWithState. Witness from SRC is complete.
+	select {
+	case s.w.taskCh <- &task{receipts: receiptsN, state: s.req.blockNEnv.state, block: blockN, createdAt: time.Now(), pipelined: true, witnessBytes: witnessN}:
+		if s.w.config.PipelinedSRCLogs {
+			log.Info("Pipelined SRC: block N sent for sealing", "number", blockN.Number(), "txs", len(blockN.Transactions()), "root", root)
+		}
+	case <-s.w.exitCh:
+		s.exitDuringBlockN = true
+		return false
+	}
+	// Block until resultLoop has written block N so its signed hash is readable.
+	realHash, ok := s.waitForChainHead(blockN.NumberU64())
+	if !ok {
+		return false
+	}
+	s.realBlockNHash = realHash
+	s.rootN = root
+	return true
+}
+
+// waitForChainHead blocks until the chain head reaches blockNum (up to 30s)
+// so we can read the real (signed) block N hash from the canonical chain.
+// resultLoop writes the final header after Seal() modifies Extra, so we
+// can't use blockN.Hash() directly.
+//
+// Returns (hash, true) on success, (zero hash, false) on head mismatch,
+// timeout, or worker shutdown (shutdown also sets s.exitDuringBlockN).
+func (s *specSession) waitForChainHead(blockNum uint64) (common.Hash, bool) {
+	// Reuse one ticker for the 50ms poll instead of allocating a fresh
+	// timer per iteration via time.After.
+	poll := time.NewTicker(50 * time.Millisecond)
+	defer poll.Stop()
+	deadline := time.NewTimer(30 * time.Second)
+	defer deadline.Stop()
+	for {
+		if current := s.w.chain.CurrentBlock(); current != nil && current.Number.Uint64() >= blockNum {
+			if current.Number.Uint64() != blockNum {
+				// The head moved past blockNum without us observing it —
+				// current.Hash() would be the wrong block's hash, so bail out.
+				log.Error("Pipelined SRC: chain head mismatch after waiting", "expected", blockNum, "got", current.Number.Uint64())
+				return common.Hash{}, false
+			}
+			return current.Hash(), true
+		}
+		select {
+		case <-poll.C:
+		case <-deadline.C:
+			log.Error("Pipelined SRC: timed out waiting for block N to be written", "number", blockNum)
+			return common.Hash{}, false
+		case <-s.w.exitCh:
+			s.exitDuringBlockN = true
+			return common.Hash{}, false
+		}
+	}
+}
+
+// checkCurrentAbort inspects the abort flags set by the current fill goroutine:
+// EIP-2935 history-slot read, or BLOCKHASH(N) read before SRC resolved. Returns
+// true when the speculative block must be discarded (caller sets abortRecovery).
+func (s *specSession) checkCurrentAbort() bool {
+	switch {
+	case s.eip2935Abort:
+		log.Warn("Pipelined SRC: discarding speculative block — EIP-2935 slot accessed", "block", s.nextBlockNumber)
+		pipelineSpeculativeAbortsCounter.Inc(1)
+		return true
+	case s.blockhashAccessed.Load():
+		log.Warn("Pipelined SRC: discarding speculative block — BLOCKHASH(N) was accessed",
+			"block", s.nextBlockNumber, "pendingBlockN", s.blockNNumber)
+		pipelineSpeculativeAbortsCounter.Inc(1)
+		pipelineAbortBlockhashCounter.Inc(1)
+		return true
+	default:
+		return false
+	}
+}
+
+// drainPrevDBWrite waits for the previous iteration's async DB write before
+// FinalizeForPipeline runs. FinalizeForPipeline may read block headers and
+// state sync / span data from the chain DB — if the previous inline-sealed
+// block hasn't persisted, those lookups fail.
+func (s *specSession) drainPrevDBWrite() {
+	if done := s.prevDBWriteDone; done != nil {
+		<-done
+		s.prevDBWriteDone = nil
+	}
+}
+
+// finalizeCurrent runs FinalizeForPipeline on the current speculative block,
+// extracts its FlatDiff, and returns the final header + stateSync data.
+// Returns ok=false if FinalizeForPipeline errors (caller should break).
+func (s *specSession) finalizeCurrent() (*types.Header, *state.FlatDiff, []*types.StateSyncData, bool) {
+	// Rebind the speculative header to the real parent hash now that block N
+	// is sealed and its signed hash is known.
+	finalSpecHeader := types.CopyHeader(s.specHeader)
+	finalSpecHeader.ParentHash = s.realBlockNHash
+	if s.w.chainConfig.IsPrague(finalSpecHeader.Number) {
+		// Record the real parent hash via the EIP-2935 processing step.
+		// NOTE(review): any return value of ProcessParentBlockHash is
+		// discarded here — confirm it cannot fail on this path.
+		evmCtx := core.NewEVMBlockContext(finalSpecHeader, s.w.chain, &s.coinbase)
+		vmenv := vm.NewEVM(evmCtx, s.specState, s.w.chainConfig, vm.Config{})
+		core.ProcessParentBlockHash(s.realBlockNHash, vmenv)
+	}
+	stateSyncData, err := s.borEngine.FinalizeForPipeline(s.w.chain, finalSpecHeader, s.specState, &types.Body{
+		Transactions: s.specEnv.txs,
+	}, s.specEnv.receipts)
+	if err != nil {
+		log.Error("Pipelined SRC: FinalizeForPipeline failed", "block", s.nextBlockNumber, "err", err)
+		return nil, nil, nil, false
+	}
+	// Snapshot accumulated state changes; the FlatDiff feeds both the SRC
+	// goroutine and the next iteration's speculative state.
+	flatDiff := s.specState.CommitSnapshot(s.w.chainConfig.IsEIP158(finalSpecHeader.Number))
+	return finalSpecHeader, flatDiff, stateSyncData, true
+}
+
+// prepareNextIteration builds the N+2 speculative environment: header,
+// Prepare (may seal current via taskCh on failure), SRC spawn for current
+// block, state open, EVM+srcDone for next, fill goroutine. cont=false means
+// the main loop should break (we already handed off the current block).
+func (s *specSession) prepareNextIteration(finalSpecHeader *types.Header, flatDiff *state.FlatDiff, stateSyncData []*types.StateSyncData) (*specNextIteration, bool) {
+	specHeaderNext, specContextNext, coinbaseNext, ok := s.buildAndPrepareNextHeader(finalSpecHeader, flatDiff, stateSyncData)
+	if !ok {
+		// buildAndPrepareNextHeader already sealed the current block via taskCh.
+		return nil, false
+	}
+	// SRC for the current block runs concurrently with the next block's fill.
+	srcSpawnTime := s.spawnSRCForCurrent(finalSpecHeader, flatDiff)
+	specStateNext, specEnvNext, blockhashAccessedNext, ok := s.openNextSpecEnv(finalSpecHeader, flatDiff, stateSyncData, specHeaderNext, specContextNext, coinbaseNext)
+	if !ok {
+		// openNextSpecEnv already sealed the current block via taskCh.
+		return nil, false
+	}
+	s.resetTxPoolState(finalSpecHeader, s.rootN, flatDiff)
+	fillDone, eip2935AbortPtr, fillElapsedPtr := s.startNextFillGoroutine(specHeaderNext, specEnvNext, specStateNext)
+	return &specNextIteration{
+		specHeaderNext:    specHeaderNext,
+		specStateNext:     specStateNext,
+		specEnvNext:       specEnvNext,
+		coinbaseNext:      coinbaseNext,
+		blockhashAccessed: blockhashAccessedNext,
+		eip2935AbortPtr:   eip2935AbortPtr,
+		nextBuildStart:    time.Now(),
+		fillDone:          fillDone,
+		fillElapsed:       fillElapsedPtr,
+		srcSpawnTime:      srcSpawnTime,
+	}, true
+}
+
+// buildAndPrepareNextHeader constructs the next speculative header (N+2),
+// runs Prepare via the speculative chain reader, and on Prepare failure
+// hands off the CURRENT speculative block via taskCh (spawnSRC=true) before
+// returning ok=false so the caller can break out of the loop.
+func (s *specSession) buildAndPrepareNextHeader(finalSpecHeader *types.Header, flatDiff *state.FlatDiff, stateSyncData []*types.StateSyncData) (*types.Header, *speculativeChainContext, common.Address, bool) {
+	nextNextBlockNumber := s.nextBlockNumber + 1
+	// The parent's real hash is unknown (its root is still in flight), so a
+	// deterministic placeholder stands in; the speculative reader maps the
+	// placeholder back to finalSpecHeader for header lookups.
+	specReaderNext := newSpeculativeChainReader(s.w.chain, finalSpecHeader, placeholderParentHash(s.nextBlockNumber))
+	specContextNext := newSpeculativeChainContext(specReaderNext, s.w.engine)
+	coinbaseNext := s.w.resolveCoinbase(nextNextBlockNumber, s.w.etherbase())
+	specHeaderNext := &types.Header{
+		ParentHash: placeholderParentHash(s.nextBlockNumber),
+		Number:     new(big.Int).SetUint64(nextNextBlockNumber),
+		GasLimit:   core.CalcGasLimit(finalSpecHeader.GasLimit, s.w.config.GasCeil),
+		Time:       finalSpecHeader.Time + s.w.chainConfig.Bor.CalculatePeriod(nextNextBlockNumber),
+		Coinbase:   coinbaseNext,
+	}
+	if s.w.chainConfig.IsLondon(specHeaderNext.Number) {
+		specHeaderNext.BaseFee = eip1559.CalcBaseFee(s.w.chainConfig, finalSpecHeader)
+	}
+	if err := s.w.engine.Prepare(specReaderNext, specHeaderNext); err != nil {
+		log.Warn("Pipelined SRC: Prepare failed for next block, sealing current", "block", nextNextBlockNumber, "err", err)
+		s.w.sealBlockViaTaskCh(s.borEngine, finalSpecHeader, s.specState, s.specEnv.txs, s.specEnv.receipts, stateSyncData, s.rootN, flatDiff, true, s.curBuildStart)
+		return nil, nil, common.Address{}, false
+	}
+	return specHeaderNext, specContextNext, coinbaseNext, true
+}
+
+// spawnSRCForCurrent starts the SRC goroutine that computes the state root
+// for the current speculative block (now finalized) while the next block's
+// execution runs. Returns the srcSpawnTime used for pipelineSRCTimer.
+func (s *specSession) spawnSRCForCurrent(finalSpecHeader *types.Header, flatDiff *state.FlatDiff) time.Time {
+	srcSpawnTime := time.Now()
+	// SRC only needs the header; wrap it in a body-less block.
+	tmpBlockCur := types.NewBlockWithHeader(finalSpecHeader)
+	// Miner pipeline always produces witnesses for now. allowOwnWitness=true
+	// explicitly permits SRC to create its own witness when no execution
+	// witness is handed in by the caller. nil warmSnapshot — the miner-side
+	// path does not currently capture the prefetcher snapshot; SRC falls
+	// back to the plain pathdb reader chain.
+	s.w.chain.SpawnSRCGoroutine(tmpBlockCur, s.rootN, flatDiff, true, nil, true, nil)
+	s.w.chain.SetLastFlatDiff(flatDiff, finalSpecHeader.Number.Uint64(), s.rootN, common.Hash{})
+	if s.w.config.PipelinedSRCLogs {
+		log.Info("Pipelined SRC: spawned SRC, starting speculative exec", "srcBlock", s.nextBlockNumber, "specExecBlock", s.nextBlockNumber+1)
+	}
+	return srcSpawnTime
+}
+
+// openNextSpecEnv opens the state + environment for the next speculative
+// block (N+2). On failure (state open error or grandparent not found), hands
+// off the CURRENT speculative block via taskCh with spawnSRC=false (SRC for
+// the current block is already in flight from spawnSRCForCurrent).
+func (s *specSession) openNextSpecEnv(finalSpecHeader *types.Header, flatDiff *state.FlatDiff, stateSyncData []*types.StateSyncData, specHeaderNext *types.Header, specContextNext *speculativeChainContext, coinbaseNext common.Address) (*state.StateDB, *environment, *atomic.Bool, bool) {
+	specStateNext, err := s.w.chain.StateAtWithFlatDiff(s.rootN, flatDiff)
+	if err != nil {
+		log.Error("Pipelined SRC: failed to open speculative state for next block", "block", s.nextBlockNumber+1, "err", err)
+		s.w.sealBlockViaTaskCh(s.borEngine, finalSpecHeader, s.specState, s.specEnv.txs, s.specEnv.receipts, stateSyncData, s.rootN, flatDiff, false, s.curBuildStart)
+		return nil, nil, nil, false
+	}
+	specStateNext.StartPrefetcher("miner-speculative", nil, nil)
+
+	grandparent := s.resolveGrandparent()
+	if grandparent == nil {
+		// NOTE(review): specStateNext's prefetcher is not explicitly stopped
+		// on this path — confirm it is torn down when the StateDB is dropped.
+		log.Error("Pipelined SRC: grandparent header not found for next block", "number", s.blockNNumber)
+		s.w.sealBlockViaTaskCh(s.borEngine, finalSpecHeader, s.specState, s.specEnv.txs, s.specEnv.receipts, stateSyncData, s.rootN, flatDiff, false, s.curBuildStart)
+		return nil, nil, nil, false
+	}
+
+	// BLOCKHASH of the current block resolves lazily (it is only known after
+	// SRC + seal); any such access sets blockhashAccessedNext for discard.
+	blockhashAccessedNext := new(atomic.Bool)
+	specGetHashNext := core.SpeculativeGetHashFn(grandparent, specContextNext, s.nextBlockNumber, s.makeNextHashResolver(finalSpecHeader), blockhashAccessedNext)
+	evmContextNext := core.NewEVMBlockContext(specHeaderNext, specContextNext, &coinbaseNext)
+	evmContextNext.GetHash = specGetHashNext
+
+	specEnvNext := &environment{
+		signer:         types.MakeSigner(s.w.chainConfig, specHeaderNext.Number, specHeaderNext.Time),
+		state:          specStateNext,
+		size:           uint64(specHeaderNext.Size()),
+		coinbase:       coinbaseNext,
+		buildInterrupt: newBuildInterruptState(),
+		header:         specHeaderNext,
+		evm:            vm.NewEVM(evmContextNext, specStateNext, s.w.chainConfig, vm.Config{}),
+	}
+	specEnvNext.evm.SetInterrupt(specEnvNext.buildInterrupt.timeoutFlag())
+	specEnvNext.tcount = 0
+	return specStateNext, specEnvNext, blockhashAccessedNext, true
+}
+
+// resolveGrandparent returns the grandparent header for the next iteration.
+// Prefers lastSealedHeader (the async DB write may not have persisted yet)
+// and falls back to the chain DB.
+func (s *specSession) resolveGrandparent() *types.Header {
+	if sealed := s.lastSealedHeader; sealed != nil && sealed.Number.Uint64() == s.blockNNumber {
+		return sealed
+	}
+	return s.w.chain.GetHeaderByNumber(s.blockNNumber)
+}
+
+// makeNextHashResolver returns a lazy resolver for the current speculative
+// block's signed hash, used by SpeculativeGetHashFn of the NEXT speculative
+// block. Mirrors newBlockNHashResolver but for mid-pipeline iterations.
+//
+// Concurrency-safe and memoized; on SRC failure resolved stays false, so a
+// later call retries WaitForSRC.
+func (s *specSession) makeNextHashResolver(finalSpecHeader *types.Header) func() common.Hash {
+	var (
+		hash     common.Hash
+		resolved bool
+		mu       sync.Mutex
+	)
+	return func() common.Hash {
+		mu.Lock()
+		defer mu.Unlock()
+		if resolved {
+			return hash
+		}
+		rootSpec, _, err := s.w.chain.WaitForSRC()
+		if err != nil {
+			log.Error("Pipelined SRC: SRC failed during BLOCKHASH resolution", "err", err)
+			return common.Hash{}
+		}
+		// Pre-seal hash: SRC root plus an empty uncle list.
+		finalH := types.CopyHeader(finalSpecHeader)
+		finalH.Root = rootSpec
+		finalH.UncleHash = types.CalcUncleHash(nil)
+		hash = finalH.Hash()
+		resolved = true
+		return hash
+	}
+}
+
+// startNextFillGoroutine fills N+2 speculatively in parallel with the current
+// block's seal, and flags EIP-2935 aborts for N+2. Returns the done channel
+// and pointers to the abort/elapsed fields set by the goroutine (only safe to
+// read after <-fillDone).
+func (s *specSession) startNextFillGoroutine(headerNext *types.Header, envNext *environment, stateNext *state.StateDB) (chan struct{}, *bool, *time.Duration) {
+	fillDone := make(chan struct{})
+	var (
+		eip2935Abort bool
+		fillElapsed  time.Duration
+	)
+	go func() {
+		defer close(fillDone)
+		// Deadline timer flips envNext's interrupt flag at the target time.
+		stop := createInterruptTimer(headerNext.Number.Uint64(), headerNext.GetActualTime(), envNext.buildInterrupt, true)
+		var interrupt atomic.Int32
+		fillElapsed = s.w.fillSpeculativeTransactions(envNext, &interrupt)
+		stop()
+		if s.w.chainConfig.IsPrague(headerNext.Number) {
+			// A read of the parent's EIP-2935 history slot invalidates the
+			// speculative execution once the real parent hash lands.
+			dangerousSlot := common.BigToHash(new(big.Int).SetUint64(s.nextBlockNumber % params.HistoryServeWindow))
+			if stateNext.WasStorageSlotRead(params.HistoryStorageAddress, dangerousSlot) {
+				eip2935Abort = true
+				pipelineEIP2935AbortsCounter.Inc(1)
+			}
+		}
+	}()
+	return fillDone, &eip2935Abort, &fillElapsed
+}
+
+// sealCurrentAndAdvance waits for SRC of the current speculative block,
+// assembles it, waits for header.Time, inline-seals + broadcasts, and hands
+// back the sealed block. Returns exitEarly=true if w.exitCh fired during the
+// timestamp wait (caller returns false, abortRecovery).
+//
+// Return values: (sealedBlock, exitEarly, ok). Every failure path drains
+// next.fillDone first so the fill goroutine is never left running.
+func (s *specSession) sealCurrentAndAdvance(finalSpecHeader *types.Header, stateSyncData []*types.StateSyncData, next *specNextIteration) (*types.Block, bool, bool) {
+	srcWaitStart := time.Now()
+	rootSpec, witnessSpec, err := s.w.chain.WaitForSRC()
+	srcWaitElapsed := time.Since(srcWaitStart)
+	pipelineSRCTimer.Update(time.Since(next.srcSpawnTime))
+	pipelineSRCWaitTimer.Update(srcWaitElapsed)
+	if err != nil {
+		log.Error("Pipelined SRC: SRC failed", "block", s.nextBlockNumber, "err", err)
+		pipelineSpeculativeAbortsCounter.Inc(1)
+		pipelineAbortSRCFailedCounter.Inc(1)
+		<-next.fillDone
+		return nil, false, false
+	}
+	if s.w.config.PipelinedSRCLogs {
+		log.Info("Pipelined SRC: SRC completed", "block", s.nextBlockNumber, "srcWait", srcWaitElapsed)
+	}
+	blockSpec, receiptsSpec, err := s.borEngine.AssembleBlock(s.w.chain, finalSpecHeader, s.specState, &types.Body{
+		Transactions: s.specEnv.txs,
+	}, s.specEnv.receipts, rootSpec, stateSyncData)
+	if err != nil {
+		log.Error("Pipelined SRC: AssembleBlock failed", "block", s.nextBlockNumber, "err", err)
+		<-next.fillDone
+		return nil, false, false
+	}
+	// Update pendingWorkBlock BEFORE inline write so that newWorkLoop skips
+	// the ChainHeadEvent for this block. pendingWorkBlock = nextBlockNumber+1
+	// means "working on nextBlockNumber+1, so skip ChainHeadEvent for nextBlockNumber".
+	s.w.pendingWorkBlock.Store(s.nextBlockNumber + 1)
+	if exit := s.waitForBlockTime(finalSpecHeader, next.fillDone); exit {
+		// waitForBlockTime already drained fillDone / prevDBWriteDone.
+		return nil, true, false
+	}
+	sealedBlock, dbWriteDone, err := s.w.inlineSealAndBroadcast(blockSpec, receiptsSpec, s.specState, witnessSpec, s.curBuildStart)
+	if err != nil {
+		log.Error("Pipelined SRC: inline seal failed", "block", s.nextBlockNumber, "err", err)
+		<-next.fillDone
+		return nil, false, false
+	}
+	<-next.fillDone
+	s.prevDBWriteDone = dbWriteDone
+	pipelineSpeculativeBlocksCounter.Inc(1)
+	if s.w.config.PipelinedSRCLogs {
+		log.Info("Pipelined SRC: block sealed (inline)", "number", sealedBlock.Number(),
+			"txs", len(sealedBlock.Transactions()), "root", rootSpec, "fillBlock", s.nextBlockNumber+1, "fillElapsed", *next.fillElapsed)
+	}
+	return sealedBlock, false, true
+}
+
+// waitForBlockTime blocks until the speculative block's target announce time
+// is reached, draining the fill and previous-DB-write channels on shutdown
+// so goroutines aren't left hanging. Returns exit=true on w.exitCh.
+func (s *specSession) waitForBlockTime(finalSpecHeader *types.Header, fillDone chan struct{}) bool {
+	remaining := time.Until(finalSpecHeader.GetActualTime())
+	if remaining <= 0 {
+		// Already past the target timestamp — no wait needed.
+		return false
+	}
+	timer := time.NewTimer(remaining)
+	defer timer.Stop()
+	select {
+	case <-timer.C:
+		return false
+	case <-s.w.exitCh:
+		// Shutting down: wait out the fill goroutine and any pending async
+		// DB write so they don't outlive the worker.
+		<-fillDone
+		if s.prevDBWriteDone != nil {
+			<-s.prevDBWriteDone
+		}
+		return true
+	}
+}
+
+// shiftToNext rotates the session's per-iteration state to the block just
+// prepared by prepareNextIteration. Called after a successful inline seal.
+func (s *specSession) shiftToNext(sealed *types.Block, next *specNextIteration) {
+	// The block we just sealed becomes the new "block N".
+	s.lastSealedHeader = sealed.Header()
+	s.rootN = sealed.Root()
+	s.realBlockNHash = sealed.Hash()
+	s.blockNNumber = s.nextBlockNumber
+	s.nextBlockNumber = s.blockNNumber + 1
+	// The pre-built next iteration becomes the current speculative block.
+	s.specHeader = next.specHeaderNext
+	s.specState = next.specStateNext
+	s.specEnv = next.specEnvNext
+	s.coinbase = next.coinbaseNext
+	s.eip2935Abort = *next.eip2935AbortPtr
+	s.blockhashAccessed = next.blockhashAccessed
+	s.curBuildStart = next.nextBuildStart
+}
+
+// fallbackToSequential computes the state root inline and assembles block N
+// without a background SRC goroutine. This avoids trie DB races between
+// background and inline commits.
+func (w *worker) fallbackToSequential(req *speculativeWorkReq) {
+	if w.config.PipelinedSRCLogs {
+		log.Info("Pipelined SRC: falling back to sequential execution")
+	}
+	pipelineSpeculativeAbortsCounter.Inc(1)
+	pipelineAbortFallbackCounter.Inc(1)
+
+	borEngine, ok := w.engine.(*bor.Bor)
+	if !ok {
+		// The pipeline is bor-specific; no fallback for other engines.
+		return
+	}
+
+	// Root is computed synchronously here — no SRC goroutine is in flight.
+	root := req.blockNEnv.state.IntermediateRoot(w.chainConfig.IsEIP158(req.blockNEnv.header.Number))
+
+	block, receipts, err := borEngine.AssembleBlock(w.chain, req.blockNEnv.header, req.blockNEnv.state, &types.Body{
+		Transactions: req.blockNEnv.txs,
+	}, req.blockNEnv.receipts, root, req.stateSyncData)
+	if err != nil {
+		log.Error("Pipelined SRC: AssembleBlock failed during fallback", "err", err)
+		return
+	}
+
+	// The task is sent WITHOUT pipelined=true, so it takes the regular
+	// seal/write path rather than the pipelined one.
+	select {
+	case w.taskCh <- &task{receipts: receipts, state: req.blockNEnv.state, block: block, createdAt: time.Now()}:
+		if w.config.PipelinedSRCLogs {
+			log.Info("Pipelined SRC: fallback block sealed", "number", block.Number(), "root", root)
+		}
+	case <-w.exitCh:
+	}
+}
+
+// sealBlockViaTaskCh spawns SRC (if needed), waits for the root, assembles the
+// block, and sends it through the normal taskCh → taskLoop → Seal → resultLoop
+// path. Used for the last block in a pipeline run so that resultLoop emits
+// ChainHeadEvent and normal block production resumes immediately.
+func (w *worker) sealBlockViaTaskCh(
+	borEngine *bor.Bor,
+	finalHeader *types.Header,
+	statedb *state.StateDB,
+	txs []*types.Transaction,
+	receipts []*types.Receipt,
+	stateSyncData []*types.StateSyncData,
+	rootN common.Hash,
+	flatDiff *state.FlatDiff,
+	spawnSRC bool, // false if SRC goroutine is already running
+	buildStart time.Time, // wall clock when this block's speculative fill began — for worker/build_to_announce
+) {
+	w.spawnSRCForFinalBlock(finalHeader, rootN, flatDiff, spawnSRC)
+	pipelineSpeculativeBlocksCounter.Inc(1)
+
+	rootSpec, witnessSpec, err := w.chain.WaitForSRC()
+	if err != nil {
+		// No block is produced on SRC failure; this run ends without a seal.
+		log.Error("Pipelined SRC: SRC failed", "block", finalHeader.Number, "err", err)
+		return
+	}
+
+	block, blockReceipts, err := borEngine.AssembleBlock(w.chain, finalHeader, statedb, &types.Body{
+		Transactions: txs,
+	}, receipts, rootSpec, stateSyncData)
+	if err != nil {
+		log.Error("Pipelined SRC: AssembleBlock failed", "block", finalHeader.Number, "err", err)
+		return
+	}
+
+	// Wait for the block's target timestamp before sending to taskCh.
+	// Since Prepare() was called without sleeping, we wait here instead.
+	if delay := time.Until(finalHeader.GetActualTime()); delay > 0 {
+		select {
+		case <-time.After(delay):
+		case <-w.exitCh:
+			return
+		}
+	}
+
+	select {
+	case w.taskCh <- &task{receipts: blockReceipts, state: statedb, block: block, createdAt: time.Now(), productionStart: buildStart, pipelined: true, witnessBytes: witnessSpec}:
+		if w.config.PipelinedSRCLogs {
+			log.Info("Pipelined SRC: block sealed", "number", block.Number(),
+				"txs", len(block.Transactions()), "root", rootSpec)
+		}
+	case <-w.exitCh:
+	}
+}
+
+// inlineSealAndBroadcast seals a pipelined block using a private channel
+// (bypassing taskLoop/resultLoop), broadcasts it to peers immediately, and
+// writes to the chain DB asynchronously. This avoids blocking the pipeline
+// on the DB write — the next iteration can start as soon as the block is sealed.
+//
+// Returns the sealed block and a channel that closes when the async DB write
+// completes. The caller must wait on writeDone before the node can serve the
+// block data from DB, but the pipeline can proceed immediately.
+//
+// Uses emitHeadEvent=false to avoid a deadlock: mainLoop is blocked in
+// commitSpeculativeWork, so chainHeadFeed.Send would eventually block when
+// newWorkLoop's channel fills up.
+func (w *worker) inlineSealAndBroadcast(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB, witnessBytes []byte, buildStart time.Time) (*types.Block, chan struct{}, error) {
+	sealedBlock, err := w.sealViaPrivateChannel(block)
+	if err != nil {
+		return nil, nil, err
+	}
+	hash := sealedBlock.Hash()
+	// Receipts must point at the sealed (post-Seal) hash, not the pre-seal one.
+	sealedReceipts, logs := rebindReceiptsToSealedBlock(receipts, sealedBlock)
+
+	log.Info("Successfully sealed new block", "number", sealedBlock.Number(),
+		"sealhash", w.engine.SealHash(sealedBlock.Header()), "hash", hash, "elapsed", "inline")
+
+	// Cache the witness so the WIT protocol can serve it to stateless peers
+	// immediately, without waiting for the async DB write.
+	if len(witnessBytes) > 0 {
+		w.chain.CacheWitness(hash, witnessBytes)
+	}
+
+	w.announceInlineSealedBlock(sealedBlock, buildStart)
+	w.clearPending(sealedBlock.NumberU64())
+
+	// Write to chain DB asynchronously — the pipeline can proceed with the
+	// next iteration using sealedBlock.Hash() directly, without waiting for
+	// the DB write to complete.
+	writeDone := make(chan struct{})
+	go func() {
+		defer close(writeDone)
+		writeStart := time.Now()
+		_, err := w.chain.WriteBlockAndSetHeadPipelined(sealedBlock, sealedReceipts, logs, statedb, false, witnessBytes)
+		writeBlockAndSetHeadTimer.UpdateSince(writeStart)
+		if err != nil {
+			// NOTE(review): the failure is only logged, yet the block was
+			// already broadcast — confirm recovery handles a missed write.
+			log.Error("Pipelined SRC: async DB write failed", "block", sealedBlock.Number(), "err", err)
+		}
+	}()
+	return sealedBlock, writeDone, nil
+}
+
+// sealViaPrivateChannel runs engine.Seal on a private channel (no contention
+// with the shared resultCh) and waits up to 5s for the sealed block.
+// For primary producers on Bhilai+, delay=0, so the wait is effectively
+// bounded by the Seal signature computation.
+func (w *worker) sealViaPrivateChannel(block *types.Block) (*types.Block, error) {
+	sealCh := make(chan *consensus.NewSealedBlockEvent, 1)
+	stopCh := make(chan struct{})
+	sealStart := time.Now()
+	if err := w.engine.Seal(w.chain, block, nil, sealCh, stopCh); err != nil {
+		return nil, fmt.Errorf("seal failed: %w", err)
+	}
+	select {
+	case ev := <-sealCh:
+		pipelineSealDurationTimer.UpdateSince(sealStart)
+		if ev == nil || ev.Block == nil {
+			return nil, errors.New("nil sealed block from Seal")
+		}
+		// NOTE(review): stopCh is left open on the success path — confirm
+		// Seal's goroutine exits on its own after delivering the result.
+		return ev.Block, nil
+	case <-time.After(5 * time.Second):
+		close(stopCh)
+		return nil, errors.New("inline seal timed out")
+	case <-w.exitCh:
+		close(stopCh)
+		return nil, errors.New("worker stopped during inline seal")
+	}
+}
+
+// rebindReceiptsToSealedBlock copies receipts with BlockHash/BlockNumber/
+// TransactionIndex pointing at the sealed block, deep-copies logs, and
+// returns the flat logs slice (same behavior as resultLoop's receipt fixup).
+func rebindReceiptsToSealedBlock(receipts []*types.Receipt, sealedBlock *types.Block) ([]*types.Receipt, []*types.Log) {
+	hash := sealedBlock.Hash()
+	rebound := make([]*types.Receipt, len(receipts))
+	var logs []*types.Log
+	for i, orig := range receipts {
+		// Shallow copy, then rewrite the block-binding fields.
+		cpy := *orig
+		cpy.BlockHash = hash
+		cpy.BlockNumber = sealedBlock.Number()
+		cpy.TransactionIndex = uint(i)
+		// Logs are deep-copied so the originals stay untouched.
+		cpy.Logs = make([]*types.Log, len(orig.Logs))
+		for j, origLog := range orig.Logs {
+			logCpy := *origLog
+			logCpy.BlockHash = hash
+			cpy.Logs[j] = &logCpy
+		}
+		rebound[i] = &cpy
+		logs = append(logs, cpy.Logs...)
+	}
+	return rebound, logs
+}
+
+// announceInlineSealedBlock emits the pipelined-sealed block to peers and
+// updates the build-to-announce / earliness / committed / throughput metrics.
+// Broadcast happens BEFORE the async DB write so peers don't wait on disk.
+func (w *worker) announceInlineSealedBlock(sealedBlock *types.Block, buildStart time.Time) {
+	now := time.Now()
+	// Positive when announced before header.GetActualTime (PIP-66 early). Negative when late.
+	pipelineAnnounceEarlinessMs.Update(sealedBlock.Header().GetActualTime().Sub(now).Milliseconds())
+	pipelineSpeculativeCommittedCounter.Inc(1)
+	if !buildStart.IsZero() {
+		workerBuildToAnnounceTimer.UpdateSince(buildStart)
+	}
+	w.mux.Post(core.NewMinedBlockEvent{Block: sealedBlock, SealedAt: now})
+
+	txCount := sealedBlock.Transactions().Len()
+	sealedBlocksCounter.Inc(1)
+	if txCount == 0 {
+		sealedEmptyBlocksCounter.Inc(1)
+	}
+	workerGasUsedPerBlockHistogram.Update(int64(sealedBlock.GasUsed()))
+	workerTxsPerBlockHistogram.Update(int64(txCount))
+}
diff --git a/miner/pipeline_test.go b/miner/pipeline_test.go
new file mode 100644
index 0000000000..7f750df2d1
--- /dev/null
+++ b/miner/pipeline_test.go
@@ -0,0 +1,34 @@
+package miner
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// TestShouldLateRefillSpeculativeBlock exercises the late-refill predicate
+// across tx-count / remaining-gas combinations, table-driven.
+func TestShouldLateRefillSpeculativeBlock(t *testing.T) {
+	t.Parallel()
+
+	newEnv := func(txs int, gasLimit uint64, remainingGas uint64, withGasPool bool) *environment {
+		env := &environment{
+			header: &types.Header{
+				Number:   big.NewInt(1),
+				GasLimit: gasLimit,
+			},
+			txs: make([]*types.Transaction, txs),
+		}
+		if withGasPool {
+			env.gasPool = new(core.GasPool).AddGas(remainingGas)
+		}
+		return env
+	}
+
+	cases := []struct {
+		name string
+		env  *environment
+		want bool
+	}{
+		{"no txs, no gas pool", newEnv(0, 1000, 0, false), true},
+		{"one tx, 600 of 1000 gas left", newEnv(1, 1000, 600, true), true},
+		{"two txs, no gas pool", newEnv(2, 1000, 0, false), true},
+		{"one tx, 200 of 1000 gas left", newEnv(1, 1000, 200, true), false},
+	}
+	for _, tc := range cases {
+		require.Equal(t, tc.want, shouldLateRefillSpeculativeBlock(tc.env), tc.name)
+	}
+}
diff --git a/miner/speculative_chain_reader.go b/miner/speculative_chain_reader.go
new file mode 100644
index 0000000000..5bf05199d2
--- /dev/null
+++ b/miner/speculative_chain_reader.go
@@ -0,0 +1,118 @@
+package miner
+
+import (
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// speculativeChainReader wraps a real ChainHeaderReader and intercepts
+// hash-based lookups for a pending block whose hash is not yet known
+// (because its state root is still being computed by the SRC goroutine).
+//
+// During pipelined SRC, block N+1's Prepare() needs to look up block N's
+// header — but block N hasn't been written to the chain DB yet. The wrapper
+// maps a deterministic placeholder hash to block N's provisional header
+// (complete except for Root), allowing Prepare() and snapshot walks to proceed.
+//
+// The snapshot walk (bor.go:686) starts from header.ParentHash. For the
+// speculative header, that's the placeholder hash. The wrapper returns
+// pendingParentHeader for that lookup. Subsequent walk steps use
+// pendingParentHeader.ParentHash (= hash(block_{N-1})), which is in the
+// real chain DB, so the walk continues normally.
+type speculativeChainReader struct {
+	inner               consensus.ChainHeaderReader
+	pendingParentHeader *types.Header // block N's header (complete except Root)
+	placeholderHash     common.Hash   // the placeholder used as block N+1's ParentHash
+}
+
+// newSpeculativeChainReader creates a wrapper that intercepts lookups for
+// the pending parent block.
+//
+// pendingParentHeader must have all fields set except Root. The caller must
+// ensure that pendingParentHeader.ParentHash points to a block that IS in
+// the chain DB (block N-1).
+//
+// placeholderHash is a deterministic sentinel used as ParentHash in the
+// speculative block N+1 header. It must NOT collide with any real block hash.
+func newSpeculativeChainReader(
+	inner consensus.ChainHeaderReader,
+	pendingParentHeader *types.Header,
+	placeholderHash common.Hash,
+) *speculativeChainReader {
+	return &speculativeChainReader{
+		inner:               inner,
+		pendingParentHeader: pendingParentHeader,
+		placeholderHash:     placeholderHash,
+	}
+}
+
+func (s *speculativeChainReader) Config() *params.ChainConfig {
+	return s.inner.Config()
+}
+
+func (s *speculativeChainReader) CurrentHeader() *types.Header {
+	return s.inner.CurrentHeader()
+}
+
+func (s *speculativeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header {
+	if hash == s.placeholderHash && number == s.pendingParentHeader.Number.Uint64() {
+		return s.pendingParentHeader
+	}
+	return s.inner.GetHeader(hash, number)
+}
+
+func (s *speculativeChainReader) GetHeaderByNumber(number uint64) *types.Header {
+	// NOTE(review): any header the inner reader holds at this height (e.g.
+	// after a reorg) is shadowed by the pending one — confirm callers expect this.
+	if number == s.pendingParentHeader.Number.Uint64() {
+		return s.pendingParentHeader
+	}
+	return s.inner.GetHeaderByNumber(number)
+}
+
+func (s *speculativeChainReader) GetHeaderByHash(hash common.Hash) *types.Header {
+	if hash == s.placeholderHash {
+		return s.pendingParentHeader
+	}
+	return s.inner.GetHeaderByHash(hash)
+}
+
+func (s *speculativeChainReader) GetTd(hash common.Hash, number uint64) *big.Int {
+	if hash == s.placeholderHash && number == s.pendingParentHeader.Number.Uint64() {
+		// Return the parent's TD. This is an approximation — the real TD
+		// would include block N's difficulty, but Bor's Prepare() does not
+		// use TD from GetTd. Seal() uses it for broadcast, but that happens
+		// after the real header is assembled.
+		// Assumes pendingParentHeader.Number > 0 (a pending genesis cannot occur).
+		return s.inner.GetTd(s.pendingParentHeader.ParentHash, s.pendingParentHeader.Number.Uint64()-1)
+	}
+	return s.inner.GetTd(hash, number)
+}
+
+// speculativeChainContext wraps speculativeChainReader and adds the Engine()
+// method, satisfying core.ChainContext. This is needed because
+// NewEVMBlockContext takes a ChainContext.
+type speculativeChainContext struct {
+	*speculativeChainReader
+	engine consensus.Engine
+}
+
+// newSpeculativeChainContext creates a ChainContext backed by the speculative
+// reader and the given consensus engine.
+func newSpeculativeChainContext(
+	reader *speculativeChainReader,
+	engine consensus.Engine,
+) *speculativeChainContext {
+	return &speculativeChainContext{
+		speculativeChainReader: reader,
+		engine:                 engine,
+	}
+}
+
+func (s *speculativeChainContext) Engine() consensus.Engine {
+	return s.engine
+}
diff --git a/miner/speculative_chain_reader_test.go b/miner/speculative_chain_reader_test.go
new file mode 100644
index 0000000000..57dae5ba07
--- /dev/null
+++ b/miner/speculative_chain_reader_test.go
@@ -0,0 +1,206 @@
+package miner
+
+import (
+	"math/big"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// mockChainHeaderReader implements consensus.ChainHeaderReader for testing.
+type mockChainHeaderReader struct {
+	headers map[common.Hash]*types.Header
+	byNum   map[uint64]*types.Header
+}
+
+func newMockChainHeaderReader() *mockChainHeaderReader {
+	return &mockChainHeaderReader{
+		headers: make(map[common.Hash]*types.Header),
+		byNum:   make(map[uint64]*types.Header),
+	}
+}
+
+// addHeader registers h under both its hash and its block number.
+func (m *mockChainHeaderReader) addHeader(h *types.Header) {
+	m.headers[h.Hash()] = h
+	m.byNum[h.Number.Uint64()] = h
+}
+
+func (m *mockChainHeaderReader) Config() *params.ChainConfig        { return params.TestChainConfig }
+func (m *mockChainHeaderReader) CurrentHeader() *types.Header       { return nil }
+func (m *mockChainHeaderReader) GetTd(common.Hash, uint64) *big.Int { return big.NewInt(1) }
+
+func (m *mockChainHeaderReader) GetHeader(hash common.Hash, number uint64) *types.Header {
+	h, ok := m.headers[hash]
+	if ok && h.Number.Uint64() == number {
+		return h
+	}
+	return nil
+}
+
+func (m *mockChainHeaderReader) GetHeaderByNumber(number uint64) *types.Header {
+	return m.byNum[number]
+}
+
+func (m *mockChainHeaderReader) GetHeaderByHash(hash common.Hash) *types.Header {
+	return m.headers[hash]
+}
+
+func TestSpeculativeChainReader_InterceptsPlaceholder(t *testing.T) {
+	inner := newMockChainHeaderReader()
+
+	// Build a simple chain: block 8 (committed), block 9 (pending)
+	header8 := &types.Header{Number: big.NewInt(8), Extra: []byte("block8")}
+	inner.addHeader(header8)
+
+	// Block 9 is pending — not in the chain DB
+	pendingHeader9 := &types.Header{
+		Number:     big.NewInt(9),
+		ParentHash: header8.Hash(),
+		Extra:      []byte("block9-pending"),
+	}
+
+	placeholder := common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
+	reader := newSpeculativeChainReader(inner, pendingHeader9, placeholder)
+
+	// GetHeader with placeholder hash and number 9 should return pending header
+	got := reader.GetHeader(placeholder, 9)
+	if got == nil {
+		t.Fatal("GetHeader(placeholder, 9) returned nil")
+	}
+	if got.Number.Uint64() != 9 {
+		t.Errorf("expected block 9, got %d", got.Number.Uint64())
+	}
+	if string(got.Extra) != "block9-pending" {
+		t.Errorf("expected pending header extra, got %s", string(got.Extra))
+	}
+
+	// GetHeaderByHash with placeholder should return pending header
+	got = reader.GetHeaderByHash(placeholder)
+	if got == nil {
+		t.Fatal("GetHeaderByHash(placeholder) returned nil")
+	}
+	if got.Number.Uint64() != 9 {
+		t.Errorf("expected block 9, got %d", got.Number.Uint64())
+	}
+
+	// GetHeaderByNumber(9) should return pending header
+	got = reader.GetHeaderByNumber(9)
+	if got == nil {
+		t.Fatal("GetHeaderByNumber(9) returned nil")
+	}
+	if string(got.Extra) != "block9-pending" {
+		t.Errorf("expected pending header, got %s", string(got.Extra))
+	}
+}
+
+func TestSpeculativeChainReader_DelegatesNonPlaceholder(t *testing.T) {
+	inner := newMockChainHeaderReader()
+
+	header7 := &types.Header{Number: big.NewInt(7), Extra: []byte("block7")}
+	header8 := &types.Header{Number: big.NewInt(8), Extra: []byte("block8")}
+	inner.addHeader(header7)
+	inner.addHeader(header8)
+
+	pendingHeader9 := &types.Header{
+		Number:     big.NewInt(9),
+		ParentHash: header8.Hash(),
+	}
+
+	placeholder := common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
+	reader := newSpeculativeChainReader(inner, pendingHeader9, placeholder)
+
+	// Looking up block 8 by its real hash should delegate to inner
+	got := reader.GetHeader(header8.Hash(), 8)
+	if got == nil {
+		t.Fatal("GetHeader(block8Hash, 8) returned nil")
+	}
+	if string(got.Extra) != "block8" {
+		t.Errorf("expected block8 header, got %s", string(got.Extra))
+	}
+
+	// GetHeaderByNumber(7) should delegate
+	got = reader.GetHeaderByNumber(7)
+	if got == nil {
+		t.Fatal("GetHeaderByNumber(7) returned nil")
+	}
+	if string(got.Extra) != "block7" {
+		t.Errorf("expected block7 header, got %s", string(got.Extra))
+	}
+
+	// Unknown hash should return nil
+	got = reader.GetHeader(common.HexToHash("0x1234"), 99)
+	if got != nil {
+		t.Error("expected nil for unknown hash")
+	}
+}
+
+func TestSpeculativeChainReader_WalkThroughPending(t *testing.T) {
+	// Simulate the snapshot walk: start at pending block 9, walk to block 8 (in chain)
+	inner := newMockChainHeaderReader()
+
+	header7 := &types.Header{Number: big.NewInt(7), Extra: []byte("block7")}
+	header8 := &types.Header{Number: big.NewInt(8), ParentHash: header7.Hash(), Extra: []byte("block8")}
+	inner.addHeader(header7)
+	inner.addHeader(header8)
+
+	pendingHeader9 := &types.Header{
+		Number:     big.NewInt(9),
+		ParentHash: header8.Hash(),
+		Extra:      []byte("block9-pending"),
+	}
+
+	placeholder := common.HexToHash("0xdeadbeef00000000000000000000000000000000000000000000000000000000")
+	reader := newSpeculativeChainReader(inner, pendingHeader9, placeholder)
+
+	// Step 1: look up block 9 via placeholder → returns pending header
+	h9 := reader.GetHeader(placeholder, 9)
+	if h9 == nil {
+		t.Fatal("step 1: pending header not found")
+	}
+
+	// Step 2: walk to block 8 using h9.ParentHash (= header8.Hash(), a real hash)
+	h8 := reader.GetHeader(h9.ParentHash, 8)
+	if h8 == nil {
+		t.Fatal("step 2: block 8 not found via ParentHash walk")
+	}
+	if string(h8.Extra) != "block8" {
+		t.Errorf("step 2: expected block8, got %s", string(h8.Extra))
+	}
+
+	// Step 3: walk to block 7 using h8.ParentHash
+	h7 := reader.GetHeader(h8.ParentHash, 7)
+	if h7 == nil {
+		t.Fatal("step 3: block 7 not found via ParentHash walk")
+	}
+	if string(h7.Extra) != "block7" {
+		t.Errorf("step 3: expected block7, got %s", string(h7.Extra))
+	}
+}
+
+func TestSpeculativeChainReader_Config(t *testing.T) {
+	inner := newMockChainHeaderReader()
+	pendingHeader := &types.Header{Number: big.NewInt(5)}
+	reader := newSpeculativeChainReader(inner, pendingHeader, common.Hash{})
+
+	if reader.Config() != params.TestChainConfig {
+		t.Error("Config() should delegate to inner")
+	}
+}
+
+func TestSpeculativeChainContext_Engine(t *testing.T) {
+	inner := newMockChainHeaderReader()
+	pendingHeader := &types.Header{Number: big.NewInt(5)}
+	reader := newSpeculativeChainReader(inner, pendingHeader, common.Hash{})
+
+	var mockEngine consensus.Engine // nil for testing
+	ctx := newSpeculativeChainContext(reader, mockEngine)
+
+	// Both sides are nil interfaces here; this verifies pass-through only.
+	if ctx.Engine() != mockEngine {
+		t.Error("Engine() should return the provided engine")
+	}
+}
diff --git a/miner/worker.go b/miner/worker.go
index ece318d1d9..2ac33514a3 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -103,11 +103,24 @@ var (
// txApplyDurationTimer captures per-transaction apply latency during block building.
// Uses a larger reservoir to preserve tail visibility on high-throughput blocks.
txApplyDurationTimer = newRegisteredCustomTimer("worker/txApplyDuration", 8192)
- // finalizeAndAssembleTimer measures time taken to finalize and assemble the block (state root calculation)
+ // finalizeAndAssembleTimer measures time taken to finalize and assemble the block (state root calculation).
+ // NOT emitted when pipelined SRC is enabled: the pipelined path uses
+ // FinalizeForPipeline, which deliberately skips the inline IntermediateRoot
+ // (the root comes from the background SRC goroutine instead). Closest
+ // pipeline equivalents: worker/pipelineSRCTime (total SRC compute) and
+ // worker/pipelineSRCWait (portion of SRC that actually blocked the caller).
finalizeAndAssembleTimer = metrics.NewRegisteredTimer("worker/finalizeAndAssemble", nil)
- // intermediateRootTimer measures time taken to calculate intermediate root
+ // intermediateRootTimer measures time taken to calculate intermediate root.
+ // NOT emitted when pipelined SRC is enabled: there is no inline root calculation
+ // under pipelining — the SRC goroutine computes it in parallel with the next
+ // block's execution. Closest pipeline equivalent: worker/pipelineSRCTime (cost)
+ // or worker/pipelineSRCWait (how much of the cost was hidden by the overlap).
intermediateRootTimer = metrics.NewRegisteredTimer("worker/intermediateRoot", nil)
- // commitTimer measures total time for complete block building (tx execution + finalization + state root)
+ // commitTimer measures total time for complete block building (tx execution + finalization + state root).
+ // NOT emitted when pipelined SRC is enabled: the pipelined model has no
+ // single contiguous "build" interval — speculative fill of N+1 overlaps with
+ // SRC(N), so fabricating a total would be misleading. Closest pipeline signals:
+ // worker/pipelineSRCWait + worker/pipelineSealDuration + worker/pipelineAnnounceEarlinessMs.
commitTimer = metrics.NewRegisteredTimer("worker/commit", nil)
// writeBlockAndSetHeadTimer measures total time for WriteBlockAndSetHead in the seal result loop.
// This covers the entire gap between block sealing and event posting: witness encoding, batch write,
@@ -155,6 +168,15 @@ var (
workerBorConsensusTimer = metrics.NewRegisteredTimer("worker/chain/bor/consensus", nil)
workerBlockExecutionTimer = metrics.NewRegisteredTimer("worker/chain/execution", nil)
workerMgaspsTimer = metrics.NewRegisteredResettingTimer("worker/chain/mgasps", nil)
+ // Throughput histograms — mode-agnostic. For the pipelined path, "per-block build elapsed"
+ // isn't a single contiguous interval, so mgasps is only emitted by the normal path.
+ // gas_used_per_block and txs_per_block are emitted in both modes.
+ workerGasUsedPerBlockHistogram = metrics.NewRegisteredHistogram("worker/chain/gas_used_per_block", nil, metrics.NewExpDecaySample(1028, 0.015))
+ workerTxsPerBlockHistogram = metrics.NewRegisteredHistogram("worker/chain/txs_per_block", nil, metrics.NewExpDecaySample(1028, 0.015))
+ // End-to-end producer timer: wall clock from build begin to NewMinedBlockEvent broadcast.
+ // Fires in both normal (resultLoop → mux.Post) and pipelined (inlineSealAndBroadcast → mux.Post) modes,
+ // giving a directly comparable apples-to-apples A/B signal.
+ workerBuildToAnnounceTimer = metrics.NewRegisteredTimer("worker/build_to_announce", nil)
// Trie commit metrics for block production (populated after WriteBlockAndSetHead → CommitWithUpdate).
workerAccountCommitTimer = metrics.NewRegisteredResettingTimer("worker/chain/account/commits", nil)
@@ -202,6 +224,10 @@ type environment struct {
gasPool *core.GasPool // available gas used to pack transactions
coinbase common.Address
evm *vm.EVM
+ // buildInterrupt owns the timeout signal for this specific block-building
+ // attempt. It must not be shared across overlapping sequential/speculative
+ // builds, otherwise one timer can abort another build.
+ buildInterrupt *buildInterruptState
header *types.Header
txs []*types.Transaction
@@ -210,6 +236,8 @@ type environment struct {
blobs int
mvReadMapList []map[blockstm.Key]blockstm.ReadDescriptor
+ depsBuilder *blockstm.DepsBuilder
+ depsFailed bool
witness *stateless.Witness
// Readers with stats tracking for metrics reporting
@@ -217,6 +245,22 @@ type environment struct {
processReader state.ReaderWithStats
}
+type buildInterruptState struct {
+ timedOut atomic.Bool
+ flagSetAt atomic.Int64
+}
+
+func newBuildInterruptState() *buildInterruptState {
+ return &buildInterruptState{}
+}
+
+func (s *buildInterruptState) timeoutFlag() *atomic.Bool {
+ if s == nil {
+ return nil
+ }
+ return &s.timedOut
+}
+
// copy creates a deep copy of environment.
func (env *environment) copy() *environment {
cpy := &environment{
@@ -224,6 +268,7 @@ func (env *environment) copy() *environment {
state: env.state.Copy(),
tcount: env.tcount,
coinbase: env.coinbase,
+ buildInterrupt: newBuildInterruptState(),
header: types.CopyHeader(env.header),
receipts: copyReceipts(env.receipts),
mvReadMapList: env.mvReadMapList,
@@ -261,8 +306,11 @@ type task struct {
state *state.StateDB
block *types.Block
createdAt time.Time
+ productionStart time.Time // wall clock at build begin — used for worker/build_to_announce (fires from resultLoop at mux.Post)
productionElapsed time.Duration // elapsed from after prepareWork to task submission (excludes sealing wait); used for workerMgaspsTimer and workerBlockExecutionTimer
intermediateRootTime time.Duration // time spent in IntermediateRoot inside FinalizeAndAssemble; subtracted when computing workerBlockExecutionTimer
+ pipelined bool // If true, state was already committed by SRC goroutine — skip CommitWithUpdate in writeBlockWithState
+ witnessBytes []byte // RLP-encoded witness from SRC goroutine (for pipelined blocks)
}
// txFits reports whether the transaction fits into the block size limit.
@@ -364,6 +412,10 @@ type worker struct {
// Used to prevent duplicate work.
pendingWorkBlock atomic.Uint64
+ // When set, the next sequential build is recovering a discarded
+ // speculative block and should preserve its original target slot.
+ nextCommitAbortRecovery atomic.Bool
+
snapshotMu sync.RWMutex // The lock used to protect the snapshots below
snapshotBlock *types.Block
snapshotReceipts types.Receipts
@@ -394,10 +446,11 @@ type worker struct {
fullTaskHook func() // Method to call before pushing the full sealing task.
resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
- // Interrupt commit to stop block building on time
- interruptCommitFlag bool // Denotes whether interrupt commit is enabled or not
- interruptBlockBuilding atomic.Bool // A toggle to denote whether to stop block building or not
- interruptFlagSetAt atomic.Int64
+ // Interrupt commit to stop block building on time.
+ // interruptBlockBuilding is kept only as a manual/test override. Real timeout
+ // state now lives on each environment/build attempt.
+ interruptCommitFlag bool
+ interruptBlockBuilding atomic.Bool
mockTxDelay uint // A mock delay for transaction execution, only used in tests
blockTime time.Duration // The block time defined by the miner. Needs to be larger or equal to the consensus block time. If not set (default = 0), the miner will use the consensus block time.
@@ -411,6 +464,9 @@ type worker struct {
noempty atomic.Bool
makeWitness bool
+
+ // Pipelined SRC: speculative work channel for block N+1 execution
+ speculativeWorkCh chan *speculativeWorkReq
}
//nolint:staticcheck
@@ -441,8 +497,14 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
blockTime: config.BlockTime,
slowTxTracker: newSlowTxTopTracker(),
makeWitness: makeWitness,
+ speculativeWorkCh: make(chan *speculativeWorkReq, 1),
}
worker.noempty.Store(true)
+ if config.EnablePipelinedSRC {
+ pipelineBuildEnabledGauge.Update(1)
+ } else {
+ pipelineBuildEnabledGauge.Update(0)
+ }
// Subscribe for transaction insertion events (whether from network or resurrects)
worker.txsSub = eth.TxPool().SubscribeTransactions(worker.txsCh, true)
// Subscribe events for blockchain
@@ -806,6 +868,57 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
}
}
+// handleSpeculativeWork runs a pipelined speculative-work request and, when
+// shouldRetry=true, requeues a normal commitWork via the newWorkCh path.
+// Extracted from mainLoop so the main dispatch select stays compact.
+// Requeueing instead of recursing avoids building on a stale parent and is
+// deliberately skipped when commitSpeculativeWork fell back to sequential
+// (fallbackToSequential already sealed block N via taskCh, and retrying would
+// loop-restart Seal() with fresh timestamps).
+func (w *worker) handleSpeculativeWork(req *speculativeWorkReq) {
+	shouldRetry, abortRecovery := w.commitSpeculativeWork(req)
+	if !shouldRetry {
+		return
+	}
+	if abortRecovery {
+		w.nextCommitAbortRecovery.Store(true)
+	}
+	w.schedulePipelineRetry()
+}
+
+// schedulePipelineRetry re-enters block building through the normal newWorkCh
+// path after the pipeline exits or aborts. A short delay lets the latest head
+// become visible first, so the retry builds on the correct parent instead of
+// recursively calling commitWork from inside the speculative-work handler.
+func (w *worker) schedulePipelineRetry() {
+	go func() {
+		timer := time.NewTimer(25 * time.Millisecond)
+		defer timer.Stop()
+
+		select {
+		case <-timer.C:
+		case <-w.exitCh:
+			return
+		}
+
+		current := w.chain.CurrentBlock()
+		if current == nil {
+			return
+		}
+
+		target := current.Number.Uint64() + 1
+		// CAS: yield if another path already scheduled this (or a newer) target.
+		if old := w.pendingWorkBlock.Load(); old >= target || !w.pendingWorkBlock.CompareAndSwap(old, target) {
+			return
+		}
+
+		select {
+		case w.newWorkCh <- &newWorkReq{timestamp: time.Now().Unix()}:
+		case <-w.exitCh:
+		}
+	}()
+}
+
// mainLoop is responsible for generating and submitting sealing work based on
// the received event. It can support two modes: automatically generate task and
// submit it or return task according to given parameters for various proposes.
@@ -829,16 +942,20 @@ func (w *worker) mainLoop() {
for {
select {
case req := <-w.newWorkCh:
+ abortRecovery := w.nextCommitAbortRecovery.Swap(false)
if w.chainConfig.ChainID.Cmp(params.BorMainnetChainConfig.ChainID) == 0 || w.chainConfig.ChainID.Cmp(params.MumbaiChainConfig.ChainID) == 0 || w.chainConfig.ChainID.Cmp(params.AmoyChainConfig.ChainID) == 0 {
if w.eth.PeerCount() > 0 || devFakeAuthor {
//nolint:contextcheck
- w.commitWork(req.interrupt, req.noempty, req.timestamp)
+ w.commitWork(req.interrupt, req.noempty, req.timestamp, abortRecovery)
}
} else {
//nolint:contextcheck
- w.commitWork(req.interrupt, req.noempty, req.timestamp)
+ w.commitWork(req.interrupt, req.noempty, req.timestamp, abortRecovery)
}
+ case req := <-w.speculativeWorkCh:
+ w.handleSpeculativeWork(req)
+
case req := <-w.getWorkCh:
req.result <- w.generateWork(req.params, false)
@@ -879,13 +996,13 @@ func (w *worker) mainLoop() {
stopFn = createInterruptTimer(
w.current.header.Number.Uint64(),
w.current.header.GetActualTime(),
- &w.interruptBlockBuilding,
- &w.interruptFlagSetAt,
+ w.current.buildInterrupt,
+ w.config.EnablePipelinedSRC,
)
}
- plainTxs := newTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee, &w.interruptBlockBuilding) // Mixed bag of everrything, yolo
- blobTxs := newTransactionsByPriceAndNonce(w.current.signer, nil, w.current.header.BaseFee, &w.interruptBlockBuilding) // Empty bag, don't bother optimising
+ plainTxs := newTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee, w.current.buildInterrupt.timeoutFlag()) // Mixed bag of everrything, yolo
+ blobTxs := newTransactionsByPriceAndNonce(w.current.signer, nil, w.current.header.BaseFee, w.current.buildInterrupt.timeoutFlag()) // Empty bag, don't bother optimising
tcount := w.current.tcount
@@ -902,7 +1019,7 @@ func (w *worker) mainLoop() {
// submit sealing work here since all empty submission will be rejected
// by clique. Of course the advance sealing(empty submission) is disabled.
if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 {
- w.commitWork(nil, true, time.Now().Unix())
+ w.commitWork(nil, true, time.Now().Unix(), false)
}
}
@@ -1072,73 +1189,26 @@ func (w *worker) resultLoop() {
witness.SetHeader(block.Header())
}
- // Execution metrics: emitted before write because these values are final after
- // FinalizeAndAssemble and do not depend on write success — matching the import path
- // which emits read/update/hash/execution/bor metrics before writeBlockAndSetHead.
- // Emitting here avoids losing these observations on a rare write failure.
- if metrics.Enabled() {
- workerAccountReadTimer.Update(task.state.AccountReads)
- workerStorageReadTimer.Update(task.state.StorageReads)
- workerSnapshotAccountReadTimer.Update(task.state.SnapshotAccountReads)
- workerSnapshotStorageReadTimer.Update(task.state.SnapshotStorageReads)
- workerAccountUpdateTimer.Update(task.state.AccountUpdates)
- workerStorageUpdateTimer.Update(task.state.StorageUpdates)
- workerAccountHashTimer.Update(task.state.AccountHashes)
- workerStorageHashTimer.Update(task.state.StorageHashes)
- workerBorConsensusTimer.Update(task.state.BorConsensusTime)
- trieRead := task.state.SnapshotAccountReads + task.state.AccountReads +
- task.state.SnapshotStorageReads + task.state.StorageReads
- // productionElapsed covers fillTx + FinalizeAndAssemble; subtract trie reads,
- // Bor consensus time, and IntermediateRoot time to isolate pure EVM execution time.
- // Mirrors the import path formula in blockchain.go (writeBlockAndSetHead),
- // where ptime already excludes vtime (IntermediateRoot) via explicit subtraction.
- // Clamped to zero to avoid negative histogram samples from measurement jitter.
- execTime := task.productionElapsed - trieRead - task.state.BorConsensusTime - task.intermediateRootTime
- if execTime < 0 {
- execTime = 0
- }
- workerBlockExecutionTimer.Update(execTime)
- }
+ emitExecutionMetrics(task)
- // Commit block and state to database.
writeStart := time.Now()
- _, err = w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true)
+ _, err = w.writeTaskBlock(task, block, receipts, logs)
writeElapsed := time.Since(writeStart)
writeBlockAndSetHeadTimer.Update(writeElapsed)
-
if err != nil {
log.Error("Failed writing block to chain", "err", err)
- // Error writing block to chain, delete the pending task.
w.pendingMu.Lock()
delete(w.pendingTasks, sealhash)
w.pendingMu.Unlock()
continue
}
- // Commit metrics: emitted only after a successful write because these values are
- // populated by WriteBlockAndSetHead → CommitWithUpdate. Emitting on failure would
- // record zeroes or stale data — matching the import path which also gates commit
- // metrics after a successful writeBlockAndSetHead.
- if metrics.Enabled() {
- workerAccountCommitTimer.Update(task.state.AccountCommits)
- workerStorageCommitTimer.Update(task.state.StorageCommits)
- workerSnapshotCommitTimer.Update(task.state.SnapshotCommits)
- workerTriedbCommitTimer.Update(task.state.TrieDBCommits)
- workerWitnessCollectionTimer.Update(task.state.WitnessCollection)
-
- // MGas/s: denominator includes both production and write time, matching blockchain.go
- // which measures elapsed after writeBlockAndSetHead returns
- // (gas * 1000 / elapsed_nanoseconds stores milli-gas/ns = MGas/s as a Duration value).
- if total := task.productionElapsed + writeElapsed; total > 0 {
- workerMgaspsTimer.Update(time.Duration(float64(block.GasUsed()) * 1000 / float64(total)))
- }
- }
+ emitCommitMetrics(task, block, writeElapsed)
log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
"elapsed", common.PrettyDuration(time.Since(task.createdAt)))
- // Broadcast the block and announce chain insertion event
- w.mux.Post(core.NewMinedBlockEvent{Block: block, Witness: witness, SealedAt: time.Now()})
+ announceTaskBlock(w.mux, task, block, witness)
sealedBlocksCounter.Inc(1)
@@ -1156,43 +1226,106 @@ func (w *worker) resultLoop() {
}
}
+// emitExecutionMetrics reports the task's pre-write statedb timers + execution
+// time. Matches the import path which emits read/update/hash/execution/bor
+// metrics before writeBlockAndSetHead so observations aren't lost on write
+// failure. No-op when metrics are disabled.
+func emitExecutionMetrics(task *task) {
+ if !metrics.Enabled() {
+ return
+ }
+ workerAccountReadTimer.Update(task.state.AccountReads)
+ workerStorageReadTimer.Update(task.state.StorageReads)
+ workerSnapshotAccountReadTimer.Update(task.state.SnapshotAccountReads)
+ workerSnapshotStorageReadTimer.Update(task.state.SnapshotStorageReads)
+ workerAccountUpdateTimer.Update(task.state.AccountUpdates)
+ workerStorageUpdateTimer.Update(task.state.StorageUpdates)
+ workerAccountHashTimer.Update(task.state.AccountHashes)
+ workerStorageHashTimer.Update(task.state.StorageHashes)
+ workerBorConsensusTimer.Update(task.state.BorConsensusTime)
+ trieRead := task.state.SnapshotAccountReads + task.state.AccountReads +
+ task.state.SnapshotStorageReads + task.state.StorageReads
+ // productionElapsed covers fillTx + FinalizeAndAssemble; subtract trie reads,
+ // Bor consensus, and IntermediateRoot time to isolate pure EVM execution.
+ // Mirrors blockchain.go's ptime = productionElapsed - trieRead (clamped
+ // to zero for measurement jitter).
+ execTime := task.productionElapsed - trieRead - task.state.BorConsensusTime - task.intermediateRootTime
+ if execTime < 0 {
+ execTime = 0
+ }
+ workerBlockExecutionTimer.Update(execTime)
+}
+
+// writeTaskBlock commits the sealed block + state to disk. Pipelined tasks go
+// through WriteBlockAndSetHeadPipelined so the SRC goroutine's earlier
+// CommitWithUpdate isn't duplicated; normal tasks use the standard path. The
+// returned status is discarded by resultLoop, which checks only the error.
+func (w *worker) writeTaskBlock(task *task, block *types.Block, receipts []*types.Receipt, logs []*types.Log) (core.WriteStatus, error) {
+	if task.pipelined {
+		return w.chain.WriteBlockAndSetHeadPipelined(block, receipts, logs, task.state, true, task.witnessBytes)
+	}
+	return w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true)
+}
+
+// emitCommitMetrics reports the task's post-write statedb timers, mgas/s,
+// and per-block throughput histograms. Must run only after a successful
+// write — the commit fields are populated by CommitWithUpdate inside
+// WriteBlockAndSetHead. No-op when metrics are disabled.
+func emitCommitMetrics(task *task, block *types.Block, writeElapsed time.Duration) {
+ if !metrics.Enabled() {
+ return
+ }
+ workerAccountCommitTimer.Update(task.state.AccountCommits)
+ workerStorageCommitTimer.Update(task.state.StorageCommits)
+ workerSnapshotCommitTimer.Update(task.state.SnapshotCommits)
+ workerTriedbCommitTimer.Update(task.state.TrieDBCommits)
+ workerWitnessCollectionTimer.Update(task.state.WitnessCollection)
+ // MGas/s: denominator is production + write (matches blockchain.go's
+ // elapsed, measured after writeBlockAndSetHead returns). Duration stores
+ // milli-gas/ns = MGas/s.
+ if total := task.productionElapsed + writeElapsed; total > 0 {
+ workerMgaspsTimer.Update(time.Duration(float64(block.GasUsed()) * 1000 / float64(total)))
+ }
+ workerGasUsedPerBlockHistogram.Update(int64(block.GasUsed()))
+ workerTxsPerBlockHistogram.Update(int64(block.Transactions().Len()))
+}
+
+// announceTaskBlock broadcasts the sealed block to peers and updates the
+// build-to-announce + PIP-66 earliness + committed metrics for pipelined
+// tasks sealed via taskCh (last-of-pipeline, eligibility-fail, or fallback).
+// inlineSealAndBroadcast emits the same signals on the inline path.
+func announceTaskBlock(mux *event.TypeMux, task *task, block *types.Block, witness *stateless.Witness) {
+ announceAt := time.Now()
+ if !task.productionStart.IsZero() {
+ workerBuildToAnnounceTimer.UpdateSince(task.productionStart)
+ }
+ if task.pipelined {
+ earlyMs := block.Header().GetActualTime().Sub(announceAt).Milliseconds()
+ pipelineAnnounceEarlinessMs.Update(earlyMs)
+ pipelineSpeculativeCommittedCounter.Inc(1)
+ }
+ mux.Post(core.NewMinedBlockEvent{Block: block, Witness: witness, SealedAt: announceAt})
+}
+
+// resolveStateFor returns the caller-supplied statedb if any (from commitWork's
+// dual-reader path), otherwise opens one at the parent's root. NOTE(review):
+// the old delayed-SRC (StateAtWithFlatDiff) branch is gone here — confirm intended.
+func (w *worker) resolveStateFor(header *types.Header, genParams *generateParams) (*state.StateDB, error) {
+	if genParams.statedb != nil {
+		return genParams.statedb, nil
+	}
+	parent := w.chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
+	if parent == nil {
+		return nil, fmt.Errorf("parent block not found")
+	}
+	return w.chain.StateAt(parent.Root)
+}
+
// makeEnv creates a new environment for the sealing block.
func (w *worker) makeEnv(header *types.Header, coinbase common.Address, witness bool, genParams *generateParams) (*environment, error) {
- var state *state.StateDB
-
- // If statedb is not provided (e.g., from getSealingBlock path), create it
- if genParams.statedb == nil {
- parent := w.chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
- if parent == nil {
- return nil, fmt.Errorf("parent block not found")
- }
- var err error
- if w.chainConfig.Bor != nil && w.chainConfig.Bor.IsDelayedSRC(header.Number) {
- // Under delayed SRC, the actual pre-state for executing block N is
- // root_{N-1} = GetPostStateRoot(parent.ParentHash).
- // G_{N-1} has already finished (it was the sync point during parent's
- // validation), so this lookup is immediate — no blocking.
- // G_N (computing root_N from FlatDiff_N) is still running concurrently.
- // We open state at root_{N-1} + FlatDiff_N overlay, which gives a
- // complete view of block N's post-execution state without waiting for G_N.
- baseRoot := w.chain.GetPostStateRoot(parent.ParentHash)
- if baseRoot == (common.Hash{}) {
- return nil, fmt.Errorf("delayed state root unavailable for grandparent %s", parent.ParentHash)
- }
- flatDiff := w.chain.GetLastFlatDiff()
- if flatDiff == nil {
- return nil, fmt.Errorf("no flat diff available for delayed SRC block building")
- }
- state, err = w.chain.StateAtWithFlatDiff(baseRoot, flatDiff)
- } else {
- state, err = w.chain.StateAt(parent.Root)
- }
- if err != nil {
- return nil, err
- }
- } else {
- // Use the provided statedb (from commitWork with dual readers)
- state = genParams.statedb
+ state, err := w.resolveStateFor(header, genParams)
+ if err != nil {
+ return nil, err
}
if witness {
@@ -1212,13 +1345,14 @@ func (w *worker) makeEnv(header *types.Header, coinbase common.Address, witness
state: state,
size: uint64(header.Size()),
coinbase: coinbase,
+ buildInterrupt: newBuildInterruptState(),
header: header,
witness: state.Witness(),
evm: vm.NewEVM(core.NewEVMBlockContext(header, w.chain, &coinbase), state, w.chainConfig, w.vmConfig()),
prefetchReader: genParams.prefetchReader,
processReader: genParams.processReader,
}
- env.evm.SetInterrupt(&w.interruptBlockBuilding)
+ env.evm.SetInterrupt(env.buildInterrupt.timeoutFlag())
// Keep track of transactions which return errors so they can be removed
env.tcount = 0
@@ -1277,41 +1411,13 @@ func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transac
var coalescedLogs []*types.Log
- var deps map[int]map[int]bool
-
- var depsBuilder *blockstm.DepsBuilder
- var chDeps chan blockstm.TxReadWriteSet
-
- var depsWg sync.WaitGroup
- var once sync.Once
-
EnableMVHashMap := w.chainConfig.IsCancun(env.header.Number)
- // create and add empty mvHashMap in statedb
- if EnableMVHashMap && w.IsRunning() {
- depsBuilder = blockstm.NewDepsBuilder()
- chDeps = make(chan blockstm.TxReadWriteSet)
-
- // Make sure we safely close the channel in case of interrupt
- defer once.Do(func() {
- close(chDeps)
- })
-
- depsWg.Add(1)
-
- go func(chDeps chan blockstm.TxReadWriteSet) {
- for t := range chDeps {
- if err := depsBuilder.AddTransaction(t.Index, t.ReadList, t.WriteList); err != nil {
- // Non-sequential index indicates a systematic bug, not a transient error.
- // Drain the channel so the sender never blocks, then stop processing.
- log.Error("Failed to build tx dependency metadata, dropping DAG hint", "tx", t.Index, "err", err)
- for range chDeps {
- }
- break
- }
- }
- depsWg.Done()
- }(chDeps)
+ // Speculative blocks can be filled in more than one pass. Keep a single DAG
+ // builder on the environment so dependency indices continue from the first
+ // pass instead of restarting from zero on a refill.
+ if EnableMVHashMap && w.IsRunning() && env.depsBuilder == nil && !env.depsFailed {
+ env.depsBuilder = blockstm.NewDepsBuilder()
}
var lastTxHash common.Hash
@@ -1339,16 +1445,20 @@ mainloop:
}
// Check for the flag to interrupt block building on timeout.
- if w.interruptBlockBuilding.Load() {
+ // The worker-global interrupt is only a manual/test override; the real
+ // timeout state is owned by this build attempt.
+ if w.interruptBlockBuilding.Load() || (env.buildInterrupt != nil && env.buildInterrupt.timedOut.Load()) {
txCommitInterruptCounter.Inc(1)
logCtx := []interface{}{
"number", env.header.Number.Uint64(),
"headerTime", common.PrettyTime(time.Unix(int64(env.header.Time), 0)),
}
- if flagSetAt := w.interruptFlagSetAt.Load(); flagSetAt > 0 {
- flagSetTime := time.Unix(0, flagSetAt)
- logCtx = append(logCtx, "flagSetAt", common.PrettyTime(flagSetTime))
- logCtx = append(logCtx, "flagToAbortDelay", common.PrettyDuration(time.Since(flagSetTime)))
+ if env.buildInterrupt != nil {
+ if flagSetAt := env.buildInterrupt.flagSetAt.Load(); flagSetAt > 0 {
+ flagSetTime := time.Unix(0, flagSetAt)
+ logCtx = append(logCtx, "flagSetAt", common.PrettyTime(flagSetTime))
+ logCtx = append(logCtx, "flagToAbortDelay", common.PrettyDuration(time.Since(flagSetTime)))
+ }
}
if hasTxInterruptDelay {
logCtx = append(logCtx, "flagToTxInterruptDelay", common.PrettyDuration(flagToTxInterruptDelay))
@@ -1514,25 +1624,14 @@ mainloop:
return errors.New("transaction count exceeds dependency list length")
}
- temp := blockstm.TxReadWriteSet{
- Index: env.tcount - 1,
- ReadList: env.state.MVReadList(),
- WriteList: env.state.MVFullWriteList(),
- }
-
- // Send with timeout to prevent deadlock
- select {
- case chDeps <- temp:
- // Successfully sent
- case <-time.After(1 * time.Second):
- // Timeout after 1 second - channel is blocked
- log.Error("Transaction dependency channel blocked, aborting block building",
- "txIndex", env.tcount-1,
- "blockNumber", env.header.Number.Uint64())
- once.Do(func() {
- close(chDeps)
- })
- return errors.New("dependency channel timeout")
+ if !env.depsFailed {
+ if env.depsBuilder == nil {
+ env.depsBuilder = blockstm.NewDepsBuilder()
+ }
+ if err := env.depsBuilder.AddTransaction(env.tcount-1, env.state.MVReadList(), env.state.MVFullWriteList()); err != nil {
+ log.Error("Failed to build tx dependency metadata, dropping DAG hint", "tx", env.tcount-1, "err", err)
+ env.depsFailed = true
+ }
}
}
@@ -1541,9 +1640,11 @@ mainloop:
case errors.Is(err, vm.ErrInterrupt):
// Timeout interrupt surfaced from EVM execution for this tx.
if !hasTxInterruptDelay {
- if flagSetAt := w.interruptFlagSetAt.Load(); flagSetAt > 0 {
- flagToTxInterruptDelay = time.Since(time.Unix(0, flagSetAt))
- hasTxInterruptDelay = true
+ if env.buildInterrupt != nil {
+ if flagSetAt := env.buildInterrupt.flagSetAt.Load(); flagSetAt > 0 {
+ flagToTxInterruptDelay = time.Since(time.Unix(0, flagSetAt))
+ hasTxInterruptDelay = true
+ }
}
}
log.Debug("Transaction interrupted due to timeout", "hash", ltx.Hash, "err", err)
@@ -1562,73 +1663,10 @@ mainloop:
}
}
- // nolint:nestif
if EnableMVHashMap && w.IsRunning() {
- once.Do(func() {
- close(chDeps)
- })
- depsWg.Wait()
-
- deps = depsBuilder.GetDeps()
- if deps == nil {
- log.Warn("Failed to build tx dependency DAG, skipping metadata", "number", env.header.Number)
- }
-
- var blockExtraData types.BlockExtraData
-
- tempVanity := env.header.Extra[:types.ExtraVanityLength]
- tempSeal := env.header.Extra[len(env.header.Extra)-types.ExtraSealLength:]
-
- // Always decode header extra data before overwriting TxDependency.
- if err := rlp.DecodeBytes(env.header.Extra[types.ExtraVanityLength:len(env.header.Extra)-types.ExtraSealLength], &blockExtraData); err != nil {
- log.Error("error while decoding block extra data", "err", err)
- return err
- }
-
- // deps is nil when DepsBuilder errored, and non-nil empty when no transactions were added.
- if deps != nil && len(env.mvReadMapList) > 0 {
- tempDeps := make([][]uint64, len(env.mvReadMapList))
-
- for j := range deps[0] {
- tempDeps[0] = append(tempDeps[0], uint64(j))
- }
-
- delayFlag := true
-
- for i := 1; i <= len(env.mvReadMapList)-1; i++ {
- reads := env.mvReadMapList[i]
-
- // Coinbase and burn-contract balance reads create an implicit ordering not captured by the DAG.
- _, ok1 := reads[blockstm.NewSubpathKey(env.coinbase, state.BalancePath)]
- _, ok2 := reads[blockstm.NewSubpathKey(common.HexToAddress(w.chainConfig.Bor.CalculateBurntContract(env.header.Number.Uint64())), state.BalancePath)]
- if ok1 || ok2 {
- delayFlag = false
- break
- }
-
- for j := range deps[i] {
- tempDeps[i] = append(tempDeps[i], uint64(j))
- }
- }
-
- if delayFlag {
- blockExtraData.TxDependency = tempDeps
- } else {
- blockExtraData.TxDependency = nil
- }
- } else {
- blockExtraData.TxDependency = nil
- }
-
- blockExtraDataBytes, err := rlp.EncodeToBytes(blockExtraData)
- if err != nil {
- log.Error("error while encoding block extra data: %v", err)
+ if err := w.updateTxDependencyMetadata(env); err != nil {
return err
}
-
- env.header.Extra = []byte{}
- env.header.Extra = append(tempVanity, blockExtraDataBytes...)
- env.header.Extra = append(env.header.Extra, tempSeal...)
}
if !w.IsRunning() && len(coalescedLogs) > 0 {
@@ -1650,12 +1688,75 @@ mainloop:
return nil
}
+func (w *worker) updateTxDependencyMetadata(env *environment) error {
+ var deps map[int]map[int]bool
+ if env.depsBuilder != nil && !env.depsFailed {
+ deps = env.depsBuilder.GetDeps()
+ }
+ if deps == nil && len(env.mvReadMapList) > 0 {
+ log.Warn("Failed to build tx dependency DAG, skipping metadata", "number", env.header.Number)
+ }
+
+ var blockExtraData types.BlockExtraData
+ tempVanity := env.header.Extra[:types.ExtraVanityLength]
+ tempSeal := env.header.Extra[len(env.header.Extra)-types.ExtraSealLength:]
+
+ // Always decode header extra data before overwriting TxDependency. NOTE(review): tempVanity/tempSeal above alias header.Extra's backing array; append(tempVanity, ...) below writes into that same array and can clobber the seal bytes before they are re-appended — clone both slices to be safe.
+ if err := rlp.DecodeBytes(env.header.Extra[types.ExtraVanityLength:len(env.header.Extra)-types.ExtraSealLength], &blockExtraData); err != nil {
+ log.Error("error while decoding block extra data", "err", err)
+ return err
+ }
+
+ blockExtraData.TxDependency = w.buildTxDependencyArray(env, deps)
+
+ blockExtraDataBytes, err := rlp.EncodeToBytes(blockExtraData)
+ if err != nil {
+ log.Error("error while encoding block extra data: %v", err)
+ return err
+ }
+
+ env.header.Extra = []byte{}
+ env.header.Extra = append(tempVanity, blockExtraDataBytes...)
+ env.header.Extra = append(env.header.Extra, tempSeal...)
+ return nil
+}
+
+// buildTxDependencyArray projects the DepsBuilder output into the block's
+// TxDependency encoding. Returns nil when deps are unavailable or when any
+// transaction reads coinbase/burn-contract balance (implicit ordering the DAG
+// doesn't capture — delayFlag=false in the original code). NOTE(review): `for j := range deps[i]` walks map keys in random order, so the emitted dependency lists are nondeterministic across builds — sort the indices before embedding them in the header.
+func (w *worker) buildTxDependencyArray(env *environment, deps map[int]map[int]bool) [][]uint64 {
+ // deps is nil when DepsBuilder errored; non-nil empty when no txs were added.
+ if deps == nil || len(env.mvReadMapList) == 0 {
+ return nil
+ }
+ tempDeps := make([][]uint64, len(env.mvReadMapList))
+ for j := range deps[0] {
+ tempDeps[0] = append(tempDeps[0], uint64(j))
+ }
+ burntContract := common.HexToAddress(w.chainConfig.Bor.CalculateBurntContract(env.header.Number.Uint64()))
+ for i := 1; i <= len(env.mvReadMapList)-1; i++ {
+ reads := env.mvReadMapList[i]
+ // Coinbase and burn-contract balance reads create an implicit ordering not captured by the DAG.
+ _, ok1 := reads[blockstm.NewSubpathKey(env.coinbase, state.BalancePath)]
+ _, ok2 := reads[blockstm.NewSubpathKey(burntContract, state.BalancePath)]
+ if ok1 || ok2 {
+ return nil
+ }
+ for j := range deps[i] {
+ tempDeps[i] = append(tempDeps[i], uint64(j))
+ }
+ }
+ return tempDeps
+}
+
// generateParams wraps various of settings for generating sealing task.
type generateParams struct {
timestamp uint64 // The timestamp for sealing task
forceTime bool // Flag whether the given timestamp is immutable or not
parentHash common.Hash // Parent block hash, empty means the latest chain head
coinbase common.Address // The fee recipient address for including transaction
+ abortRecovery bool // Flag that this build is rebuilding a discarded speculative block
random common.Hash // The randomness generated by beacon chain, empty before the merge
withdrawals types.Withdrawals // List of withdrawals to include in block.
beaconRoot *common.Hash // The beacon root (cancun field).
@@ -1668,7 +1769,7 @@ type generateParams struct {
}
// makeHeader creates a new block header for sealing.
-func (w *worker) makeHeader(genParams *generateParams, waitOnPrepare bool) (*types.Header, common.Address, error) {
+func (w *worker) makeHeader(genParams *generateParams) (*types.Header, common.Address, error) {
// Find the parent block for sealing task
parent := w.chain.CurrentBlock()
@@ -1691,18 +1792,8 @@ func (w *worker) makeHeader(genParams *generateParams, waitOnPrepare bool) (*typ
timestamp = parent.Time + 1
}
- var coinbase common.Address
newBlockNumber := new(big.Int).Add(parent.Number, common.Big1)
- if w.chainConfig.Bor != nil && w.chainConfig.Bor.IsRio(newBlockNumber) {
- coinbase = common.HexToAddress(w.chainConfig.Bor.CalculateCoinbase(newBlockNumber.Uint64()))
-
- // In case of coinbase is not set post Rio, use the default coinbase
- if coinbase == (common.Address{}) {
- coinbase = genParams.coinbase
- }
- } else {
- coinbase = genParams.coinbase
- }
+ coinbase := w.resolveCoinbase(newBlockNumber.Uint64(), genParams.coinbase)
// Calculate desired gas limit (may be dynamically adjusted based on base fee)
desiredGasLimit := w.calculateDesiredGasLimit(parent)
@@ -1715,6 +1806,7 @@ func (w *worker) makeHeader(genParams *generateParams, waitOnPrepare bool) (*typ
Time: timestamp,
Coinbase: coinbase,
}
+ header.AbortRecovery = genParams.abortRecovery
// Set the extra field.
if len(w.extra) != 0 {
header.Extra = w.extra
@@ -1737,7 +1829,7 @@ func (w *worker) makeHeader(genParams *generateParams, waitOnPrepare bool) (*typ
header.ParentBeaconRoot = nil
// Run the consensus preparation with the default or customized consensus engine.
- if err := w.engine.Prepare(w.chain, header, waitOnPrepare); err != nil {
+ if err := w.engine.Prepare(w.chain, header); err != nil {
switch err.(type) {
case *bor.UnauthorizedSignerError:
log.Debug("Failed to prepare header for sealing", "err", err)
@@ -1758,7 +1850,9 @@ func (w *worker) prepareWork(genParams *generateParams, witness bool) (*environm
w.mu.RLock()
defer w.mu.RUnlock()
- header, coinbase, err := w.makeHeader(genParams, true)
+ // Build the header without sleeping so tx selection gets the full slot.
+ // Bor's Seal path (or the pipeline's explicit wait) handles the final delay.
+ header, coinbase, err := w.makeHeader(genParams)
if err != nil {
return nil, err
}
@@ -1818,14 +1912,19 @@ func (w *worker) buildDefaultFilter(BaseFee *big.Int, Number *big.Int) txpool.Pe
//
//nolint:gocognit
func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) error {
+ if w.interruptBlockBuilding.Load() {
+ return nil
+ }
+
w.mu.RLock()
prio := w.prio
w.mu.RUnlock()
filter := w.buildDefaultFilter(env.header.BaseFee, env.header.Number)
+ timeoutInterrupt := env.buildInterrupt.timeoutFlag()
filter.BlobTxs = false
- pendingPlainTxs := w.eth.TxPool().Pending(filter, &w.interruptBlockBuilding)
+ pendingPlainTxs := w.eth.TxPool().Pending(filter, timeoutInterrupt)
filter.BlobTxs = true
if w.chainConfig.IsOsaka(env.header.Number) {
@@ -1833,7 +1932,7 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err
} else {
filter.BlobVersion = types.BlobSidecarVersion0
}
- pendingBlobTxs := w.eth.TxPool().Pending(filter, &w.interruptBlockBuilding)
+ pendingBlobTxs := w.eth.TxPool().Pending(filter, timeoutInterrupt)
// Split the pending transactions into locals and remotes.
prioPlainTxs, normalPlainTxs := make(map[common.Address][]*txpool.LazyTransaction), pendingPlainTxs
@@ -1850,26 +1949,37 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err
}
}
- // Fill the block with all available pending transactions.
- if len(prioPlainTxs) > 0 || len(prioBlobTxs) > 0 {
- plainTxs := newTransactionsByPriceAndNonce(env.signer, prioPlainTxs, env.header.BaseFee, &w.interruptBlockBuilding)
- blobTxs := newTransactionsByPriceAndNonce(env.signer, prioBlobTxs, env.header.BaseFee, &w.interruptBlockBuilding)
- if err := w.commitTransactions(env, plainTxs, blobTxs, interrupt); err != nil {
- return err
- }
- }
- if len(normalPlainTxs) > 0 || len(normalBlobTxs) > 0 {
- heapInitTime := time.Now()
- plainTxs := newTransactionsByPriceAndNonce(env.signer, normalPlainTxs, env.header.BaseFee, &w.interruptBlockBuilding)
- blobTxs := newTransactionsByPriceAndNonce(env.signer, normalBlobTxs, env.header.BaseFee, &w.interruptBlockBuilding)
- txHeapInitTimer.Update(time.Since(heapInitTime))
-
- if err := w.commitTransactions(env, plainTxs, blobTxs, interrupt); err != nil {
- return err
- }
+ // Fill with priority (locals/first-class) txs first — no heap-init timing
+ // for the priority pool (matches prior behavior).
+ if err := w.commitTxMaps(env, prioPlainTxs, prioBlobTxs, timeoutInterrupt, interrupt, false); err != nil {
+ return err
}
+ // Then fill with normal pool txs; records txHeapInitTimer for the normal
+ // heap construction (the hot path for the producer under load).
+ return w.commitTxMaps(env, normalPlainTxs, normalBlobTxs, timeoutInterrupt, interrupt, true)
+}
- return nil
+// commitTxMaps runs commitTransactions on the given plain/blob maps when at
+// least one is non-empty. measureHeapInit gates the txHeapInitTimer update so
+// only the normal-pool path records it (priority-pool heap is typically
+// smaller and not the tail-latency hot spot).
+func (w *worker) commitTxMaps(
+ env *environment,
+ plainMap, blobMap map[common.Address][]*txpool.LazyTransaction,
+ timeoutInterrupt *atomic.Bool,
+ interrupt *atomic.Int32,
+ measureHeapInit bool,
+) error {
+ if len(plainMap) == 0 && len(blobMap) == 0 {
+ return nil
+ }
+ heapInitStart := time.Now()
+ plainTxs := newTransactionsByPriceAndNonce(env.signer, plainMap, env.header.BaseFee, timeoutInterrupt)
+ blobTxs := newTransactionsByPriceAndNonce(env.signer, blobMap, env.header.BaseFee, timeoutInterrupt)
+ if measureHeapInit {
+ txHeapInitTimer.Update(time.Since(heapInitStart))
+ }
+ return w.commitTransactions(env, plainTxs, blobTxs, interrupt)
}
// generateWork generates a sealing block based on the given parameters.
@@ -1940,18 +2050,12 @@ func (w *worker) generateWork(params *generateParams, witness bool) *newPayloadR
// commitWork generates several new sealing tasks based on the parent block
// and submit them to the sealer.
-func (w *worker) commitWork(interrupt *atomic.Int32, noempty bool, timestamp int64) {
- // Abort committing if node is still syncing
+func (w *worker) commitWork(interrupt *atomic.Int32, noempty bool, timestamp int64, abortRecovery bool) {
if w.syncing.Load() {
return
}
+ defer w.clearPendingWorkOnExit()()
- // Clear the pending work block number when commitWork completes (success or failure).
- defer func() {
- w.pendingWorkBlock.Store(0)
- }()
-
- // Set the coinbase if the worker is running or it's required
var coinbase common.Address
if w.IsRunning() {
coinbase = w.etherbase()
@@ -1961,10 +2065,7 @@ func (w *worker) commitWork(interrupt *atomic.Int32, noempty bool, timestamp int
}
}
- // Find the parent block for sealing task
parent := w.chain.CurrentBlock()
-
- // Retrieve the parent state to execute on top, with separate readers for stats tracking.
state, throwaway, prefetchReader, processReader, err := w.chain.StateAtWithReaders(parent.Root)
if err != nil {
return
@@ -1973,51 +2074,77 @@ func (w *worker) commitWork(interrupt *atomic.Int32, noempty bool, timestamp int
genParams := generateParams{
timestamp: uint64(timestamp),
coinbase: coinbase,
+ abortRecovery: abortRecovery,
parentHash: parent.Hash(),
+ statedb: state,
prefetchReader: prefetchReader,
processReader: processReader,
prefetchedTxHashes: &sync.Map{},
}
- // Default to state (correct for pre-fork and activation boundary).
- // Under delayed SRC, parent.Root = root_{N-1} and misses block N's mutations;
- // overlay flatDiff_N to get the correct pre-state when it is available.
- genParams.statedb = state
- if w.chainConfig.Bor != nil && w.chainConfig.Bor.IsDelayedSRC(new(big.Int).Add(parent.Number, big.NewInt(1))) {
- if flatDiff := w.chain.GetLastFlatDiff(); flatDiff != nil {
- if s, ferr := w.chain.StateAtWithFlatDiff(parent.Root, flatDiff); ferr == nil {
- genParams.statedb = s
- }
+
+ var interruptPrefetch atomic.Bool
+ w.maybeStartPrefetch(parent, throwaway, &genParams, &interruptPrefetch)
+ w.buildAndCommitBlock(interrupt, noempty, &genParams, &interruptPrefetch)
+}
+
+// clearPendingWorkOnExit returns a deferred closure that resets pendingWorkBlock
+// to 0 once commitWork returns — but only if the pipeline didn't advance it
+// beyond this invocation's starting head+1 (meaning buildAndCommitBlock took
+// the pipelined path and is now handling N+1). Captured head is sampled up
+// front so concurrent inserts don't confuse the "pipeline advanced" check.
+func (w *worker) clearPendingWorkOnExit() func() {
+ currentBlockNum := w.chain.CurrentBlock().Number.Uint64()
+ return func() {
+ if w.pendingWorkBlock.Load() <= currentBlockNum+1 {
+ w.pendingWorkBlock.Store(0)
}
}
+}
- var interruptPrefetch atomic.Bool
+// maybeStartPrefetch launches the tx-prefetch goroutine when enabled AND
+// the next block is Giugliano-activated. Giugliano gating prevents pre-fork
+// blocks from triggering speculative prefetch, which can read storage slots
+// the current block's EVM hasn't touched yet and cause cache thrash.
+func (w *worker) maybeStartPrefetch(parent *types.Header, throwaway *state.StateDB, genParams *generateParams, interruptPrefetch *atomic.Bool) {
newBlockNumber := new(big.Int).Add(parent.Number, common.Big1)
- if w.config.EnablePrefetch && w.chainConfig.Bor != nil && w.chainConfig.Bor.IsGiugliano(newBlockNumber) {
- go func() {
- defer func() {
- if r := recover(); r != nil {
- log.Error("Prefetch goroutine panicked", "err", r, "stack", string(debug.Stack()))
- prefetchPanicMeter.Mark(1)
- }
- }()
- w.prefetchFromPool(parent, throwaway, &genParams, &interruptPrefetch)
- // Goroutine exits naturally after prefetch completes.
- // Go's GC keeps throwaway StateDB alive while this goroutine references it.
- // When the goroutine exits, the reference is released and GC can collect it.
- }()
+ if !w.config.EnablePrefetch || w.chainConfig.Bor == nil || !w.chainConfig.Bor.IsGiugliano(newBlockNumber) {
+ return
}
-
- w.buildAndCommitBlock(interrupt, noempty, &genParams, &interruptPrefetch)
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Error("Prefetch goroutine panicked", "err", r, "stack", string(debug.Stack()))
+ prefetchPanicMeter.Mark(1)
+ }
+ }()
+ // Go's GC keeps throwaway alive while this goroutine references it;
+ // released when the goroutine exits after prefetch completes.
+ w.prefetchFromPool(parent, throwaway, genParams, interruptPrefetch)
+ }()
}
// buildAndCommitBlock prepares work, fills transactions, and commits the block for sealing.
+// submitForSealing dispatches the built block to either the pipelined path
+// (overlap SRC for N with N+1's execution) or the sequential path. The
+// pendingWorkBlock bump is what de-duplicates ChainHeadEvent-triggered
+// commitWork in newWorkLoop when the pipeline is already handling N+1.
+func (w *worker) submitForSealing(work *environment, start time.Time, genParams *generateParams) {
+ if w.isPipelineEligible(work.header.Number.Uint64()) {
+ w.pendingWorkBlock.Store(work.header.Number.Uint64() + 1)
+ _ = w.commitPipelined(work, start)
+ return
+ }
+ _ = w.commit(work.copy(), w.fullTaskHook, true, start, genParams)
+}
+
func (w *worker) buildAndCommitBlock(interrupt *atomic.Int32, noempty bool, genParams *generateParams, interruptPrefetch *atomic.Bool) {
work, err := w.prepareWork(genParams, w.makeWitness)
if err != nil {
return
}
- // Starts accounting time after prepareWork, since it includes the wait we have on Prepare phase of Bor
+ // Starts accounting time after prepareWork. Slot timing is handled in Seal
+ // for sequential paths and explicitly in the pipeline path.
start := time.Now()
interruptPrefetch.Store(true)
@@ -2031,8 +2158,8 @@ func (w *worker) buildAndCommitBlock(interrupt *atomic.Int32, noempty bool, genP
stopFn = createInterruptTimer(
work.header.Number.Uint64(),
work.header.GetActualTime(),
- &w.interruptBlockBuilding,
- &w.interruptFlagSetAt,
+ work.buildInterrupt,
+ w.config.EnablePipelinedSRC,
)
}
@@ -2082,8 +2209,7 @@ func (w *worker) buildAndCommitBlock(interrupt *atomic.Int32, noempty bool, genP
work.discard()
return
}
- // Submit the generated block for consensus sealing.
- _ = w.commit(work.copy(), w.fullTaskHook, true, start, genParams)
+ w.submitForSealing(work, start, genParams)
// Swap out the old work with the new one, terminating any leftover
// prefetcher processes in the mean time and starting a new one.
@@ -2105,7 +2231,7 @@ func (w *worker) prefetchFromPool(parent *types.Header, throwaway *state.StateDB
// Acquire read lock to safely access w.extra in makeHeader
w.mu.RLock()
- header, _, err := w.makeHeader(genParams, false)
+ header, _, err := w.makeHeader(genParams)
w.mu.RUnlock()
if err != nil {
@@ -2222,21 +2348,27 @@ func (w *worker) prefetchFromPool(parent *types.Header, throwaway *state.StateDB
}
}
-// createInterruptTimer creates and starts a timer based on the header's timestamp for block building
-// and toggles the flag when the timer expires.
-func createInterruptTimer(number uint64, actualTimestamp time.Time, interruptBlockBuilding *atomic.Bool, interruptFlagSetAt *atomic.Int64) func() {
+// createInterruptTimer creates and starts a timer based on the header's timestamp for
+// one specific block-building attempt. The timeout state must be build-local so
+// overlapping sequential/speculative work cannot interrupt each other.
+func createInterruptTimer(number uint64, actualTimestamp time.Time, buildInterrupt *buildInterruptState, pipelinedSRC bool) func() {
+ if buildInterrupt == nil {
+ return func() {}
+ }
+
delay := time.Until(actualTimestamp)
- // Reduce the timeout by 500ms to give some buffer for state root computation
- if delay > 1*time.Second {
+ // Reserve 500ms for state root computation — unless pipelined SRC is enabled,
+ // in which case SRC runs in the background and fillTransactions gets the full block time.
+ if !pipelinedSRC && delay > 1*time.Second {
delay -= 500 * time.Millisecond
}
interruptCtx, cancel := context.WithTimeout(context.Background(), delay)
// Reset the flag when timer starts for building a new block.
- interruptBlockBuilding.Store(false)
- interruptFlagSetAt.Store(0)
+ buildInterrupt.timedOut.Store(false)
+ buildInterrupt.flagSetAt.Store(0)
go func() {
// Wait for timeout
@@ -2245,11 +2377,8 @@ func createInterruptTimer(number uint64, actualTimestamp time.Time, interruptBlo
// Toggle the flag to indicate commit transactions loop and EVM interpreter loop
// to stop block building.
if interruptCtx.Err() != context.Canceled {
- interruptFlagSetAt.Store(time.Now().UnixNano())
- }
- interruptBlockBuilding.Store(true)
-
- if interruptCtx.Err() != context.Canceled {
+ buildInterrupt.flagSetAt.Store(time.Now().UnixNano())
+ buildInterrupt.timedOut.Store(true)
cancel()
}
}()
@@ -2337,7 +2466,7 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti
}
select {
- case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now(), productionElapsed: time.Since(firstNonZeroTime(productionStartFrom(genParams), start)), intermediateRootTime: commitTime}:
+ case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now(), productionStart: firstNonZeroTime(productionStartFrom(genParams), start), productionElapsed: time.Since(firstNonZeroTime(productionStartFrom(genParams), start)), intermediateRootTime: commitTime}:
fees := totalFees(block, env.receipts)
feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether))
log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 8da774b8e4..ffc1e92c9b 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -52,6 +52,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests/bor/mocks"
"github.com/ethereum/go-ethereum/triedb"
@@ -1073,6 +1074,37 @@ func TestCommitInterruptPending(t *testing.T) {
w.stop()
}
+func TestCreateInterruptTimer_IsolatedPerBuild(t *testing.T) {
+ t.Parallel()
+
+ first := newBuildInterruptState()
+ second := newBuildInterruptState()
+
+ stopFirst := createInterruptTimer(1, time.Now().Add(25*time.Millisecond), first, true)
+ defer stopFirst()
+ stopSecond := createInterruptTimer(2, time.Now().Add(500*time.Millisecond), second, true)
+ defer stopSecond()
+
+ require.Eventually(t, func() bool {
+ return first.timedOut.Load()
+ }, time.Second, 10*time.Millisecond)
+ require.NotZero(t, first.flagSetAt.Load())
+ require.False(t, second.timedOut.Load(), "one build's timeout must not trip another build")
+ require.Zero(t, second.flagSetAt.Load())
+}
+
+func TestCreateInterruptTimer_CancelDoesNotTripInterrupt(t *testing.T) {
+ t.Parallel()
+
+ state := newBuildInterruptState()
+ stop := createInterruptTimer(1, time.Now().Add(500*time.Millisecond), state, true)
+ stop()
+
+ time.Sleep(50 * time.Millisecond)
+ require.False(t, state.timedOut.Load(), "canceling a build timer must not look like a timeout")
+ require.Zero(t, state.flagSetAt.Load())
+}
+
// TestBenchmarkPending is a simple benchmark test to measure the performance of transaction pool. It inserts
// large number of transactions into the pool and captures the time taken for `pending` to return the list
// of pending transactions. The purpose is just to compare the performance on different branches.
@@ -2921,6 +2953,56 @@ func TestWriteBlockAndSetHeadTimer(t *testing.T) {
}
}
+// TestPipelineBuildEnabledGauge verifies that worker/pipeline/enabled reflects
+// the EnablePipelinedSRC flag on the miner config at worker-init time.
+func TestPipelineBuildEnabledGauge(t *testing.T) {
+ metrics.Enable()
+
+ var (
+ engine consensus.Engine
+ chainConfig = params.BorUnittestChainConfig
+ db = rawdb.NewMemoryDatabase()
+ ctrl *gomock.Controller
+ )
+ engine, ctrl = getFakeBorFromConfig(t, chainConfig)
+ defer engine.Close()
+ defer ctrl.Finish()
+
+ cfg := DefaultTestConfig()
+ cfg.EnablePipelinedSRC = true
+
+ w, _, _ := newTestWorker(t, cfg, chainConfig, engine, db, false, 0)
+ defer w.close()
+
+ if got := pipelineBuildEnabledGauge.Snapshot().Value(); got != 1 {
+ t.Errorf("pipelineBuildEnabledGauge = %d, want 1 when EnablePipelinedSRC=true", got)
+ }
+}
+
+// TestPipelineBuildDisabledGauge verifies that worker/pipeline/enabled reads
+// 0 when the miner config has EnablePipelinedSRC=false.
+func TestPipelineBuildDisabledGauge(t *testing.T) {
+ metrics.Enable()
+
+ var (
+ engine consensus.Engine
+ chainConfig = params.BorUnittestChainConfig
+ db = rawdb.NewMemoryDatabase()
+ ctrl *gomock.Controller
+ )
+ engine, ctrl = getFakeBorFromConfig(t, chainConfig)
+ defer engine.Close()
+ defer ctrl.Finish()
+
+ cfg := DefaultTestConfig() // EnablePipelinedSRC defaults to false
+ w, _, _ := newTestWorker(t, cfg, chainConfig, engine, db, false, 0)
+ defer w.close()
+
+ if got := pipelineBuildEnabledGauge.Snapshot().Value(); got != 0 {
+ t.Errorf("pipelineBuildEnabledGauge = %d, want 0 when EnablePipelinedSRC=false", got)
+ }
+}
+
// TestDelayFlagOffByOne verifies that the delayFlag check inspects each transaction's
// own read set rather than its predecessor's.
func TestDelayFlagOffByOne(t *testing.T) {
@@ -2967,3 +3049,51 @@ func TestDelayFlagOffByOne(t *testing.T) {
require.True(t, buggyDelayFlag(), "bug: last tx skipped, DAG hint incorrectly embedded")
require.False(t, fixedDelayFlag(), "fix: last tx detected, DAG hint suppressed")
}
+
+func TestTxDependencyMetadataPersistsAcrossSpeculativeRefillPasses(t *testing.T) {
+ t.Parallel()
+
+ chainConfig := params.BorUnittestChainConfig
+ engine, ctrl := getFakeBorFromConfig(t, chainConfig)
+ defer engine.Close()
+ defer ctrl.Finish()
+
+ w, _, _ := newTestWorker(t, DefaultTestConfig(), chainConfig, engine, rawdb.NewMemoryDatabase(), false, 0)
+ defer w.close()
+ w.running.Store(true)
+
+ extraDataBytes, err := rlp.EncodeToBytes(types.BlockExtraData{})
+ require.NoError(t, err)
+
+ headerExtra := append(make([]byte, types.ExtraVanityLength), extraDataBytes...)
+ headerExtra = append(headerExtra, make([]byte, types.ExtraSealLength)...)
+
+ env := &environment{
+ header: &types.Header{
+ Number: big.NewInt(1),
+ Extra: headerExtra,
+ },
+ coinbase: testBankAddress,
+ depsBuilder: blockstm.NewDepsBuilder(),
+ }
+
+ key := blockstm.NewSubpathKey(testUserAddress, state.BalancePath)
+
+ env.mvReadMapList = append(env.mvReadMapList, map[blockstm.Key]blockstm.ReadDescriptor{})
+ require.NoError(t, env.depsBuilder.AddTransaction(0, nil, []blockstm.WriteDescriptor{{Path: key}}))
+ require.NoError(t, w.updateTxDependencyMetadata(env))
+
+ var blockExtraData types.BlockExtraData
+ require.NoError(t, rlp.DecodeBytes(env.header.Extra[types.ExtraVanityLength:len(env.header.Extra)-types.ExtraSealLength], &blockExtraData))
+ require.Len(t, blockExtraData.TxDependency, 1)
+ require.Empty(t, blockExtraData.TxDependency[0])
+
+ env.mvReadMapList = append(env.mvReadMapList, map[blockstm.Key]blockstm.ReadDescriptor{
+ key: {Path: key},
+ })
+ require.NoError(t, env.depsBuilder.AddTransaction(1, []blockstm.ReadDescriptor{{Path: key}}, nil))
+ require.NoError(t, w.updateTxDependencyMetadata(env))
+
+ require.NoError(t, rlp.DecodeBytes(env.header.Extra[types.ExtraVanityLength:len(env.header.Extra)-types.ExtraSealLength], &blockExtraData))
+ require.Equal(t, [][]uint64{{}, {0}}, blockExtraData.TxDependency)
+}
diff --git a/params/config.go b/params/config.go
index 9397f08afd..b03fdbd325 100644
--- a/params/config.go
+++ b/params/config.go
@@ -954,7 +954,6 @@ type BorConfig struct {
LisovoBlock *big.Int `json:"lisovoBlock"` // Lisovo switch block (nil = no fork, 0 = already on lisovo)
LisovoProBlock *big.Int `json:"lisovoProBlock"` // LisovoPro switch block (nil = no fork, 0 = already on lisovoPro)
GiuglianoBlock *big.Int `json:"giuglianoBlock"` // Giugliano switch block (nil = no fork, 0 = already on giugliano)
- DelayedSRCBlock *big.Int `json:"delayedSRCBlock"` // DelayedSRC switch block (nil = no fork, 0 = already on delayedSRC)
}
// String implements the stringer interface, returning the consensus engine details.
@@ -1030,10 +1029,6 @@ func (c *BorConfig) IsGiugliano(number *big.Int) bool {
return isBlockForked(c.GiuglianoBlock, number)
}
-func (c *BorConfig) IsDelayedSRC(number *big.Int) bool {
- return isBlockForked(c.DelayedSRCBlock, number)
-}
-
// GetTargetGasPercentage returns the target gas percentage for gas limit calculation.
// After Lisovo hard fork, this value can be configured via CLI flags (stored in BorConfig at runtime).
// It validates the configured value and falls back to defaults if invalid or nil.
@@ -1880,7 +1875,6 @@ type Rules struct {
IsMadhugiriPro bool
IsLisovo bool
IsLisovoPro bool
- IsDelayedSRC bool
}
// Rules ensures c's ChainID is not nil.
@@ -1916,6 +1910,5 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, _ uint64) Rules {
IsMadhugiriPro: c.Bor != nil && c.Bor.IsMadhugiriPro(num),
IsLisovo: c.Bor != nil && c.Bor.IsLisovo(num),
IsLisovoPro: c.Bor != nil && c.Bor.IsLisovoPro(num),
- IsDelayedSRC: c.Bor != nil && c.Bor.IsDelayedSRC(num),
}
}
diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go
index d91e4f267a..e4b94297e7 100644
--- a/tests/bor/bor_test.go
+++ b/tests/bor/bor_test.go
@@ -36,7 +36,6 @@ import (
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
@@ -2934,375 +2933,674 @@ func getMockedSpannerWithSpanRotation(t *testing.T, validator1, validator2 commo
return spanner
}
-// TestDelayedStateRoot verifies the Delayed SRC protocol across the hard fork
-// boundary. Before the fork, block[N].Header.Root is the actual post-execution
-// state root of block N. After the fork, block[N].Header.Root stores the
-// post-execution state root of block N-1 (the parent), computed concurrently
-// by a background goroutine.
-func TestDelayedStateRoot(t *testing.T) {
+// TestPipelinedSRC_BasicBlockProduction verifies that a single miner with
+// pipelined SRC enabled can produce multiple consecutive blocks correctly.
+// This exercises the full pipeline: commitPipelined → FlatDiff extraction →
+// background SRC goroutine → speculative N+1 execution → block assembly → seal.
+func TestPipelinedSRC_BasicBlockProduction(t *testing.T) {
t.Parallel()
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
+ fdlimit.Raise(2048)
- const delayedSRCBlock = 5
-
- updateGenesis := func(gen *core.Genesis) {
- gen.Config.Bor.DelayedSRCBlock = big.NewInt(delayedSRCBlock)
- // Large sprint to avoid hitting sprint boundaries that invoke StateSyncEvents.
- gen.Config.Bor.Sprint = map[string]uint64{"0": 64}
+ faucets := make([]*ecdsa.PrivateKey, 128)
+ for i := 0; i < len(faucets); i++ {
+ faucets[i], _ = crypto.GenerateKey()
}
- init := buildEthereumInstance(t, rawdb.NewMemoryDatabase(), updateGenesis)
- chain := init.ethereum.BlockChain()
- engine := init.ethereum.Engine()
- _bor := engine.(*bor.Bor)
- defer _bor.Close()
+ genesis := InitGenesis(t, faucets, "./testdata/genesis_2val.json", 16)
+ genesis.Config.Bor.Period = map[string]uint64{"0": 2}
+ genesis.Config.Bor.Sprint = map[string]uint64{"0": 16}
+ genesis.Config.Bor.RioBlock = big.NewInt(0) // Enable Rio so snapshot uses spanByBlockNumber (no ecrecover needed)
- span0 := createMockSpan(addr, chain.Config().ChainID.String())
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
+ // Start a single miner with pipelined SRC enabled
+ stack, ethBackend, err := InitMinerWithPipelinedSRC(genesis, keys[0], true)
+ require.NoError(t, err)
+ defer stack.Close()
- h := createMockHeimdall(ctrl, &span0, &span0)
- _bor.SetHeimdallClient(h)
+ for stack.Server().NodeInfo().Ports.Listener == 0 {
+ time.Sleep(250 * time.Millisecond)
+ }
- validators := borSpan.ConvertHeimdallValSetToBorValSet(span0.ValidatorSet).Validators
- spanner := getMockedSpanner(t, validators)
- _bor.SetSpanner(spanner)
+ // Start mining
+ err = ethBackend.StartMining()
+ require.NoError(t, err)
- // Build and insert 7 blocks: blocks 1-4 are pre-fork, blocks 5-7 are post-fork.
- // insertNewBlock calls t.Fatalf on error, so a ValidateState failure here means
- // the goroutine computed the wrong root or the protocol invariant was violated.
- const numBlocks = 7
- blocks := make([]*types.Block, numBlocks+1)
- blocks[0] = init.genesis.ToBlock()
-
- for i := 1; i <= numBlocks; i++ {
- blocks[i] = buildNextBlock(t, _bor, chain, blocks[i-1], nil, init.genesis.Config.Bor, nil, validators, false, nil, nil)
- insertNewBlock(t, chain, blocks[i])
- }
-
- // Pre-fork invariant: GetPostStateRoot(block_N) == block_N.Header.Root,
- // because header.Root IS the block's own post-execution state root before the fork.
- for i := 1; i < delayedSRCBlock; i++ {
- got := chain.GetPostStateRoot(blocks[i].Hash())
- require.NotEqual(t, common.Hash{}, got, "pre-fork block %d: delayed root should not be zero", i)
- require.Equal(t, blocks[i].Header().Root, got,
- "pre-fork block %d: GetPostStateRoot should match header.Root", i)
- }
-
- // Post-fork invariant: block[N].Header.Root == GetPostStateRoot(block[N-1]).
- // For N == delayedSRCBlock this also covers the activation boundary where block[N-1]
- // is still pre-fork (its delayed root equals its own header.Root).
- for i := delayedSRCBlock; i <= numBlocks; i++ {
- parentDelayedRoot := chain.GetPostStateRoot(blocks[i-1].Hash())
- require.NotEqual(t, common.Hash{}, parentDelayedRoot,
- "block %d parent: delayed root should not be zero", i)
- require.Equal(t, parentDelayedRoot, blocks[i].Header().Root,
- "post-fork block %d: header.Root should equal GetPostStateRoot(parent)", i)
- }
-
- // The last inserted block's delayed state root is computed by a background goroutine
- // and stored in pendingSRC (no child block has been inserted to carry it in its
- // header.Root). GetPostStateRoot waits for that goroutine and returns
- // its result directly.
- lastRoot := chain.GetPostStateRoot(blocks[numBlocks].Hash())
- require.NotEqual(t, common.Hash{}, lastRoot,
- "last post-fork block: delayed root from in-flight goroutine should not be zero")
+ // Wait for the miner to produce at least 10 blocks
+ targetBlock := uint64(10)
+ deadline := time.After(60 * time.Second)
+ for {
+ select {
+ case <-deadline:
+ currentNum := ethBackend.BlockChain().CurrentBlock().Number.Uint64()
+ t.Fatalf("Timed out waiting for block %d, current block: %d", targetBlock, currentNum)
+ default:
+ time.Sleep(500 * time.Millisecond)
+ if ethBackend.BlockChain().CurrentBlock().Number.Uint64() >= targetBlock {
+ goto done
+ }
+ }
+ }
+done:
+
+ chain := ethBackend.BlockChain()
+ currentNum := chain.CurrentBlock().Number.Uint64()
+ t.Logf("Miner produced %d blocks with pipelined SRC", currentNum)
+
+ // Verify chain integrity: each block's parent hash matches the previous block's hash
+ for i := uint64(1); i <= currentNum; i++ {
+ block := chain.GetBlockByNumber(i)
+ require.NotNil(t, block, "block %d not found", i)
+
+ if i > 0 {
+ parent := chain.GetBlockByNumber(i - 1)
+ require.NotNil(t, parent, "parent block %d not found", i-1)
+ require.Equal(t, parent.Hash(), block.ParentHash(),
+ "block %d ParentHash mismatch: expected %x, got %x", i, parent.Hash(), block.ParentHash())
+ }
+
+ // Verify state root is valid (can open state at this root)
+ _, err := chain.StateAt(block.Root())
+ require.NoError(t, err, "cannot open state at block %d root %x", i, block.Root())
+ }
}
-// TestDelayedStateRootImport extends TestDelayedStateRoot to verify that the
-// stateless witness for each post-fork block is correctly built and persisted
-// by the background SRC goroutine. After block[N+1] is inserted, G_N has
-// finished (ValidateState(N+1) is the sync point inside processBlock), so the
-// witness for block N must already be in the database.
-func TestDelayedStateRootImport(t *testing.T) {
+// TestPipelinedSRC_WithTransactions verifies that the pipelined SRC miner
+// correctly includes transactions in blocks.
+func TestPipelinedSRC_WithTransactions(t *testing.T) {
t.Parallel()
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
+ fdlimit.Raise(2048)
- const delayedSRCBlock = 5
+ faucets := make([]*ecdsa.PrivateKey, 128)
+ for i := 0; i < len(faucets); i++ {
+ faucets[i], _ = crypto.GenerateKey()
+ }
- updateGenesis := func(gen *core.Genesis) {
- gen.Config.Bor.DelayedSRCBlock = big.NewInt(delayedSRCBlock)
- gen.Config.Bor.Sprint = map[string]uint64{"0": 64}
+ genesis := InitGenesis(t, faucets, "./testdata/genesis_2val.json", 16)
+ genesis.Config.Bor.Period = map[string]uint64{"0": 2}
+ genesis.Config.Bor.Sprint = map[string]uint64{"0": 16}
+ genesis.Config.Bor.RioBlock = big.NewInt(0) // Enable Rio for pipelined SRC
+
+ stack, ethBackend, err := InitMinerWithPipelinedSRC(genesis, keys[0], true)
+ require.NoError(t, err)
+ defer stack.Close()
+
+ for stack.Server().NodeInfo().Ports.Listener == 0 {
+ time.Sleep(250 * time.Millisecond)
}
- init := buildEthereumInstance(t, rawdb.NewMemoryDatabase(), updateGenesis)
- chain := init.ethereum.BlockChain()
- engine := init.ethereum.Engine()
- _bor := engine.(*bor.Bor)
- defer _bor.Close()
+ err = ethBackend.StartMining()
+ require.NoError(t, err)
- span0 := createMockSpan(addr, chain.Config().ChainID.String())
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
+ // Wait for a few blocks first
+ for ethBackend.BlockChain().CurrentBlock().Number.Uint64() < 2 {
+ time.Sleep(500 * time.Millisecond)
+ }
- h := createMockHeimdall(ctrl, &span0, &span0)
- _bor.SetHeimdallClient(h)
+ // Submit transactions
+ txpool := ethBackend.TxPool()
+ senderKey := pkey1
+ recipientAddr := crypto.PubkeyToAddress(pkey2.PublicKey)
+ signer := types.LatestSignerForChainID(genesis.Config.ChainID)
- validators := borSpan.ConvertHeimdallValSetToBorValSet(span0.ValidatorSet).Validators
- spanner := getMockedSpanner(t, validators)
- _bor.SetSpanner(spanner)
+ nonce := txpool.Nonce(crypto.PubkeyToAddress(senderKey.PublicKey))
+ txCount := 10
+
+ for i := 0; i < txCount; i++ {
+ tx := types.NewTransaction(
+ nonce+uint64(i),
+ recipientAddr,
+ big.NewInt(1000),
+ 21000,
+ big.NewInt(30000000000),
+ nil,
+ )
+ signedTx, err := types.SignTx(tx, signer, senderKey)
+ require.NoError(t, err)
+ errs := txpool.Add([]*types.Transaction{signedTx}, true)
+ require.Nil(t, errs[0], "failed to add tx %d", i)
+ }
+
+ // Wait until the pool's pending nonce shows all submitted txs consumed
+ deadline := time.After(60 * time.Second)
+ for {
+ select {
+ case <-deadline:
+ t.Fatal("Timed out waiting for transactions to be included")
+ default:
+ time.Sleep(500 * time.Millisecond)
+ // Pending nonce past the last submitted nonce => all txs picked up
+ currentNonce := txpool.Nonce(crypto.PubkeyToAddress(senderKey.PublicKey))
+ if currentNonce >= nonce+uint64(txCount) {
+ goto txsDone
+ }
+ }
+ }
+txsDone:
+
+ chain := ethBackend.BlockChain()
+ currentNum := chain.CurrentBlock().Number.Uint64()
+ t.Logf("All %d transactions included by block %d", txCount, currentNum)
- // Build and insert 9 blocks: blocks 1-4 are pre-fork, blocks 5-9 are post-fork.
- const numBlocks = 9
- blocks := make([]*types.Block, numBlocks+1)
- blocks[0] = init.genesis.ToBlock()
-
- for i := 1; i <= numBlocks; i++ {
- blocks[i] = buildNextBlock(t, _bor, chain, blocks[i-1], nil, init.genesis.Config.Bor, nil, validators, false, nil, nil)
- insertNewBlock(t, chain, blocks[i])
-
- // After inserting block[i], the sync point inside processBlock has already
- // waited for G_{i-1} to finish. Therefore the witness for block[i-1] must
- // be in the database — but only for post-fork blocks (i-1 >= delayedSRCBlock).
- if i > delayedSRCBlock {
- prevHash := blocks[i-1].Hash()
- witnessBytes := chain.GetWitness(prevHash)
- require.NotNil(t, witnessBytes,
- "witness for block %d should be in DB after inserting block %d", i-1, i)
-
- w, err := stateless.GetWitnessFromRlp(witnessBytes)
- require.NoError(t, err, "witness for block %d: RLP decode failed", i-1)
-
- // Under delayed SRC the goroutine embeds parentRoot (= root_{i-2}) as
- // w.Header().Root. block[i-1].Header().Root is also root_{i-2} by the
- // protocol invariant (post-fork header stores parent's actual state root).
- require.Equal(t, blocks[i-1].Header().Root, w.Header().Root,
- "block %d witness: Header.Root should equal block's header.Root (pre-state root)", i-1)
- }
- }
-
- // Wait for G_{numBlocks} (the last goroutine) to finish.
- lastRoot := chain.GetPostStateRoot(blocks[numBlocks].Hash())
- require.NotEqual(t, common.Hash{}, lastRoot,
- "last post-fork block: delayed root from in-flight goroutine should not be zero")
-
- // With G_{numBlocks} done, the witness for the last block is now in the database.
- lastWitnessBytes := chain.GetWitness(blocks[numBlocks].Hash())
- require.NotNil(t, lastWitnessBytes,
- "witness for last block should be in DB after goroutine completes")
-
- lastWitness, err := stateless.GetWitnessFromRlp(lastWitnessBytes)
- require.NoError(t, err, "witness for last block: RLP decode failed")
- require.Equal(t, blocks[numBlocks].Header().Root, lastWitness.Header().Root,
- "last block witness: Header.Root should equal block's header.Root")
+ // Wait for async DB writes to complete — pipelined SRC writes blocks
+ // asynchronously, so GetBlockByNumber may not find them immediately.
+ // Also, the speculative fill may have advanced the nonce before the block
+ // containing the txs is sealed, so re-read currentNum after waiting.
+ time.Sleep(2 * time.Second)
+ currentNum = chain.CurrentBlock().Number.Uint64()
+
+ // Verify we can find the transactions in the blocks
+ totalTxs := 0
+ for i := uint64(1); i <= currentNum; i++ {
+ block := chain.GetBlockByNumber(i)
+ if block != nil {
+ totalTxs += len(block.Transactions())
+ }
+ }
+ require.GreaterOrEqual(t, totalTxs, txCount,
+ "expected at least %d transactions across all blocks, got %d", txCount, totalTxs)
}
-// TestDelayedStateRootMiner verifies the Delayed SRC protocol on the block
-// production (miner) path. writeBlockAndSetHead defers CommitWithUpdate to a
-// background goroutine and stores the resulting FlatDiff so the miner can open
-// the next block's state immediately via NewWithFlatBase without waiting for
-// the goroutine to commit the trie.
-func TestDelayedStateRootMiner(t *testing.T) {
+// TestPipelinedImportSRC_BasicImport verifies that a non-mining node with
+// pipelined import SRC enabled correctly syncs blocks from a block-producing
+// peer. The importer computes state roots in the background (overlapping
+// SRC(N) with tx execution of N+1) and should arrive at the same chain state
+// as the BP.
+func TestPipelinedImportSRC_BasicImport(t *testing.T) {
t.Parallel()
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
+ fdlimit.Raise(2048)
- const delayedSRCBlock = 3
- const targetBlock = 7
+ faucets := make([]*ecdsa.PrivateKey, 128)
+ for i := 0; i < len(faucets); i++ {
+ faucets[i], _ = crypto.GenerateKey()
+ }
- // Build a genesis with DelayedSRCBlock=3 and a large sprint to avoid
- // hitting sprint boundaries that trigger Heimdall StateSyncEvents calls.
- genesis := InitGenesis(t, nil, "./testdata/genesis.json", 64)
- genesis.Config.Bor.DelayedSRCBlock = big.NewInt(delayedSRCBlock)
+ genesis := InitGenesis(t, faucets, "./testdata/genesis_2val.json", 16)
+ genesis.Config.Bor.Period = map[string]uint64{"0": 2}
+ genesis.Config.Bor.Sprint = map[string]uint64{"0": 16}
+ genesis.Config.Bor.RioBlock = big.NewInt(0)
- stack, ethBackend, err := InitMiner(genesis, key, true)
+ // Start a normal BP (no pipeline on mining side)
+ bpStack, bpBackend, err := InitMiner(genesis, keys[0], true)
require.NoError(t, err)
- defer stack.Close()
+ defer bpStack.Close()
- chain := ethBackend.BlockChain()
+ for bpStack.Server().NodeInfo().Ports.Listener == 0 {
+ time.Sleep(250 * time.Millisecond)
+ }
- // Subscribe to both feeds before mining starts so we don't miss any events.
- headCh := make(chan core.ChainHeadEvent, 20)
- headSub := chain.SubscribeChainHeadEvent(headCh)
- defer headSub.Unsubscribe()
+ // Start a non-mining importer with pipelined import SRC
+ importerStack, importerBackend, err := InitImporterWithPipelinedSRC(genesis, keys[1], true)
+ require.NoError(t, err)
+ defer importerStack.Close()
- witnessCh := make(chan core.WitnessReadyEvent, 20)
- witnessSub := chain.SubscribeWitnessReadyEvent(witnessCh)
- defer witnessSub.Unsubscribe()
+ for importerStack.Server().NodeInfo().Ports.Listener == 0 {
+ time.Sleep(250 * time.Millisecond)
+ }
- require.NoError(t, ethBackend.StartMining())
+ // Connect the two peers
+ importerStack.Server().AddPeer(bpStack.Server().Self())
+ bpStack.Server().AddPeer(importerStack.Server().Self())
- // Collect ChainHeadEvents until we reach targetBlock; also drain
- // WitnessReadyEvents that arrive concurrently.
- witnessByBlock := make(map[uint64]*stateless.Witness)
+ // Start mining on the BP
+ err = bpBackend.StartMining()
+ require.NoError(t, err)
- timeout := time.After(120 * time.Second)
-collectLoop:
+ // Wait for the BP to produce at least 20 blocks
+ targetBlock := uint64(20)
+ deadline := time.After(120 * time.Second)
for {
select {
- case ev := <-headCh:
- if ev.Header.Number.Uint64() >= targetBlock {
- break collectLoop
+ case <-deadline:
+ bpNum := bpBackend.BlockChain().CurrentBlock().Number.Uint64()
+ t.Fatalf("Timed out waiting for BP to reach block %d, current: %d", targetBlock, bpNum)
+ default:
+ time.Sleep(500 * time.Millisecond)
+ if bpBackend.BlockChain().CurrentBlock().Number.Uint64() >= targetBlock {
+ goto bpDone
}
- case ev := <-witnessCh:
- witnessByBlock[ev.Block.NumberU64()] = ev.Witness
- case <-timeout:
- t.Fatal("timeout waiting for miner to produce blocks")
}
}
+bpDone:
+
+ bpNum := bpBackend.BlockChain().CurrentBlock().Number.Uint64()
+ t.Logf("BP produced %d blocks, waiting for importer to sync", bpNum)
- // Drain any events already queued in the channels (non-blocking).
-drainLoop:
+ // Wait for the importer to sync up to the target
+ deadline = time.After(120 * time.Second)
for {
select {
- case <-headCh:
- case ev := <-witnessCh:
- witnessByBlock[ev.Block.NumberU64()] = ev.Witness
+ case <-deadline:
+ importerNum := importerBackend.BlockChain().CurrentBlock().Number.Uint64()
+ t.Fatalf("Timed out waiting for importer to reach block %d, current: %d", targetBlock, importerNum)
default:
- break drainLoop
+ time.Sleep(500 * time.Millisecond)
+ if importerBackend.BlockChain().CurrentBlock().Number.Uint64() >= targetBlock {
+ goto importerDone
+ }
}
}
+importerDone:
+
+ importerNum := importerBackend.BlockChain().CurrentBlock().Number.Uint64()
+ t.Logf("Importer synced to block %d", importerNum)
+
+ // Allow async DB writes to flush
+ time.Sleep(2 * time.Second)
+
+ // Use the minimum of both chains for comparison
+ bpNum = bpBackend.BlockChain().CurrentBlock().Number.Uint64()
+ importerNum = importerBackend.BlockChain().CurrentBlock().Number.Uint64()
+ compareUpTo := bpNum
+ if importerNum < compareUpTo {
+ compareUpTo = importerNum
+ }
+
+ bpChain := bpBackend.BlockChain()
+ importerChain := importerBackend.BlockChain()
+
+ for i := uint64(1); i <= compareUpTo; i++ {
+ bpBlock := bpChain.GetBlockByNumber(i)
+ require.NotNil(t, bpBlock, "BP missing block %d", i)
+
+ importerBlock := importerChain.GetBlockByNumber(i)
+ require.NotNil(t, importerBlock, "importer missing block %d", i)
- // Wait briefly for witnesses that G_N fires slightly after the corresponding
- // ChainHeadEvent (the goroutine for block N finishes before ChainHeadEvent for
- // block N+1, so witnesses for blocks < targetBlock should already be queued).
- witnessTimer := time.NewTimer(5 * time.Second)
- defer witnessTimer.Stop()
-waitWitness:
+ // Block hashes must match
+ require.Equal(t, bpBlock.Hash(), importerBlock.Hash(),
+ "block %d hash mismatch: BP=%x importer=%x", i, bpBlock.Hash(), importerBlock.Hash())
+
+ // State roots must match
+ require.Equal(t, bpBlock.Root(), importerBlock.Root(),
+ "block %d state root mismatch: BP=%x importer=%x", i, bpBlock.Root(), importerBlock.Root())
+
+ // Verify the importer can open state at each block's root
+ _, err := importerChain.StateAt(importerBlock.Root())
+ require.NoError(t, err, "importer cannot open state at block %d root %x", i, importerBlock.Root())
+ }
+
+ t.Logf("Verified %d blocks: hashes, state roots, and state accessibility all match", compareUpTo)
+}
+
+// TestPipelinedImportSRC_WithTransactions verifies that a non-mining node with
+// pipelined import SRC correctly imports blocks containing transactions. It
+// checks that transaction receipts exist and that account balances match
+// between the BP and the importer.
+func TestPipelinedImportSRC_WithTransactions(t *testing.T) {
+ t.Parallel()
+ log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
+ fdlimit.Raise(2048)
+
+ faucets := make([]*ecdsa.PrivateKey, 128)
+ for i := 0; i < len(faucets); i++ {
+ faucets[i], _ = crypto.GenerateKey()
+ }
+
+ genesis := InitGenesis(t, faucets, "./testdata/genesis_2val.json", 16)
+ genesis.Config.Bor.Period = map[string]uint64{"0": 2}
+ genesis.Config.Bor.Sprint = map[string]uint64{"0": 16}
+ genesis.Config.Bor.RioBlock = big.NewInt(0)
+
+ // Start BP without pipeline
+ bpStack, bpBackend, err := InitMiner(genesis, keys[0], true)
+ require.NoError(t, err)
+ defer bpStack.Close()
+
+ for bpStack.Server().NodeInfo().Ports.Listener == 0 {
+ time.Sleep(250 * time.Millisecond)
+ }
+
+ // Start importer with pipelined import SRC
+ importerStack, importerBackend, err := InitImporterWithPipelinedSRC(genesis, keys[1], true)
+ require.NoError(t, err)
+ defer importerStack.Close()
+
+ for importerStack.Server().NodeInfo().Ports.Listener == 0 {
+ time.Sleep(250 * time.Millisecond)
+ }
+
+ // Connect peers
+ importerStack.Server().AddPeer(bpStack.Server().Self())
+ bpStack.Server().AddPeer(importerStack.Server().Self())
+
+ // Start mining
+ err = bpBackend.StartMining()
+ require.NoError(t, err)
+
+ // Wait for a few blocks before submitting transactions
+ for bpBackend.BlockChain().CurrentBlock().Number.Uint64() < 2 {
+ time.Sleep(500 * time.Millisecond)
+ }
+
+ // Submit ETH transfer transactions to the BP
+ txpool := bpBackend.TxPool()
+ senderKey := pkey1
+ senderAddr := crypto.PubkeyToAddress(senderKey.PublicKey)
+ recipientAddr := crypto.PubkeyToAddress(pkey2.PublicKey)
+ signer := types.LatestSignerForChainID(genesis.Config.ChainID)
+
+ nonce := txpool.Nonce(senderAddr)
+ txCount := 10
+ transferAmount := big.NewInt(1000)
+
+ for i := 0; i < txCount; i++ {
+ tx := types.NewTransaction(
+ nonce+uint64(i),
+ recipientAddr,
+ transferAmount,
+ 21000,
+ big.NewInt(30000000000),
+ nil,
+ )
+ signedTx, err := types.SignTx(tx, signer, senderKey)
+ require.NoError(t, err)
+ errs := txpool.Add([]*types.Transaction{signedTx}, true)
+ require.Nil(t, errs[0], "failed to add tx %d", i)
+ }
+
+ // Wait until the BP pool's pending nonce shows all txs consumed
+ deadline := time.After(120 * time.Second)
for {
select {
- case ev := <-witnessCh:
- witnessByBlock[ev.Block.NumberU64()] = ev.Witness
- case <-witnessTimer.C:
- break waitWitness
- }
- }
-
- // Pre-fork invariant: GetPostStateRoot(block_N) == block_N.Header.Root.
- for i := uint64(1); i < delayedSRCBlock; i++ {
- h := chain.GetHeaderByNumber(i)
- require.NotNil(t, h, "pre-fork block %d not found", i)
- got := chain.GetPostStateRoot(h.Hash())
- require.NotEqual(t, common.Hash{}, got, "pre-fork block %d: delayed root should not be zero", i)
- require.Equal(t, h.Root, got, "pre-fork block %d: delayed root should equal header.Root", i)
- }
-
- // Post-fork header root invariant: block[N].Root == GetPostStateRoot(block[N-1]).
- for i := uint64(delayedSRCBlock); i <= targetBlock; i++ {
- h := chain.GetHeaderByNumber(i)
- require.NotNil(t, h, "post-fork block %d not found", i)
- ph := chain.GetHeaderByNumber(i - 1)
- require.NotNil(t, ph, "parent block %d not found", i-1)
-
- parentDelayedRoot := chain.GetPostStateRoot(ph.Hash())
- require.NotEqual(t, common.Hash{}, parentDelayedRoot,
- "block %d parent: delayed root should not be zero", i)
- require.Equal(t, parentDelayedRoot, h.Root,
- "post-fork block %d: header.Root should equal GetPostStateRoot(parent)", i)
- }
-
- // GetLastFlatDiff must be non-nil: writeBlockAndSetHead stores the FlatDiff
- // from each post-fork sealed block so the miner can build the next block
- // immediately without waiting for the SRC goroutine.
- flatDiff := chain.GetLastFlatDiff()
- require.NotNil(t, flatDiff, "GetLastFlatDiff() should be non-nil after post-fork mining")
-
- // WitnessReadyEvent must have been received for each post-fork block up to
- // targetBlock-1. For block targetBlock the goroutine may still be running
- // (it finishes before the next ChainHeadEvent, which we did not wait for).
- for i := uint64(delayedSRCBlock); i < targetBlock; i++ {
- w, ok := witnessByBlock[i]
- require.True(t, ok, "WitnessReadyEvent not received for post-fork block %d", i)
- require.NotNil(t, w, "witness for block %d should not be nil", i)
-
- h := chain.GetHeaderByNumber(i)
- require.NotNil(t, h, "block %d header not found", i)
- // The goroutine embeds parentRoot as w.Header().Root, and
- // block[N].Header().Root is also parentRoot under delayed SRC.
- require.Equal(t, h.Root, w.Header().Root,
- "block %d witness: Header.Root should equal block's header.Root", i)
+ case <-deadline:
+ t.Fatal("Timed out waiting for transactions to be mined on BP")
+ default:
+ time.Sleep(500 * time.Millisecond)
+ if txpool.Nonce(senderAddr) >= nonce+uint64(txCount) {
+ goto txsMined
+ }
+ }
}
+txsMined:
+
+ bpNum := bpBackend.BlockChain().CurrentBlock().Number.Uint64()
+ t.Logf("All %d transactions mined on BP by block %d", txCount, bpNum)
+
+ // Wait for the importer to sync past the block containing the last tx
+ targetBlock := bpNum
+ deadline = time.After(120 * time.Second)
+ for {
+ select {
+ case <-deadline:
+ importerNum := importerBackend.BlockChain().CurrentBlock().Number.Uint64()
+ t.Fatalf("Timed out waiting for importer to reach block %d, current: %d", targetBlock, importerNum)
+ default:
+ time.Sleep(500 * time.Millisecond)
+ if importerBackend.BlockChain().CurrentBlock().Number.Uint64() >= targetBlock {
+ goto importerSynced
+ }
+ }
+ }
+importerSynced:
+
+ // Allow async DB writes to flush
+ time.Sleep(2 * time.Second)
+
+ importerNum := importerBackend.BlockChain().CurrentBlock().Number.Uint64()
+ t.Logf("Importer synced to block %d", importerNum)
+
+ bpChain := bpBackend.BlockChain()
+ importerChain := importerBackend.BlockChain()
+
+ // Re-read current block numbers after the flush delay
+ bpNum = bpChain.CurrentBlock().Number.Uint64()
+ importerNum = importerChain.CurrentBlock().Number.Uint64()
+ compareUpTo := bpNum
+ if importerNum < compareUpTo {
+ compareUpTo = importerNum
+ }
+
+ // Verify blocks, state roots, and transaction counts match
+ totalBpTxs := 0
+ totalImporterTxs := 0
+ for i := uint64(1); i <= compareUpTo; i++ {
+ bpBlock := bpChain.GetBlockByNumber(i)
+ require.NotNil(t, bpBlock, "BP missing block %d", i)
+
+ importerBlock := importerChain.GetBlockByNumber(i)
+ require.NotNil(t, importerBlock, "importer missing block %d", i)
+
+ require.Equal(t, bpBlock.Hash(), importerBlock.Hash(),
+ "block %d hash mismatch", i)
+ require.Equal(t, bpBlock.Root(), importerBlock.Root(),
+ "block %d state root mismatch", i)
+
+ // Transaction counts must match per block
+ require.Equal(t, len(bpBlock.Transactions()), len(importerBlock.Transactions()),
+ "block %d tx count mismatch: BP=%d importer=%d", i,
+ len(bpBlock.Transactions()), len(importerBlock.Transactions()))
+
+ totalBpTxs += len(bpBlock.Transactions())
+ totalImporterTxs += len(importerBlock.Transactions())
+
+ // Verify receipts exist on the importer for each transaction
+ for j, tx := range importerBlock.Transactions() {
+ receipt, _, _, _ := rawdb.ReadReceipt(importerBackend.ChainDb(), tx.Hash(), importerChain.Config())
+ require.NotNil(t, receipt, "importer missing receipt for tx %d in block %d (hash=%x)", j, i, tx.Hash())
+ }
+ }
+
+ require.GreaterOrEqual(t, totalBpTxs, txCount,
+ "expected at least %d transactions across BP blocks, got %d", txCount, totalBpTxs)
+ require.Equal(t, totalBpTxs, totalImporterTxs,
+ "total tx count mismatch: BP=%d importer=%d", totalBpTxs, totalImporterTxs)
+
+ // Verify account balances match between BP and importer at the latest
+ // common block — this confirms the pipelined SRC produced correct state.
+ bpState, err := bpChain.StateAt(bpChain.GetBlockByNumber(compareUpTo).Root())
+ require.NoError(t, err, "cannot open BP state at block %d", compareUpTo)
+
+ importerState, err := importerChain.StateAt(importerChain.GetBlockByNumber(compareUpTo).Root())
+ require.NoError(t, err, "cannot open importer state at block %d", compareUpTo)
+
+ bpRecipientBal := bpState.GetBalance(recipientAddr)
+ importerRecipientBal := importerState.GetBalance(recipientAddr)
+ require.Equal(t, bpRecipientBal.String(), importerRecipientBal.String(),
+ "recipient balance mismatch at block %d: BP=%s importer=%s",
+ compareUpTo, bpRecipientBal, importerRecipientBal)
+
+ bpSenderBal := bpState.GetBalance(senderAddr)
+ importerSenderBal := importerState.GetBalance(senderAddr)
+ require.Equal(t, bpSenderBal.String(), importerSenderBal.String(),
+ "sender balance mismatch at block %d: BP=%s importer=%s",
+ compareUpTo, bpSenderBal, importerSenderBal)
+
+ t.Logf("Verified %d blocks with %d total transactions, balances match", compareUpTo, totalImporterTxs)
}
-// TestDelayedStateRootCrashRecovery simulates a crash where the SRC goroutine's
-// persisted post-state root is lost. On reopening the blockchain, the startup
-// recovery re-executes the head block, restoring the FlatDiff and spawning the
-// SRC goroutine so PostExecutionStateAt returns correct state.
-func TestDelayedStateRootCrashRecovery(t *testing.T) {
+// TestPipelinedImportSRC_SelfDestruct verifies that a contract which
+// self-destructs in its constructor is correctly handled by the FlatDiff
+// overlay during pipelined import. Without the Destructs check in
+// getStateObject, the importer would fall through to the trie reader and
+// see stale pre-destruct state from the committed parent root.
+//
+// Post-Cancun (EIP-6780), SELFDESTRUCT only fully destroys an account when
+// called in the same transaction that created the contract, so the test uses
+// a constructor that immediately self-destructs.
+func TestPipelinedImportSRC_SelfDestruct(t *testing.T) {
t.Parallel()
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
+ fdlimit.Raise(2048)
- const delayedSRCBlock = 3
+ faucets := make([]*ecdsa.PrivateKey, 128)
+ for i := 0; i < len(faucets); i++ {
+ faucets[i], _ = crypto.GenerateKey()
+ }
- init := buildEthereumInstance(t, rawdb.NewMemoryDatabase(), func(gen *core.Genesis) {
- gen.Config.Bor.DelayedSRCBlock = big.NewInt(delayedSRCBlock)
- gen.Config.Bor.Sprint = map[string]uint64{"0": 64}
- })
+ genesis := InitGenesis(t, faucets, "./testdata/genesis_2val.json", 16)
+ genesis.Config.Bor.Period = map[string]uint64{"0": 2}
+ genesis.Config.Bor.Sprint = map[string]uint64{"0": 16}
+ genesis.Config.Bor.RioBlock = big.NewInt(0)
- chain := init.ethereum.BlockChain()
- engine := init.ethereum.Engine()
- _bor := engine.(*bor.Bor)
- defer _bor.Close()
+ // Start a normal BP (no pipeline on mining side)
+ bpStack, bpBackend, err := InitMiner(genesis, keys[0], true)
+ require.NoError(t, err)
+ defer bpStack.Close()
- span0 := createMockSpan(addr, chain.Config().ChainID.String())
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
+ for bpStack.Server().NodeInfo().Ports.Listener == 0 {
+ time.Sleep(250 * time.Millisecond)
+ }
- h := createMockHeimdall(ctrl, &span0, &span0)
- _bor.SetHeimdallClient(h)
+ // Start importer with pipelined import SRC
+ importerStack, importerBackend, err := InitImporterWithPipelinedSRC(genesis, keys[1], true)
+ require.NoError(t, err)
+ defer importerStack.Close()
- validators := borSpan.ConvertHeimdallValSetToBorValSet(span0.ValidatorSet).Validators
- spanner := getMockedSpanner(t, validators)
- _bor.SetSpanner(spanner)
+ for importerStack.Server().NodeInfo().Ports.Listener == 0 {
+ time.Sleep(250 * time.Millisecond)
+ }
+
+ // Connect peers
+ importerStack.Server().AddPeer(bpStack.Server().Self())
+ bpStack.Server().AddPeer(importerStack.Server().Self())
+
+ // Start mining
+ err = bpBackend.StartMining()
+ require.NoError(t, err)
- // Build and insert blocks past the fork boundary.
- const numBlocks = 7
- blocks := make([]*types.Block, numBlocks+1)
- blocks[0] = init.genesis.ToBlock()
- for i := 1; i <= numBlocks; i++ {
- blocks[i] = buildNextBlock(t, _bor, chain, blocks[i-1], nil, init.genesis.Config.Bor, nil, validators, false, nil, nil)
- insertNewBlock(t, chain, blocks[i])
+ // Wait for a few blocks so we're past cancunBlock=3
+ for bpBackend.BlockChain().CurrentBlock().Number.Uint64() < 5 {
+ time.Sleep(500 * time.Millisecond)
}
- // Wait for the last SRC goroutine to finish and record its root.
- headHash := chain.CurrentBlock().Hash()
- expectedRoot := chain.GetPostStateRoot(headHash)
- require.NotEqual(t, common.Hash{}, expectedRoot, "post-state root should be computed")
+ // Deploy a contract whose constructor immediately self-destructs,
+ // sending its value back to CALLER.
+ // Init code: CALLER (0x33) SELFDESTRUCT (0xFF) = 0x33FF
+ selfDestructInitCode := []byte{byte(vm.CALLER), byte(vm.SELFDESTRUCT)}
+ deployValue := big.NewInt(1_000_000_000_000_000_000) // 1 ETH
+
+ txpool := bpBackend.TxPool()
+ senderKey := pkey1
+ senderAddr := crypto.PubkeyToAddress(senderKey.PublicKey)
+ signer := types.LatestSignerForChainID(genesis.Config.ChainID)
+
+ nonce := txpool.Nonce(senderAddr)
+
+ // Predict the contract address
+ contractAddr := crypto.CreateAddress(senderAddr, nonce)
+ t.Logf("Deploying self-destruct contract at predicted address %s with nonce %d", contractAddr.Hex(), nonce)
- // Record the post-execution state for comparison after recovery.
- preState, err := chain.PostExecutionStateAt(chain.CurrentBlock())
+ // Record sender balance before deployment
+ bpChain := bpBackend.BlockChain()
+ preState, err := bpChain.StateAt(bpChain.CurrentBlock().Root)
require.NoError(t, err)
- checkAddr := common.HexToAddress("0x0000000000000000000000000000000000001000")
- expectedBalance := preState.GetBalance(checkAddr)
+ senderBalBefore := preState.GetBalance(senderAddr)
+ t.Logf("Sender balance before deploy: %s", senderBalBefore.String())
- // Grab a reference to the underlying DB before stopping.
- db := init.ethereum.ChainDb()
+ // Create the deployment tx with value
+ deployTx, err := types.SignTx(
+ types.NewContractCreation(nonce, deployValue, 100_000, big.NewInt(30_000_000_000), selfDestructInitCode),
+ signer, senderKey,
+ )
+ require.NoError(t, err)
- // Stop the chain cleanly (journals trie state).
- chain.Stop()
+ errs := txpool.Add([]*types.Transaction{deployTx}, true)
+ require.Nil(t, errs[0], "failed to add deploy tx")
- // Simulate crash: delete the persisted post-state root for the head block
- // so that GetPostStateRoot returns empty on the next startup.
- key := append(rawdb.PostStateRootPrefix, headHash.Bytes()...)
- require.NoError(t, db.Delete(key))
+ // Also send a normal transfer in the NEXT block to force pipeline overlap.
+ // This ensures block N+1 uses the FlatDiff from block N (which has the destruct).
+ nonce++
+ recipientAddr := crypto.PubkeyToAddress(pkey2.PublicKey)
+ transferTx, err := types.SignTx(
+ types.NewTransaction(nonce, recipientAddr, big.NewInt(1000), 21000, big.NewInt(30_000_000_000), nil),
+ signer, senderKey,
+ )
+ require.NoError(t, err)
+ errs = txpool.Add([]*types.Transaction{transferTx}, true)
+ require.Nil(t, errs[0], "failed to add transfer tx")
- // Also delete the child block's reference (there is no child block for the
- // head, but verify ReadPostStateRoot returns empty now).
- got := rawdb.ReadPostStateRoot(db, headHash)
- require.Equal(t, common.Hash{}, got, "post-state root should be deleted from DB")
+ // Wait for both txs to be mined
+ deadline := time.After(120 * time.Second)
+ for {
+ select {
+ case <-deadline:
+ t.Fatal("Timed out waiting for transactions to be mined on BP")
+ default:
+ time.Sleep(500 * time.Millisecond)
+ if txpool.Nonce(senderAddr) >= nonce+1 {
+ goto txsMined
+ }
+ }
+ }
+txsMined:
- // Reopen the blockchain on the same DB. The startup recovery should detect
- // the missing post-state root and re-execute the head block.
- chain2, err := core.NewBlockChain(db, init.genesis, engine, core.DefaultConfig())
+ bpNum := bpBackend.BlockChain().CurrentBlock().Number.Uint64()
+ t.Logf("Transactions mined on BP by block %d", bpNum)
+
+ // Wait for importer to sync
+ targetBlock := bpNum
+ deadline = time.After(120 * time.Second)
+ for {
+ select {
+ case <-deadline:
+ importerNum := importerBackend.BlockChain().CurrentBlock().Number.Uint64()
+ t.Fatalf("Timed out waiting for importer to reach block %d, current: %d", targetBlock, importerNum)
+ default:
+ time.Sleep(500 * time.Millisecond)
+ if importerBackend.BlockChain().CurrentBlock().Number.Uint64() >= targetBlock {
+ goto importerSynced
+ }
+ }
+ }
+importerSynced:
+
+ // Allow async DB writes to flush
+ time.Sleep(2 * time.Second)
+
+ importerChain := importerBackend.BlockChain()
+ importerNum := importerChain.CurrentBlock().Number.Uint64()
+ t.Logf("Importer synced to block %d", importerNum)
+
+ // Re-read BP chain head
+ bpNum = bpChain.CurrentBlock().Number.Uint64()
+ compareUpTo := bpNum
+ if importerNum < compareUpTo {
+ compareUpTo = importerNum
+ }
+
+ // Verify block hashes and state roots match
+ for i := uint64(1); i <= compareUpTo; i++ {
+ bpBlock := bpChain.GetBlockByNumber(i)
+ require.NotNil(t, bpBlock, "BP missing block %d", i)
+
+ importerBlock := importerChain.GetBlockByNumber(i)
+ require.NotNil(t, importerBlock, "importer missing block %d", i)
+
+ require.Equal(t, bpBlock.Hash(), importerBlock.Hash(),
+ "block %d hash mismatch", i)
+ require.Equal(t, bpBlock.Root(), importerBlock.Root(),
+ "block %d state root mismatch", i)
+ }
+
+ // Verify the self-destructed contract is gone on BOTH chains
+ bpState, err := bpChain.StateAt(bpChain.GetBlockByNumber(compareUpTo).Root())
require.NoError(t, err)
- defer chain2.Stop()
-
- // Verify the head block is unchanged.
- require.Equal(t, headHash, chain2.CurrentBlock().Hash(), "head block should be the same after reopen")
-
- // Verify PostExecutionStateAt returns correct state (via the recovered FlatDiff).
- postState, err := chain2.PostExecutionStateAt(chain2.CurrentBlock())
- require.NoError(t, err, "PostExecutionStateAt should succeed after recovery")
- require.Equal(t, expectedBalance, postState.GetBalance(checkAddr),
- "recovered state should match pre-crash state")
-
- // Verify GetPostStateRoot works (the SRC goroutine spawned by recovery
- // should compute the root; wait for it).
- recoveredRoot := chain2.GetPostStateRoot(headHash)
- require.Equal(t, expectedRoot, recoveredRoot,
- "recovered post-state root should match original")
-
- // Verify the root was persisted by the recovery goroutine.
- persistedRoot := rawdb.ReadPostStateRoot(db, headHash)
- require.Equal(t, expectedRoot, persistedRoot,
- "post-state root should be re-persisted after recovery")
+ importerState, err := importerChain.StateAt(importerChain.GetBlockByNumber(compareUpTo).Root())
+ require.NoError(t, err)
+
+ // Contract should have zero balance (ETH sent back to sender via SELFDESTRUCT)
+ bpContractBal := bpState.GetBalance(contractAddr)
+ importerContractBal := importerState.GetBalance(contractAddr)
+ require.True(t, bpContractBal.IsZero(), "BP: contract should have zero balance, got %s", bpContractBal)
+ require.True(t, importerContractBal.IsZero(), "importer: contract should have zero balance, got %s", importerContractBal)
+
+ // Contract should have no code
+ bpCode := bpState.GetCode(contractAddr)
+ importerCode := importerState.GetCode(contractAddr)
+ require.Empty(t, bpCode, "BP: contract should have no code")
+ require.Empty(t, importerCode, "importer: contract should have no code")
+
+ // Contract nonce should be zero (fully destroyed)
+ require.Equal(t, uint64(0), bpState.GetNonce(contractAddr), "BP: contract nonce should be 0")
+ require.Equal(t, uint64(0), importerState.GetNonce(contractAddr), "importer: contract nonce should be 0")
+
+ // Sender balances must match between BP and importer
+ bpSenderBal := bpState.GetBalance(senderAddr)
+ importerSenderBal := importerState.GetBalance(senderAddr)
+ require.Equal(t, bpSenderBal.String(), importerSenderBal.String(),
+ "sender balance mismatch: BP=%s importer=%s", bpSenderBal, importerSenderBal)
+
+ t.Logf("Verified: contract %s fully destroyed, sender balances match (BP=%s, importer=%s)",
+ contractAddr.Hex(), bpSenderBal, importerSenderBal)
}
diff --git a/tests/bor/helper.go b/tests/bor/helper.go
index 81df9ab851..0fbf87df76 100644
--- a/tests/bor/helper.go
+++ b/tests/bor/helper.go
@@ -752,3 +752,109 @@ func InitMinerWithOptions(genesis *core.Genesis, privKey *ecdsa.PrivateKey, with
return stack, ethBackend, err
}
+
+// InitMinerWithPipelinedSRC creates a miner node with pipelined SRC enabled.
+func InitMinerWithPipelinedSRC(genesis *core.Genesis, privKey *ecdsa.PrivateKey, withoutHeimdall bool) (*node.Node, *eth.Ethereum, error) {
+ stack, err := newPipelineTestNode("InitMiner-")
+ if err != nil {
+ return nil, nil, err
+ }
+ ethBackend, err := eth.New(stack, &ethconfig.Config{
+ Genesis: genesis,
+ NetworkId: genesis.Config.ChainID.Uint64(),
+ SyncMode: downloader.FullSync,
+ DatabaseCache: 256,
+ DatabaseHandles: 256,
+ TxPool: legacypool.DefaultConfig,
+ GPO: ethconfig.Defaults.GPO,
+ Miner: miner.Config{
+ Etherbase: crypto.PubkeyToAddress(privKey.PublicKey),
+ GasCeil: genesis.GasLimit * 11 / 10,
+ GasPrice: big.NewInt(1),
+ Recommit: time.Second,
+ CommitInterruptFlag: true,
+ EnablePipelinedSRC: true,
+ PipelinedSRCLogs: true,
+ },
+ WithoutHeimdall: withoutHeimdall,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+ if err := importValidatorKey(stack, ethBackend, privKey); err != nil {
+ return nil, nil, err
+ }
+ return stack, ethBackend, stack.Start()
+}
+
+// InitImporterWithPipelinedSRC creates a non-mining node with pipelined import
+// SRC enabled. The node will import blocks from peers using the pipelined state
+// root computation path. A validator key is still needed for the keystore (used
+// for P2P identity / account manager) but the node does NOT start mining.
+func InitImporterWithPipelinedSRC(genesis *core.Genesis, privKey *ecdsa.PrivateKey, withoutHeimdall bool) (*node.Node, *eth.Ethereum, error) {
+ stack, err := newPipelineTestNode("InitImporter-")
+ if err != nil {
+ return nil, nil, err
+ }
+ ethBackend, err := eth.New(stack, &ethconfig.Config{
+ Genesis: genesis,
+ NetworkId: genesis.Config.ChainID.Uint64(),
+ SyncMode: downloader.FullSync,
+ DatabaseCache: 256,
+ DatabaseHandles: 256,
+ TxPool: legacypool.DefaultConfig,
+ GPO: ethconfig.Defaults.GPO,
+ Miner: miner.Config{
+ Etherbase: crypto.PubkeyToAddress(privKey.PublicKey),
+ GasCeil: genesis.GasLimit * 11 / 10,
+ GasPrice: big.NewInt(1),
+ Recommit: time.Second,
+ },
+ WithoutHeimdall: withoutHeimdall,
+ EnablePipelinedImportSRC: true,
+ PipelinedImportSRCLogs: true,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+ if err := importValidatorKey(stack, ethBackend, privKey); err != nil {
+ return nil, nil, err
+ }
+ return stack, ethBackend, stack.Start()
+}
+
+// newPipelineTestNode creates a headless node.Node in a fresh temp datadir
+// with P2P discovery disabled. Shared between the miner and importer test
+// setups since their node-level configuration is identical.
+func newPipelineTestNode(dirPrefix string) (*node.Node, error) {
+ datadir, err := os.MkdirTemp("", dirPrefix+uuid.New().String())
+ if err != nil {
+ return nil, err
+ }
+ return node.New(&node.Config{
+ Name: "geth",
+ Version: params.Version,
+ DataDir: datadir,
+ P2P: p2p.Config{
+ ListenAddr: "0.0.0.0:0",
+ NoDiscovery: true,
+ MaxPeers: 25,
+ },
+ UseLightweightKDF: true,
+ })
+}
+
+// importValidatorKey imports the validator's ECDSA key into the node's
+// keystore, unlocks the imported account, and registers the keystore with
+// the eth account manager so mining / signing paths can use it.
+func importValidatorKey(stack *node.Node, ethBackend *eth.Ethereum, privKey *ecdsa.PrivateKey) error {
+ kStore := keystore.NewKeyStore(stack.KeyStoreDir(), keystore.StandardScryptN, keystore.StandardScryptP)
+ if _, err := kStore.ImportECDSA(privKey, ""); err != nil {
+ return err
+ }
+ if err := kStore.Unlock(kStore.Accounts()[0], ""); err != nil {
+ return err
+ }
+ ethBackend.AccountManager().AddBackend(kStore)
+ return nil
+}
diff --git a/triedb/pathdb/reader.go b/triedb/pathdb/reader.go
index 842ac0972e..903b2c0d22 100644
--- a/triedb/pathdb/reader.go
+++ b/triedb/pathdb/reader.go
@@ -66,7 +66,18 @@ type reader struct {
func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
blob, got, loc, err := r.layer.node(owner, path, 0)
if err != nil {
- return nil, err
+ // If the diff layer chain walks into a stale disk layer (marked stale
+ // by concurrent cap()/persist() during pipelined SRC), fall back to
+ // the current base disk layer — same strategy as accountFallback and
+ // storageFallback.
+ if errors.Is(err, errSnapshotStale) {
+ blob, got, loc, err = r.nodeFallback(owner, path)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
}
// Error out if the local one is inconsistent with the target.
if !r.noHashCheck && got != hash {
@@ -92,6 +103,26 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
return blob, nil
}
+// nodeFallback retrieves a trie node when the normal diff layer walk fails
+// due to concurrent layer flattening (cap). This mirrors the fallback strategy
+// used by accountFallback and storageFallback.
+//
+// During pipelined SRC, the background SRC goroutine's CommitWithUpdate can
+// trigger cap() which flattens bottom diff layers into a new disk layer,
+// marking the old disk layer as stale. Concurrently, the prefetcher's trie
+// walk may reach this stale disk layer and get errSnapshotStale.
+//
+// The fallback tries the entry-point layer first (which is still valid in
+// memory), then falls back to tree.bottom() — the current base disk layer,
+// which is guaranteed non-stale.
+func (r *reader) nodeFallback(owner common.Hash, path []byte) ([]byte, common.Hash, *nodeLoc, error) {
+ blob, got, loc, err := r.layer.node(owner, path, 0)
+ if errors.Is(err, errSnapshotStale) {
+ return r.db.tree.bottom().node(owner, path, 0)
+ }
+ return blob, got, loc, err
+}
+
// AccountRLP directly retrieves the account associated with a particular hash.
// An error will be returned if the read operation exits abnormally. Specifically,
// if the layer is already stale.
@@ -102,6 +133,9 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
func (r *reader) AccountRLP(hash common.Hash) ([]byte, error) {
l, err := r.db.tree.lookupAccount(hash, r.state)
if err != nil {
+ if errors.Is(err, errSnapshotStale) {
+ return r.accountFallback(hash)
+ }
return nil, err
}
// If the located layer is stale, fall back to the slow path to retrieve
@@ -114,7 +148,21 @@ func (r *reader) AccountRLP(hash common.Hash) ([]byte, error) {
// not affect the result unless the entry point layer is also stale.
blob, err := l.account(hash, 0)
if errors.Is(err, errSnapshotStale) {
- return r.layer.account(hash, 0)
+ return r.accountFallback(hash)
+ }
+ return blob, err
+}
+
+// accountFallback retrieves account data when the normal lookup path fails
+// due to concurrent layer flattening (cap). It tries the reader's entry-point
+// layer first (which is still in memory), then falls back to the current base
+// disk layer. The base fallback is needed because persist() creates intermediate
+// disk layers that are marked stale during recursive flattening — only the
+// final base layer is guaranteed non-stale.
+func (r *reader) accountFallback(hash common.Hash) ([]byte, error) {
+ blob, err := r.layer.account(hash, 0)
+ if errors.Is(err, errSnapshotStale) {
+ return r.db.tree.bottom().account(hash, 0)
}
return blob, err
}
@@ -151,6 +199,9 @@ func (r *reader) Account(hash common.Hash) (*types.SlimAccount, error) {
func (r *reader) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
l, err := r.db.tree.lookupStorage(accountHash, storageHash, r.state)
if err != nil {
+ if errors.Is(err, errSnapshotStale) {
+ return r.storageFallback(accountHash, storageHash)
+ }
return nil, err
}
// If the located layer is stale, fall back to the slow path to retrieve
@@ -163,7 +214,16 @@ func (r *reader) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
// not affect the result unless the entry point layer is also stale.
blob, err := l.storage(accountHash, storageHash, 0)
if errors.Is(err, errSnapshotStale) {
- return r.layer.storage(accountHash, storageHash, 0)
+ return r.storageFallback(accountHash, storageHash)
+ }
+ return blob, err
+}
+
+// storageFallback is the storage counterpart of accountFallback.
+func (r *reader) storageFallback(accountHash, storageHash common.Hash) ([]byte, error) {
+ blob, err := r.layer.storage(accountHash, storageHash, 0)
+ if errors.Is(err, errSnapshotStale) {
+ return r.db.tree.bottom().storage(accountHash, storageHash, 0)
}
return blob, err
}