4 changes: 4 additions & 0 deletions .gitattributes
@@ -1,3 +1,7 @@
# Auto detect text files and perform LF normalization
* text=auto
*.sol linguist-language=Solidity
core/blockstm/testdata/*.witness.gz filter=lfs diff=lfs merge=lfs -text
core/blockstm/testdata/*.block filter=lfs diff=lfs merge=lfs -text
core/blockstm/testdata/codes.tar.gz filter=lfs diff=lfs merge=lfs -text
core/blockstm/testdata/codes/*.bin filter=lfs diff=lfs merge=lfs -text
9 changes: 8 additions & 1 deletion core/block_validator.go
@@ -19,14 +19,18 @@ package core
import (
"errors"
"fmt"
"time"

"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)

var intermediateRootTimer = metrics.NewRegisteredTimer("chain/intermediateroot", nil)

// BlockValidator is responsible for validating block headers, uncles and
// processed state.
//
@@ -168,7 +172,10 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
}
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
irStart := time.Now()
root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number))
intermediateRootTimer.UpdateSince(irStart)
if header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
}

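For reference, the timing wrapper added above follows the standard go-metrics pattern used throughout the codebase: register a timer once, then record durations with UpdateSince. A minimal, self-contained sketch (the timer name here is hypothetical, not part of the diff):

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// exampleTimer is a hypothetical timer; the diff registers
// "chain/intermediateroot" in the same way.
var exampleTimer = metrics.NewRegisteredTimer("example/expensive/call", nil)

// timed runs f and records its wall-clock duration in the timer.
func timed(f func()) {
	start := time.Now()
	f()
	exampleTimer.UpdateSince(start)
}

func main() {
	timed(func() { time.Sleep(10 * time.Millisecond) })
}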
253 changes: 176 additions & 77 deletions core/blockchain.go
@@ -119,6 +119,7 @@
blockExecutionParallelErrorCounter = metrics.NewRegisteredCounter("chain/execution/parallel/error", nil)
blockExecutionParallelTimer = metrics.NewRegisteredTimer("chain/execution/parallel/timer", nil)
blockExecutionSerialTimer = metrics.NewRegisteredTimer("chain/execution/serial/timer", nil)
blockMgaspsMeter = metrics.NewRegisteredHistogram("chain/execution/mgasps", nil, metrics.NewUniformSample(10240))

statelessParallelImportTimer = metrics.NewRegisteredTimer("chain/imports/stateless/parallel", nil)
statelessSequentialImportTimer = metrics.NewRegisteredTimer("chain/imports/stateless/sequential", nil)
@@ -700,91 +701,151 @@
return nil, err
}

bc.parallelProcessor = NewParallelStateProcessor(bc.hc, bc)
bc.parallelProcessor = NewV2StateProcessor(bc.hc, bc, numprocs)
bc.parallelSpeculativeProcesses = numprocs
bc.enforceParallelProcessor = enforce

return bc, nil
}

func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header, witness *stateless.Witness, followupInterrupt *atomic.Bool) (_ types.Receipts, _ []*types.Log, _ uint64, _ *state.StateDB, vtime time.Duration, blockEndErr error) {
// Process the block using processor and parallelProcessor at the same time, take the one which finishes first, cancel the other, and return the result
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

if followupInterrupt == nil {
followupInterrupt = &atomic.Bool{}
}

if bc.logger != nil && bc.logger.OnBlockStart != nil {
td := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
bc.logger.OnBlockStart(tracing.BlockEvent{
Block: block,
TD: td,
Finalized: bc.CurrentFinalBlock(),
Safe: bc.CurrentSafeBlock(),
})
}

if bc.logger != nil && bc.logger.OnBlockEnd != nil {
defer func() {
bc.logger.OnBlockEnd(blockEndErr)
}()
// fireBlockStart emits the OnBlockStart tracing event when a tracer is set.
func (bc *BlockChain) fireBlockStart(block *types.Block) {
if bc.logger == nil || bc.logger.OnBlockStart == nil {
return
}
td := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
bc.logger.OnBlockStart(tracing.BlockEvent{
Block: block,
TD: td,
Finalized: bc.CurrentFinalBlock(),
Safe: bc.CurrentSafeBlock(),
})
}

parentRoot := parent.Root
prefetch, process, err := bc.statedb.ReadersWithCacheStats(parentRoot)
// setupBlockReaders builds the three StateDBs needed for parallel block
// processing: throwaway (for prefetcher), statedb (for serial processor),
// and parallelStatedb (for V2). The V2 statedb has concurrent reads
// enabled before the prefetcher runs so the underlying trieReader uses
// muSubTries throughout — switching mid-flight would race.
func (bc *BlockChain) setupBlockReaders(parentRoot common.Hash) (
throwaway, statedb, parallelStatedb *state.StateDB,
prefetch, process, parallel state.ReaderWithStats, err error,
) {
prefetch, process, parallel, err = bc.statedb.ReadersWithCacheStatsTriple(parentRoot)
if err != nil {
return nil, nil, 0, nil, 0, err
return nil, nil, nil, nil, nil, nil, err
}
throwaway, err := state.NewWithReader(parentRoot, bc.statedb, prefetch)
if err != nil {
return nil, nil, 0, nil, 0, err
if throwaway, err = state.NewWithReader(parentRoot, bc.statedb, prefetch); err != nil {
return nil, nil, nil, nil, nil, nil, err
}
statedb, err := state.NewWithReader(parentRoot, bc.statedb, process)
if err != nil {
return nil, nil, 0, nil, 0, err
if statedb, err = state.NewWithReader(parentRoot, bc.statedb, process); err != nil {
return nil, nil, nil, nil, nil, nil, err
}
parallelStatedb, err := state.NewWithReader(parentRoot, bc.statedb, process)
if err != nil {
return nil, nil, 0, nil, 0, err
if parallelStatedb, err = state.NewWithReader(parentRoot, bc.statedb, parallel); err != nil {
return nil, nil, nil, nil, nil, nil, err
}
parallelStatedb.EnableConcurrentReads()
return throwaway, statedb, parallelStatedb, prefetch, process, parallel, nil
}

// Upload the statistics of reader at the end
defer func() {
stats := prefetch.GetStats()
accountCacheHitPrefetchMeter.Mark(stats.AccountHit)
accountCacheMissPrefetchMeter.Mark(stats.AccountMiss)
storageCacheHitPrefetchMeter.Mark(stats.StorageHit)
storageCacheMissPrefetchMeter.Mark(stats.StorageMiss)
stats = process.GetStats()
accountCacheHitMeter.Mark(stats.AccountHit)
accountCacheMissMeter.Mark(stats.AccountMiss)
storageCacheHitMeter.Mark(stats.StorageHit)
storageCacheMissMeter.Mark(stats.StorageMiss)

// Report additional prefetch attribution metrics
prefetchStats := prefetch.GetPrefetchStats()
accountInsertPrefetchMeter.Mark(prefetchStats.AccountInsert)
storageInsertPrefetchMeter.Mark(prefetchStats.StorageInsert)

processStats := process.GetPrefetchStats()
accountHitFromPrefetchMeter.Mark(processStats.AccountHitFromPrefetch)
storageHitFromPrefetchMeter.Mark(processStats.StorageHitFromPrefetch)
accountHitFromPrefetchUniqueMeter.Mark(processStats.AccountHitFromPrefetchUnique)
}()
// reportReaderStats marks per-block cache hit/miss meters from prefetch,
// process, and parallel readers. Intended to be called via defer at the
// end of ProcessBlock.
//
// process and parallel both use the roleProcess label internally and
// share the same underlying cache, but ReadersWithCacheStatsTriple
// returns independent ReaderWithStats wrappers, so V2's reads accumulate
// in `parallel`'s atomic counters separately from V1's `process` counters.
// We merge them into the same meter set here so the cache-hit-rate
// dashboards reflect the work the winning processor (typically V2) did,
// rather than only the losing serial path's interrupted reads.
func reportReaderStats(prefetch, process, parallel state.ReaderWithStats) {
stats := prefetch.GetStats()
accountCacheHitPrefetchMeter.Mark(stats.AccountHit)
accountCacheMissPrefetchMeter.Mark(stats.AccountMiss)
storageCacheHitPrefetchMeter.Mark(stats.StorageHit)
storageCacheMissPrefetchMeter.Mark(stats.StorageMiss)

procStats := process.GetStats()
parStats := parallel.GetStats()
accountCacheHitMeter.Mark(procStats.AccountHit + parStats.AccountHit)
accountCacheMissMeter.Mark(procStats.AccountMiss + parStats.AccountMiss)
storageCacheHitMeter.Mark(procStats.StorageHit + parStats.StorageHit)
storageCacheMissMeter.Mark(procStats.StorageMiss + parStats.StorageMiss)

prefetchStats := prefetch.GetPrefetchStats()
accountInsertPrefetchMeter.Mark(prefetchStats.AccountInsert)
storageInsertPrefetchMeter.Mark(prefetchStats.StorageInsert)

procPF := process.GetPrefetchStats()
parPF := parallel.GetPrefetchStats()
accountHitFromPrefetchMeter.Mark(procPF.AccountHitFromPrefetch + parPF.AccountHitFromPrefetch)
storageHitFromPrefetchMeter.Mark(procPF.StorageHitFromPrefetch + parPF.StorageHitFromPrefetch)
accountHitFromPrefetchUniqueMeter.Mark(procPF.AccountHitFromPrefetchUnique + parPF.AccountHitFromPrefetchUnique)
}

// sharedBlockCaches holds VM-level caches that are shared between the
// prefetcher goroutine and the V2 BlockSTM workers for a single block.
type sharedBlockCaches struct {
jumpDests vm.JumpDestCache
keccak *sync.Map
ecrecover *sync.Map
}

func newSharedBlockCaches() *sharedBlockCaches {
return &sharedBlockCaches{
jumpDests: vm.NewSyncJumpDestCache(),
keccak: &sync.Map{},
ecrecover: &sync.Map{},
}
}

// applyTo populates a vm.Config with the shared caches.
func (c *sharedBlockCaches) applyTo(cfg *vm.Config) {
cfg.SharedJumpDestCache = c.jumpDests
cfg.Keccak256Cache = c.keccak
cfg.EcrecoverCache = c.ecrecover
}
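The keccak and ecrecover fields are plain *sync.Map values, so the prefetcher goroutine and the V2 workers can memoize expensive results concurrently without extra locking. A sketch of the load-or-compute-and-store pattern such a shared cache implies (illustrative only; the actual interpreter and precompile lookups are not part of this hunk):

package main

import (
	"sync"

	"github.com/ethereum/go-ethereum/crypto"
)

// keccakCached returns the Keccak-256 hash of data, memoizing results in
// cache keyed by the raw input bytes. Concurrent callers may both compute
// the hash on a miss; the duplicated work is harmless and lock-free.
func keccakCached(cache *sync.Map, data []byte) []byte {
	key := string(data)
	if v, ok := cache.Load(key); ok {
		return v.([]byte)
	}
	h := crypto.Keccak256(data)
	cache.Store(key, h)
	return h
}

func main() {
	cache := &sync.Map{}
	_ = keccakCached(cache, []byte("hello"))
	_ = keccakCached(cache, []byte("hello")) // served from the cache
}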

go func(start time.Time, throwaway *state.StateDB, block *types.Block) {
// Disable tracing for prefetcher executions.
// startPrefetchGoroutine launches the throwaway-statedb prefetcher in
// the background. It runs the block with tracing disabled to warm caches
// for the real processors.
func (bc *BlockChain) startPrefetchGoroutine(block *types.Block, throwaway *state.StateDB,
caches *sharedBlockCaches, followupInterrupt *atomic.Bool) {
go func(start time.Time) {
vmCfg := bc.cfg.VmConfig
vmCfg.Tracer = nil
caches.applyTo(&vmCfg)
bc.prefetcher.Prefetch(block, throwaway, vmCfg, false, followupInterrupt)

blockPrefetchExecuteTimer.Update(time.Since(start))
if followupInterrupt.Load() {
blockPrefetchInterruptMeter.Mark(1)
}
}(time.Now(), throwaway, block)
}(time.Now())
}

func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header, witness *stateless.Witness, followupInterrupt *atomic.Bool) (_ types.Receipts, _ []*types.Log, _ uint64, _ *state.StateDB, vtime time.Duration, blockEndErr error) {
// Process the block using processor and parallelProcessor at the same time, take the one which finishes first, cancel the other, and return the result
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

if followupInterrupt == nil {
followupInterrupt = &atomic.Bool{}
}
bc.fireBlockStart(block)
if bc.logger != nil && bc.logger.OnBlockEnd != nil {
defer func() { bc.logger.OnBlockEnd(blockEndErr) }()
}

throwaway, statedb, parallelStatedb, prefetch, process, parallel, err := bc.setupBlockReaders(parent.Root)
if err != nil {
return nil, nil, 0, nil, 0, err
}
defer reportReaderStats(prefetch, process, parallel)

// Shared caches for this block — used by both prefetcher and V2 workers.
sharedCaches := newSharedBlockCaches()
bc.startPrefetchGoroutine(block, throwaway, sharedCaches, followupInterrupt)

type Result struct {
receipts types.Receipts
@@ -796,13 +857,6 @@
parallel bool
}

// Only disable Parallel Processor for witness producers
// TODO: work on enabling witness production for parallel processor
if witness != nil {
bc.parallelProcessor = nil
bc.enforceParallelProcessor = false
}

var resultChanLen int = 2
if bc.enforceParallelProcessor {
log.Debug("Processing block using Block STM only", "number", block.NumberU64())
@@ -811,23 +865,32 @@
resultChan := make(chan Result, resultChanLen)

processorCount := 0
execStart := time.Now()

if bc.parallelProcessor != nil {
processorCount++

go func() {
pstart := time.Now()
parallelStatedb.StartPrefetcher("chain", witness, nil)
res, err := bc.parallelProcessor.Process(block, parallelStatedb, bc.cfg.VmConfig, nil, ctx)
v2VmCfg := bc.cfg.VmConfig
sharedCaches.applyTo(&v2VmCfg)
res, err := bc.parallelProcessor.Process(block, parallelStatedb, v2VmCfg, nil, ctx)
blockExecutionParallelTimer.UpdateSince(pstart)
if err == nil {
vstart := time.Now()
err = bc.validator.ValidateState(block, parallelStatedb, res, false)
vtime = time.Since(vstart)
}
// If context was cancelled (we lost the race), stop prefetcher
// before sending result. This prevents "layer stale" errors when
// the winner's commit advances the pathdb layer.
if ctx.Err() != nil {
parallelStatedb.StopPrefetcher()
}
if res == nil {
res = &ProcessResult{}
}

Check failure on line 893 in core/blockchain.go

Claude / Claude Code Review

V2 prefetcher leaks on the documented V2-failure fallback path

Comment on lines 873 to 893

🟡 V2 prefetcher leaks on the documented V2-failure fallback path. When V2 returns an error (PanickedIdx, ExecErrIdx) and ProcessBlock falls back to serial, the V2 statedb's "v2-settle" prefetcher is never stopped — the ctx.Err() guard at blockchain.go:888-890 cannot fire because the cancel-ordering fix in 4c688e4 deliberately moved cancel() to AFTER the fallback block, and the fallback then reassigns result to V1's value (stopping V1's prefetcher only). The leak is real but only on V2-failure paths (not exercised by the 1M-block soak since V2 wins overwhelmingly); the fix is one line — stop the V2 prefetcher unconditionally on the V2 error path before sending the Result, e.g. if err != nil { parallelStatedb.StopPrefetcher() } in the V2 goroutine.

Extended reasoning

What the bug is

The V2 goroutine in BlockChain.ProcessBlock starts the parallelStatedb prefetcher with StartPrefetcher("chain", witness, nil) at blockchain.go:875. Inside V2StateProcessor.Process, the prefetcher is unconditionally swapped to "v2-settle" at parallel_state_processor.go:1059-1060 (finalDB.StopPrefetcher(); finalDB.StartPrefetcher("v2-settle", prevWitness, nil)). On V2-failure paths — result.PanickedIdx >= 0 (line 1078) or result.ExecErrIdx >= 0 (line 1086) — Process returns nil, err immediately, with no defer cleanup and no explicit StopPrefetcher call. The "v2-settle" prefetcher is left running with live trie references.

Back in the V2 goroutine in blockchain.go, the only cleanup gate is the if ctx.Err() != nil { parallelStatedb.StopPrefetcher() } check at lines 888-890. But the cancel-ordering fix in 4c688e4 deliberately moved cancel() to AFTER the fallback block (now at line 946) — exactly so that V1 can run to completion when V2 fails. The trade-off: at the moment V2 evaluates ctx.Err() on its error path, the context is still active, so the StopPrefetcher call is skipped.

Why the fallback drain doesn't catch it

When V2 sends its error result, the main goroutine enters the fallback at blockchain.go:929: result = <-resultChan waits for V1, then result.statedb.StopPrefetcher() at line 935 stops V1's prefetcher (because result was just reassigned to V1's Result). processorCount-- decrements to 1, so the second-drain block at lines 972-975 (if processorCount == 2 { second_result.statedb.StopPrefetcher() }) does NOT fire either. The V2 parallelStatedb reference is now unreachable from the return path, but its subfetcher goroutines (spawned by triePrefetcher.prefetch via go sf.loop() in trie_prefetcher.go) are still alive, blocked on sf.tasks/sf.stop, holding trie reader references.

Step-by-step proof

1. The V2 goroutine starts parallelStatedb.StartPrefetcher("chain", witness, nil) (blockchain.go:875).
2. V2StateProcessor.Process runs finalDB.StopPrefetcher() then finalDB.StartPrefetcher("v2-settle", prevWitness, nil) (parallel_state_processor.go:1059-1060). The "v2-settle" triePrefetcher spawns subfetcher worker goroutines.
3. Process hits result.PanickedIdx >= 0 (or ExecErrIdx >= 0) and returns nil, err (parallel_state_processor.go:1078-1080 / 1086-1088). No StopPrefetcher.
4. The V2 goroutine in blockchain.go has err != nil, so it skips ValidateState. The guard if ctx.Err() != nil at line 888 evaluates to false (cancel hasn't run yet). StopPrefetcher is not called. V2 sends its Result and exits.
5. The main goroutine receives V2's result. result.parallel && result.err != nil enters the fallback. result = <-resultChan waits for V1. V1 returns successfully (ctx still active). result.statedb.StopPrefetcher() stops V1's prefetcher. processorCount-- leaves 1.
6. cancel() finally runs at line 946. The V2 goroutine has already exited. cancel() does not propagate into subfetcher.stop (only subfetcher.terminate() closes that channel — see trie_prefetcher.go:399-409).
7. The final if processorCount == 2 drain at line 972 does not fire.
8. ProcessBlock returns. The V2 "v2-settle" triePrefetcher goroutines are orphaned, holding live trie reader references.

Impact

- Goroutine leak per V2-failure block. Each leak persists until process exit.
- Trie reference leak. The orphaned subfetchers hold references to the parent trie reader, which can collide with the pathdb layer the caller commits afterward. The PR comment at blockchain.go:941-945 explicitly identifies this class of issue ("caller will commit the block (advancing the pathdb layer), which would invalidate any trie references still held by the loser's prefetcher") — but the fallback path leaves V2 in exactly that broken state. Could surface as layer-stale errors on subsequent imports, the very class the cancel-ordering relocation was trying to prevent.
- Not consensus-affecting (V2's partial state is discarded; the serial result drives the commit).
- Not exercised by the 1M-block soak since V2 wins overwhelmingly. Manifests only when V2 actually returns an error — which is exactly the documented production fallback contract advertised in the PR description.

Asymmetry vs V1-failure path

Worth noting: a V1 failure does NOT leak, because if V1 fails first, processorCount stays at 2 (no decrement), so the final drain at line 972 fires and stops V2's prefetcher. Only a V2 failure decrements processorCount to 1 inside the fallback block, bypassing the drain.

How to fix

Minimal one-line fix in the V2 goroutine in blockchain.go, after the Process+ValidateState block, before sending the Result:

if err != nil {
    parallelStatedb.StopPrefetcher()
}

This mirrors the symmetry that the existing if ctx.Err() != nil guard provides for the cancellation case. Alternative locations that work equally well: a defer parallelStatedb.StopPrefetcher() inside V2StateProcessor.Process after the prefetcher swap, or capturing and stopping the V2 prefetcher in the fallback block before the result = <-resultChan reassignment overwrites the reference.

resultChan <- Result{res.Receipts, res.Logs, res.GasUsed, err, parallelStatedb, blockExecutionParallelCounter, true}
}()
}
@@ -845,6 +908,9 @@
err = bc.validator.ValidateState(block, statedb, res, false)
vtime = time.Since(vstart)
}
if ctx.Err() != nil {
statedb.StopPrefetcher()
}
if res == nil {
res = &ProcessResult{}
}
@@ -854,8 +920,14 @@

result := <-resultChan

// If V2 returned an error (panic, ApplyMessage consensus error, etc.)
// and the serial processor is also running, fall back to the serial
// result BEFORE cancelling — cancelling first would interrupt the
// still-running serial processor at its next tx boundary and the
// fallback would receive context.Canceled instead of a usable
// recovery. The fallback IS the recovery; it must run to completion.
if result.parallel && result.err != nil {
log.Warn("Parallel state processor failed", "err", result.err)
log.Warn("Parallel state processor failed", "number", block.NumberU64(), "hash", block.Hash(), "err", result.err)
blockExecutionParallelErrorCounter.Inc(1)
// If the parallel processor failed, we will fall back to the serial processor if enabled
if processorCount == 2 {
Expand All @@ -865,14 +937,41 @@
}
}

// With the result we plan to keep in hand, cancel the shared context
// so the loser (if any) stops at its next tx boundary, and signal the
// throwaway prefetcher to stop. This must happen BEFORE ProcessBlock
// returns, because the caller will commit the block (advancing the
// pathdb layer), which would invalidate any trie references still
// held by the loser's prefetcher.
cancel()
followupInterrupt.Store(true)

result.counter.Inc(1)

// Make sure we are not leaking any prefetchers
// Report per-block mgasps for the winning processor.
// Value is scaled by 1000 (stored as µgasps) to preserve 3 decimal places,
// e.g. 210.357 mgasps → 210357. Divide by 1000 when reading.
// Exclude sprint-end blocks (with state sync tx) — their Finalize overhead
// (Heimdall state sync ~164ms) distorts the execution throughput metric.
hasStateSync := false
if txs := block.Transactions(); len(txs) > 0 {
hasStateSync = txs[len(txs)-1].Type() == types.StateSyncTxType
}
if elapsed := time.Since(execStart); elapsed > 0 && result.usedGas > 0 && !hasStateSync {
mgasps := int64(float64(result.usedGas) * 1e6 / float64(elapsed)) // µgasps (mgasps * 1000)
blockMgaspsMeter.Update(mgasps)
}
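A quick check of the scaling arithmetic described in the comment above (standalone sketch, not part of the diff):

package main

import (
	"fmt"
	"time"
)

func main() {
	usedGas := uint64(30_000_000)     // a 30 Mgas block
	elapsed := 100 * time.Millisecond // executed in 0.1 s, i.e. 300 mgasps
	// Same expression as above: gas / nanoseconds * 1e6 yields mgasps * 1000.
	scaled := int64(float64(usedGas) * 1e6 / float64(elapsed))
	fmt.Println(scaled)                 // 300000
	fmt.Println(float64(scaled) / 1000) // 300.000, divide by 1000 when reading
}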

// Wait for the losing processor to finish and stop its prefetcher.
// Must be synchronous: the caller will commit the block (advancing the
// pathdb layer), which invalidates trie references held by the loser's
// prefetcher subfetchers. The context is already cancelled and both V1
// and V2 honour it at task-boundary level (V1 in its task loop; V2 in
// the executor's dispatcher and validation loop), so the loser stops
// promptly — typically within one tx execution.
if processorCount == 2 {
go func() {
second_result := <-resultChan
second_result.statedb.StopPrefetcher()
}()
second_result := <-resultChan
second_result.statedb.StopPrefetcher()
}

return result.receipts, result.logs, result.usedGas, result.statedb, vtime, result.err
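Stripped of the chain-specific details, ProcessBlock now implements a first-result-wins race with a fallback and a synchronous drain of the loser. A self-contained sketch of that control-flow shape, assuming two interchangeable workers (the names and signatures are illustrative, not the actual processor APIs):

package main

import (
	"context"
	"fmt"
)

type result struct {
	val      int
	err      error
	parallel bool
}

// race runs both workers concurrently, prefers whichever finishes first,
// falls back to the second result if the preferred (parallel) one failed,
// then cancels the shared context and drains the loser synchronously.
func race(parallelFn, serialFn func(context.Context) (int, error)) (int, error) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	results := make(chan result, 2) // buffered so the loser never blocks on send
	go func() { v, err := parallelFn(ctx); results <- result{v, err, true} }()
	go func() { v, err := serialFn(ctx); results <- result{v, err, false} }()

	r := <-results
	if r.parallel && r.err != nil {
		// Fall back BEFORE cancelling, so the serial worker can run to
		// completion instead of observing context.Canceled.
		r = <-results
		cancel()
		return r.val, r.err
	}
	cancel()
	<-results // drain the loser so no goroutine outlives this call
	return r.val, r.err
}

func main() {
	v, err := race(
		func(ctx context.Context) (int, error) { return 42, nil },
		func(ctx context.Context) (int, error) { return 42, nil },
	)
	fmt.Println(v, err)
}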
2 changes: 1 addition & 1 deletion core/blockstm/executor_test.go
@@ -117,7 +117,7 @@ func (t *testExecTask) Execute(mvh *MVHashMap, incarnation int) error {

sleep(op.duration)

t.readMap[k] = ReadDescriptor{k, readKind, Version{TxnIndex: result.depIdx, Incarnation: result.incarnation}}
t.readMap[k] = ReadDescriptor{Path: k, Kind: readKind, V: Version{TxnIndex: result.depIdx, Incarnation: result.incarnation}}
case writeType:
t.writeMap[k] = WriteDescriptor{k, version, op.val}
case otherType:
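Finally, the executor_test change above switches the ReadDescriptor literal from positional to keyed fields. Keyed literals keep compiling, and keep meaning the same thing, when fields are added or reordered; a minimal illustration with a hypothetical struct, not the actual blockstm types:

package main

import "fmt"

type descriptor struct {
	Path string
	Kind int
	Ver  int
}

func main() {
	// Positional: can silently misassign values if adjacent fields of the
	// same type are ever reordered, and breaks when fields are added.
	a := descriptor{"0xabc", 1, 3}
	// Keyed: robust to both kinds of change.
	b := descriptor{Path: "0xabc", Kind: 1, Ver: 3}
	fmt.Println(a == b) // true
}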