diff --git a/.golangci.yml b/.golangci.yml index aae7da6562..86540a53fe 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,7 +1,7 @@ # This file configures github.com/golangci/golangci-lint. version: '2' run: - go: '1.26.2' + go: '1.26.3' tests: true linters: default: none diff --git a/Dockerfile b/Dockerfile index 32098fced0..93df2d9b53 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # ─── BUILDER STAGE ─────────────────────────────────────────────────────────────── -FROM golang:1.26.2-alpine AS builder +FROM golang:1.26.3-alpine AS builder ARG BOR_DIR=/var/lib/bor/ ENV BOR_DIR=$BOR_DIR diff --git a/Dockerfile.alltools b/Dockerfile.alltools index 0fe1dc4a57..72913d1ca9 100644 --- a/Dockerfile.alltools +++ b/Dockerfile.alltools @@ -1,5 +1,5 @@ # Build Geth in a stock Go builder container -FROM golang:1.26.2-alpine AS builder +FROM golang:1.26.3-alpine AS builder RUN apk add --no-cache make gcc musl-dev linux-headers git diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index db168eb7c1..901577a5d3 100644 --- a/cmd/keeper/go.mod +++ b/cmd/keeper/go.mod @@ -1,6 +1,6 @@ module github.com/ethereum/go-ethereum/cmd/keeper -go 1.26.2 +go 1.26.3 require ( github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20260104020744-7268a54d0358 @@ -48,9 +48,9 @@ require ( github.com/tklauser/go-sysconf v0.3.16 // indirect github.com/tklauser/numcpus v0.11.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - golang.org/x/crypto v0.46.0 // indirect - golang.org/x/sync v0.19.0 // indirect - golang.org/x/sys v0.40.0 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.43.0 // indirect golang.org/x/time v0.12.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum index 43778f130f..34b85d8c3c 100644 --- a/cmd/keeper/go.sum +++ b/cmd/keeper/go.sum @@ -158,15 +158,18 @@ go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= 
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 h1:DHNhtq3sNNzrvduZZIiFyXWOL9IWaDPHqTnLJp+rCBY= golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= -golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= diff --git a/consensus/bor/api.go b/consensus/bor/api.go index 9176fbe7ea..82eb3103ca 100644 --- a/consensus/bor/api.go +++ b/consensus/bor/api.go @@ -277,7 +277,9 @@ func (api *API) GetCurrentValidators() ([]*valset.Validator, error) { return snap.ValidatorSet.Validators, nil } -// GetRootHash returns the merkle root of the start-to-end 
blocks' headers +// GetRootHash returns the merkle root of the start-to-end blocks' headers. +// rootHashCache is normally initialized eagerly inside Bor.APIs (sync.Once); +// the lazy init below is kept as a fallback for direct-API paths (e.g., tests) that don't go through Bor.APIs. func (api *API) GetRootHash(start uint64, end uint64) (string, error) { if err := api.initializeRootHashCache(); err != nil { return "", err diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index 242ebc9bfd..f565f071ee 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -278,6 +278,10 @@ type Bor struct { // ctx is cancelled when Close() is called, allowing in-flight operations to abort promptly. ctx context.Context ctxCancel context.CancelFunc + + // api is the bor engine API instance reused across all callers (JSON-RPC and gRPC). + api *API + apiOnce sync.Once } type signer struct { @@ -1529,11 +1533,29 @@ func (c *Bor) SealHash(header *types.Header) common.Hash { // APIs implements consensus.Engine, returning the user facing RPC API to allow // controlling the signer voting. +// +// The returned *API is cached on the first call so that per-API state (e.g., +// rootHashCache) persists across calls. JSON-RPC only invokes APIs() once at +// node startup, but the gRPC backend fetches it on every handler call — without +// the cache those calls would each start from an empty state. +// +// rootHashCache is initialized here (inside the sync.Once) rather than lazily +// in GetRootHash so that concurrent gRPC handlers sharing the cached *API +// cannot race in initializeRootHashCache. 
func (c *Bor) APIs(chain consensus.ChainHeaderReader) []rpc.API { + c.apiOnce.Do(func() { + a := &API{chain: chain, bor: c} + if err := a.initializeRootHashCache(); err != nil { + // log.Crit logs at the highest severity and then exits the process. + // This is currently unreachable (size is a constant in initializeRootHashCache). + log.Crit("bor: failed to initialize rootHashCache", "err", err) + } + c.api = a + }) return []rpc.API{{ Namespace: "bor", Version: "1.0", - Service: &API{chain: chain, bor: c}, + Service: c.api, Public: false, }} } diff --git a/consensus/bor/bor_test.go b/consensus/bor/bor_test.go index 86e8707893..6828dcb89a 100644 --- a/consensus/bor/bor_test.go +++ b/consensus/bor/bor_test.go @@ -2040,6 +2040,20 @@ func TestAPIs_ReturnsBorNamespace(t *testing.T) { require.Equal(t, "1.0", apis[0].Version) } +// TestAPIs_ReturnsSameInstanceAcrossCalls verifies that repeated calls to APIs() return the same *API +// so per-API state such as rootHashCache persists across calls. This matters for the gRPC backend +// which fetches APIs() on every handler invocation; returning a fresh *API each call defeats caching. 
+func TestAPIs_ReturnsSameInstanceAcrossCalls(t *testing.T) { + t.Parallel() + sp := &fakeSpanner{vals: []*valset.Validator{{Address: common.HexToAddress("0x1"), VotingPower: 1}}} + borCfg := defaultBorConfig() + chain, b := newChainAndBorForTest(t, sp, borCfg, false, common.Address{}, uint64(time.Now().Unix())) + + first := b.APIs(chain.HeaderChain()) + second := b.APIs(chain.HeaderChain()) + require.Same(t, first[0].Service, second[0].Service, "APIs must return the cached *API on repeated calls") +} + func TestClose_Idempotent(t *testing.T) { t.Parallel() sp := &fakeSpanner{vals: []*valset.Validator{{Address: common.HexToAddress("0x1"), VotingPower: 1}}} diff --git a/docs/cli/default_config.toml b/docs/cli/default_config.toml index ad81b57cbf..047c6070d8 100644 --- a/docs/cli/default_config.toml +++ b/docs/cli/default_config.toml @@ -220,6 +220,7 @@ devfakeauthor = false [grpc] addr = "127.0.0.1:3131" + token = "" [developer] dev = false diff --git a/docs/cli/server.md b/docs/cli/server.md index bfa7f29de6..5e00df2edf 100644 --- a/docs/cli/server.md +++ b/docs/cli/server.md @@ -64,7 +64,9 @@ The ```bor server``` command runs the Bor client. - ```gpo.percentile```: Suggested gas price is the given percentile of a set of recent transaction gas prices (default: 60) -- ```grpc.addr```: Address and port to bind the GRPC server (default: 127.0.0.1:3131) +- ```grpc.addr```: Address and port to bind the GRPC server. Empty disables the server. Non-loopback binds without --grpc.token log a startup warning. (default: 127.0.0.1:3131) + +- ```grpc.token```: Raw token expected in the `authorization: Bearer ` header of incoming gRPC calls (empty disables auth; the `Bearer ` prefix is stripped before comparison). Prefer the BOR_GRPC_TOKEN environment variable over this flag. 
- ```history.logs```: Number of recent blocks to maintain log search index for (default = about 2 months, 0 = entire chain) (default: 2350000) diff --git a/go.mod b/go.mod index 2983eeb461..b371cc5afa 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,12 @@ module github.com/ethereum/go-ethereum // Note: Change the go image version in Dockerfile if you change this. -go 1.26.2 +go 1.26.3 require ( github.com/0xPolygon/crand v1.0.3 - github.com/0xPolygon/heimdall-v2 v0.6.0 - github.com/0xPolygon/polyproto v0.0.7 + github.com/0xPolygon/heimdall-v2 v0.7.1 + github.com/0xPolygon/polyproto v0.0.8-0.20260423132317-7d955b45ef8a github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 github.com/BurntSushi/toml v1.4.0 github.com/JekaMas/go-grpc-net-conn v0.0.0-20220708155319-6aff21f2d13d @@ -94,13 +94,13 @@ require ( go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/mock v0.5.0 - golang.org/x/crypto v0.46.0 + golang.org/x/crypto v0.50.0 golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 - golang.org/x/sync v0.19.0 - golang.org/x/sys v0.42.0 - golang.org/x/text v0.32.0 + golang.org/x/sync v0.20.0 + golang.org/x/sys v0.43.0 + golang.org/x/text v0.36.0 golang.org/x/time v0.12.0 - golang.org/x/tools v0.39.0 + golang.org/x/tools v0.43.0 google.golang.org/grpc v1.79.3 google.golang.org/protobuf v1.36.10 gopkg.in/natefinch/lumberjack.v2 v2.2.1 @@ -164,8 +164,8 @@ require ( github.com/tklauser/go-sysconf v0.3.16 // indirect github.com/tklauser/numcpus v0.11.0 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.48.0 // indirect + golang.org/x/mod v0.34.0 // indirect + golang.org/x/net v0.53.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gotest.tools v2.2.0+incompatible pgregory.net/rapid v1.2.0 @@ -328,7 +328,7 @@ require ( go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/oauth2 v0.34.0 // indirect - golang.org/x/term v0.38.0 // 
indirect + golang.org/x/term v0.42.0 // indirect google.golang.org/api v0.247.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect diff --git a/go.sum b/go.sum index 802074e40b..05f9147dfa 100644 --- a/go.sum +++ b/go.sum @@ -86,10 +86,10 @@ github.com/0xPolygon/cosmos-sdk/client/v2 v2.0.0-beta.6 h1:+6AxZcMTWHaRHV0HILf/r github.com/0xPolygon/cosmos-sdk/client/v2 v2.0.0-beta.6/go.mod h1:4p0P6o0ro+FizakJUYS9SeM94RNbv0thLmkHRw5o5as= github.com/0xPolygon/crand v1.0.3 h1:BYYflmgLhmGPEgqtopG4muq6wV6DOkwD8uPymNz5WeQ= github.com/0xPolygon/crand v1.0.3/go.mod h1:km4366oC7EVFl1xNUCwzxUXNM10swZqd8LZ0E5SgbAE= -github.com/0xPolygon/heimdall-v2 v0.6.0 h1:rA8RISMnns1w08PxTLvDBS5WiaTOFHJGSrhDWDJLtHc= -github.com/0xPolygon/heimdall-v2 v0.6.0/go.mod h1:fVkGiODG6cGLaDyrE3qxIrvz1rbUr4Zdrr3dOm2SPgg= -github.com/0xPolygon/polyproto v0.0.7 h1:Ody+kFyCRK4QXRPXbsP5pdxKrDgwAAXtFB8NPgaIxRs= -github.com/0xPolygon/polyproto v0.0.7/go.mod h1:2Iw93k2LismvckKKeXQITuhJH9vLbqOa212AMskH6no= +github.com/0xPolygon/heimdall-v2 v0.7.1 h1:L50HuFky97OvSF7uHlRXoClujKTFDeMP059GNeFc90g= +github.com/0xPolygon/heimdall-v2 v0.7.1/go.mod h1:YrGakfr3jRlcXzGrBiJPxkfOfkqodzDZHNhf+2mYG5U= +github.com/0xPolygon/polyproto v0.0.8-0.20260423132317-7d955b45ef8a h1:vVtSjO29FcFBZbNVsVGy6z3lv3RRr/sI1vPR7IzbZZE= +github.com/0xPolygon/polyproto v0.0.8-0.20260423132317-7d955b45ef8a/go.mod h1:2Iw93k2LismvckKKeXQITuhJH9vLbqOa212AMskH6no= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= @@ -1325,8 +1325,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto 
v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1366,8 +1366,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1421,8 +1421,8 @@ golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1448,8 +1448,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= -golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1530,8 +1530,8 @@ 
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1539,8 +1539,8 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1555,8 +1555,8 @@ golang.org/x/text v0.4.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1625,8 +1625,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= +golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/cli/command.go b/internal/cli/command.go index 
3dbafd8b26..fc6f190af0 100644 --- a/internal/cli/command.go +++ b/internal/cli/command.go @@ -1,6 +1,7 @@ package cli import ( + "context" "fmt" "os" @@ -222,7 +223,8 @@ func Commands() map[string]MarkDownCommandFactory { type Meta2 struct { UI cli.Ui - addr string + addr string + token string } func (m *Meta2) NewFlagSet(n string) *flagset.Flagset { @@ -234,12 +236,55 @@ func (m *Meta2) NewFlagSet(n string) *flagset.Flagset { Usage: "Address of the grpc endpoint", Default: "127.0.0.1:3131", }) + f.StringFlag(&flagset.StringFlag{ + Name: "token", + Value: &m.token, + Usage: "Bearer token to authenticate with the bor gRPC server (matches --grpc.token on the server). Falls back to the BOR_GRPC_TOKEN environment variable when unset.", + Default: "", + }) return f } +// bearerCreds implements grpc/credentials.PerRPCCredentials for the +// "Authorization: Bearer " header the bor gRPC server expects. +// Local-only: not exported. +type bearerCreds struct{ token string } + +func (b bearerCreds) GetRequestMetadata(_ context.Context, _ ...string) (map[string]string, error) { + return map[string]string{"authorization": "Bearer " + b.token}, nil +} + +// RequireTransportSecurity returns false so the credentials still attach +// when the CLI dials a plaintext loopback server (the common case for +// `bor status` etc. against same-host bor). Cross-host TLS plumbing for +// the CLI is a separate concern not addressed here. +func (b bearerCreds) RequireTransportSecurity() bool { return false } + func (m *Meta2) Conn() (*grpc.ClientConn, error) { - conn, err := grpc.NewClient(m.addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + // Token resolution: --token flag > BOR_GRPC_TOKEN env var > unauthenticated. + // Mirrors the server-side precedence so an operator who exports + // BOR_GRPC_TOKEN once gets both `bor server` AND `bor ` to use it. 
+ token := m.token + if token == "" { + token = os.Getenv("BOR_GRPC_TOKEN") + } + + opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} + if token != "" { + // Symmetric with heimdall's client-side guarantee: refuse to send the + // bearer token in cleartext to a non-loopback peer. + if !server.IsLoopbackHostPort(m.addr) { + return nil, fmt.Errorf( + "refusing to send bearer token to non-loopback address %q over plaintext; "+ + "use --address with a loopback host (e.g. 127.0.0.1:3131) or extend the CLI with TLS support", + m.addr, + ) + } + opts = append(opts, grpc.WithPerRPCCredentials(bearerCreds{token: token})) + } + + conn, err := grpc.NewClient(m.addr, opts...) if err != nil { return nil, fmt.Errorf("failed to connect to server: %v", err) } diff --git a/internal/cli/command_test.go b/internal/cli/command_test.go new file mode 100644 index 0000000000..ae6593b720 --- /dev/null +++ b/internal/cli/command_test.go @@ -0,0 +1,57 @@ +package cli + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestMeta2Conn_NoToken: default behaviour (no token) keeps the original +// insecure-only dial. No regressions for the loopback no-auth flow. +func TestMeta2Conn_NoToken(t *testing.T) { + t.Setenv("BOR_GRPC_TOKEN", "") + m := &Meta2{addr: "127.0.0.1:3131"} + conn, err := m.Conn() + require.NoError(t, err) + t.Cleanup(func() { _ = conn.Close() }) +} + +// TestMeta2Conn_LoopbackWithToken: token attached over plaintext is allowed +// when the dial address is loopback (the typical same-host validator pair). +func TestMeta2Conn_LoopbackWithToken(t *testing.T) { + t.Setenv("BOR_GRPC_TOKEN", "") + m := &Meta2{addr: "127.0.0.1:3131", token: "secret"} + conn, err := m.Conn() + require.NoError(t, err) + t.Cleanup(func() { _ = conn.Close() }) +} + +// TestMeta2Conn_NonLoopbackWithTokenRefused: refuse to send a bearer token in +// cleartext to a remote host. Mirrors the heimdall-side guarantee. 
+func TestMeta2Conn_NonLoopbackWithTokenRefused(t *testing.T) { + t.Setenv("BOR_GRPC_TOKEN", "") + m := &Meta2{addr: "bor.example.net:3131", token: "secret"} + _, err := m.Conn() + require.Error(t, err) + require.Contains(t, err.Error(), "refusing to send bearer token to non-loopback") +} + +// TestMeta2Conn_EnvVarFallback: BOR_GRPC_TOKEN env var is used when --token is +// not passed. The non-loopback refusal applies to env-supplied tokens too. +func TestMeta2Conn_EnvVarFallback(t *testing.T) { + t.Setenv("BOR_GRPC_TOKEN", "from-env") + m := &Meta2{addr: "bor.example.net:3131"} + _, err := m.Conn() + require.Error(t, err) + require.Contains(t, err.Error(), "refusing to send bearer token to non-loopback") +} + +// TestBearerCreds_GetRequestMetadata returns the correctly framed Authorization +// header with a Bearer scheme. +func TestBearerCreds_GetRequestMetadata(t *testing.T) { + t.Parallel() + + md, err := bearerCreds{token: "abc123"}.GetRequestMetadata(nil) + require.NoError(t, err) + require.Equal(t, "Bearer abc123", md["authorization"]) +} diff --git a/internal/cli/server/api_service.go b/internal/cli/server/api_service.go index 9e37ff1248..8b9da8d4d0 100644 --- a/internal/cli/server/api_service.go +++ b/internal/cli/server/api_service.go @@ -4,39 +4,174 @@ import ( "context" "errors" "math" + "strings" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + borconsensus "github.com/ethereum/go-ethereum/consensus/bor" + "github.com/ethereum/go-ethereum/consensus/bor/valset" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" protobor "github.com/0xPolygon/polyproto/bor" + commonproto "github.com/0xPolygon/polyproto/common" protoutil "github.com/0xPolygon/polyproto/utils" ) +// protoHashToCommon safely converts a proto H256 to a common.Hash. 
Returns +// codes.InvalidArgument if the outer pointer or any inner H128 sub-message is nil +func protoHashToCommon(h *commonproto.H256) (common.Hash, error) { + if h == nil || h.Hi == nil || h.Lo == nil { + return common.Hash{}, status.Error(codes.InvalidArgument, "hash is required with non-nil Hi/Lo") + } + return common.Hash(protoutil.ConvertH256ToHash(h)), nil +} + +// maxBlockInfoBatchSize caps the per-call range to prevent abuse of the batch endpoint. +// Must be >= heimdall's MaxMilestonePropositionLength. +const maxBlockInfoBatchSize = 256 + func (s *Server) GetRootHash(ctx context.Context, req *protobor.GetRootHashRequest) (*protobor.GetRootHashResponse, error) { + // Pre-validate the request so malformed inputs surface as + // codes.InvalidArgument instead of being passed to the backend (which + // would return them as the default codes.Unknown). + if req.EndBlockNumber < req.StartBlockNumber { + return nil, status.Error(codes.InvalidArgument, "invalid range: end < start") + } + if req.EndBlockNumber > math.MaxInt64 { + return nil, status.Error(codes.InvalidArgument, "invalid range: end exceeds max int64") + } + rootHash, err := s.backend.APIBackend.GetRootHash(ctx, req.StartBlockNumber, req.EndBlockNumber) if err != nil { - return nil, err + return nil, mapBorAPIError(err) } return &protobor.GetRootHashResponse{RootHash: rootHash}, nil } func (s *Server) GetVoteOnHash(ctx context.Context, req *protobor.GetVoteOnHashRequest) (*protobor.GetVoteOnHashResponse, error) { + if req.EndBlockNumber < req.StartBlockNumber { + return nil, status.Error(codes.InvalidArgument, "invalid range: end < start") + } + if req.EndBlockNumber > math.MaxInt64 { + return nil, status.Error(codes.InvalidArgument, "invalid range: end exceeds max int64") + } + if req.Hash == "" { + return nil, status.Error(codes.InvalidArgument, "hash is required") + } + if req.MilestoneId == "" { + return nil, status.Error(codes.InvalidArgument, "milestone id is required") + } + vote, err := 
s.backend.APIBackend.GetVoteOnHash(ctx, req.StartBlockNumber, req.EndBlockNumber, req.Hash, req.MilestoneId) if err != nil { - return nil, err + return nil, mapBorAPIError(err) } return &protobor.GetVoteOnHashResponse{Response: vote}, nil } -func headerToProtoborHeader(h *types.Header) *protobor.Header { - return &protobor.Header{ - Number: h.Number.Uint64(), - ParentHash: protoutil.ConvertHashToH256(h.ParentHash), - Time: h.Time, +// mapBorAPIError translates the most common backend error sentinels emitted by +// EthAPIBackend.GetRootHash / GetVoteOnHash into canonical gRPC status codes. +// Unknown errors are wrapped as codes.Internal instead of default codes.Unknown, +// so clients can distinguish what went wrong. +func mapBorAPIError(err error) error { + if err == nil { + return nil + } + // Already a gRPC status (e.g., from a nested handler) — pass through. + if _, ok := status.FromError(err); ok { + return err + } + // Struct-typed errors come first: their messages contain dynamic %d + // fields, so equality matching against the message string can never hit + // them. Walk wrapped chains via errors.As. + var invalidRange *valset.InvalidStartEndBlockError + if errors.As(err, &invalidRange) { + return status.Error(codes.InvalidArgument, err.Error()) + } + var rangeTooLong *borconsensus.MaxCheckpointLengthExceededError + if errors.As(err, &rangeTooLong) { + return status.Error(codes.OutOfRange, err.Error()) + } + msg := err.Error() + switch { + case msg == "Only available in Bor engine": + // Server is misconfigured / not running bor consensus. 
+ return status.Error(codes.FailedPrecondition, msg) + case msg == "unknown block": + return status.Error(codes.NotFound, msg) + case msg == "end block number is out of safe range": + return status.Error(codes.OutOfRange, msg) + case msg == "failed to get end block", msg == "failed to get tip confirmation block": + return status.Error(codes.NotFound, msg) + case strings.HasPrefix(msg, "hash mismatch"): + return status.Error(codes.InvalidArgument, msg) + // Reorg during checkpoint root computation: transient/retriable. Aborted + // signals "the operation was cancelled, typically due to a concurrency + // issue" — the heimdall caller can branch on this and retry once the + // chain settles instead of treating it as a server bug. + case msg == "reorg occurred while computing checkpoint root": + return status.Error(codes.Aborted, msg) + // Non-contiguous header range = ancient pruning / DB inconsistency. + // DataLoss conveys "unrecoverable state on the server" without the + // "this is a bug" implication of Internal. 
+ case msg == "non-contiguous headers in checkpoint range": + return status.Error(codes.DataLoss, msg) + default: + return status.Error(codes.Internal, msg) + } +} + +func headerToProtoBorHeader(h *types.Header) *protobor.Header { + out := &protobor.Header{ + Number: h.Number.Uint64(), + ParentHash: protoutil.ConvertHashToH256(h.ParentHash), + Time: h.Time, + UncleHash: protoutil.ConvertHashToH256(h.UncleHash), + Coinbase: protoutil.ConvertAddressToH160(h.Coinbase), + StateRoot: protoutil.ConvertHashToH256(h.Root), + TxRoot: protoutil.ConvertHashToH256(h.TxHash), + ReceiptRoot: protoutil.ConvertHashToH256(h.ReceiptHash), + Bloom: append([]byte(nil), h.Bloom.Bytes()...), + GasLimit: h.GasLimit, + GasUsed: h.GasUsed, + ExtraData: append([]byte(nil), h.Extra...), + MixDigest: protoutil.ConvertHashToH256(h.MixDigest), + Nonce: append([]byte(nil), h.Nonce[:]...), + } + if h.Difficulty != nil { + out.Difficulty = h.Difficulty.Bytes() + } + if h.BaseFee != nil { + out.BaseFee = h.BaseFee.Bytes() + } + if h.WithdrawalsHash != nil { + out.WithdrawalsHash = protoutil.ConvertHashToH256(*h.WithdrawalsHash) + } + // BlobGasUsed and ExcessBlobGas are proto3 optional. *uint64 preserves + // nil-vs-zero; we copy through a fresh variable so the proto doesn't alias + // the source header's pointers (consistent with how ExtraData/Bloom/Nonce + // are handled above). 
+ if h.BlobGasUsed != nil { + v := *h.BlobGasUsed + out.BlobGasUsed = &v + } + if h.ExcessBlobGas != nil { + v := *h.ExcessBlobGas + out.ExcessBlobGas = &v + } + if h.ParentBeaconRoot != nil { + out.ParentBeaconBlockRoot = protoutil.ConvertHashToH256(*h.ParentBeaconRoot) + } + if h.RequestsHash != nil { + out.RequestsHash = protoutil.ConvertHashToH256(*h.RequestsHash) } + return out } func (s *Server) HeaderByNumber(ctx context.Context, req *protobor.GetHeaderByNumberRequest) (*protobor.GetHeaderByNumberResponse, error) { @@ -50,10 +185,10 @@ func (s *Server) HeaderByNumber(ctx context.Context, req *protobor.GetHeaderByNu } if header == nil { - return nil, errors.New("header not found") + return nil, status.Error(codes.NotFound, "header not found") } - return &protobor.GetHeaderByNumberResponse{Header: headerToProtoborHeader(header)}, nil + return &protobor.GetHeaderByNumberResponse{Header: headerToProtoBorHeader(header)}, nil } func (s *Server) BlockByNumber(ctx context.Context, req *protobor.GetBlockByNumberRequest) (*protobor.GetBlockByNumberResponse, error) { @@ -67,7 +202,7 @@ func (s *Server) BlockByNumber(ctx context.Context, req *protobor.GetBlockByNumb } if block == nil { - return nil, errors.New("block not found") + return nil, status.Error(codes.NotFound, "block not found") } return &protobor.GetBlockByNumberResponse{Block: blockToProtoBlock(block)}, nil @@ -75,12 +210,19 @@ func (s *Server) BlockByNumber(ctx context.Context, req *protobor.GetBlockByNumb func blockToProtoBlock(h *types.Block) *protobor.Block { return &protobor.Block{ - Header: headerToProtoborHeader(h.Header()), + Header: headerToProtoBorHeader(h.Header()), } } func (s *Server) TransactionReceipt(ctx context.Context, req *protobor.ReceiptRequest) (*protobor.ReceiptResponse, error) { - _, _, blockHash, _, txnIndex := s.backend.APIBackend.GetTransaction(protoutil.ConvertH256ToHash(req.Hash)) + txHash, err := protoHashToCommon(req.Hash) + if err != nil { + return nil, err + } + found, _, 
blockHash, _, txnIndex := s.backend.APIBackend.GetTransaction(txHash) + if !found { + return nil, status.Error(codes.NotFound, "transaction not found") + } receipts, err := s.backend.APIBackend.GetReceipts(ctx, blockHash) if err != nil { @@ -88,23 +230,191 @@ func (s *Server) TransactionReceipt(ctx context.Context, req *protobor.ReceiptRe } if receipts == nil { - return nil, errors.New("no receipts found") + return nil, status.Error(codes.NotFound, "no receipts found") } if len(receipts) <= int(txnIndex) { - return nil, errors.New("transaction index out of bounds") + return nil, status.Error(codes.OutOfRange, "transaction index out of bounds") } - return &protobor.ReceiptResponse{Receipt: ConvertReceiptToProtoReceipt(receipts[txnIndex])}, nil + pr, err := ConvertReceiptToProtoReceipt(receipts[txnIndex]) + if err != nil { + return nil, err + } + return &protobor.ReceiptResponse{Receipt: pr}, nil } func (s *Server) BorBlockReceipt(ctx context.Context, req *protobor.ReceiptRequest) (*protobor.ReceiptResponse, error) { - receipt, err := s.backend.APIBackend.GetBorBlockReceipt(ctx, protoutil.ConvertH256ToHash(req.Hash)) + txHash, err := protoHashToCommon(req.Hash) if err != nil { return nil, err } + receipt, err := s.backend.APIBackend.GetBorBlockReceipt(ctx, txHash) + // EthAPIBackend returns ethereum.NotFound when the receipt is missing; + // other backends may return (nil, nil). Map both to codes.NotFound so + // callers can branch on canonical gRPC codes instead of seeing Unknown + // or panicking on a nil dereference inside the converter. 
+ if errors.Is(err, ethereum.NotFound) { + return nil, status.Error(codes.NotFound, "bor block receipt not found") + } + if err != nil { + return nil, err + } + if receipt == nil { + return nil, status.Error(codes.NotFound, "bor block receipt not found") + } + + pr, err := ConvertReceiptToProtoReceipt(receipt) + if err != nil { + return nil, err + } + return &protobor.ReceiptResponse{Receipt: pr}, nil +} + +func (s *Server) GetAuthor(ctx context.Context, req *protobor.GetAuthorRequest) (*protobor.GetAuthorResponse, error) { + bN, err := getRpcBlockNumberFromString(req.Number) + if err != nil { + return nil, err + } + + header, err := s.backend.APIBackend.HeaderByNumber(ctx, bN) + if err != nil { + return nil, err + } + if header == nil { + return nil, status.Error(codes.NotFound, "header not found") + } + + author, err := s.backend.Engine().Author(header) + if err != nil { + return nil, err + } + + return &protobor.GetAuthorResponse{Author: protoutil.ConvertAddressToH160(author)}, nil +} + +func (s *Server) GetTdByHash(ctx context.Context, req *protobor.GetTdByHashRequest) (*protobor.GetTdResponse, error) { + hash, err := protoHashToCommon(req.Hash) + if err != nil { + return nil, err + } + + td := s.backend.APIBackend.GetTd(ctx, hash) + if td == nil { + return nil, status.Error(codes.NotFound, "total difficulty not found") + } + if !td.IsUint64() { + return nil, status.Error(codes.OutOfRange, "total difficulty overflows uint64") + } + return &protobor.GetTdResponse{TotalDifficulty: td.Uint64()}, nil +} + +func (s *Server) GetTdByNumber(ctx context.Context, req *protobor.GetTdByNumberRequest) (*protobor.GetTdResponse, error) { + bN, err := getRpcBlockNumberFromString(req.Number) + if err != nil { + return nil, err + } + // Resolve the block number (including special tags) to a concrete header + // before looking up TD by hash. 
+ header, err := s.backend.APIBackend.HeaderByNumber(ctx, bN) + if err != nil { + return nil, err + } + if header == nil { + return nil, status.Error(codes.NotFound, "header not found") + } + td := s.backend.APIBackend.GetTd(ctx, header.Hash()) + if td == nil { + return nil, status.Error(codes.NotFound, "total difficulty not found") + } + if !td.IsUint64() { + return nil, status.Error(codes.OutOfRange, "total difficulty overflows uint64") + } + return &protobor.GetTdResponse{TotalDifficulty: td.Uint64()}, nil +} + +func (s *Server) GetBlockInfoInBatch(ctx context.Context, req *protobor.GetBlockInfoInBatchRequest) (*protobor.GetBlockInfoInBatchResponse, error) { + // Input validation returns codes.InvalidArgument so clients can + // distinguish malformed requests from internal failures + if req.EndBlockNumber < req.StartBlockNumber { + return nil, status.Error(codes.InvalidArgument, "invalid range: end < start") + } + if req.EndBlockNumber-req.StartBlockNumber >= uint64(maxBlockInfoBatchSize) { + return nil, status.Error(codes.InvalidArgument, "invalid range: exceeds max batch size") + } + if req.EndBlockNumber > math.MaxInt64 { + return nil, status.Error(codes.InvalidArgument, "invalid range: end exceeds max int64") + } + + count := req.EndBlockNumber - req.StartBlockNumber + 1 + out := &protobor.GetBlockInfoInBatchResponse{ + Blocks: make([]*protobor.BlockInfo, 0, count), + } + + for j := uint64(0); j < count; j++ { + if err := ctx.Err(); err != nil { + return nil, err + } + info, ok, err := s.fetchBlockInfo(ctx, req.StartBlockNumber+j) + if err != nil { + return nil, err + } + // this requires APIBackend mock returning a missing block mid-range + // mutator-disable-next-line gap-stop semantics + if !ok { + // Match HTTP batch semantics: stop at the first gap, return what we have. + break + } + out.Blocks = append(out.Blocks, info) + } + + return out, nil +} + +// fetchBlockInfo loads header, total difficulty, and author for blockNum. 
+// Return semantics: +// - (info, true, nil): success, append to the batch +// - (nil, false, nil): legitimate gap (header not yet on chain / TD missing); caller should break the loop and return the partial prefix, matching HTTP side +// - (nil, false, err): real failure (backend error, ecrecover failure, overflow); caller should propagate the error +// +// Author is left as a nil *H160 for genesis; callers must nil-check before decoding. +func (s *Server) fetchBlockInfo(ctx context.Context, blockNum uint64) (*protobor.BlockInfo, bool, error) { + if blockNum > math.MaxInt64 { + return nil, false, status.Error(codes.InvalidArgument, "block number exceeds max int64") + } + header, err := s.backend.APIBackend.HeaderByNumber(ctx, rpc.BlockNumber(blockNum)) + if err != nil { + return nil, false, err + } + if header == nil { + return nil, false, nil + } + + td := s.backend.APIBackend.GetTd(ctx, header.Hash()) + if td == nil { + // TD not yet indexed — treat as a gap + return nil, false, nil + } + if !td.IsUint64() { + return nil, false, status.Error(codes.OutOfRange, "total difficulty overflows uint64") + } + + info := &protobor.BlockInfo{ + Header: headerToProtoBorHeader(header), + TotalDifficulty: td.Uint64(), + } + + if blockNum > 0 { + author, err := s.backend.Engine().Author(header) + if err != nil { + // Author() failure on a validated header indicates a corrupted + // seal — propagate the error. 
+ return nil, false, err + } + info.Author = protoutil.ConvertAddressToH160(author) + } - return &protobor.ReceiptResponse{Receipt: ConvertReceiptToProtoReceipt(receipt)}, nil + return info, true, nil } func getRpcBlockNumberFromString(blockNumber string) (rpc.BlockNumber, error) { @@ -122,10 +432,10 @@ func getRpcBlockNumberFromString(blockNumber string) (rpc.BlockNumber, error) { default: blckNum, err := hexutil.DecodeUint64(blockNumber) if err != nil { - return rpc.BlockNumber(0), errors.New("invalid block number") + return rpc.BlockNumber(0), status.Error(codes.InvalidArgument, "invalid block number") } if blckNum > math.MaxInt64 { - return rpc.BlockNumber(0), errors.New("block number out of range") + return rpc.BlockNumber(0), status.Error(codes.InvalidArgument, "block number out of range") } return rpc.BlockNumber(blckNum), nil } diff --git a/internal/cli/server/api_service_test.go b/internal/cli/server/api_service_test.go new file mode 100644 index 0000000000..9cab383562 --- /dev/null +++ b/internal/cli/server/api_service_test.go @@ -0,0 +1,615 @@ +package server + +import ( + "context" + "errors" + "fmt" + "math" + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + borconsensus "github.com/ethereum/go-ethereum/consensus/bor" + "github.com/ethereum/go-ethereum/consensus/bor/valset" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + protobor "github.com/0xPolygon/polyproto/bor" + commonproto "github.com/0xPolygon/polyproto/common" + protoutil "github.com/0xPolygon/polyproto/utils" +) + +// Compile-time check that Server implements the proto interface. 
+var _ protobor.BorApiServer = (*Server)(nil) + +func TestGetAuthor_InvalidBlockNumber(t *testing.T) { + srv := &Server{} + _, err := srv.GetAuthor(context.Background(), &protobor.GetAuthorRequest{Number: "not-a-number"}) + if err == nil { + t.Fatalf("expected error on invalid block number, got nil") + } +} + +func TestHeaderToProtoBorHeader_RoundTrip_Cancun(t *testing.T) { + src := &types.Header{ + ParentHash: common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333"), + UncleHash: types.EmptyUncleHash, + Coinbase: common.HexToAddress("0x0123456789abcdef0123456789abcdef01234567"), + Root: common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444"), + TxHash: common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"), + ReceiptHash: common.HexToHash("0x6666666666666666666666666666666666666666666666666666666666666666"), + Bloom: types.Bloom{0x01, 0x02, 0x03}, + Difficulty: big.NewInt(17), + Number: big.NewInt(1234567), + GasLimit: 30_000_000, + GasUsed: 21_000, + Time: 1_700_000_000, + Extra: []byte{0xde, 0xad, 0xbe, 0xef}, + MixDigest: common.HexToHash("0x7777777777777777777777777777777777777777777777777777777777777777"), + Nonce: types.BlockNonce{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}, + BaseFee: big.NewInt(1_000_000_000), + WithdrawalsHash: new(common.HexToHash("0xaabbccddeeff00112233445566778899aabbccddeeff00112233445566778899")), + BlobGasUsed: new(uint64(131072)), + ExcessBlobGas: new(uint64(262144)), + ParentBeaconRoot: new(common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")), + RequestsHash: new(common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")), + } + + pb := headerToProtoBorHeader(src) + got := protoHeaderToEthHeaderLocal(t, pb) + if got.Hash() != src.Hash() { + t.Fatalf("hash mismatch: got %x want %x", got.Hash(), src.Hash()) + } +} + +// TestHeaderToProtoBorHeader_RoundTrip_CancunZeroBlobGas guards 
against the nil vs. zero trap on blobGasUsed / excessBlobGas. +// A header with BlobGasUsed=&0 must round-trip to BlobGasUsed=&0 (not nil), +// otherwise Hash() changes and milestone propositions break. +func TestHeaderToProtoBorHeader_RoundTrip_CancunZeroBlobGas(t *testing.T) { + zeroHash := common.Hash{} + + src := &types.Header{ + ParentHash: common.HexToHash("0x01"), + UncleHash: types.EmptyUncleHash, + Coinbase: common.HexToAddress("0x0123456789abcdef0123456789abcdef01234567"), + Root: common.HexToHash("0x02"), + TxHash: common.HexToHash("0x03"), + ReceiptHash: common.HexToHash("0x04"), + Difficulty: big.NewInt(1), + Number: big.NewInt(100), + GasLimit: 30_000_000, + Time: 1_700_000_000, + BaseFee: big.NewInt(1_000_000_000), + BlobGasUsed: new(uint64(0)), + ExcessBlobGas: new(uint64(0)), + ParentBeaconRoot: &zeroHash, + } + + pb := headerToProtoBorHeader(src) + got := protoHeaderToEthHeaderLocal(t, pb) + if got.Hash() != src.Hash() { + t.Fatalf("hash mismatch (zero-blob-gas): got %x want %x", got.Hash(), src.Hash()) + } + if got.BlobGasUsed == nil { + t.Fatalf("BlobGasUsed must round-trip to &0, not nil") + } + if got.ExcessBlobGas == nil { + t.Fatalf("ExcessBlobGas must round-trip to &0, not nil") + } +} + +func TestHeaderToProtoBorHeader_RoundTrip_PreShanghai(t *testing.T) { + src := &types.Header{ + ParentHash: common.HexToHash("0x01"), + UncleHash: types.EmptyUncleHash, + Coinbase: common.HexToAddress("0x0123456789abcdef0123456789abcdef01234567"), + Root: common.HexToHash("0x02"), + TxHash: common.HexToHash("0x03"), + ReceiptHash: common.HexToHash("0x04"), + Difficulty: big.NewInt(1), + Number: big.NewInt(100), + GasLimit: 30_000_000, + GasUsed: 0, + Time: 1_600_000_000, + Extra: []byte{}, + MixDigest: common.Hash{}, + Nonce: types.BlockNonce{}, + } + pb := headerToProtoBorHeader(src) + got := protoHeaderToEthHeaderLocal(t, pb) + if got.Hash() != src.Hash() { + t.Fatalf("hash mismatch pre-shanghai: got %x want %x", got.Hash(), src.Hash()) + } +} + +func 
TestGetTdByNumber_InvalidNumber(t *testing.T) { + srv := &Server{} + _, err := srv.GetTdByNumber(context.Background(), &protobor.GetTdByNumberRequest{Number: "not-a-number"}) + if err == nil { + t.Fatalf("expected error on invalid block number") + } +} + +func TestGetBlockInfoInBatch_RangeBound(t *testing.T) { + srv := &Server{} + _, err := srv.GetBlockInfoInBatch(context.Background(), &protobor.GetBlockInfoInBatchRequest{ + StartBlockNumber: 0, EndBlockNumber: 2_000, // exceeds maxBlockInfoBatchSize + }) + if err == nil { + t.Fatalf("expected error when batch range exceeds limit") + } +} + +func TestGetBlockInfoInBatch_StartGreaterThanEnd(t *testing.T) { + srv := &Server{} + _, err := srv.GetBlockInfoInBatch(context.Background(), &protobor.GetBlockInfoInBatchRequest{ + StartBlockNumber: 10, EndBlockNumber: 5, + }) + if err == nil { + t.Fatalf("expected error when start > end") + } +} + +// TestGetBlockInfoInBatch_RangeOverflow guards against the uint64 overflow that allowed `end - start + 1` to wrap +// past the batch-size limit and drive the server into a non-terminating loop. +func TestGetBlockInfoInBatch_RangeOverflow(t *testing.T) { + srv := &Server{} + _, err := srv.GetBlockInfoInBatch(context.Background(), &protobor.GetBlockInfoInBatchRequest{ + StartBlockNumber: 0, EndBlockNumber: math.MaxUint64, + }) + if err == nil { + t.Fatalf("expected error on overflowing range, got nil (would non-terminate)") + } +} + +// TestGetBlockInfoInBatch_NearMaxUint64 guards against a narrow range near MaxUint64. 
+func TestGetBlockInfoInBatch_NearMaxUint64(t *testing.T) { + srv := &Server{} + _, err := srv.GetBlockInfoInBatch(context.Background(), &protobor.GetBlockInfoInBatchRequest{ + StartBlockNumber: math.MaxUint64 - 3, + EndBlockNumber: math.MaxUint64, + }) + if err == nil { + t.Fatalf("expected error on near-MaxUint64 range, got nil (would wrap and walk chain)") + } + if !strings.Contains(err.Error(), "exceeds max int64") { + t.Fatalf("expected int64-overflow error, got: %v", err) + } +} + +// TestGetBlockInfoInBatch_SizeGateBoundary tests the boundaries. +// A range of exactly maxBlockInfoBatchSize must pass the size gate, +// and a range of maxBlockInfoBatchSize+1 must fail it with the specific error. +// We distinguish the size gate from downstream failures by checking the error +// message. A panic from the nil backend is fine for the at-limit case since +// we only care that the gate itself didn't reject. +func TestGetBlockInfoInBatch_SizeGateBoundary(t *testing.T) { + t.Run("at limit passes the size gate", func(t *testing.T) { + srv := &Server{} + // size = end - start + 1 = 256 = maxBlockInfoBatchSize (allowed) + var err error + func() { + defer func() { + // Backend is nil; handler panics calling APIBackend.HeaderByNumber. + // A panic here means we *passed* the gate, which is what we want. 
+ _ = recover() + }() + _, err = srv.GetBlockInfoInBatch(context.Background(), &protobor.GetBlockInfoInBatchRequest{ + StartBlockNumber: 0, EndBlockNumber: uint64(maxBlockInfoBatchSize - 1), + }) + }() + if err != nil && strings.Contains(err.Error(), "exceeds max batch size") { + t.Fatalf("size gate rejected a size-of-%d request; should accept: %v", maxBlockInfoBatchSize, err) + } + }) + + t.Run("just over limit fails the size gate", func(t *testing.T) { + srv := &Server{} + // size = end - start + 1 = 257 > maxBlockInfoBatchSize + _, err := srv.GetBlockInfoInBatch(context.Background(), &protobor.GetBlockInfoInBatchRequest{ + StartBlockNumber: 0, EndBlockNumber: uint64(maxBlockInfoBatchSize), + }) + if err == nil { + t.Fatalf("expected size-gate error for range size %d (>%d), got nil", maxBlockInfoBatchSize+1, maxBlockInfoBatchSize) + } + if !strings.Contains(err.Error(), "exceeds max batch size") { + t.Fatalf("expected size-gate error message, got: %v", err) + } + }) +} + +// TestProtoHashToCommon covers every branch of the H256 → common.Hash decoder. +// All branches happen before any backend access, so they're exercised with +// just the plain function — no Server needed. 
+func TestProtoHashToCommon(t *testing.T) { + t.Parallel() + + t.Run("nil outer pointer returns InvalidArgument", func(t *testing.T) { + t.Parallel() + _, err := protoHashToCommon(nil) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("nil Hi returns InvalidArgument", func(t *testing.T) { + t.Parallel() + valid := protoutil.ConvertHashToH256(common.HexToHash("0x01")) + bad := &commonproto.H256{Hi: nil, Lo: valid.Lo} + _, err := protoHashToCommon(bad) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("nil Lo returns InvalidArgument", func(t *testing.T) { + t.Parallel() + valid := protoutil.ConvertHashToH256(common.HexToHash("0x02")) + bad := &commonproto.H256{Hi: valid.Hi, Lo: nil} + _, err := protoHashToCommon(bad) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("valid H256 round-trips to the same hash", func(t *testing.T) { + t.Parallel() + want := common.HexToHash("0xdeadbeefcafebabe112233445566778899aabbccddeeff00112233445566778899") + out, err := protoHashToCommon(protoutil.ConvertHashToH256(want)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if out != want { + t.Fatalf("hash mismatch: got %x want %x", out, want) + } + }) +} + +// TestGetRpcBlockNumberFromString covers every tag plus invalid and overflow +// branches of the helper that every block-number-taking handler funnels +// through. 
+func TestGetRpcBlockNumberFromString(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + input string + want rpc.BlockNumber + wantErr codes.Code + }{ + {"latest", "latest", rpc.LatestBlockNumber, codes.OK}, + {"earliest", "earliest", rpc.EarliestBlockNumber, codes.OK}, + {"pending", "pending", rpc.PendingBlockNumber, codes.OK}, + {"finalized", "finalized", rpc.FinalizedBlockNumber, codes.OK}, + {"safe", "safe", rpc.SafeBlockNumber, codes.OK}, + {"hex zero", "0x0", rpc.BlockNumber(0), codes.OK}, + {"hex value", "0x2a", rpc.BlockNumber(42), codes.OK}, + {"not a number", "garbage", 0, codes.InvalidArgument}, + {"missing 0x prefix", "42", 0, codes.InvalidArgument}, + // math.MaxInt64 + 1 in hex = 0x8000000000000000. + {"overflow int64", "0x8000000000000000", 0, codes.InvalidArgument}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + got, err := getRpcBlockNumberFromString(tc.input) + if tc.wantErr == codes.OK { + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != tc.want { + t.Fatalf("got %d want %d", got, tc.want) + } + return + } + st, ok := status.FromError(err) + if !ok || st.Code() != tc.wantErr { + t.Fatalf("want code %v, got err %v", tc.wantErr, err) + } + }) + } +} + +// TestHandlersInputValidation walks every gRPC handler that runs input +// validation BEFORE touching the backend. We pass invalid inputs and assert +// the canonical gRPC code; the backend pointer stays nil because validation +// short-circuits before any deref. This is the slice of patch coverage that +// doesn't need a real *eth.Ethereum. 
+func TestHandlersInputValidation(t *testing.T) { + t.Parallel() + srv := &Server{} + ctx := context.Background() + + t.Run("HeaderByNumber rejects invalid number", func(t *testing.T) { + t.Parallel() + _, err := srv.HeaderByNumber(ctx, &protobor.GetHeaderByNumberRequest{Number: "not-hex"}) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("BlockByNumber rejects invalid number", func(t *testing.T) { + t.Parallel() + _, err := srv.BlockByNumber(ctx, &protobor.GetBlockByNumberRequest{Number: "0xZZ"}) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("TransactionReceipt rejects nil hash", func(t *testing.T) { + t.Parallel() + _, err := srv.TransactionReceipt(ctx, &protobor.ReceiptRequest{Hash: nil}) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("BorBlockReceipt rejects nil hash", func(t *testing.T) { + t.Parallel() + _, err := srv.BorBlockReceipt(ctx, &protobor.ReceiptRequest{Hash: nil}) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("GetTdByHash rejects nil hash", func(t *testing.T) { + t.Parallel() + _, err := srv.GetTdByHash(ctx, &protobor.GetTdByHashRequest{Hash: nil}) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("GetTdByHash rejects H256 with nil Hi", func(t *testing.T) { + t.Parallel() + valid := protoutil.ConvertHashToH256(common.HexToHash("0x01")) + _, err := srv.GetTdByHash(ctx, &protobor.GetTdByHashRequest{Hash: &commonproto.H256{Hi: nil, Lo: valid.Lo}}) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want 
InvalidArgument, got %v", err) + } + }) + + t.Run("GetTdByNumber rejects invalid number", func(t *testing.T) { + t.Parallel() + _, err := srv.GetTdByNumber(ctx, &protobor.GetTdByNumberRequest{Number: "0xZZ"}) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("GetRootHash rejects end < start", func(t *testing.T) { + t.Parallel() + _, err := srv.GetRootHash(ctx, &protobor.GetRootHashRequest{StartBlockNumber: 100, EndBlockNumber: 50}) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("GetRootHash rejects end > MaxInt64", func(t *testing.T) { + t.Parallel() + _, err := srv.GetRootHash(ctx, &protobor.GetRootHashRequest{StartBlockNumber: 1, EndBlockNumber: math.MaxInt64 + 1}) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("GetVoteOnHash rejects end < start", func(t *testing.T) { + t.Parallel() + _, err := srv.GetVoteOnHash(ctx, &protobor.GetVoteOnHashRequest{ + StartBlockNumber: 100, EndBlockNumber: 50, + Hash: "0x0", MilestoneId: "m", + }) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("GetVoteOnHash rejects end > MaxInt64", func(t *testing.T) { + t.Parallel() + _, err := srv.GetVoteOnHash(ctx, &protobor.GetVoteOnHashRequest{ + StartBlockNumber: 1, EndBlockNumber: math.MaxInt64 + 1, + Hash: "0x0", MilestoneId: "m", + }) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("GetVoteOnHash rejects empty hash", func(t *testing.T) { + t.Parallel() + _, err := srv.GetVoteOnHash(ctx, &protobor.GetVoteOnHashRequest{ + StartBlockNumber: 1, EndBlockNumber: 2, + Hash: "", MilestoneId: 
"m", + }) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("GetVoteOnHash rejects empty milestone id", func(t *testing.T) { + t.Parallel() + _, err := srv.GetVoteOnHash(ctx, &protobor.GetVoteOnHashRequest{ + StartBlockNumber: 1, EndBlockNumber: 2, + Hash: "0x0", MilestoneId: "", + }) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) + + t.Run("GetAuthor rejects invalid number", func(t *testing.T) { + t.Parallel() + _, err := srv.GetAuthor(ctx, &protobor.GetAuthorRequest{Number: "0xZZ"}) + st, ok := status.FromError(err) + if !ok || st.Code() != codes.InvalidArgument { + t.Fatalf("want InvalidArgument, got %v", err) + } + }) +} + +// TestMapBorAPIError covers the canonical-code mapping for both struct-typed +// errors (which carry %d-formatted dynamic messages and so cannot be matched +// by string equality) and the literal sentinels. 
+func TestMapBorAPIError(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + err error + want codes.Code + }{ + {"nil passes through", nil, codes.OK}, + {"already a status code is preserved", + status.Error(codes.PermissionDenied, "x"), codes.PermissionDenied}, + {"InvalidStartEndBlockError → InvalidArgument", + &valset.InvalidStartEndBlockError{Start: 100, End: 50_000, CurrentHeader: 1000}, + codes.InvalidArgument}, + {"MaxCheckpointLengthExceededError → OutOfRange", + &borconsensus.MaxCheckpointLengthExceededError{Start: 0, End: 50_000}, + codes.OutOfRange}, + {"engine missing → FailedPrecondition", + errors.New("Only available in Bor engine"), codes.FailedPrecondition}, + {"unknown block → NotFound", + errors.New("unknown block"), codes.NotFound}, + {"end-block out of safe range → OutOfRange", + errors.New("end block number is out of safe range"), codes.OutOfRange}, + {"failed to get end block → NotFound", + errors.New("failed to get end block"), codes.NotFound}, + {"failed to get tip confirmation block → NotFound", + errors.New("failed to get tip confirmation block"), codes.NotFound}, + {"hash mismatch prefix → InvalidArgument", + fmt.Errorf("hash mismatch: localChainHash 0xaa, milestoneHash 0xbb"), codes.InvalidArgument}, + {"reorg sentinel → Aborted", + errors.New("reorg occurred while computing checkpoint root"), codes.Aborted}, + {"non-contiguous range → DataLoss", + errors.New("non-contiguous headers in checkpoint range"), codes.DataLoss}, + {"unknown error → Internal (not Unknown)", + errors.New("totally novel failure"), codes.Internal}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + got := mapBorAPIError(tc.err) + if tc.want == codes.OK { + require.Nil(t, got) + return + } + st, ok := status.FromError(got) + require.True(t, ok, "expected gRPC status, got %v", got) + require.Equal(t, tc.want, st.Code(), "msg=%s", st.Message()) + }) + } +} + +// TestFetchBlockInfo_BlockNumOverflow exercises the 
defensive uint64→int64 +// guard. Reaching it from GetBlockInfoInBatch is impossible (the outer cap +// catches MaxInt64 first), so we call fetchBlockInfo directly. +func TestFetchBlockInfo_BlockNumOverflow(t *testing.T) { + t.Parallel() + srv := &Server{} + _, ok, err := srv.fetchBlockInfo(context.Background(), uint64(math.MaxInt64)+1) + require.False(t, ok) + require.Error(t, err) + st, sok := status.FromError(err) + require.True(t, sok) + require.Equal(t, codes.InvalidArgument, st.Code()) +} + +// TestBlockToProtoBlock confirms the pure converter delegates to the header +// converter unchanged — no backend needed. +func TestBlockToProtoBlock(t *testing.T) { + t.Parallel() + + header := &types.Header{ + Number: big.NewInt(7), + Difficulty: big.NewInt(1), + GasLimit: 30_000_000, + Time: 1_700_000_000, + } + block := types.NewBlockWithHeader(header) + pb := blockToProtoBlock(block) + if pb == nil || pb.Header == nil { + t.Fatalf("nil result: %+v", pb) + } + if pb.Header.Number != 7 { + t.Fatalf("number mismatch: got %d want 7", pb.Header.Number) + } +} + +// protoHeaderToEthHeaderLocal is the test-side inverse of headerToProtoBorHeader. +// It mirrors the decoder that heimdall's x/bor/grpc package will ship. 
+func protoHeaderToEthHeaderLocal(t *testing.T, p *protobor.Header) *types.Header { + t.Helper() + if p == nil { + return nil + } + convH := func(h *commonproto.H256) common.Hash { + if h == nil { + return common.Hash{} + } + b := protoutil.ConvertH256ToHash(h) + return common.BytesToHash(b[:]) + } + convA := func(a *commonproto.H160) common.Address { + if a == nil { + return common.Address{} + } + arr := protoutil.ConvertH160toAddress(a) + return common.BytesToAddress(arr[:]) + } + + h := &types.Header{ + ParentHash: convH(p.ParentHash), + UncleHash: convH(p.UncleHash), + Coinbase: convA(p.Coinbase), + Root: convH(p.StateRoot), + TxHash: convH(p.TxRoot), + ReceiptHash: convH(p.ReceiptRoot), + Difficulty: new(big.Int).SetBytes(p.Difficulty), + Number: new(big.Int).SetUint64(p.Number), + GasLimit: p.GasLimit, + GasUsed: p.GasUsed, + Time: p.Time, + Extra: append([]byte(nil), p.ExtraData...), + MixDigest: convH(p.MixDigest), + } + h.Bloom.SetBytes(p.Bloom) + copy(h.Nonce[:], p.Nonce) + + if len(p.BaseFee) > 0 { + h.BaseFee = new(big.Int).SetBytes(p.BaseFee) + } + if p.WithdrawalsHash != nil { + h.WithdrawalsHash = new(convH(p.WithdrawalsHash)) + } + // BlobGasUsed / ExcessBlobGas are proto3 `optional` on the wire → *uint64 on the Go side. + h.BlobGasUsed = p.BlobGasUsed + h.ExcessBlobGas = p.ExcessBlobGas + if p.ParentBeaconBlockRoot != nil { + h.ParentBeaconRoot = new(convH(p.ParentBeaconBlockRoot)) + } + if p.RequestsHash != nil { + h.RequestsHash = new(convH(p.RequestsHash)) + } + return h +} diff --git a/internal/cli/server/command.go b/internal/cli/server/command.go index 2925affaea..da149ab4f6 100644 --- a/internal/cli/server/command.go +++ b/internal/cli/server/command.go @@ -134,6 +134,18 @@ func (c *Command) extractFlags(args []string) error { // Handle multiple flags for tx lookup limit c.cliConfig.Cache.TxLookupLimit = handleTxLookupLimitFlag(tomlConfig, args, c.cliConfig) + // Env-var fallback for the gRPC auth token. 
+ if c.cliConfig.GRPC != nil { + if c.cliConfig.GRPC.Token == "" { + if envTok := os.Getenv("BOR_GRPC_TOKEN"); envTok != "" { + c.cliConfig.GRPC.Token = envTok + } + } else { + // Warn when the token was supplied via --grpc.token or flag + log.Warn("grpc.token sourced from CLI/TOML config — prefer the BOR_GRPC_TOKEN env var to avoid exposing the token") + } + } + c.config = c.cliConfig return nil diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index 16a0a791f3..f3612b7dda 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -531,6 +531,8 @@ type AUTHConfig struct { type GRPCConfig struct { // Addr is the bind address for the grpc rpc server Addr string `hcl:"addr,optional" toml:"addr,optional"` + // Token is the bearer token required for incoming gRPC calls; empty disables auth + Token string `hcl:"token,optional" toml:"token,optional"` } type APIConfig struct { diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index 39e01ea7ab..e759dfa770 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -1227,10 +1227,16 @@ func (c *Command) Flags(config *Config) *flagset.Flagset { // grpc f.StringFlag(&flagset.StringFlag{ Name: "grpc.addr", - Usage: "Address and port to bind the GRPC server", + Usage: "Address and port to bind the GRPC server. Empty disables the server. Non-loopback binds without --grpc.token log a startup warning.", Value: &c.cliConfig.GRPC.Addr, Default: c.cliConfig.GRPC.Addr, }) + f.StringFlag(&flagset.StringFlag{ + Name: "grpc.token", + Usage: "Raw token expected in the `authorization: Bearer ` header of incoming gRPC calls (empty disables auth; the `Bearer ` prefix is stripped before comparison). 
Prefer the BOR_GRPC_TOKEN environment variable over this flag.", + Value: &c.cliConfig.GRPC.Token, + Default: c.cliConfig.GRPC.Token, + }) // developer f.BoolFlag(&flagset.BoolFlag{ diff --git a/internal/cli/server/grpc_auth_test.go b/internal/cli/server/grpc_auth_test.go new file mode 100644 index 0000000000..852c880213 --- /dev/null +++ b/internal/cli/server/grpc_auth_test.go @@ -0,0 +1,251 @@ +package server + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const fakeToken = "Bearer secret" + +func TestAuthenticate_MissingMetadata(t *testing.T) { + t.Parallel() + // Plain context with no gRPC metadata attached. + err := authenticate(context.Background(), "secret") + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Unauthenticated, s.Code()) + require.Contains(t, s.Message(), "missing metadata") +} + +func TestAuthenticate_MissingAuthorizationHeader(t *testing.T) { + t.Parallel() + ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("other-header", "value")) + err := authenticate(ctx, "secret") + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Unauthenticated, s.Code()) + require.Contains(t, s.Message(), "missing authorization header") +} + +func TestAuthenticate_MissingBearerPrefix(t *testing.T) { + t.Parallel() + ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("authorization", "Basic secret")) + err := authenticate(ctx, "secret") + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Unauthenticated, s.Code()) + require.Contains(t, s.Message(), "invalid authorization header") +} + +func TestAuthenticate_WrongToken(t *testing.T) { + t.Parallel() + ctx := 
metadata.NewIncomingContext(context.Background(), metadata.Pairs("authorization", "Bearer wrong_token")) + err := authenticate(ctx, "secret") + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Unauthenticated, s.Code()) + require.Contains(t, s.Message(), "invalid token") +} + +func TestAuthenticate_CorrectToken(t *testing.T) { + t.Parallel() + ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("authorization", fakeToken)) + err := authenticate(ctx, "secret") + require.NoError(t, err) +} + +// TestAuthenticate_CaseInsensitiveBearerPrefix verifies the auth scheme name is case-insensitive. +func TestAuthenticate_CaseInsensitiveBearerPrefix(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + hdr string + }{ + {"canonical", "Bearer secret"}, + {"lowercase", "bearer secret"}, + {"uppercase", "BEARER secret"}, + {"mixed-case", "BeArEr secret"}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("authorization", tc.hdr)) + require.NoError(t, authenticate(ctx, "secret")) + }) + } +} + +// TestAuthenticate_ConstantTimeCompare verifies that both a close-miss token and +// a completely different token both return Unauthenticated (no behavioral +// difference based on byte position — the unit test checks that both fail). 
+func TestAuthenticate_ConstantTimeCompare(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + token string + }{ + {"one-byte-diff", "abd"}, + {"totally-different", "xyz"}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("authorization", "Bearer "+tc.token)) + err := authenticate(ctx, "abc") + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Unauthenticated, s.Code()) + }) + } +} + +// TestCombinedUnaryInterceptor tests the unary interceptor's behavior with various token configurations and metadata. +func TestCombinedUnaryInterceptor(t *testing.T) { + makeSrv := func(token string) *Server { + return &Server{config: &Config{GRPC: &GRPCConfig{Token: token}}} + } + + info := &grpc.UnaryServerInfo{FullMethod: "/test/Method"} + okHandler := func(ctx context.Context, req interface{}) (interface{}, error) { return "ran", nil } + + t.Run("no configured token bypasses auth and runs handler", func(t *testing.T) { + srv := makeSrv("") + resp, err := srv.combinedUnaryInterceptor()(context.Background(), nil, info, okHandler) + require.NoError(t, err) + require.Equal(t, "ran", resp) + }) + + t.Run("configured token with no client metadata rejects Unauthenticated", func(t *testing.T) { + srv := makeSrv("secret") + ran := false + handler := func(ctx context.Context, req interface{}) (interface{}, error) { ran = true; return nil, nil } + _, err := srv.combinedUnaryInterceptor()(context.Background(), nil, info, handler) + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Unauthenticated, s.Code()) + require.False(t, ran, "handler must not run when auth is rejected") + }) + + t.Run("configured token with matching bearer runs handler", func(t *testing.T) { + srv := makeSrv("secret") + ctx := metadata.NewIncomingContext(context.Background(), 
metadata.Pairs("authorization", fakeToken)) + resp, err := srv.combinedUnaryInterceptor()(ctx, nil, info, okHandler) + require.NoError(t, err) + require.Equal(t, "ran", resp) + }) +} + +// TestCombinedStreamInterceptor covers the stream interceptor's behavior with various token configurations and metadata. +func TestCombinedStreamInterceptor(t *testing.T) { + makeSrv := func(token string) *Server { + return &Server{config: &Config{GRPC: &GRPCConfig{Token: token}}} + } + + info := &grpc.StreamServerInfo{FullMethod: "/test/Stream"} + + t.Run("no configured token bypasses auth and runs handler", func(t *testing.T) { + srv := makeSrv("") + ran := false + handler := func(srv interface{}, ss grpc.ServerStream) error { ran = true; return nil } + err := srv.combinedStreamInterceptor()(nil, &fakeServerStream{ctx: context.Background()}, info, handler) + require.NoError(t, err) + require.True(t, ran) + }) + + t.Run("configured token with no metadata rejects Unauthenticated and skips handler", func(t *testing.T) { + srv := makeSrv("secret") + ran := false + handler := func(srv interface{}, ss grpc.ServerStream) error { ran = true; return nil } + err := srv.combinedStreamInterceptor()(nil, &fakeServerStream{ctx: context.Background()}, info, handler) + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Unauthenticated, s.Code()) + require.False(t, ran, "handler must not run when auth is rejected") + }) + + t.Run("handler error is propagated unchanged", func(t *testing.T) { + srv := makeSrv("") + sentinel := errors.New("handler failure sentinel") + handler := func(srv interface{}, ss grpc.ServerStream) error { return sentinel } + err := srv.combinedStreamInterceptor()(nil, &fakeServerStream{ctx: context.Background()}, info, handler) + require.ErrorIs(t, err, sentinel, "interceptor must propagate handler errors, not swallow them") + }) + + t.Run("configured token with matching bearer runs handler", func(t *testing.T) { + srv := 
makeSrv("secret") + ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("authorization", fakeToken)) + ran := false + handler := func(srv interface{}, ss grpc.ServerStream) error { ran = true; return nil } + err := srv.combinedStreamInterceptor()(nil, &fakeServerStream{ctx: ctx}, info, handler) + require.NoError(t, err) + require.True(t, ran) + }) +} + +// fakeServerStream is the minimum surface of grpc.ServerStream needed by the +// auth interceptor: it only inspects ss.Context(). +type fakeServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (f *fakeServerStream) Context() context.Context { return f.ctx } + +func TestIsLoopbackHostPort(t *testing.T) { + t.Parallel() + + cases := []struct { + hostport string + want bool + }{ + {"127.0.0.1:3131", true}, + {"127.0.0.5:3131", true}, // anywhere in 127.0.0.0/8 + {"[::1]:3131", true}, + {"localhost:3131", true}, + {"LOCALHOST:3131", true}, // hostnames are case-insensitive (RFC 4343) + {"LocalHost:3131", true}, + + {"0.0.0.0:3131", false}, + {"[::]:3131", false}, + {":3131", false}, // wildcard via empty host + {"10.0.0.1:3131", false}, + {"192.168.1.5:3131", false}, + {"bor.example.net:3131", false}, // unresolved hostname — conservative + {"", false}, + } + for _, tc := range cases { + t.Run(tc.hostport, func(t *testing.T) { + t.Parallel() + require.Equal(t, tc.want, IsLoopbackHostPort(tc.hostport)) + }) + } +} + +// TestWithGRPCAddress_EmptyAddrSkipsStartup verifies that an empty grpc.addr +// is treated as a clean disable — the closure returns nil without trying to +// bind a listener. Other configurations actually bind a listener and are +// covered by the integration-style tests in server_test.go. +func TestWithGRPCAddress_EmptyAddrSkipsStartup(t *testing.T) { + t.Parallel() + cfg := &Config{GRPC: &GRPCConfig{Addr: "", Token: ""}} + // nil Server is safe here — the guard returns before touching it. 
+ err := WithGRPCAddress()(nil, cfg) + require.NoError(t, err) +} diff --git a/internal/cli/server/server.go b/internal/cli/server/server.go index 8cde0e2149..52eeabe3a5 100644 --- a/internal/cli/server/server.go +++ b/internal/cli/server/server.go @@ -2,6 +2,7 @@ package server import ( "context" + "crypto/subtle" "encoding/json" "fmt" "io" @@ -23,7 +24,10 @@ import ( sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.4.0" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/reflection" + "google.golang.org/grpc/status" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/keystore" @@ -54,6 +58,10 @@ import ( protobor "github.com/0xPolygon/polyproto/bor" ) +// maxGRPCMessageSize matches go-ethereum's BatchResponseMaxSize default +// to keep the gRPC and JSON-RPC surfaces under comparable response-size protection. +const maxGRPCMessageSize = 25 * 1000 * 1000 + type Server struct { proto.UnimplementedBorServer protobor.UnimplementedBorApiServer @@ -80,10 +88,49 @@ func init() { func WithGRPCAddress() serverOption { return func(srv *Server, config *Config) error { - return srv.gRPCServerByAddress(config.GRPC.Addr) + // readConfigFile may leave config.GRPC nil when the HCL/TOML config + // omits the [grpc] block. Treat that as "gRPC disabled" instead of + // dereferencing into a panic at startup. + if config.GRPC == nil { + log.Info("gRPC server disabled (no [grpc] block in config)") + return nil + } + addr := config.GRPC.Addr + if addr == "" { + log.Info("gRPC server disabled (grpc.addr is empty)") + return nil + } + // Mirror heimdall's client-side posture: warn (don't block) when the + // operator opts into a non-loopback bind without a bearer token. The + // startup log entry is what operators are expected to act on. 
+ if config.GRPC.Token == "" && !IsLoopbackHostPort(addr) { + log.Warn( + "Starting unauthenticated gRPC server on non-loopback address; "+ + "set --grpc.token / BOR_GRPC_TOKEN to require authentication.", + "addr", addr, + ) + } + return srv.gRPCServerByAddress(addr) } } +// IsLoopbackHostPort reports whether hostport refers to a loopback host. It +// returns false for wildcard binds (":3131", "0.0.0.0:3131", "[::]:3131"). +func IsLoopbackHostPort(hostport string) bool { + host, _, err := net.SplitHostPort(hostport) + if err != nil { + host = hostport + } + if host == "" { + return false + } + if strings.EqualFold(host, "localhost") { + return true + } + ip := net.ParseIP(host) + return ip != nil && ip.IsLoopback() +} + func WithGRPCListener(lis net.Listener) serverOption { return func(srv *Server, _ *Config) error { return srv.gRPCServerByListener(lis) @@ -434,7 +481,12 @@ func (s *Server) gRPCServerByAddress(addr string) error { } func (s *Server) gRPCServerByListener(listener net.Listener) error { - s.grpcServer = grpc.NewServer(s.withLoggingUnaryInterceptor()) + s.grpcServer = grpc.NewServer( + grpc.UnaryInterceptor(s.combinedUnaryInterceptor()), + grpc.StreamInterceptor(s.combinedStreamInterceptor()), + grpc.MaxRecvMsgSize(maxGRPCMessageSize), + grpc.MaxSendMsgSize(maxGRPCMessageSize), + ) proto.RegisterBorServer(s.grpcServer, s) protobor.RegisterBorApiServer(s.grpcServer, s) reflection.Register(s.grpcServer) @@ -450,17 +502,83 @@ func (s *Server) gRPCServerByListener(listener net.Listener) error { return nil } -func (s *Server) withLoggingUnaryInterceptor() grpc.ServerOption { - return grpc.UnaryInterceptor(s.loggingServerInterceptor) +// combinedUnaryInterceptor returns a single unary server interceptor that +// optionally enforces bearer-token authentication (when a token is configured) +// and logs the request outcome — both successful handler invocations and +// auth-rejected attempts. 
Rejected attempts are logged, successful calls are +// logged at Trace, rejections at Debug. +func (s *Server) combinedUnaryInterceptor() grpc.UnaryServerInterceptor { + token := s.tokenForInterceptor() + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + if token != "" { + if err := authenticate(ctx, token); err != nil { + log.Debug("gRPC auth rejected", "method", info.FullMethod, "error", err) + return nil, err + } + } + start := time.Now() + h, err := handler(ctx, req) + log.Trace("Request", "method", info.FullMethod, "duration", time.Since(start), "error", err) + return h, err + } } -func (s *Server) loggingServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - start := time.Now() - h, err := handler(ctx, req) +// combinedStreamInterceptor mirrors combinedUnaryInterceptor for stream RPCs. +// Needed so the reflection service is gated by the same bearer-token check as +// unary calls. Logging behavior matches combinedUnaryInterceptor: rejected +// auth at Debug, successful stream duration at Trace. +func (s *Server) combinedStreamInterceptor() grpc.StreamServerInterceptor { + token := s.tokenForInterceptor() + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if token != "" { + if err := authenticate(ss.Context(), token); err != nil { + log.Debug("gRPC auth rejected (stream)", "method", info.FullMethod, "error", err) + return err + } + } + start := time.Now() + err := handler(srv, ss) + log.Trace("Stream", "method", info.FullMethod, "duration", time.Since(start), "error", err) + return err + } +} - log.Trace("Request", "method", info.FullMethod, "duration", time.Since(start), "error", err) +// tokenForInterceptor returns the configured bearer token, or empty when no +// [grpc] block exists in the config. 
Empty disables auth — callers like +// gRPCServerByListener may run with a partial Config (e.g., tests, embedders), +// so reading s.config.GRPC.Token directly would nil-deref. Mirrors the same +// "missing block = disabled" treatment WithGRPCAddress applies. +func (s *Server) tokenForInterceptor() string { + if s.config == nil || s.config.GRPC == nil { + return "" + } + return s.config.GRPC.Token +} - return h, err +// authenticate validates the bearer token in the gRPC metadata against the +// configured token. +// Token byte-comparison uses subtle.ConstantTimeCompare, which is constant-time +// for equal-length inputs; length-mismatched inputs short-circuit. +// Scheme matching is case-insensitive per RFC 6750 §2.1. +func authenticate(ctx context.Context, expected string) error { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return status.Error(codes.Unauthenticated, "missing metadata") + } + headers := md.Get("authorization") + if len(headers) == 0 { + return status.Error(codes.Unauthenticated, "missing authorization header") + } + const prefix = "Bearer " + h := headers[0] + if len(h) < len(prefix) || !strings.EqualFold(h[:len(prefix)], prefix) { + return status.Error(codes.Unauthenticated, "invalid authorization header") + } + got := h[len(prefix):] + if subtle.ConstantTimeCompare([]byte(got), []byte(expected)) != 1 { + return status.Error(codes.Unauthenticated, "invalid token") + } + return nil } func setupLogger(logLevel int, loggingInfo LoggingConfig) { @@ -496,6 +614,10 @@ func (s *Server) GetLatestBlockNumber() *big.Int { } func (s *Server) GetGrpcAddr() string { + // Treat "missing block" as "gRPC disabled" rather than nil-deref. 
+ if s.config == nil || s.config.GRPC == nil { + return "" + } return s.config.GRPC.Addr } @@ -620,8 +742,8 @@ func (s *Server) customHealthServiceHandler() http.Handler { healthResponse["node_info"] = s.getBorInfo() - status := s.performHealthChecks(healthResponse) - healthResponse["status"] = status + sts := s.performHealthChecks(healthResponse) + healthResponse["status"] = sts healthResponse["error"] = false healthResponse["error_message"] = "" diff --git a/internal/cli/server/server_test.go b/internal/cli/server/server_test.go index b3b8e89bae..c204300c24 100644 --- a/internal/cli/server/server_test.go +++ b/internal/cli/server/server_test.go @@ -1,10 +1,22 @@ package server import ( + "context" + "encoding/json" + "fmt" + "net" + "net/http/httptest" "testing" "time" + "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + protobor "github.com/0xPolygon/polyproto/bor" + protoutil "github.com/0xPolygon/polyproto/utils" ) func TestServer_DeveloperMode(t *testing.T) { @@ -39,3 +51,317 @@ func TestServer_DeveloperMode(t *testing.T) { } } } + +// TestPerformHealthChecks_NoConfig short-circuits when health config is absent. +// All gRPC-related telemetry knobs default to "off" and the function must +// return StatusOK without dereferencing the nil config. +func TestPerformHealthChecks_NoConfig(t *testing.T) { + t.Parallel() + + srv := &Server{config: nil} + got := srv.performHealthChecks(map[string]any{}) + require.Equal(t, StatusOK, got.Level) + require.Equal(t, 0, got.Code) + require.Equal(t, "", got.Message) +} + +// TestPerformHealthChecks_NilHealthSection: same as above but config exists +// without a [health] block — must take the early-return path, not panic. 
+func TestPerformHealthChecks_NilHealthSection(t *testing.T) { + t.Parallel() + + srv := &Server{config: &Config{Health: nil}} + got := srv.performHealthChecks(map[string]any{ + "system": map[string]any{"goroutines_count": float64(99999)}, + }) + require.Equal(t, StatusOK, got.Level) +} + +// TestPerformHealthChecks_GoroutinesThresholds drives the goroutine-count +// branches: under both, only over warn, over critical (which wins over warn), +// plus the "0 = disabled" sentinel. +func TestPerformHealthChecks_GoroutinesThresholds(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + warn int + max int + count float64 + wantLvl HealthStatusLevel + wantMsg string + }{ + {"under both", 1000, 5000, 200, StatusOK, ""}, + {"over warn only", 100, 5000, 200, StatusWarn, "above the warning threshold"}, + {"over critical wins", 100, 1000, 5000, StatusCritical, "above the maximum threshold"}, + {"thresholds disabled by 0", 0, 0, 1_000_000, StatusOK, ""}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + srv := &Server{config: &Config{Health: &HealthConfig{ + MaxGoRoutineThreshold: tc.max, + WarnGoRoutineThreshold: tc.warn, + }}} + resp := map[string]any{ + "system": map[string]any{"goroutines_count": tc.count}, + } + got := srv.performHealthChecks(resp) + require.Equal(t, tc.wantLvl, got.Level) + if tc.wantMsg != "" { + require.Contains(t, got.Message, tc.wantMsg) + } + }) + } +} + +// TestPerformHealthChecks_PeerThresholds drives the peer-count branches. +// peer_count is read as int (not float64 like goroutine count). 
+func TestPerformHealthChecks_PeerThresholds(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + warn int + min int + count int + wantLvl HealthStatusLevel + wantMsg string + }{ + {"healthy", 5, 1, 50, StatusOK, ""}, + {"below warn only", 10, 1, 5, StatusWarn, "below the warning threshold"}, + {"below critical wins", 5, 10, 1, StatusCritical, "below the minimum threshold"}, + {"thresholds disabled by 0", 0, 0, 0, StatusOK, ""}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + srv := &Server{config: &Config{Health: &HealthConfig{ + MinPeerThreshold: tc.min, + WarnPeerThreshold: tc.warn, + }}} + resp := map[string]any{ + "node_info": map[string]any{"peer_count": tc.count}, + } + got := srv.performHealthChecks(resp) + require.Equal(t, tc.wantLvl, got.Level) + if tc.wantMsg != "" { + require.Contains(t, got.Message, tc.wantMsg) + } + }) + } +} + +// TestPerformHealthChecks_CombinedFailures verifies that both checks compose: +// a critical goroutines reading and a warn-level peer count must surface as +// Critical with both messages joined. +func TestPerformHealthChecks_CombinedFailures(t *testing.T) { + t.Parallel() + + srv := &Server{config: &Config{Health: &HealthConfig{ + MaxGoRoutineThreshold: 1000, + WarnGoRoutineThreshold: 500, + MinPeerThreshold: 1, + WarnPeerThreshold: 10, + }}} + resp := map[string]any{ + "system": map[string]any{"goroutines_count": float64(5000)}, + "node_info": map[string]any{"peer_count": 5}, + } + got := srv.performHealthChecks(resp) + require.Equal(t, StatusCritical, got.Level) + require.Contains(t, got.Message, "goroutines") + require.Contains(t, got.Message, "peers") +} + +// TestGetGrpcAddr returns the configured gRPC bind address verbatim. 
+func TestGetGrpcAddr(t *testing.T) { + t.Parallel() + + srv := &Server{config: &Config{GRPC: &GRPCConfig{Addr: "127.0.0.1:3131"}}} + require.Equal(t, "127.0.0.1:3131", srv.GetGrpcAddr()) +} + +// TestServer_GRPCHandlersHappyPath spins up a real mock server in dev mode, +// mines a few blocks, then exercises every gRPC handler that needs a live +// backend. Heavy test (~10s), but it's the only way to cover the actual +// proto-marshaling success paths in api_service.go. +func TestServer_GRPCHandlersHappyPath(t *testing.T) { + cfg := DefaultConfig() + cfg.Developer.Enabled = true + cfg.Developer.Period = 1 + + srv, err := CreateMockServer(cfg) + require.NoError(t, err) + defer CloseMockServer(srv) + + // Mine a couple blocks so the latest is greater than genesis, and we have a non-genesis + // header to query (GetAuthor / GetBlockInfoInBatch's author path requires blockNum > 0). + require.Eventually(t, func() bool { + return srv.GetLatestBlockNumber().Uint64() >= 2 + }, 10*time.Second, 100*time.Millisecond, "no blocks mined within 10s") + + ctx := context.Background() + + t.Run("HeaderByNumber latest tag", func(t *testing.T) { + resp, err := srv.HeaderByNumber(ctx, &protobor.GetHeaderByNumberRequest{Number: "latest"}) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Header) + require.GreaterOrEqual(t, resp.Header.Number, uint64(1)) + }) + + t.Run("HeaderByNumber hex zero returns genesis", func(t *testing.T) { + resp, err := srv.HeaderByNumber(ctx, &protobor.GetHeaderByNumberRequest{Number: "0x0"}) + require.NoError(t, err) + require.Equal(t, uint64(0), resp.Header.Number) + }) + + t.Run("BlockByNumber latest", func(t *testing.T) { + resp, err := srv.BlockByNumber(ctx, &protobor.GetBlockByNumberRequest{Number: "latest"}) + require.NoError(t, err) + require.NotNil(t, resp.Block) + require.NotNil(t, resp.Block.Header) + }) + + t.Run("GetAuthor latest", func(t *testing.T) { + resp, err := srv.GetAuthor(ctx, &protobor.GetAuthorRequest{Number: 
"latest"}) + require.NoError(t, err) + require.NotNil(t, resp.Author) + }) + + t.Run("GetTdByNumber latest", func(t *testing.T) { + resp, err := srv.GetTdByNumber(ctx, &protobor.GetTdByNumberRequest{Number: "latest"}) + require.NoError(t, err) + require.GreaterOrEqual(t, resp.TotalDifficulty, uint64(1)) + }) + + t.Run("GetTdByHash for known block", func(t *testing.T) { + // First fetch the latest header so we have a valid hash, then look up TD by it. + hr, err := srv.HeaderByNumber(ctx, &protobor.GetHeaderByNumberRequest{Number: "latest"}) + require.NoError(t, err) + + // Reconstruct the proto H256 from the header's parent hash; using + // parent guarantees the hash exists on chain (non-zero TD). + req := &protobor.GetTdByHashRequest{Hash: hr.Header.ParentHash} + resp, err := srv.GetTdByHash(ctx, req) + require.NoError(t, err) + require.GreaterOrEqual(t, resp.TotalDifficulty, uint64(1)) + }) + + // Asserting the engine-rejected error path here: + t.Run("GetRootHash on ethash mock returns engine error", func(t *testing.T) { + _, err := srv.GetRootHash(ctx, &protobor.GetRootHashRequest{StartBlockNumber: 0, EndBlockNumber: 1}) + require.Error(t, err) + }) + + t.Run("GetVoteOnHash on ethash mock returns engine error", func(t *testing.T) { + _, err := srv.GetVoteOnHash(ctx, &protobor.GetVoteOnHashRequest{ + StartBlockNumber: 0, EndBlockNumber: 1, + Hash: "0x0", MilestoneId: "test", + }) + require.Error(t, err) + }) + + t.Run("GetBlockInfoInBatch for [0,2]", func(t *testing.T) { + resp, err := srv.GetBlockInfoInBatch(ctx, &protobor.GetBlockInfoInBatchRequest{ + StartBlockNumber: 0, + EndBlockNumber: 2, + }) + require.NoError(t, err) + require.Len(t, resp.Blocks, 3) + require.NotNil(t, resp.Blocks[0].Header) + // Block 0 (genesis) has no author per fetchBlockInfo's contract. 
+ require.Nil(t, resp.Blocks[0].Author, "genesis must have nil author") + require.NotNil(t, resp.Blocks[1].Author, "non-genesis must have non-nil author") + }) + + t.Run("HeaderByNumber far-future returns NotFound", func(t *testing.T) { + _, err := srv.HeaderByNumber(ctx, &protobor.GetHeaderByNumberRequest{ + Number: fmt.Sprintf("0x%x", uint64(1_000_000_000)), + }) + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, st.Code()) + }) + + t.Run("TransactionReceipt for unknown hash returns NotFound", func(t *testing.T) { + // Random hash that won't match any tx; protoHashToCommon passes, then + // the backend lookup misses → NotFound. + hash := protoutil.ConvertHashToH256(common.HexToHash("0xdeadbeef")) + _, err := srv.TransactionReceipt(ctx, &protobor.ReceiptRequest{Hash: hash}) + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, st.Code()) + }) + + t.Run("BorBlockReceipt for unknown hash returns NotFound", func(t *testing.T) { + hash := protoutil.ConvertHashToH256(common.HexToHash("0xfeedface")) + _, err := srv.BorBlockReceipt(ctx, &protobor.ReceiptRequest{Hash: hash}) + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, st.Code()) + }) + + t.Run("GetTdByHash for unknown hash returns NotFound", func(t *testing.T) { + hash := protoutil.ConvertHashToH256(common.HexToHash("0xbaddcafe")) + _, err := srv.GetTdByHash(ctx, &protobor.GetTdByHashRequest{Hash: hash}) + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, st.Code()) + }) + + t.Run("GetLatestBlockNumber and GetGrpcAddr return server state", func(t *testing.T) { + require.NotNil(t, srv.GetLatestBlockNumber()) + require.GreaterOrEqual(t, srv.GetLatestBlockNumber().Int64(), int64(1)) + require.NotEmpty(t, srv.GetGrpcAddr()) + }) + + t.Run("getBorInfo populates expected fields", func(t *testing.T) { + info := srv.getBorInfo() + require.Contains(t, info, "chain_id") + 
require.Contains(t, info, "latest_block_hash") + require.Contains(t, info, "latest_block_number") + require.Contains(t, info, "latest_block_timestamp") + require.Contains(t, info, "peer_count") + require.Contains(t, info, "sync_mode") + require.Contains(t, info, "catching_up") + }) + + t.Run("customHealthServiceHandler responds with composed JSON", func(t *testing.T) { + require.NoError(t, srv.setupHealthService()) + handler := srv.customHealthServiceHandler() + + req := httptest.NewRequest("GET", "/health", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + require.Equal(t, 200, rec.Code) + var body map[string]any + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &body)) + require.Contains(t, body, "node_info") + require.Contains(t, body, "status") + require.Equal(t, false, body["error"]) + }) +} + +// TestWithGRPCListener: option function should wire the server's grpcServer +// using the provided listener. We feed it an arbitrary loopback listener, +// confirm the option callback succeeds, and immediately tear down the server +// to avoid leaking the Serve goroutine. +func TestWithGRPCListener(t *testing.T) { + t.Parallel() + + lis, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + opt := WithGRPCListener(lis) + // Server with no [grpc] block: tokenForInterceptor handles the nil case + // internally, so the option must succeed without manual scaffolding. + srv := &Server{} + require.NoError(t, opt(srv, &Config{})) + require.NotNil(t, srv.grpcServer, "WithGRPCListener must wire grpcServer") + // GracefulStop drains the in-flight serve goroutine and closes the + // listener so the test doesn't leak. 
+ srv.grpcServer.GracefulStop() +} diff --git a/internal/cli/server/service.go b/internal/cli/server/service.go index a63736e5da..c88434bac7 100644 --- a/internal/cli/server/service.go +++ b/internal/cli/server/service.go @@ -22,7 +22,12 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" ) -const chunkSize = 1024 * 1024 * 1024 +// chunkSize must stay below server.go:maxGRPCMessageSize so ChunkedEncoder +// actually splits payloads into messages small enough to pass the +// MaxSendMsgSize cap. DebugPprof and DebugBlock can produce multi-MB outputs +// (CPU/heap profiles, block-state dumps); each chunk becomes one stream +// SendMsg, and the Conn.Write loop already iterates over chunks. +const chunkSize = 16 * 1024 * 1024 var ErrUnavailable = errors.New("bor service is currently unavailable, try again later") var ErrUnavailable2 = errors.New("bor service unavailable even after waiting for 10 seconds, make sure bor is running") diff --git a/internal/cli/server/utils.go b/internal/cli/server/utils.go index 192a602100..8c71d18f96 100644 --- a/internal/cli/server/utils.go +++ b/internal/cli/server/utils.go @@ -7,6 +7,8 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/internal/cli/server/proto" "github.com/ethereum/go-ethereum/p2p" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" protobor "github.com/0xPolygon/polyproto/bor" protocommon "github.com/0xPolygon/polyproto/common" @@ -60,7 +62,24 @@ func ConvertTopicsToProtoTopics(topics []common.Hash) []*protocommon.H256 { return protoTopics } -func ConvertReceiptToProtoReceipt(receipt *types.Receipt) *protobor.Receipt { +func ConvertReceiptToProtoReceipt(receipt *types.Receipt) (*protobor.Receipt, error) { + // EffectiveGasPrice and BlockNumber are *big.Int; the proto schema is int64. + // Reject values outside int64 with OutOfRange instead of silently truncating + // to a negative or rolled-over value. 
+ var egp int64 + if receipt.EffectiveGasPrice != nil { + if !receipt.EffectiveGasPrice.IsInt64() { + return nil, status.Error(codes.OutOfRange, "effective gas price exceeds int64 range") + } + egp = receipt.EffectiveGasPrice.Int64() + } + var blockNum int64 + if receipt.BlockNumber != nil { + if !receipt.BlockNumber.IsInt64() { + return nil, status.Error(codes.OutOfRange, "block number exceeds int64 range") + } + blockNum = receipt.BlockNumber.Int64() + } return &protobor.Receipt{ Type: uint64(receipt.Type), PostState: receipt.PostState, @@ -71,12 +90,12 @@ func ConvertReceiptToProtoReceipt(receipt *types.Receipt) *protobor.Receipt { TxHash: protoutil.ConvertHashToH256(receipt.TxHash), ContractAddress: protoutil.ConvertAddressToH160(receipt.ContractAddress), GasUsed: receipt.GasUsed, - EffectiveGasPrice: receipt.EffectiveGasPrice.Int64(), + EffectiveGasPrice: egp, BlobGasUsed: receipt.BlobGasUsed, BlockHash: protoutil.ConvertHashToH256(receipt.BlockHash), - BlockNumber: receipt.BlockNumber.Int64(), + BlockNumber: blockNum, TransactionIndex: uint64(receipt.TransactionIndex), - } + }, nil } // HealthStatus represents the health status with level, code, and message diff --git a/internal/cli/server/utils_test.go b/internal/cli/server/utils_test.go new file mode 100644 index 0000000000..23bdcde6a9 --- /dev/null +++ b/internal/cli/server/utils_test.go @@ -0,0 +1,216 @@ +package server + +import ( + "encoding/json" + "math/big" + "net/http/httptest" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/p2p" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestConvertReceiptToProtoReceipt_Int64Range(t *testing.T) { + t.Parallel() + + // Largest value that fits in int64 — must round-trip cleanly. 
+ t.Run("max int64 round-trips", func(t *testing.T) { + t.Parallel() + max := new(big.Int).SetInt64(1<<63 - 1) + r := &types.Receipt{EffectiveGasPrice: max, BlockNumber: max} + out, err := ConvertReceiptToProtoReceipt(r) + require.NoError(t, err) + require.Equal(t, int64(1<<63-1), out.EffectiveGasPrice) + require.Equal(t, int64(1<<63-1), out.BlockNumber) + }) + + // One past max int64 — must error rather than silently truncate to a + // negative value. + t.Run("EffectiveGasPrice over int64 errors", func(t *testing.T) { + t.Parallel() + over := new(big.Int).Add(new(big.Int).SetInt64(1<<63-1), big.NewInt(1)) + r := &types.Receipt{EffectiveGasPrice: over, BlockNumber: big.NewInt(1)} + _, err := ConvertReceiptToProtoReceipt(r) + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.OutOfRange, s.Code()) + require.Contains(t, s.Message(), "effective gas price") + }) + + t.Run("BlockNumber over int64 errors", func(t *testing.T) { + t.Parallel() + over := new(big.Int).Add(new(big.Int).SetInt64(1<<63-1), big.NewInt(1)) + r := &types.Receipt{EffectiveGasPrice: big.NewInt(1), BlockNumber: over} + _, err := ConvertReceiptToProtoReceipt(r) + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.OutOfRange, s.Code()) + require.Contains(t, s.Message(), "block number") + }) + + // Nil big.Int fields default to 0 — preserve that behavior. 
+ t.Run("nil big.Int fields default to 0", func(t *testing.T) { + t.Parallel() + r := &types.Receipt{} + out, err := ConvertReceiptToProtoReceipt(r) + require.NoError(t, err) + require.Equal(t, int64(0), out.EffectiveGasPrice) + require.Equal(t, int64(0), out.BlockNumber) + }) +} + +func TestPeerInfoToPeer(t *testing.T) { + t.Parallel() + + info := &p2p.PeerInfo{ + ID: "node-1", + Enode: "enode://abc@127.0.0.1:30303", + ENR: "enr:-...", + Caps: []string{"eth/68", "snap/1"}, + Name: "bor/v1.0.0", + } + info.Network.Trusted = true + info.Network.Static = false + + out := PeerInfoToPeer(info) + require.Equal(t, "node-1", out.Id) + require.Equal(t, "enode://abc@127.0.0.1:30303", out.Enode) + require.Equal(t, "enr:-...", out.Enr) + require.Equal(t, []string{"eth/68", "snap/1"}, out.Caps) + require.Equal(t, "bor/v1.0.0", out.Name) + require.True(t, out.Trusted) + require.False(t, out.Static) +} + +func TestConvertTopicsToProtoTopics(t *testing.T) { + t.Parallel() + + t.Run("empty input returns nil slice", func(t *testing.T) { + t.Parallel() + require.Nil(t, ConvertTopicsToProtoTopics(nil)) + }) + + t.Run("preserves order and count", func(t *testing.T) { + t.Parallel() + topics := []common.Hash{ + common.HexToHash("0x01"), + common.HexToHash("0x02"), + common.HexToHash("0x03"), + } + out := ConvertTopicsToProtoTopics(topics) + require.Len(t, out, 3) + // Each element should be non-nil and have non-nil Hi/Lo. 
+ for i, h := range out { + require.NotNil(t, h, "topic %d nil", i) + require.NotNil(t, h.Hi, "topic %d Hi nil", i) + require.NotNil(t, h.Lo, "topic %d Lo nil", i) + } + }) +} + +func TestConvertLogsToProtoLogs(t *testing.T) { + t.Parallel() + + t.Run("empty slice returns nil", func(t *testing.T) { + t.Parallel() + require.Nil(t, ConvertLogsToProtoLogs(nil)) + }) + + t.Run("single log carries every field through", func(t *testing.T) { + t.Parallel() + log := &types.Log{ + Address: common.HexToAddress("0xabcdef0123456789abcdef0123456789abcdef01"), + Topics: []common.Hash{common.HexToHash("0xaa"), common.HexToHash("0xbb")}, + Data: []byte{0xde, 0xad, 0xbe, 0xef}, + BlockNumber: 42, + TxHash: common.HexToHash("0x01"), + TxIndex: 7, + BlockHash: common.HexToHash("0x02"), + Index: 3, + Removed: true, + } + out := ConvertLogsToProtoLogs([]*types.Log{log}) + require.Len(t, out, 1) + require.Equal(t, []byte{0xde, 0xad, 0xbe, 0xef}, out[0].Data) + require.Equal(t, uint64(42), out[0].BlockNumber) + require.Equal(t, uint64(7), out[0].TxIndex) + require.Equal(t, uint64(3), out[0].Index) + require.True(t, out[0].Removed) + require.Len(t, out[0].Topics, 2) + }) +} + +func TestHealthStatusLevel_String(t *testing.T) { + t.Parallel() + + cases := []struct { + lvl HealthStatusLevel + want string + }{ + {StatusOK, "OK"}, + {StatusWarn, "WARN"}, + {StatusCritical, "CRITICAL"}, + {HealthStatusLevel(99), "UNKNOWN"}, + } + for _, tc := range cases { + require.Equal(t, tc.want, tc.lvl.String()) + } +} + +func TestHealthStatusLevel_Code(t *testing.T) { + t.Parallel() + + cases := []struct { + lvl HealthStatusLevel + want int + }{ + {StatusOK, 0}, + {StatusWarn, 1}, + {StatusCritical, 2}, + {HealthStatusLevel(99), -1}, + } + for _, tc := range cases { + require.Equal(t, tc.want, tc.lvl.Code()) + } +} + +func TestHealthStatusLevel_MarshalJSON(t *testing.T) { + t.Parallel() + + // The level marshals as a JSON string with the canonical name, not as the + // numeric code — so wire consumers can 
treat it as a stable enum label. + for _, tc := range []struct { + lvl HealthStatusLevel + want string + }{ + {StatusOK, `"OK"`}, + {StatusWarn, `"WARN"`}, + {StatusCritical, `"CRITICAL"`}, + } { + got, err := json.Marshal(tc.lvl) + require.NoError(t, err) + require.Equal(t, tc.want, string(got)) + } +} + +func TestResponseRecorder(t *testing.T) { + t.Parallel() + + r := &ResponseRecorder{ResponseWriter: httptest.NewRecorder()} + r.WriteHeader(418) + n, err := r.Write([]byte("hello")) + require.NoError(t, err) + require.Equal(t, 5, n) + n, err = r.Write([]byte(" world")) + require.NoError(t, err) + require.Equal(t, 6, n) + // statusCode + body are captured for later replay + require.Equal(t, 418, r.statusCode) + require.Equal(t, "hello world", string(r.body)) +}