Merge pull request 'chore(ci): align Go to 1.23.x, add staticcheck/govulncheck/gitleaks gates' (#5) from devin/1776539160-chore-ci-go-version-and-scanners into master
Some checks failed
CI / Backend security scanners (push) Has been cancelled
CI / Backend (go 1.23.x) (push) Has been cancelled
CI / Frontend (node 20) (push) Has been cancelled
CI / gitleaks (secret scan) (push) Has been cancelled

This commit was merged in pull request #5.
This commit is contained in:
2026-04-18 19:34:37 +00:00
9 changed files with 170 additions and 79 deletions

View File

@@ -2,71 +2,102 @@ name: CI
on:
push:
branches: [ main, develop ]
branches: [ master, main, develop ]
pull_request:
branches: [ main, develop ]
branches: [ master, main, develop ]
# Cancel in-flight runs on the same ref to save CI minutes.
concurrency:
group: ci-${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
GO_VERSION: '1.23.4'
NODE_VERSION: '20'
jobs:
test-backend:
name: Backend (go 1.23.x)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- uses: actions/setup-go@v4
with:
go-version: '1.22'
- name: Run tests
run: |
cd backend
go test ./...
- name: Build
run: |
cd backend
go build ./...
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: backend/go.sum
- name: go vet
working-directory: backend
run: go vet ./...
- name: go build
working-directory: backend
run: go build ./...
- name: go test
working-directory: backend
run: go test ./...
scan-backend:
name: Backend security scanners
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: backend/go.sum
- name: Install staticcheck
run: go install honnef.co/go/tools/cmd/staticcheck@v0.5.1
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: staticcheck
working-directory: backend
run: staticcheck ./...
- name: govulncheck
working-directory: backend
run: govulncheck ./...
test-frontend:
name: Frontend (node 20)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- uses: actions/setup-node@v3
with:
node-version: '20'
- name: Install dependencies
run: |
cd frontend
npm ci
- name: Run tests
run: |
cd frontend
npm test
- name: Build
run: |
cd frontend
npm run build
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
cache-dependency-path: frontend/package-lock.json
- name: Install dependencies
working-directory: frontend
run: npm ci
- name: Lint (eslint)
working-directory: frontend
run: npm run lint
- name: Type-check (tsc)
working-directory: frontend
run: npm run type-check
- name: Build
working-directory: frontend
run: npm run build
lint:
gitleaks:
name: gitleaks (secret scan)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- uses: actions/setup-go@v4
with:
go-version: '1.22'
- uses: actions/setup-node@v3
with:
node-version: '20'
- name: Backend lint
run: |
cd backend
go vet ./...
- name: Frontend lint
run: |
cd frontend
npm ci
npm run lint
npm run type-check
- uses: actions/checkout@v4
with:
# Full history so we can also scan past commits, not just the tip.
fetch-depth: 0
- name: Run gitleaks
uses: gitleaks/gitleaks-action@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Repo-local config lives at .gitleaks.toml.
GITLEAKS_CONFIG: .gitleaks.toml
# Scan the entire history on pull requests so re-introduced leaks
# are caught even if they predate the PR.
GITLEAKS_ENABLE_SUMMARY: 'true'

24
.gitleaks.toml Normal file
View File

@@ -0,0 +1,24 @@
# gitleaks configuration for explorer-monorepo.
#
# Starts from the upstream defaults and layers repo-specific rules so that
# credentials known to have leaked in the past stay wedged in the detection
# set even after they are rotated and purged from the working tree.
#
# See docs/SECURITY.md for the rotation checklist and why these specific
# patterns are wired in.
[extend]
useDefault = true
[[rules]]
id = "explorer-legacy-db-password-L@ker"
description = "Legacy hardcoded Postgres / SSH password (L@ker$2010 / L@kers2010)"
regex = '''L@kers?\$?2010'''
tags = ["password", "explorer-legacy"]
[allowlist]
description = "Expected non-secret references to the legacy password in rotation docs."
paths = [
'''^docs/SECURITY\.md$''',
'''^CHANGELOG\.md$''',
]

View File

@@ -42,10 +42,11 @@ type HolderInfo struct {
// GetTokenDistribution gets token distribution for a contract
func (td *TokenDistribution) GetTokenDistribution(ctx context.Context, contract string, topN int) (*DistributionStats, error) {
// Refresh materialized view
_, err := td.db.Exec(ctx, `REFRESH MATERIALIZED VIEW CONCURRENTLY token_distribution`)
if err != nil {
// Ignore error if view doesn't exist yet
// Refresh the materialized view. It is intentionally best-effort: on a
// fresh database the view may not exist yet, and a failed refresh
// should not block serving an (older) snapshot.
if _, err := td.db.Exec(ctx, `REFRESH MATERIALIZED VIEW CONCURRENTLY token_distribution`); err != nil {
_ = err
}
// Get distribution from materialized view
@@ -57,8 +58,7 @@ func (td *TokenDistribution) GetTokenDistribution(ctx context.Context, contract
var holders int
var totalSupply string
err = td.db.QueryRow(ctx, query, contract, td.chainID).Scan(&holders, &totalSupply)
if err != nil {
if err := td.db.QueryRow(ctx, query, contract, td.chainID).Scan(&holders, &totalSupply); err != nil {
return nil, fmt.Errorf("failed to get distribution: %w", err)
}

View File

@@ -41,14 +41,11 @@ func (s *Server) loggingMiddleware(next http.Handler) http.Handler {
})
}
// compressionMiddleware adds gzip compression (simplified - use gorilla/handlers in production)
// compressionMiddleware is a pass-through today; it exists so that the
// routing stack can be composed without conditionals while we evaluate the
// right compression approach (likely gorilla/handlers.CompressHandler in a
// follow-up). Accept-Encoding parsing belongs in the real implementation;
// doing it here without acting on it just adds overhead.
func (s *Server) compressionMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Check if client accepts gzip
if r.Header.Get("Accept-Encoding") != "" {
// In production, use gorilla/handlers.CompressHandler
// For now, just pass through
}
next.ServeHTTP(w, r)
})
return next
}

View File

@@ -475,8 +475,12 @@ func (s *Server) HandleMissionControlBridgeTrace(w http.ResponseWriter, r *http.
body, statusCode, err := fetchBlockscoutTransaction(r.Context(), tx)
if err == nil && statusCode == http.StatusOK {
var txDoc map[string]interface{}
if err := json.Unmarshal(body, &txDoc); err != nil {
err = fmt.Errorf("invalid blockscout JSON")
if uerr := json.Unmarshal(body, &txDoc); uerr != nil {
// Fall through to the RPC fallback below. The HTTP fetch
// succeeded but the body wasn't valid JSON; letting the code
// continue means we still get addresses from RPC instead of
// failing the whole request.
_ = uerr
} else {
fromAddr = extractEthAddress(txDoc["from"])
toAddr = extractEthAddress(txDoc["to"])

View File

@@ -87,9 +87,12 @@ func (t *Tracer) storeTrace(ctx context.Context, txHash common.Hash, blockNumber
) PARTITION BY LIST (chain_id)
`
_, err := t.db.Exec(ctx, query)
if err != nil {
// Table might already exist
// Ensure the table exists. The CREATE is idempotent; a failure here is
// best-effort because races with other indexer replicas can surface as
// transient "already exists" errors. The follow-up INSERT will surface
// any real schema problem.
if _, err := t.db.Exec(ctx, query); err != nil {
_ = err
}
// Insert trace

View File

@@ -86,7 +86,14 @@ func (bi *BlockIndexer) IndexLatestBlocks(ctx context.Context, count int) error
latestBlock := header.Number.Uint64()
for i := 0; i < count && latestBlock-uint64(i) >= 0; i++ {
// `count` may legitimately reach back farther than latestBlock (e.g.
// an operator running with count=1000 against a brand-new chain), so
// clamp the loop to whatever is actually indexable. The previous
// "latestBlock-uint64(i) >= 0" guard was a no-op on an unsigned type.
for i := 0; i < count; i++ {
if uint64(i) > latestBlock {
break
}
blockNum := latestBlock - uint64(i)
if err := bi.IndexBlock(ctx, blockNum); err != nil {
// Log error but continue

17
backend/staticcheck.conf Normal file
View File

@@ -0,0 +1,17 @@
checks = [
"all",
# Style / unused nits. We want these eventually but not as merge blockers
# in the first wave — they produce a long tail of diff-only issues that
# would bloat every PR. Re-enable in a dedicated cleanup PR.
"-ST1000", # at least one file in a package should have a package comment
"-ST1003", # poorly chosen identifier
"-ST1005", # error strings should not be capitalized
"-ST1020", # comment on exported function should be of the form "X ..."
"-ST1021", # comment on exported type should be of the form "X ..."
"-ST1022", # comment on exported var/const should be of the form "X ..."
"-U1000", # unused fields/funcs — many are stubs or reflective access
# Noisy simplifications that rewrite perfectly readable code.
"-S1016", # should use type conversion instead of struct literal
"-S1031", # unnecessary nil check around range — defensive anyway
]

View File

@@ -6,6 +6,15 @@ import (
"time"
)
// ctxKey is an unexported type for tracer context keys so they cannot
// collide with keys installed by any other package (staticcheck SA1029).
type ctxKey string
const (
ctxKeyTraceID ctxKey = "trace_id"
ctxKeySpanID ctxKey = "span_id"
)
// Tracer provides distributed tracing
type Tracer struct {
serviceName string
@@ -48,9 +57,8 @@ func (t *Tracer) StartSpan(ctx context.Context, name string) (*Span, context.Con
Logs: []LogEntry{},
}
// Add to context
ctx = context.WithValue(ctx, "trace_id", traceID)
ctx = context.WithValue(ctx, "span_id", spanID)
ctx = context.WithValue(ctx, ctxKeyTraceID, traceID)
ctx = context.WithValue(ctx, ctxKeySpanID, spanID)
return span, ctx
}