
Commit f369e5d

feat(sync/customrawdb): migrate customrawdb package from coreth (#4387)
Signed-off-by: Tsvetan Dimitrov ([email protected])
1 parent 265aaa3 commit f369e5d

6 files changed, +1178 -0 lines changed


vms/evm/sync/customrawdb/db.go

Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package customrawdb

import (
	"errors"

	"github.com/ava-labs/libevm/core/rawdb"
	"github.com/ava-labs/libevm/ethdb"
)

// FirewoodScheme is the state scheme identifier for the Firewood storage backend.
const FirewoodScheme = "firewood"

// errStateSchemeConflict indicates the provided state scheme conflicts with
// what is on disk.
var errStateSchemeConflict = errors.New("state scheme conflict")

// ParseStateScheme parses the state scheme from the provided string.
func ParseStateScheme(provided string, db ethdb.Database) (string, error) {
	// Check for the custom Firewood scheme.
	if provided == FirewoodScheme {
		if diskScheme := rawdb.ReadStateScheme(db); diskScheme != "" {
			// A valid scheme already on disk conflicts with the requested Firewood scheme.
			return "", errStateSchemeConflict
		}
		// If no conflicting scheme is found, Firewood is valid.
		return FirewoodScheme, nil
	}

	// Otherwise, check for a valid eth scheme.
	return rawdb.ParseStateScheme(provided, db)
}
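
For orientation, a minimal caller-side sketch of the new helper (hypothetical startup snippet, not part of this commit; the import path is inferred from the file path above, and the in-memory database stands in for the node's real one):

package main

import (
	"log"

	"github.com/ava-labs/avalanchego/vms/evm/sync/customrawdb"
	"github.com/ava-labs/libevm/core/rawdb"
)

func main() {
	// Stand-in for the node's real ethdb.Database.
	db := rawdb.NewMemoryDatabase()

	// ParseStateScheme returns an error if the on-disk data was written with a
	// different scheme, so a caller like this treats any error as fatal.
	scheme, err := customrawdb.ParseStateScheme(customrawdb.FirewoodScheme, db)
	if err != nil {
		log.Fatalf("selecting state scheme: %v", err)
	}
	log.Printf("using state scheme %q", scheme)
}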
Lines changed: 32 additions & 0 deletions
@@ -0,0 +1,32 @@
// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package customrawdb

import (
	"testing"

	"github.com/ava-labs/libevm/core/rawdb"
	"github.com/stretchr/testify/require"
)

func TestParseStateScheme(t *testing.T) {
	db := rawdb.NewMemoryDatabase()

	// Provided Firewood on empty disk -> allowed.
	scheme, err := ParseStateScheme(FirewoodScheme, db)
	require.NoError(t, err)
	require.Equal(t, FirewoodScheme, scheme)

	// Simulate disk has non-empty path scheme by writing persistent state id.
	rawdb.WritePersistentStateID(db, 1)
	scheme, err = ParseStateScheme(FirewoodScheme, db)
	require.ErrorIs(t, err, errStateSchemeConflict)
	require.Empty(t, scheme)

	// Pass-through to rawdb for non-Firewood using a fresh empty DB.
	db2 := rawdb.NewMemoryDatabase()
	scheme, err = ParseStateScheme(rawdb.HashScheme, db2)
	require.NoError(t, err)
	require.Equal(t, rawdb.HashScheme, scheme)
}
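
The conflict case above hinges on libevm's scheme detection; my understanding (an assumption, not stated in this diff) is that a persistent state ID is one of the markers rawdb.ReadStateScheme uses to report the path-based scheme. A small in-package sketch of that assumption, reusing the test's imports:

// Sketch of the assumption behind the conflict case: after writing a
// persistent state ID, rawdb should report the path-based scheme, and any
// non-empty on-disk scheme makes ParseStateScheme(FirewoodScheme, db) fail.
func sketchSchemeDetection() {
	db := rawdb.NewMemoryDatabase()
	rawdb.WritePersistentStateID(db, 1)
	if rawdb.ReadStateScheme(db) == rawdb.PathScheme {
		// Firewood can no longer be selected for this database.
	}
}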
Lines changed: 231 additions & 0 deletions
@@ -0,0 +1,231 @@
// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package customrawdb

import (
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/ava-labs/libevm/common"
	"github.com/ava-labs/libevm/core/rawdb"
	"github.com/ava-labs/libevm/ethdb"
	"github.com/ava-labs/libevm/params"
	"github.com/ava-labs/libevm/rlp"

	"github.com/ava-labs/avalanchego/database"
)

var (
	// errInvalidData indicates the stored value exists but is malformed or undecodable.
	errInvalidData                  = errors.New("invalid data")
	errFailedToGetUpgradeConfig     = errors.New("failed to get upgrade config")
	errFailedToMarshalUpgradeConfig = errors.New("failed to marshal upgrade config")

	upgradeConfigPrefix = []byte("upgrade-config-")
	// offlinePruningKey tracks runs of offline pruning.
	offlinePruningKey = []byte("OfflinePruning")
	// populateMissingTriesKey tracks runs of trie backfills.
	populateMissingTriesKey = []byte("PopulateMissingTries")
	// pruningDisabledKey tracks whether the node has ever run in archival mode
	// to ensure that a user does not accidentally corrupt an archival node.
	pruningDisabledKey = []byte("PruningDisabled")
	// acceptorTipKey tracks the tip of the last accepted block that has been fully processed.
	acceptorTipKey = []byte("AcceptorTipKey")
	// snapshotBlockHashKey tracks the block hash of the last snapshot.
	snapshotBlockHashKey = []byte("SnapshotBlockHash")
)

// WriteOfflinePruning writes a time marker of the last attempt to run offline pruning.
// The marker is written when offline pruning completes and is deleted when the node
// is started successfully with offline pruning disabled. This ensures users must
// disable offline pruning and start their node successfully between runs of offline
// pruning.
func WriteOfflinePruning(db ethdb.KeyValueWriter, ts time.Time) error {
	return writeTimeMarker(db, offlinePruningKey, ts)
}

// ReadOfflinePruning reads the most recent timestamp of an attempt to run offline
// pruning, if present.
func ReadOfflinePruning(db ethdb.KeyValueReader) (time.Time, error) {
	return readTimeMarker(db, offlinePruningKey)
}

// DeleteOfflinePruning deletes any marker of the last attempt to run offline pruning.
func DeleteOfflinePruning(db ethdb.KeyValueWriter) error {
	return db.Delete(offlinePruningKey)
}

// WritePopulateMissingTries writes a marker for the current attempt to populate
// missing tries.
func WritePopulateMissingTries(db ethdb.KeyValueWriter, ts time.Time) error {
	return writeTimeMarker(db, populateMissingTriesKey, ts)
}

// ReadPopulateMissingTries reads the most recent timestamp of an attempt to
// re-populate missing trie nodes.
func ReadPopulateMissingTries(db ethdb.KeyValueReader) (time.Time, error) {
	return readTimeMarker(db, populateMissingTriesKey)
}

// DeletePopulateMissingTries deletes any marker of the last attempt to
// re-populate missing trie nodes.
func DeletePopulateMissingTries(db ethdb.KeyValueWriter) error {
	return db.Delete(populateMissingTriesKey)
}

// WritePruningDisabled writes a marker to track whether the node has ever run
// with pruning disabled.
func WritePruningDisabled(db ethdb.KeyValueWriter) error {
	return db.Put(pruningDisabledKey, nil)
}

// HasPruningDisabled returns true if there is a marker present indicating that
// the node has run with pruning disabled at some point.
func HasPruningDisabled(db ethdb.KeyValueReader) (bool, error) {
	return db.Has(pruningDisabledKey)
}

// WriteAcceptorTip writes `hash` as the last accepted block that has been fully processed.
func WriteAcceptorTip(db ethdb.KeyValueWriter, hash common.Hash) error {
	return db.Put(acceptorTipKey, hash[:])
}

// ReadAcceptorTip reads the hash of the last accepted block that was fully processed.
// If there is no value present (the index is being initialized for the first time),
// database.ErrNotFound is returned.
func ReadAcceptorTip(db ethdb.KeyValueReader) (common.Hash, error) {
	ok, err := db.Has(acceptorTipKey)
	if err != nil {
		return common.Hash{}, err
	}
	if !ok {
		return common.Hash{}, database.ErrNotFound
	}
	h, err := db.Get(acceptorTipKey)
	if err != nil {
		return common.Hash{}, err
	}
	if len(h) != common.HashLength {
		return common.Hash{}, fmt.Errorf("%w: length %d", errInvalidData, len(h))
	}
	return common.BytesToHash(h), nil
}

// ReadChainConfig retrieves the consensus settings based on the given genesis hash.
// The provided `upgradeConfig` (any type JSON can be unmarshaled into) will be
// populated if present on disk.
func ReadChainConfig[T any](db ethdb.KeyValueReader, hash common.Hash, upgradeConfig *T) (*params.ChainConfig, error) {
	config := rawdb.ReadChainConfig(db, hash)
	if config == nil {
		return nil, database.ErrNotFound
	}

	upgrade, err := db.Get(upgradeConfigKey(hash))
	if err != nil {
		return nil, fmt.Errorf("%w: %w", errFailedToGetUpgradeConfig, err)
	}

	if len(upgrade) == 0 {
		return config, nil
	}

	if err := json.Unmarshal(upgrade, upgradeConfig); err != nil {
		return nil, errInvalidData
	}

	return config, nil
}

// WriteChainConfig writes the chain config settings to the database.
// The provided `upgradeConfig` (any JSON-marshalable type) will be stored alongside the chain config.
func WriteChainConfig[T any](db ethdb.KeyValueWriter, hash common.Hash, config *params.ChainConfig, upgradeConfig T) error {
	rawdb.WriteChainConfig(db, hash, config)
	if config == nil {
		return nil
	}

	data, err := json.Marshal(upgradeConfig)
	if err != nil {
		return fmt.Errorf("%w: %w", errFailedToMarshalUpgradeConfig, err)
	}
	return db.Put(upgradeConfigKey(hash), data)
}
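
A round-trip sketch for the generic chain-config accessors above (hypothetical, in-package, reusing this file's imports; `roundTripChainConfig` and `exampleUpgrades` are illustrative names, and params.TestChainConfig is only a placeholder config):

// roundTripChainConfig is a hypothetical in-package sketch (not part of this
// commit) showing the generic upgrade-config round trip.
func roundTripChainConfig() error {
	// Hypothetical upgrade payload; any JSON-(un)marshalable type works since
	// Write/ReadChainConfig are generic over T.
	type exampleUpgrades struct {
		EnableFeatureXTimestamp *uint64 `json:"enableFeatureXTimestamp,omitempty"`
	}

	db := rawdb.NewMemoryDatabase()
	genesisHash := common.HexToHash("0x01")
	ts := uint64(1_700_000_000)

	in := exampleUpgrades{EnableFeatureXTimestamp: &ts}
	if err := WriteChainConfig(db, genesisHash, params.TestChainConfig, in); err != nil {
		return err
	}

	var out exampleUpgrades
	cfg, err := ReadChainConfig(db, genesisHash, &out)
	if err != nil {
		return err // database.ErrNotFound if nothing was stored for genesisHash
	}
	_ = cfg // the *params.ChainConfig read back via rawdb
	return nil
}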

// NewAccountSnapshotsIterator returns an iterator for walking all of the accounts in the snapshot.
func NewAccountSnapshotsIterator(db ethdb.Iteratee) ethdb.Iterator {
	it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
	keyLen := len(rawdb.SnapshotAccountPrefix) + common.HashLength
	return rawdb.NewKeyLengthIterator(it, keyLen)
}

// ReadSnapshotBlockHash retrieves the hash of the block whose state is contained in
// the persisted snapshot.
func ReadSnapshotBlockHash(db ethdb.KeyValueReader) (common.Hash, error) {
	ok, err := db.Has(snapshotBlockHashKey)
	if err != nil {
		return common.Hash{}, err
	}
	if !ok {
		return common.Hash{}, database.ErrNotFound
	}

	data, err := db.Get(snapshotBlockHashKey)
	if err != nil {
		return common.Hash{}, err
	}
	if len(data) != common.HashLength {
		return common.Hash{}, fmt.Errorf("%w: length %d", errInvalidData, len(data))
	}
	return common.BytesToHash(data), nil
}

// WriteSnapshotBlockHash stores the hash of the block whose state is contained in
// the persisted snapshot.
func WriteSnapshotBlockHash(db ethdb.KeyValueWriter, blockHash common.Hash) error {
	return db.Put(snapshotBlockHashKey, blockHash[:])
}

// DeleteSnapshotBlockHash deletes the hash of the block whose state is contained in
// the persisted snapshot. Since snapshots are not immutable, this method can
// be used during updates, so a crash or failure will mark the entire snapshot
// invalid.
func DeleteSnapshotBlockHash(db ethdb.KeyValueWriter) error {
	return db.Delete(snapshotBlockHashKey)
}

func writeTimeMarker(db ethdb.KeyValueWriter, key []byte, ts time.Time) error {
	data, err := rlp.EncodeToBytes(uint64(ts.Unix()))
	if err != nil {
		return err
	}
	return db.Put(key, data)
}

func readTimeMarker(db ethdb.KeyValueReader, key []byte) (time.Time, error) {
	// Check existence first to map a missing marker to a stable sentinel error.
	ok, err := db.Has(key)
	if err != nil {
		return time.Time{}, err
	}
	if !ok {
		return time.Time{}, database.ErrNotFound
	}

	data, err := db.Get(key)
	if err != nil {
		return time.Time{}, err
	}

	var unix uint64
	if err := rlp.DecodeBytes(data, &unix); err != nil {
		return time.Time{}, fmt.Errorf("%w: %w", errInvalidData, err)
	}

	return time.Unix(int64(unix), 0), nil
}

func upgradeConfigKey(hash common.Hash) []byte {
	return append(upgradeConfigPrefix, hash.Bytes()...)
}
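
And a usage sketch for the marker helpers (hypothetical, in-package; `sketchMarkers` is an illustrative name). It shows that time markers round-trip at second granularity, since they are stored as RLP-encoded Unix seconds, and that missing values surface as database.ErrNotFound:

// sketchMarkers is a hypothetical in-package example (not part of this commit).
func sketchMarkers() error {
	db := rawdb.NewMemoryDatabase()

	// Missing markers surface as database.ErrNotFound rather than a zero value.
	if _, err := ReadOfflinePruning(db); !errors.Is(err, database.ErrNotFound) {
		return err
	}

	// Time markers are stored as RLP-encoded Unix seconds, so sub-second
	// precision is dropped on the round trip.
	now := time.Now()
	if err := WriteOfflinePruning(db, now); err != nil {
		return err
	}
	got, err := ReadOfflinePruning(db)
	if err != nil {
		return err
	}
	_ = got.Equal(time.Unix(now.Unix(), 0)) // true

	// Snapshot block hash and acceptor tip follow the same Has/Get pattern.
	if err := WriteSnapshotBlockHash(db, common.HexToHash("0x02")); err != nil {
		return err
	}
	h, err := ReadSnapshotBlockHash(db)
	_ = h
	return err
}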
