diff --git a/core/blockchain.go b/core/blockchain.go
index 7a86e21dddbd..3747a087a7a1 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -99,7 +99,6 @@ const (
 	receiptsCacheLimit  = 32
 	maxFutureBlocks     = 256
 	maxTimeFutureBlocks = 30
-	badBlockLimit       = 10
 	triesInMemory       = 128
 
 	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
@@ -197,7 +196,6 @@ type BlockChain struct {
 	resultProcess    *lru.Cache[common.Hash, *ResultProcessBlock] // Cache for processed blocks
 	calculatingBlock *lru.Cache[common.Hash, *CalculatedBlock]    // Cache for processing blocks
 	downloadingBlock *lru.Cache[common.Hash, struct{}]            // Cache for downloading blocks (avoid duplication from fetcher)
-	badBlocks        *lru.Cache[common.Hash, *types.Block]        // Bad block cache
 
 	// future blocks are blocks added for later processing
 	futureBlocks *lru.Cache[common.Hash, *types.Block]
@@ -262,7 +260,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		engine:          engine,
 		vmConfig:        vmConfig,
 		logger:          vmConfig.Tracer,
-		badBlocks:       lru.NewCache[common.Hash, *types.Block](badBlockLimit),
 		blocksHashCache: lru.NewCache[uint64, []common.Hash](blocksHashCacheLimit),
 		resultTrade:     lru.NewCache[common.Hash, interface{}](tradingstate.OrderCacheLimit),
 		rejectedOrders:  lru.NewCache[common.Hash, interface{}](tradingstate.OrderCacheLimit),
@@ -2574,25 +2571,9 @@ func (bc *BlockChain) futureBlocksLoop() {
 	}
 }
 
-// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
-func (bc *BlockChain) BadBlocks() []*types.Block {
-	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
-	for _, hash := range bc.badBlocks.Keys() {
-		if blk, exist := bc.badBlocks.Peek(hash); exist {
-			blocks = append(blocks, blk)
-		}
-	}
-	return blocks
-}
-
-// addBadBlock adds a bad block to the bad-block LRU cache
-func (bc *BlockChain) addBadBlock(block *types.Block) {
-	bc.badBlocks.Add(block.Hash(), block)
-}
-
 // reportBlock logs a bad block error.
 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
-	bc.addBadBlock(block)
+	rawdb.WriteBadBlock(bc.db, block)
 
 	var roundNumber = types.Round(0)
 	engine, ok := bc.Engine().(*XDPoS.XDPoS)
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index db9606368adc..001ec8cbe015 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -21,6 +21,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"math/big"
+	"sort"
 
 	"github.com/XinFinOrg/XDPoSChain/common"
 	"github.com/XinFinOrg/XDPoSChain/core/types"
@@ -600,6 +601,102 @@ func deleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
 	DeleteTd(db, hash, number)
 }
 
+const badBlockToKeep = 10
+
+type badBlock struct {
+	Header *types.Header
+	Body   *types.Body
+}
+
+// badBlockList implements the sort interface to allow sorting a list of
+// bad blocks by their number in the reverse order.
+type badBlockList []*badBlock
+
+func (s badBlockList) Len() int { return len(s) }
+func (s badBlockList) Less(i, j int) bool {
+	return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
+}
+func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// ReadBadBlock retrieves the bad block with the corresponding block hash.
+func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
+	blob, err := db.Get(badBlockKey)
+	if err != nil {
+		return nil
+	}
+	var badBlocks badBlockList
+	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
+		return nil
+	}
+	for _, bad := range badBlocks {
+		if bad.Header.Hash() == hash {
+			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles)
+		}
+	}
+	return nil
+}
+
+// ReadAllBadBlocks retrieves all the bad blocks in the database.
+// All returned blocks are sorted in reverse order by number.
+func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
+	blob, err := db.Get(badBlockKey)
+	if err != nil {
+		return nil
+	}
+	var badBlocks badBlockList
+	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
+		return nil
+	}
+	var blocks []*types.Block
+	for _, bad := range badBlocks {
+		blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles))
+	}
+	return blocks
+}
+
+// WriteBadBlock serializes the bad block into the database. If the number of
+// accumulated bad blocks exceeds the limit, the oldest one will be dropped.
+func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
+	blob, err := db.Get(badBlockKey)
+	if err != nil {
+		log.Warn("Failed to load old bad blocks", "error", err)
+	}
+	var badBlocks badBlockList
+	if len(blob) > 0 {
+		if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
+			log.Crit("Failed to decode old bad blocks", "error", err)
+		}
+	}
+	for _, b := range badBlocks {
+		if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
+			log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
+			return
+		}
+	}
+	badBlocks = append(badBlocks, &badBlock{
+		Header: block.Header(),
+		Body:   block.Body(),
+	})
+	sort.Sort(sort.Reverse(badBlocks))
+	if len(badBlocks) > badBlockToKeep {
+		badBlocks = badBlocks[:badBlockToKeep]
+	}
+	data, err := rlp.EncodeToBytes(badBlocks)
+	if err != nil {
+		log.Crit("Failed to encode bad blocks", "err", err)
+	}
+	if err := db.Put(badBlockKey, data); err != nil {
+		log.Crit("Failed to write bad blocks", "err", err)
+	}
+}
+
+// DeleteBadBlocks deletes all the bad blocks from the database.
+func DeleteBadBlocks(db ethdb.KeyValueWriter) {
+	if err := db.Delete(badBlockKey); err != nil {
+		log.Crit("Failed to delete bad blocks", "err", err)
+	}
+}
+
 // ReadHeadHeader returns the current canonical head header.
 func ReadHeadHeader(db ethdb.Reader) *types.Header {
 	headHeaderHash := ReadHeadHeaderHash(db)
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index f3287e9134b5..b6f8a156d782 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -21,6 +21,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"math/big"
+	"math/rand/v2"
 	"os"
 	"testing"
 
@@ -209,6 +210,75 @@ func TestPartialBlockStorage(t *testing.T) {
 	}
 }
 
+// Tests bad block storage and retrieval operations.
+func TestBadBlockStorage(t *testing.T) {
+	db := NewMemoryDatabase()
+
+	// Create a test block to move around the database and make sure it's really new
+	block := types.NewBlockWithHeader(&types.Header{
+		Number:      big.NewInt(1),
+		Extra:       []byte("bad block"),
+		UncleHash:   types.EmptyUncleHash,
+		TxHash:      types.EmptyRootHash,
+		ReceiptHash: types.EmptyRootHash,
+	})
+	if entry := ReadBadBlock(db, block.Hash()); entry != nil {
+		t.Fatalf("Non-existent block returned: %v", entry)
+	}
+	// Write and verify the block in the database
+	WriteBadBlock(db, block)
+	if entry := ReadBadBlock(db, block.Hash()); entry == nil {
+		t.Fatalf("Stored block not found")
+	} else if entry.Hash() != block.Hash() {
+		t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
+	}
+	// Write one more bad block
+	blockTwo := types.NewBlockWithHeader(&types.Header{
+		Number:      big.NewInt(2),
+		Extra:       []byte("bad block two"),
+		UncleHash:   types.EmptyUncleHash,
+		TxHash:      types.EmptyRootHash,
+		ReceiptHash: types.EmptyRootHash,
+	})
+	WriteBadBlock(db, blockTwo)
+
+	// Write block one again, it should be filtered out as a duplicate.
+	WriteBadBlock(db, block)
+	badBlocks := ReadAllBadBlocks(db)
+	if len(badBlocks) != 2 {
+		t.Fatalf("Failed to load all bad blocks")
+	}
+
+	// Write a bunch of bad blocks; all the stored blocks should be sorted
+	// in reverse order. The extra blocks should be truncated.
+	for _, n := range rand.Perm(100) {
+		block := types.NewBlockWithHeader(&types.Header{
+			Number:      big.NewInt(int64(n)),
+			Extra:       []byte("bad block"),
+			UncleHash:   types.EmptyUncleHash,
+			TxHash:      types.EmptyRootHash,
+			ReceiptHash: types.EmptyRootHash,
+		})
+		WriteBadBlock(db, block)
+	}
+	badBlocks = ReadAllBadBlocks(db)
+	if len(badBlocks) != badBlockToKeep {
+		t.Fatalf("The number of persisted bad blocks is incorrect: %d", len(badBlocks))
+	}
+	for i := 0; i < len(badBlocks)-1; i++ {
+		if badBlocks[i].NumberU64() < badBlocks[i+1].NumberU64() {
+			t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, badBlocks[i].NumberU64(), i+1, badBlocks[i+1].NumberU64())
+		}
+	}
+
+	// Delete all bad blocks
+	DeleteBadBlocks(db)
+	badBlocks = ReadAllBadBlocks(db)
+	if len(badBlocks) != 0 {
+		t.Fatalf("Failed to delete bad blocks")
+	}
+}
+
 // Tests block total difficulty storage and retrieval operations.
 func TestTdStorage(t *testing.T) {
 	db := NewMemoryDatabase()
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index cbacd19824ee..8d5ee008badd 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -202,7 +202,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 			trieSize += size
 		default:
 			var accounted bool
-			for _, meta := range [][]byte{databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, fastTrieProgressKey} {
+			for _, meta := range [][]byte{databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, fastTrieProgressKey, uncleanShutdownKey, badBlockKey} {
 				if bytes.Equal(key, meta) {
 					metadata += size
 					accounted = true
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 236934e260f5..289b1e659ff8 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -42,6 +42,12 @@ var (
 	// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
fastTrieProgressKey = []byte("TrieSync") + // badBlockKey tracks the list of bad blocks seen by local + badBlockKey = []byte("InvalidBlock") + + // uncleanShutdownKey tracks the list of local crashes + uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db + // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes). headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td diff --git a/eth/api.go b/eth/api.go index dac84b31ac3f..e6edb38f4f42 100644 --- a/eth/api.go +++ b/eth/api.go @@ -336,22 +336,29 @@ type BadBlockArgs struct { // GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network // and returns them as a JSON list of block-hashes func (api *DebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) { - blocks := api.eth.BlockChain().BadBlocks() - results := make([]*BadBlockArgs, len(blocks)) - - var err error - for i, block := range blocks { - results[i] = &BadBlockArgs{ - Hash: block.Hash(), - } + var ( + err error + blocks = rawdb.ReadAllBadBlocks(api.eth.chainDb) + results = make([]*BadBlockArgs, 0, len(blocks)) + ) + for _, block := range blocks { + var ( + blockRlp string + blockJSON map[string]interface{} + ) if rlpBytes, err := rlp.EncodeToBytes(block); err != nil { - results[i].RLP = err.Error() // Hacky, but hey, it works + blockRlp = err.Error() // Hacky, but hey, it works } else { - results[i].RLP = fmt.Sprintf("0x%x", rlpBytes) + blockRlp = fmt.Sprintf("0x%x", rlpBytes) } - if results[i].Block, err = ethapi.RPCMarshalBlock(block, true, true, api.eth.ApiBackend.ChainConfig()); err != nil { - results[i].Block = map[string]interface{}{"error": err.Error()} + if blockJSON, err = ethapi.RPCMarshalBlock(block, true, true, api.eth.ApiBackend.ChainConfig()); err != nil { + blockJSON = map[string]interface{}{"error": err.Error()} } + results = append(results, &BadBlockArgs{ + Hash: block.Hash(), + RLP: blockRlp, + Block: blockJSON, + }) } return results, nil }