Populate RLP Bytes in Cache (#337)
* cause panic

* fix cache

* pass pointers
patrick-ogrady authored Nov 2, 2022
1 parent 5026630 commit 80e6d0e
Showing 1 changed file with 7 additions and 10 deletions.
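The heart of the fix is visible in the hunks below: `[]flushItem` becomes `[]*flushItem`, so RLP bytes populated on an item inside `writeFlushItems` land on the shared struct instead of a per-iteration copy. A minimal, self-contained sketch of that value-versus-pointer pitfall (the `item` type here is illustrative, not the actual `flushItem`):

package main

import "fmt"

// item stands in for flushItem; rlp caches the encoded bytes once computed.
type item struct {
    rlp []byte
}

// fillByValue ranges over copies: writes to it.rlp are lost to the caller.
func fillByValue(items []item) {
    for _, it := range items {
        it.rlp = []byte{0x01} // mutates a copy of the slice element
    }
}

// fillByPointer ranges over shared pointers: writes survive the call.
func fillByPointer(items []*item) {
    for _, it := range items {
        it.rlp = []byte{0x01} // mutates the shared item
    }
}

func main() {
    byValue := []item{{}}
    fillByValue(byValue)
    fmt.Println("by value:", byValue[0].rlp) // by value: []

    byPointer := []*item{{}}
    fillByPointer(byPointer)
    fmt.Println("by pointer:", byPointer[0].rlp) // by pointer: [1]
}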
trie/database.go (17 changes: 7 additions & 10 deletions)
@@ -399,9 +399,6 @@ func (db *Database) node(hash common.Hash) ([]byte, *cachedNode, error) {
             memcacheCleanReadMeter.Mark(int64(len(enc)))
             return enc, nil, nil
         }
-    } else {
-        // TODO: remove later
-        log.Warn("trie database cleans is empty")
     }
     // Retrieve the node from the dirty cache if available
     db.lock.RLock()
@@ -417,7 +414,7 @@ func (db *Database) node(hash common.Hash) ([]byte, *cachedNode, error) {

     // Content unavailable in memory, attempt to retrieve from disk
     enc := rawdb.ReadTrieNode(db.diskdb, hash)
-    if len(enc) != 0 {
+    if len(enc) > 0 {
         if db.cleans != nil {
             db.cleans.Set(hash[:], enc)
             memcacheCleanMissMeter.Mark(1)
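For orientation, node() resolves a hash through three tiers: the clean cache, the dirty cache, then disk, warming the clean cache on a disk hit. A schematic sketch of that flow, reusing only the calls visible in this diff plus the fastcache-style Get signature, and assuming the package's existing types (not a drop-in replacement for the real method, which also returns a *cachedNode and records metrics):

// Schematic only; assumes the surrounding trie package's Database and
// cachedNode types, plus an errors import.
func (db *Database) node(hash common.Hash) ([]byte, error) {
    // Tier 1: clean cache holds RLP of already-flushed nodes.
    if db.cleans != nil {
        if enc := db.cleans.Get(nil, hash[:]); enc != nil {
            return enc, nil
        }
    }
    // Tier 2: dirty cache holds in-memory nodes not yet flushed.
    db.lock.RLock()
    dirty := db.dirties[hash]
    db.lock.RUnlock()
    if dirty != nil {
        return dirty.rlp(), nil
    }
    // Tier 3: disk; on a hit, populate the clean cache for next time.
    if enc := rawdb.ReadTrieNode(db.diskdb, hash); len(enc) > 0 {
        if db.cleans != nil {
            db.cleans.Set(hash[:], enc)
        }
        return enc, nil
    }
    return nil, errors.New("trie node not found")
}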
@@ -564,7 +561,7 @@ type flushItem struct {
 // writeFlushItems writes all items in [toFlush] to disk in batches of
 // [ethdb.IdealBatchSize]. This function does not access any variables inside
 // of [Database] and does not need to be synchronized.
-func (db *Database) writeFlushItems(toFlush []flushItem) error {
+func (db *Database) writeFlushItems(toFlush []*flushItem) error {
     batch := db.diskdb.NewBatch()
     for _, item := range toFlush {
         rlp := item.node.rlp()
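This loop is where the commit title comes in: with pointer items, the RLP computed here can be stashed on the item and reused when the clean cache is populated. The rest of the function is collapsed in this view, so the following is only a hedged sketch of the batching pattern, assuming hash and rlp fields on flushItem and geth's standard batch API:

func (db *Database) writeFlushItems(toFlush []*flushItem) error {
    batch := db.diskdb.NewBatch()
    for _, item := range toFlush {
        rlp := item.node.rlp()
        item.rlp = rlp // survives the call because item is a pointer
        rawdb.WriteTrieNode(batch, item.hash, rlp)
        // Write out the batch whenever it grows past the ideal size.
        if batch.ValueSize() > ethdb.IdealBatchSize {
            if err := batch.Write(); err != nil {
                return err
            }
            batch.Reset()
        }
    }
    return batch.Write()
}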
@@ -621,12 +618,12 @@ func (db *Database) Cap(limit common.StorageSize) error {
     }

     // Keep removing nodes from the flush-list until we're below allowance
-    toFlush := make([]flushItem, 0, 128)
+    toFlush := make([]*flushItem, 0, 128)
     oldest := db.oldest
     for pendingSize > limit && oldest != (common.Hash{}) {
         // Fetch the oldest referenced node and push into the batch
         node := db.dirties[oldest]
-        toFlush = append(toFlush, flushItem{oldest, node, nil})
+        toFlush = append(toFlush, &flushItem{oldest, node, nil})

         // Iterate to the next flush item, or abort if the size cap was achieved. Size
         // is the total size, including the useful cached data (hash -> blob), the
@@ -692,7 +689,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
     db.lock.RLock()
     lockStart := time.Now()
     nodes, storage := len(db.dirties), db.dirtiesSize
-    toFlush, err := db.commit(node, make([]flushItem, 0, 128), callback)
+    toFlush, err := db.commit(node, make([]*flushItem, 0, 128), callback)
     if err != nil {
         log.Error("Failed to commit trie from trie database", "err", err)
         return err
@@ -742,7 +739,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
 //
 // [callback] will be invoked as soon as it is determined a trie node will be
 // flushed to disk (before it is actually written).
-func (db *Database) commit(hash common.Hash, toFlush []flushItem, callback func(common.Hash)) ([]flushItem, error) {
+func (db *Database) commit(hash common.Hash, toFlush []*flushItem, callback func(common.Hash)) ([]*flushItem, error) {
     // If the node does not exist, it's a previously committed node
     node, ok := db.dirties[hash]
     if !ok {
@@ -760,7 +757,7 @@ func (db *Database) commit(hash common.Hash, toFlush []flushItem, callback func(common.Hash)) ([]flushItem, error) {
     // By processing the children of each node before the node itself, we ensure
     // that children are committed before their parents (an invariant of this
     // package).
-    toFlush = append(toFlush, flushItem{hash, node, nil})
+    toFlush = append(toFlush, &flushItem{hash, node, nil})
     if callback != nil {
         callback(hash)
     }
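The comment above states the ordering invariant: every child is appended to [toFlush] before its parent. A schematic of how the recursion achieves that, assuming a hypothetical childs() accessor over the node's child hashes (the real traversal helper may differ):

// Post-order sketch: recurse into children first, then queue the parent.
func (db *Database) commitSketch(hash common.Hash, toFlush []*flushItem, callback func(common.Hash)) ([]*flushItem, error) {
    node, ok := db.dirties[hash]
    if !ok {
        return toFlush, nil // previously committed; nothing to flush
    }
    var err error
    for _, child := range node.childs() { // hypothetical accessor
        if toFlush, err = db.commitSketch(child, toFlush, callback); err != nil {
            return nil, err
        }
    }
    toFlush = append(toFlush, &flushItem{hash, node, nil}) // parent last
    if callback != nil {
        callback(hash)
    }
    return toFlush, nil
}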