Merge pull request #22739 from holiman/remove_code

core: remove old conversion to shuffle leveldb blocks into ancients
Péter Szilágyi authored 2021-05-03 15:37:46 +03:00, committed by GitHub
commit fc1c1cbea9
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 7 additions and 56 deletions


@@ -1208,63 +1208,14 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
 			return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
 		}
-		var (
-			start  = time.Now()
-			logged = time.Now()
-			count  int
-		)
-		// Migrate all ancient blocks. This can happen if someone upgrades from Geth
-		// 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the
-		// long term.
-		for {
-			// We can ignore the error here since light client won't hit this code path.
-			frozen, _ := bc.db.Ancients()
-			if frozen >= block.NumberU64() {
-				break
-			}
-			h := rawdb.ReadCanonicalHash(bc.db, frozen)
-			b := rawdb.ReadBlock(bc.db, h, frozen)
-			size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen))
-			count += 1
-			// Always keep genesis block in active database.
-			if b.NumberU64() != 0 {
-				deleted = append(deleted, &numberHash{b.NumberU64(), b.Hash()})
-			}
-			if time.Since(logged) > 8*time.Second {
-				log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
-				logged = time.Now()
-			}
-			// Don't collect too much in-memory, write it out every 100K blocks
-			if len(deleted) > 100000 {
-				// Sync the ancient store explicitly to ensure all data has been flushed to disk.
-				if err := bc.db.Sync(); err != nil {
-					return 0, err
-				}
-				// Wipe out canonical block data.
-				for _, nh := range deleted {
-					rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
-					rawdb.DeleteCanonicalHash(batch, nh.number)
-				}
-				if err := batch.Write(); err != nil {
-					return 0, err
-				}
-				batch.Reset()
-				// Wipe out side chain too.
-				for _, nh := range deleted {
-					for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
-						rawdb.DeleteBlock(batch, hash, nh.number)
-					}
-				}
-				if err := batch.Write(); err != nil {
-					return 0, err
-				}
-				batch.Reset()
-				deleted = deleted[0:]
-			}
-		}
-		if count > 0 {
-			log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
-		}
+		if block.NumberU64() == 1 {
+			// Make sure to write the genesis into the freezer
+			if frozen, _ := bc.db.Ancients(); frozen == 0 {
+				h := rawdb.ReadCanonicalHash(bc.db, 0)
+				b := rawdb.ReadBlock(bc.db, h, 0)
+				size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, 0, bc.chainConfig), rawdb.ReadTd(bc.db, h, 0))
+				log.Info("Wrote genesis to ancients")
+			}
+		}
 		// Flush data into ancient database.
 		size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
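
For context on what the deleted path did in isolation: it walked every not-yet-frozen block below the one being imported, copied it from the key-value store into the ancient store, and pruned the migrated data from the active database in batches of 100K blocks. The sketch below is a minimal, self-contained rendering of that pattern; the Block, KVStore and Freezer types are hypothetical stand-ins for illustration, not geth's rawdb/ethdb API.

package ancients

import (
	"fmt"
	"log"
	"time"
)

// Block is a trimmed-down stand-in for a chain block.
type Block struct {
	Number uint64
	Hash   [32]byte
}

// KVStore is a hypothetical view of the active (LevelDB-style) database.
type KVStore interface {
	CanonicalBlock(number uint64) (Block, bool) // read the canonical block at a height
	DeleteBlock(b Block)                        // drop the block's data from the active database
}

// Freezer is a hypothetical view of the append-only ancient store.
type Freezer interface {
	Frozen() uint64       // number of blocks already frozen
	Append(b Block) error // append the next block (height == Frozen()) to the store
	Sync() error          // flush freezer data to disk
}

// migrateAncients mirrors the deleted loop: move every block below head from
// the key-value store into the freezer, keep genesis in the active database,
// and prune migrated blocks from the active database in batches.
func migrateAncients(kv KVStore, fr Freezer, head uint64, batchSize int) error {
	var (
		start   = time.Now()
		deleted []Block
	)
	prune := func() error {
		// Sync the freezer first so a crash between freezing and pruning
		// cannot lose block data.
		if err := fr.Sync(); err != nil {
			return err
		}
		for _, b := range deleted {
			kv.DeleteBlock(b)
		}
		deleted = deleted[:0]
		return nil
	}
	for {
		frozen := fr.Frozen()
		if frozen >= head {
			break // everything below the current head is already frozen
		}
		b, ok := kv.CanonicalBlock(frozen)
		if !ok {
			return fmt.Errorf("canonical block #%d missing", frozen)
		}
		if err := fr.Append(b); err != nil {
			return err
		}
		// Always keep the genesis block in the active database.
		if b.Number != 0 {
			deleted = append(deleted, b)
		}
		// Don't accumulate too much in memory; prune every batchSize blocks.
		if len(deleted) >= batchSize {
			if err := prune(); err != nil {
				return err
			}
		}
	}
	if err := prune(); err != nil {
		return err
	}
	log.Printf("migrated ancient blocks up to #%d in %v", head, time.Since(start))
	return nil
}

The ordering the removed code relied on is kept in the sketch: the ancient store is synced to disk before the corresponding blocks are deleted from the active database, and the genesis block is never pruned from it.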