Mirror of https://github.com/btcsuite/btcd.git (synced 2025-02-23 14:40:44 +01:00)
database/ffldb: Add PruneBlocks to db interface
This change is part of the effort to add pruning support to btcd. PruneBlocks will prune the earliest block files until it reaches the given target size. The returned hashes are the hashes of the blocks that were pruned.
Commit 5c1dd21e79 (parent f258d0c8d2)
4 changed files with 244 additions and 1 deletion
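For illustration, here is a minimal sketch (not part of this commit) of how a caller could drive the new method. It follows the same db.Update flow as the TestPrune code in this diff; the pruneTo helper name and the package wrapper are illustrative assumptions, not code from the commit.

package example

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/database"
	_ "github.com/btcsuite/btcd/database/ffldb" // register the ffldb driver
)

// pruneTo is a hypothetical helper: it prunes an open database down to
// targetSize bytes inside a writable transaction and returns the hashes of
// the blocks that were removed, mirroring the TestPrune flow in this diff.
func pruneTo(db database.DB, targetSize uint64) ([]chainhash.Hash, error) {
	var deleted []chainhash.Hash
	err := db.Update(func(tx database.Tx) error {
		var err error
		deleted, err = tx.PruneBlocks(targetSize)
		return err
	})
	if err != nil {
		return nil, err
	}
	fmt.Printf("pruned %d blocks\n", len(deleted))
	return deleted, nil
}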
@@ -1669,6 +1669,101 @@ func (tx *transaction) writePendingAndCommit() error {
	return tx.db.cache.commitTx(tx)
}

// PruneBlocks deletes the earliest block files until the total size of all
// block files is at or below the target size (specified in bytes). Returns
// an error if the target size is below the maximum size of a single block
// file.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) PruneBlocks(targetSize uint64) ([]chainhash.Hash, error) {
	// Ensure transaction state is valid.
	if err := tx.checkClosed(); err != nil {
		return nil, err
	}

	// Ensure the transaction is writable.
	if !tx.writable {
		str := "prune blocks requires a writable database transaction"
		return nil, makeDbErr(database.ErrTxNotWritable, str, nil)
	}

	// Make a local alias for the maxBlockFileSize.
	maxSize := uint64(tx.db.store.maxBlockFileSize)
	if targetSize < maxSize {
		return nil, fmt.Errorf("got target size of %d but it must be greater "+
			"than %d, the max size of a single block file",
			targetSize, maxSize)
	}

	first, last, lastFileSize, err := scanBlockFiles(tx.db.store.basePath)
	if err != nil {
		return nil, err
	}

	// If we have no files on disk or just a single file on disk, return early.
	if first == last {
		return nil, nil
	}

	// Last file number minus the first file number gives us the count of files
	// on disk minus 1. We don't want to count the last file since we can't
	// assume that it is of max size.
	maxSizeFileCount := last - first

	// If the total size of the block files is under the target, return early
	// and don't prune.
	totalSize := uint64(lastFileSize) + (maxSize * uint64(maxSizeFileCount))
	if totalSize <= targetSize {
		return nil, nil
	}

	log.Tracef("Using %d more bytes than the target of %d MiB. Pruning files...",
		totalSize-targetSize,
		targetSize/(1024*1024))

	deletedFiles := make(map[uint32]struct{})

	// We use < not <= so that the last file is never deleted. There are other
	// checks in place but setting it to < here doesn't hurt.
	for i := uint32(first); i < uint32(last); i++ {
		err = tx.db.store.deleteFileFunc(i)
		if err != nil {
			return nil, fmt.Errorf("PruneBlocks: failed to delete block file "+
				"number %d: %v", i, err)
		}

		// Add the file index to the deleted files map so that we can later
		// delete the block location index.
		deletedFiles[i] = struct{}{}

		// If we're already at or below the target usage, break and don't
		// try to delete more files.
		totalSize -= maxSize
		if totalSize <= targetSize {
			break
		}
	}

	// Delete the indexed block locations for the files that we've just deleted.
	var deletedBlockHashes []chainhash.Hash
	cursor := tx.blockIdxBucket.Cursor()
	for ok := cursor.First(); ok; ok = cursor.Next() {
		loc := deserializeBlockLoc(cursor.Value())

		_, found := deletedFiles[loc.blockFileNum]
		if found {
			deletedBlockHashes = append(deletedBlockHashes,
				*(*chainhash.Hash)(cursor.Key()))
			err := cursor.Delete()
			if err != nil {
				return nil, err
			}
		}
	}

	log.Tracef("Finished pruning. Database now at %d bytes", totalSize)

	return deletedBlockHashes, nil
}

// Commit commits all changes that have been made to the root metadata bucket
// and all of its sub-buckets to the database cache which is periodically synced
// to persistent storage. In addition, it commits all new blocks directly to
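To make the size accounting above concrete, here is a small worked example under assumed values (five files numbered 0 through 4, a 2048-byte maxBlockFileSize as in the TestPrune hunk below, and a 500-byte partial last file). It mirrors the estimate and the deletion loop without touching disk; all the numbers are illustrative assumptions.

package main

import "fmt"

func main() {
	// Assumed values for illustration only.
	first, last := uint32(0), uint32(4) // files 0..4 on disk
	lastFileSize := uint64(500)         // the in-progress last file
	maxSize := uint64(2048)             // forced maxBlockFileSize
	targetSize := uint64(3 * 2048)      // 6144-byte target

	// Every file except the last is assumed to be of max size.
	maxSizeFileCount := uint64(last - first)             // 4
	totalSize := lastFileSize + maxSize*maxSizeFileCount // 500 + 8192 = 8692

	// Mirror the deletion loop: remove the earliest files until the
	// estimate drops to the target.
	for i := first; i < last; i++ {
		totalSize -= maxSize
		fmt.Printf("deleted file %d, estimated size now %d\n", i, totalSize)
		if totalSize <= targetSize {
			break
		}
	}
	// Deleting file 0 brings the estimate to 6644, file 1 to 4596, so the
	// loop stops with files 2, 3 and 4 still on disk: the same "leave 3
	// files" outcome the TestPrune hunk below asserts.
}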
@@ -5,6 +5,7 @@
package ffldb_test

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"

@@ -13,6 +14,7 @@ import (

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/database"
	"github.com/btcsuite/btcd/database/ffldb"
)
@@ -253,6 +255,135 @@ func TestPersistence(t *testing.T) {
	}
}

// TestPrune tests that the older .fdb files are deleted with a call to prune.
func TestPrune(t *testing.T) {
	t.Parallel()

	// Create a new database to run tests against.
	dbPath := t.TempDir()
	db, err := database.Create(dbType, dbPath, blockDataNet)
	if err != nil {
		t.Errorf("Failed to create test database (%s) %v", dbType, err)
		return
	}
	defer db.Close()

	blockFileSize := uint64(2048)

	testfn := func(t *testing.T, db database.DB) {
		// Load the test blocks and save in the test context for use throughout
		// the tests.
		blocks, err := loadBlocks(t, blockDataFile, blockDataNet)
		if err != nil {
			t.Errorf("loadBlocks: Unexpected error: %v", err)
			return
		}
		err = db.Update(func(tx database.Tx) error {
			for i, block := range blocks {
				err := tx.StoreBlock(block)
				if err != nil {
					return fmt.Errorf("StoreBlock #%d: unexpected error: "+
						"%v", i, err)
				}
			}
			return nil
		})
		if err != nil {
			t.Fatal(err)
		}

		blockHashMap := make(map[chainhash.Hash][]byte, len(blocks))
		for _, block := range blocks {
			bytes, err := block.Bytes()
			if err != nil {
				t.Fatal(err)
			}
			blockHashMap[*block.Hash()] = bytes
		}

		err = db.Update(func(tx database.Tx) error {
			_, err := tx.PruneBlocks(1024)
			if err == nil {
				return fmt.Errorf("expected an error when attempting to " +
					"prune below the maxFileSize")
			}

			_, err = tx.PruneBlocks(0)
			if err == nil {
				return fmt.Errorf("expected an error when attempting to " +
					"prune below the maxFileSize")
			}

			return nil
		})
		if err != nil {
			t.Fatal(err)
		}

		var deletedBlocks []chainhash.Hash

		// This should leave 3 files on disk.
		err = db.Update(func(tx database.Tx) error {
			deletedBlocks, err = tx.PruneBlocks(blockFileSize * 3)
			return err
		})
		if err != nil {
			t.Fatal(err)
		}

		// The only error we can get is a bad pattern error. Since we're
		// hardcoding the pattern, we should not have an error at runtime.
		files, _ := filepath.Glob(filepath.Join(dbPath, "*.fdb"))
		if len(files) != 3 {
			t.Fatalf("Expected to find %d files but got %d",
				3, len(files))
		}

		// Check that all the blocks that were reported as deleted are gone
		// from the block index bucket as well.
		err = db.View(func(tx database.Tx) error {
			for _, deletedBlock := range deletedBlocks {
				_, err := tx.FetchBlock(&deletedBlock)
				if dbErr, ok := err.(database.Error); !ok ||
					dbErr.ErrorCode != database.ErrBlockNotFound {

					return fmt.Errorf("expected ErrBlockNotFound "+
						"but got %v", dbErr)
				}
			}

			return nil
		})
		if err != nil {
			t.Fatal(err)
		}

		// Check that the blocks that were not deleted are still present.
		for _, deletedBlock := range deletedBlocks {
			delete(blockHashMap, deletedBlock)
		}
		err = db.View(func(tx database.Tx) error {
			for hash, wantBytes := range blockHashMap {
				gotBytes, err := tx.FetchBlock(&hash)
				if err != nil {
					return err
				}
				if !bytes.Equal(gotBytes, wantBytes) {
					return fmt.Errorf("got bytes %x, want bytes %x",
						gotBytes, wantBytes)
				}
			}
			return nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	ffldb.TstRunWithMaxBlockFileSize(db, uint32(blockFileSize), func() {
		testfn(t, db)
	})
}

// TestInterface performs all interface tests for this database driver.
func TestInterface(t *testing.T) {
	t.Parallel()
@@ -11,7 +11,9 @@ The functions are only exported while the tests are being run.
package ffldb

-import "github.com/btcsuite/btcd/database"
+import (
+	"github.com/btcsuite/btcd/database"
+)

// TstRunWithMaxBlockFileSize runs the passed function with the maximum allowed
// file size for the database set to the provided value. The value will be set
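The hunk above only reworks the import block; the body of TstRunWithMaxBlockFileSize is outside this diff. As a rough sketch of what such a test hook plausibly does, it would live in export_test.go's package ffldb and swap the store's maxBlockFileSize field around the callback. The unexported *db concrete type and direct field access are assumptions here, not shown by this diff; only the store.maxBlockFileSize name appears in the PruneBlocks hunk.

// Plausible shape only; the actual body is not part of this commit.
func TstRunWithMaxBlockFileSize(idb database.DB, size uint32, fn func()) {
	ffldb := idb.(*db) // assumed concrete type of the ffldb driver
	origSize := ffldb.store.maxBlockFileSize

	// Run the callback with the reduced file size, then restore it.
	ffldb.store.maxBlockFileSize = size
	fn()
	ffldb.store.maxBlockFileSize = origSize
}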
@@ -389,6 +389,21 @@ type Tx interface {
	// implementations.
	FetchBlockRegions(regions []BlockRegion) ([][]byte, error)

	// PruneBlocks deletes the earliest block files until the total size of
	// all block files is at or below the target size (specified in bytes).
	//
	// The interface contract guarantees at least the following errors will
	// be returned (other implementation-specific errors are possible):
	//   - ErrTxNotWritable if attempted against a read-only transaction
	//   - ErrTxClosed if the transaction has already been closed
	//
	// NOTE: The data returned by this function is only valid during a
	// database transaction. Attempting to access it after a transaction
	// has ended results in undefined behavior. This constraint prevents
	// additional data copies and allows support for memory-mapped database
	// implementations.
	PruneBlocks(targetSize uint64) ([]chainhash.Hash, error)

	// ******************************************************************
	// Methods related to both atomic metadata storage and block storage.
	// ******************************************************************
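To make the error contract concrete, here is a minimal sketch (assuming an already-open database.DB handle; the pruneReadOnlyFails name is illustrative) of detecting the guaranteed ErrTxNotWritable code when PruneBlocks is invoked from a read-only transaction. database.Error and its ErrorCode field are matched the same way in the TestPrune hunk above.

package example

import "github.com/btcsuite/btcd/database"

// pruneReadOnlyFails reports whether calling PruneBlocks inside db.View, a
// read-only transaction, fails with ErrTxNotWritable as the contract above
// guarantees.
func pruneReadOnlyFails(db database.DB, targetSize uint64) bool {
	err := db.View(func(tx database.Tx) error {
		_, err := tx.PruneBlocks(targetSize)
		return err
	})
	dbErr, ok := err.(database.Error)
	return ok && dbErr.ErrorCode == database.ErrTxNotWritable
}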