Merge pull request #1971 from kcalvinalvin/add-pruning

main, wire, blockchain, indexers, ffldb: Add pruning
Olaoluwa Osuntokun 2023-08-23 15:59:37 -07:00 committed by GitHub
commit ec401d00a1
16 changed files with 561 additions and 36 deletions


@@ -115,6 +115,10 @@ type BlockChain struct {
	// fields in this struct below this point.
	chainLock sync.RWMutex

	// pruneTarget is the size in bytes the database targets for when the node
	// is pruned.
	pruneTarget uint64

	// These fields are related to the memory block index.  They both have
	// their own locks, however they are often also protected by the chain
	// lock to help prevent logic races when blocks are being processed.
@@ -600,6 +604,26 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block,

	// Atomically insert info into the database.
	err = b.db.Update(func(dbTx database.Tx) error {
		// If the pruneTarget isn't 0, we should attempt to delete older
		// blocks from the database.
		if b.pruneTarget != 0 {
			// When the total block size is under the prune target,
			// pruning blocks is a no-op and the deleted hashes are nil.
			deletedHashes, err := dbTx.PruneBlocks(b.pruneTarget)
			if err != nil {
				return err
			}

			// Only attempt to delete if we have any deleted blocks.
			if len(deletedHashes) != 0 {
				// Delete the spend journals of the pruned blocks.
				err = dbPruneSpendJournalEntry(dbTx, deletedHashes)
				if err != nil {
					return err
				}
			}
		}

		// Update best block state.
		err := dbPutBestState(dbTx, state, node.workSum)
		if err != nil {
@@ -1702,6 +1726,11 @@ type Config struct {
	// This field can be nil if the caller is not interested in using a
	// signature cache.
	HashCache *txscript.HashCache

	// Prune specifies the target database usage (in bytes) the database
	// will aim for with its block files.  A Prune value of 0 means no
	// blocks will be deleted.
	Prune uint64
}

// New returns a BlockChain instance using the provided configuration details.
@@ -1757,6 +1786,7 @@ func New(config *Config) (*BlockChain, error) {
		prevOrphans:      make(map[chainhash.Hash][]*orphanBlock),
		warningCaches:    newThresholdCaches(vbNumBits),
		deploymentCaches: newThresholdCaches(chaincfg.DefinedDeployments),
		pruneTarget:      config.Prune,
	}

	// Ensure all the deployments are synchronized with our clock if
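For orientation, here is a minimal sketch of where Config.Prune plugs in. It is not part of this commit; the 2048 MiB target, the helper name, and the omitted Config fields are illustrative assumptions:

package example

import (
	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/database"
)

// newPrunedChain is a hypothetical helper showing how a caller hands a
// prune target to blockchain.New.
func newPrunedChain(db database.DB) (*blockchain.BlockChain, error) {
	// Config.Prune is denominated in bytes; server.go below converts the
	// MiB-denominated --prune flag the same way.
	const pruneMiB = 2048
	return blockchain.New(&blockchain.Config{
		DB:          db,
		ChainParams: &chaincfg.MainNetParams,
		TimeSource:  blockchain.NewMedianTime(),
		Prune:       pruneMiB * 1024 * 1024,
	})
}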
// Ensure all the deployments are synchronized with our clock if // Ensure all the deployments are synchronized with our clock if


@@ -494,6 +494,21 @@ func dbRemoveSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash) erro
	return spendBucket.Delete(blockHash[:])
}

// dbPruneSpendJournalEntry uses an existing database transaction to remove all
// the spend journal entries for the pruned blocks.
func dbPruneSpendJournalEntry(dbTx database.Tx, blockHashes []chainhash.Hash) error {
	spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName)

	for _, blockHash := range blockHashes {
		err := spendBucket.Delete(blockHash[:])
		if err != nil {
			return err
		}
	}

	return nil
}

// -----------------------------------------------------------------------------
// The unspent transaction output (utxo) set consists of an entry for each
// unspent output using a format that is optimized to reduce space using domain


@@ -991,3 +991,15 @@ func NewAddrIndex(db database.DB, chainParams *chaincfg.Params) *AddrIndex {
func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error {
	return dropIndex(db, addrIndexKey, addrIndexName, interrupt)
}

// AddrIndexInitialized returns true if the address index has been created previously.
func AddrIndexInitialized(db database.DB) bool {
	var exists bool
	db.View(func(dbTx database.Tx) error {
		bucket := dbTx.Metadata().Bucket(addrIndexKey)
		exists = bucket != nil
		return nil
	})

	return exists
}


@@ -355,3 +355,15 @@ func NewCfIndex(db database.DB, chainParams *chaincfg.Params) *CfIndex {
func DropCfIndex(db database.DB, interrupt <-chan struct{}) error {
	return dropIndex(db, cfIndexParentBucketKey, cfIndexName, interrupt)
}

// CfIndexInitialized returns true if the cfindex has been created previously.
func CfIndexInitialized(db database.DB) bool {
	var exists bool
	db.View(func(dbTx database.Tx) error {
		bucket := dbTx.Metadata().Bucket(cfIndexParentBucketKey)
		exists = bucket != nil
		return nil
	})

	return exists
}


@@ -481,3 +481,15 @@ func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {
	return dropIndex(db, txIndexKey, txIndexName, interrupt)
}

// TxIndexInitialized returns true if the tx index has been created previously.
func TxIndexInitialized(db database.DB) bool {
	var exists bool
	db.View(func(dbTx database.Tx) error {
		bucket := dbTx.Metadata().Bucket(txIndexKey)
		exists = bucket != nil
		return nil
	})

	return exists
}
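The three *Initialized helpers added above (addrindex, cfindex, txindex) share one shape. As a reading aid, the pattern reduces to this generic sketch; bucketExists is hypothetical and not part of the indexers package:

package example

import "github.com/btcsuite/btcd/database"

// bucketExists reports whether a top-level metadata bucket exists.
func bucketExists(db database.DB, key []byte) bool {
	var exists bool
	// The error from View is deliberately ignored, matching the helpers:
	// a failed read simply reports the index as uninitialized.
	db.View(func(dbTx database.Tx) error {
		exists = dbTx.Metadata().Bucket(key) != nil
		return nil
	})
	return exists
}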

btcd.go

@@ -157,6 +157,88 @@ func btcdMain(serverChan chan<- *server) error {
		return nil
	}

	// Check if the database had previously been pruned.  If it had been, it's
	// not possible to newly generate the tx index and addr index.
	var beenPruned bool
	db.View(func(dbTx database.Tx) error {
		beenPruned, err = dbTx.BeenPruned()
		return err
	})
	if err != nil {
		btcdLog.Errorf("%v", err)
		return err
	}
	if beenPruned && cfg.Prune == 0 {
		err = fmt.Errorf("--prune cannot be disabled as the node has been "+
			"previously pruned. You must delete the files in the datadir: \"%s\" "+
			"and sync from the beginning to disable pruning", cfg.DataDir)
		btcdLog.Errorf("%v", err)
		return err
	}
	if beenPruned && cfg.TxIndex {
		err = fmt.Errorf("--txindex cannot be enabled as the node has been "+
			"previously pruned. You must delete the files in the datadir: \"%s\" "+
			"and sync from the beginning to enable the desired index", cfg.DataDir)
		btcdLog.Errorf("%v", err)
		return err
	}
	if beenPruned && cfg.AddrIndex {
		err = fmt.Errorf("--addrindex cannot be enabled as the node has been "+
			"previously pruned. You must delete the files in the datadir: \"%s\" "+
			"and sync from the beginning to enable the desired index", cfg.DataDir)
		btcdLog.Errorf("%v", err)
		return err
	}
	// If we've previously been pruned and the cfindex isn't present, it means
	// that the user wants to enable the cfindex after the node has already
	// synced up and been pruned.
	if beenPruned && !indexers.CfIndexInitialized(db) && !cfg.NoCFilters {
		err = fmt.Errorf("compact filters cannot be enabled as the node has been "+
			"previously pruned. You must delete the files in the datadir: \"%s\" "+
			"and sync from the beginning to enable the desired index. You may "+
			"use the --nocfilters flag to start the node up without the compact "+
			"filters", cfg.DataDir)
		btcdLog.Errorf("%v", err)
		return err
	}
	// If the user wants to disable the cfindex and is pruned or has enabled
	// pruning, force the user to either drop the cfindex manually or restart
	// the node without the --nocfilters flag.
	if (beenPruned || cfg.Prune != 0) && indexers.CfIndexInitialized(db) && cfg.NoCFilters {
		err = fmt.Errorf("--nocfilters flag was given but the compact filters have " +
			"previously been enabled on this node and the index data currently " +
			"exists in the database. The node has also been previously pruned and " +
			"the database would be left in an inconsistent state if the compact " +
			"filters don't get indexed now. To disable compact filters, please drop the " +
			"index completely with the --dropcfindex flag and restart the node. " +
			"To keep the compact filters, restart the node without the --nocfilters " +
			"flag")
		btcdLog.Errorf("%v", err)
		return err
	}

	// Enforce removal of txindex and addrindex if user requested pruning.
	// This is to require explicit action from the user before removing
	// indexes that won't be useful when block files are pruned.
	//
	// NOTE: The order is important here because dropping the tx index also
	// drops the address index since it relies on it.  We explicitly make the
	// user drop both indexes if --addrindex was enabled previously.
	if cfg.Prune != 0 && indexers.AddrIndexInitialized(db) {
		err = fmt.Errorf("--prune flag may not be given when the address index " +
			"has been initialized. Please drop the address index with the " +
			"--dropaddrindex flag before enabling pruning")
		btcdLog.Errorf("%v", err)
		return err
	}
	if cfg.Prune != 0 && indexers.TxIndexInitialized(db) {
		err = fmt.Errorf("--prune flag may not be given when the transaction index " +
			"has been initialized. Please drop the transaction index with the " +
			"--droptxindex flag before enabling pruning")
		btcdLog.Errorf("%v", err)
		return err
	}

	// The config file is already created if it did not exist and the log
	// file has already been opened by now so we only need to allow
	// creating rpc cert and key files if they don't exist.


@@ -66,6 +66,7 @@ const (
	sampleConfigFilename = "sample-btcd.conf"
	defaultTxIndex       = false
	defaultAddrIndex     = false
	pruneMinSize         = 1536
)

var (
@@ -146,6 +147,7 @@ type config struct {
	Proxy             string `long:"proxy" description:"Connect via SOCKS5 proxy (eg. 127.0.0.1:9050)"`
	ProxyPass         string `long:"proxypass" default-mask:"-" description:"Password for proxy server"`
	ProxyUser         string `long:"proxyuser" description:"Username for proxy server"`
	Prune             uint64 `long:"prune" description:"Prune already validated blocks from the database. Must specify a target size in MiB (minimum value of 1536, default value of 0 will disable pruning)"`
	RegressionTest    bool   `long:"regtest" description:"Use the regression test network"`
	RejectNonStd      bool   `long:"rejectnonstd" description:"Reject non-standard transactions regardless of the default settings for the active network."`
	RejectReplacement bool   `long:"rejectreplacement" description:"Reject transactions that attempt to replace existing transactions within the mempool through the Replace-By-Fee (RBF) signaling policy."`
@@ -1137,6 +1139,30 @@ func loadConfig() (*config, []string, error) {
		}
	}

	if cfg.Prune != 0 && cfg.Prune < pruneMinSize {
		err := fmt.Errorf("%s: the minimum value for --prune is %d. Got %d",
			funcName, pruneMinSize, cfg.Prune)
		fmt.Fprintln(os.Stderr, err)
		fmt.Fprintln(os.Stderr, usageMessage)
		return nil, nil, err
	}

	if cfg.Prune != 0 && cfg.TxIndex {
		err := fmt.Errorf("%s: the --prune and --txindex options may "+
			"not be activated at the same time", funcName)
		fmt.Fprintln(os.Stderr, err)
		fmt.Fprintln(os.Stderr, usageMessage)
		return nil, nil, err
	}

	if cfg.Prune != 0 && cfg.AddrIndex {
		err := fmt.Errorf("%s: the --prune and --addrindex options may "+
			"not be activated at the same time", funcName)
		fmt.Fprintln(os.Stderr, err)
		fmt.Fprintln(os.Stderr, usageMessage)
		return nil, nil, err
	}

	// Warn about missing config file only after all other configuration is
	// done.  This prevents the warning on help messages and invalid
	// options.  Note this should go directly before the return.
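As a usage sketch, a few illustrative invocations of the new flag (values hypothetical):

btcd --prune=2048    target roughly 2 GiB of block files
btcd --prune=1536    the minimum accepted target
btcd --prune=0       pruning disabled (the default)

Per the checks above, combining --prune with --txindex or --addrindex is rejected at startup.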


@@ -15,6 +15,9 @@ import (
	"io"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
@@ -23,6 +26,10 @@ import (
)

const (
	// blockFileExtension is the extension that's used to store the block
	// files on the disk.
	blockFileExtension = ".fdb"

	// The Bitcoin protocol encodes block height as int32, so max number of
	// blocks is 2^31.  Max block size per the protocol is 32MiB per block.
	// So the theoretical max at the time this comment was written is 64PiB
@@ -32,7 +39,7 @@ const (
	// 512MiB each for a total of ~476.84PiB (roughly 7.4 times the current
	// theoretical max), so there is room for the max block size to grow in
	// the future.
-	blockFilenameTemplate = "%09d.fdb"
+	blockFilenameTemplate = "%09d" + blockFileExtension

	// maxOpenFiles is the max number of open files to maintain in the
	// open blocks cache.  Note that this does not include the current
@@ -713,36 +720,57 @@ func (s *blockStore) handleRollback(oldBlockFileNum, oldBlockOffset uint32) {
}

-// scanBlockFiles searches the database directory for all flat block files to
-// find the end of the most recent file.  This position is considered the
-// current write cursor which is also stored in the metadata.  Thus, it is used
-// to detect unexpected shutdowns in the middle of writes so the block files
-// can be reconciled.
-func scanBlockFiles(dbPath string) (int, uint32) {
-	lastFile := -1
-	fileLen := uint32(0)
-	for i := 0; ; i++ {
-		filePath := blockFilePath(dbPath, uint32(i))
-		st, err := os.Stat(filePath)
-		if err != nil {
-			break
-		}
-		lastFile = i
-		fileLen = uint32(st.Size())
-	}
-
-	log.Tracef("Scan found latest block file #%d with length %d", lastFile,
-		fileLen)
-	return lastFile, fileLen
-}
+// scanBlockFiles searches the database directory for all flat block files to
+// find the first file, last file, and the end of the most recent file.  The
+// position at the last file is considered the current write cursor which is
+// also stored in the metadata.  Thus, it is used to detect unexpected shutdowns
+// in the middle of writes so the block files can be reconciled.
+func scanBlockFiles(dbPath string) (int, int, uint32, error) {
+	firstFile, lastFile, lastFileLen, err := int(-1), int(-1), uint32(0), error(nil)
+
+	files, err := filepath.Glob(filepath.Join(dbPath, "*"+blockFileExtension))
+	if err != nil {
+		return 0, 0, 0, err
+	}
+	sort.Strings(files)
+
+	// Return early if there are no block files.
+	if len(files) == 0 {
+		return firstFile, lastFile, lastFileLen, nil
+	}
+
+	// Grab the first and last file's number.
+	firstFile, err = strconv.Atoi(strings.TrimSuffix(filepath.Base(files[0]), blockFileExtension))
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("scanBlockFiles error: %v", err)
+	}
+	lastFile, err = strconv.Atoi(strings.TrimSuffix(filepath.Base(files[len(files)-1]), blockFileExtension))
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("scanBlockFiles error: %v", err)
+	}
+
+	// Get the last file's length.
+	filePath := blockFilePath(dbPath, uint32(lastFile))
+	st, err := os.Stat(filePath)
+	if err != nil {
+		return 0, 0, 0, err
+	}
+	lastFileLen = uint32(st.Size())
+
+	log.Tracef("Scan found latest block file #%d with length %d", lastFile, lastFileLen)
+
+	return firstFile, lastFile, lastFileLen, err
+}

// newBlockStore returns a new block store with the current block file number
// and offset set and all fields initialized.
-func newBlockStore(basePath string, network wire.BitcoinNet) *blockStore {
+func newBlockStore(basePath string, network wire.BitcoinNet) (*blockStore, error) {
	// Look for the end of the latest block file to determine what the
	// write cursor position is from the viewpoint of the block files on
	// disk.
-	fileNum, fileOff := scanBlockFiles(basePath)
+	_, fileNum, fileOff, err := scanBlockFiles(basePath)
+	if err != nil {
+		return nil, err
+	}
	if fileNum == -1 {
		fileNum = 0
		fileOff = 0
@@ -765,5 +793,5 @@ func newBlockStore(basePath string, network wire.BitcoinNet) *blockStore {
	store.openFileFunc = store.openFile
	store.openWriteFileFunc = store.openWriteFile
	store.deleteFileFunc = store.deleteFile
-	return store
+	return store, nil
}
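One subtlety in the new scanBlockFiles: plain sort.Strings yields the correct numeric ordering only because block files are named with the zero-padded %09d template, so lexicographic order matches numeric order. A standalone sketch illustrating the property:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Zero-padded names sort lexicographically in numeric order, which is
	// what scanBlockFiles relies on after globbing "*.fdb".
	files := []string{"000000010.fdb", "000000002.fdb", "000000001.fdb"}
	sort.Strings(files)
	fmt.Println(files)
	// Output: [000000001.fdb 000000002.fdb 000000010.fdb]
}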


@@ -1669,6 +1669,116 @@ func (tx *transaction) writePendingAndCommit() error {
	return tx.db.cache.commitTx(tx)
}

// PruneBlocks deletes the block files until it reaches the target size
// (specified in bytes).  Returns an error if the target size is below
// the maximum size of a single block file.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) PruneBlocks(targetSize uint64) ([]chainhash.Hash, error) {
	// Ensure transaction state is valid.
	if err := tx.checkClosed(); err != nil {
		return nil, err
	}

	// Ensure the transaction is writable.
	if !tx.writable {
		str := "prune blocks requires a writable database transaction"
		return nil, makeDbErr(database.ErrTxNotWritable, str, nil)
	}

	// Make a local alias for the maxBlockFileSize.
	maxSize := uint64(tx.db.store.maxBlockFileSize)
	if targetSize < maxSize {
		return nil, fmt.Errorf("got target size of %d but it must be greater "+
			"than %d, the max size of a single block file",
			targetSize, maxSize)
	}

	first, last, lastFileSize, err := scanBlockFiles(tx.db.store.basePath)
	if err != nil {
		return nil, err
	}

	// If we have no files on disk or just a single file on disk, return early.
	if first == last {
		return nil, nil
	}

	// Last file number minus the first file number gives us the count of files
	// on disk minus 1.  We don't want to count the last file since we can't
	// assume that it is of max size.
	maxSizeFileCount := last - first

	// If the total size of the block files is under the target, return early
	// and don't prune.
	totalSize := uint64(lastFileSize) + (maxSize * uint64(maxSizeFileCount))
	if totalSize <= targetSize {
		return nil, nil
	}

	log.Tracef("Using %d more bytes than the target of %d MiB. Pruning files...",
		totalSize-targetSize,
		targetSize/(1024*1024))

	deletedFiles := make(map[uint32]struct{})

	// We use < not <= so that the last file is never deleted.  There are
	// other checks in place but setting it to < here doesn't hurt.
	for i := uint32(first); i < uint32(last); i++ {
		err = tx.db.store.deleteFileFunc(i)
		if err != nil {
			return nil, fmt.Errorf("PruneBlocks: Failed to delete block file "+
				"number %d: %v", i, err)
		}

		// Add the file index to the deleted files map so that we can later
		// delete the block location index.
		deletedFiles[i] = struct{}{}

		// If we're already at or below the target usage, break and don't
		// try to delete more files.
		totalSize -= maxSize
		if totalSize <= targetSize {
			break
		}
	}

	// Delete the indexed block locations for the files that we've just deleted.
	var deletedBlockHashes []chainhash.Hash
	cursor := tx.blockIdxBucket.Cursor()
	for ok := cursor.First(); ok; ok = cursor.Next() {
		loc := deserializeBlockLoc(cursor.Value())

		_, found := deletedFiles[loc.blockFileNum]
		if found {
			deletedBlockHashes = append(deletedBlockHashes, *(*chainhash.Hash)(cursor.Key()))
			err := cursor.Delete()
			if err != nil {
				return nil, err
			}
		}
	}

	log.Tracef("Finished pruning. Database now at %d bytes", totalSize)

	return deletedBlockHashes, nil
}

// BeenPruned returns if the block storage has ever been pruned.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) BeenPruned() (bool, error) {
	first, last, _, err := scanBlockFiles(tx.db.store.basePath)
	if err != nil {
		return false, err
	}

	// If the database is pruned, then the first .fdb will not be there.
	// We also rule out the cases of a single file or no files on disk by
	// checking that first != last.
	return first != 0 && (first != last), nil
}

// Commit commits all changes that have been made to the root metadata bucket
// and all of its sub-buckets to the database cache which is periodically synced
// to persistent storage.  In addition, it commits all new blocks directly to
@@ -2016,7 +2126,10 @@ func openDB(dbPath string, network wire.BitcoinNet, create bool) (database.DB, e
	// according to the data that is actually on disk.  Also create the
	// database cache which wraps the underlying leveldb database to provide
	// write caching.
-	store := newBlockStore(dbPath, network)
+	store, err := newBlockStore(dbPath, network)
+	if err != nil {
+		return nil, convertErr(err.Error(), err)
+	}
	cache := newDbCache(ldb, store, defaultCacheSize, defaultFlushSecs)
	pdb := &db{store: store, cache: cache}
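To make the size accounting concrete, here is a worked example with hypothetical numbers (block files 3 through 9 on disk, a 100 MiB tail file) of how PruneBlocks estimates total usage before deleting anything:

package main

import "fmt"

func main() {
	// maxSize mirrors ffldb's 512 MiB maxBlockFileSize default; the file
	// numbers and tail length are made up for the example.
	const maxSize = uint64(512 * 1024 * 1024)
	first, last := 3, 9
	lastFileSize := uint64(100 * 1024 * 1024)

	// Every file except the last is assumed to be full, exactly as
	// PruneBlocks computes it: 100 MiB + 6*512 MiB = 3172 MiB.
	totalSize := lastFileSize + maxSize*uint64(last-first)
	fmt.Printf("%d bytes (~%d MiB)\n", totalSize, totalSize/(1024*1024))
}

With a target below that figure, the loop then deletes files 3, 4, and so on (never the last file), subtracting maxSize each time until totalSize falls to or under the target.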


@@ -5,6 +5,7 @@
package ffldb_test

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
@@ -13,6 +14,7 @@ import (
	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/database"
	"github.com/btcsuite/btcd/database/ffldb"
)
@@ -253,6 +255,166 @@ func TestPersistence(t *testing.T) {
	}
}

// TestPrune tests that the older .fdb files are deleted with a call to prune.
func TestPrune(t *testing.T) {
	t.Parallel()

	// Create a new database to run tests against.
	dbPath := t.TempDir()
	db, err := database.Create(dbType, dbPath, blockDataNet)
	if err != nil {
		t.Errorf("Failed to create test database (%s) %v", dbType, err)
		return
	}
	defer db.Close()

	blockFileSize := uint64(2048)

	testfn := func(t *testing.T, db database.DB) {
		// Load the test blocks and save in the test context for use throughout
		// the tests.
		blocks, err := loadBlocks(t, blockDataFile, blockDataNet)
		if err != nil {
			t.Errorf("loadBlocks: Unexpected error: %v", err)
			return
		}

		err = db.Update(func(tx database.Tx) error {
			for i, block := range blocks {
				err := tx.StoreBlock(block)
				if err != nil {
					return fmt.Errorf("StoreBlock #%d: unexpected error: "+
						"%v", i, err)
				}
			}
			return nil
		})
		if err != nil {
			t.Fatal(err)
		}

		blockHashMap := make(map[chainhash.Hash][]byte, len(blocks))
		for _, block := range blocks {
			bytes, err := block.Bytes()
			if err != nil {
				t.Fatal(err)
			}
			blockHashMap[*block.Hash()] = bytes
		}

		err = db.Update(func(tx database.Tx) error {
			_, err := tx.PruneBlocks(1024)
			if err == nil {
				return fmt.Errorf("expected an error when attempting to " +
					"prune below the maxFileSize")
			}
			_, err = tx.PruneBlocks(0)
			if err == nil {
				return fmt.Errorf("expected an error when attempting to " +
					"prune below the maxFileSize")
			}
			return nil
		})
		if err != nil {
			t.Fatal(err)
		}

		err = db.View(func(tx database.Tx) error {
			pruned, err := tx.BeenPruned()
			if err != nil {
				return err
			}
			if pruned {
				err = fmt.Errorf("the database hasn't been pruned but " +
					"BeenPruned returned true")
			}
			return err
		})
		if err != nil {
			t.Fatal(err)
		}

		var deletedBlocks []chainhash.Hash

		// This should leave 3 files on disk.
		err = db.Update(func(tx database.Tx) error {
			deletedBlocks, err = tx.PruneBlocks(blockFileSize * 3)
			return err
		})
		if err != nil {
			t.Fatal(err)
		}

		// The only error we can get is a bad pattern error.  Since we're
		// hardcoding the pattern, we should not have an error at runtime.
		files, _ := filepath.Glob(filepath.Join(dbPath, "*.fdb"))
		if len(files) != 3 {
			t.Fatalf("Expected to find %d files but got %d",
				3, len(files))
		}

		err = db.View(func(tx database.Tx) error {
			pruned, err := tx.BeenPruned()
			if err != nil {
				return err
			}
			if !pruned {
				err = fmt.Errorf("the database has been pruned but " +
					"BeenPruned returned false")
			}
			return err
		})
		if err != nil {
			t.Fatal(err)
		}

		// Check that all the blocks that were reported as deleted are gone
		// from the block index bucket as well.
		err = db.View(func(tx database.Tx) error {
			for _, deletedBlock := range deletedBlocks {
				_, err := tx.FetchBlock(&deletedBlock)
				if dbErr, ok := err.(database.Error); !ok ||
					dbErr.ErrorCode != database.ErrBlockNotFound {

					return fmt.Errorf("Expected ErrBlockNotFound "+
						"but got %v", dbErr)
				}
			}

			return nil
		})
		if err != nil {
			t.Fatal(err)
		}

		// Check that the blocks that were not deleted are still present.
		for _, deletedBlock := range deletedBlocks {
			delete(blockHashMap, deletedBlock)
		}
		err = db.View(func(tx database.Tx) error {
			for hash, wantBytes := range blockHashMap {
				gotBytes, err := tx.FetchBlock(&hash)
				if err != nil {
					return err
				}

				if !bytes.Equal(gotBytes, wantBytes) {
					return fmt.Errorf("got bytes %x, want bytes %x",
						gotBytes, wantBytes)
				}
			}
			return nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	ffldb.TstRunWithMaxBlockFileSize(db, uint32(blockFileSize), func() {
		testfn(t, db)
	})
}

// TestInterface performs all interfaces tests for this database driver.
func TestInterface(t *testing.T) {
	t.Parallel()


@@ -11,7 +11,9 @@ The functions are only exported while the tests are being run.
package ffldb

-import "github.com/btcsuite/btcd/database"
+import (
+	"github.com/btcsuite/btcd/database"
+)

// TstRunWithMaxBlockFileSize runs the passed function with the maximum allowed
// file size for the database set to the provided value.  The value will be set


@@ -389,6 +389,26 @@ type Tx interface {
	// implementations.
	FetchBlockRegions(regions []BlockRegion) ([][]byte, error)

	// PruneBlocks deletes the block files until it reaches the target size
	// (specified in bytes).
	//
	// The interface contract guarantees at least the following errors will
	// be returned (other implementation-specific errors are possible):
	//   - ErrTxNotWritable if attempted against a read-only transaction
	//   - ErrTxClosed if the transaction has already been closed
	//
	// NOTE: The data returned by this function is only valid during a
	// database transaction.  Attempting to access it after a transaction
	// has ended results in undefined behavior.  This constraint prevents
	// additional data copies and allows support for memory-mapped database
	// implementations.
	PruneBlocks(targetSize uint64) ([]chainhash.Hash, error)

	// BeenPruned returns if the block storage has ever been pruned.
	//
	// Implementation specific errors are possible.
	BeenPruned() (bool, error)

	// ******************************************************************
	// Methods related to both atomic metadata storage and block storage.
	// ******************************************************************
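A minimal usage sketch of the two new Tx methods; pruneAndReport is a hypothetical helper, and the handle db is assumed to be an open ffldb-backed database:

package example

import (
	"fmt"

	"github.com/btcsuite/btcd/database"
)

func pruneAndReport(db database.DB, targetBytes uint64) (bool, error) {
	// PruneBlocks must run inside a writable transaction.
	err := db.Update(func(tx database.Tx) error {
		deleted, err := tx.PruneBlocks(targetBytes)
		if err != nil {
			return err
		}
		fmt.Printf("pruned %d blocks\n", len(deleted))
		return nil
	})
	if err != nil {
		return false, err
	}

	// BeenPruned also works in a read-only transaction.
	var pruned bool
	err = db.View(func(tx database.Tx) error {
		var viewErr error
		pruned, viewErr = tx.BeenPruned()
		return viewErr
	})
	return pruned, err
}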


@@ -1214,7 +1214,7 @@ func handleGetBlockChainInfo(s *rpcServer, cmd interface{}, closeChan <-chan str
		BestBlockHash: chainSnapshot.Hash.String(),
		Difficulty:    getDifficultyRatio(chainSnapshot.Bits, params),
		MedianTime:    chainSnapshot.MedianTime.Unix(),
-		Pruned:        false,
+		Pruned:        cfg.Prune != 0,
		SoftForks: &btcjson.SoftForks{
			Bip9SoftForks: make(map[string]*btcjson.Bip9SoftForkDescription),
		},


@@ -44,8 +44,8 @@ import (
const (
	// defaultServices describes the default services that are supported by
	// the server.
-	defaultServices = wire.SFNodeNetwork | wire.SFNodeBloom |
-		wire.SFNodeWitness | wire.SFNodeCF
+	defaultServices = wire.SFNodeNetwork | wire.SFNodeNetworkLimited |
+		wire.SFNodeBloom | wire.SFNodeWitness | wire.SFNodeCF

	// defaultRequiredServices describes the default services that are
	// required to be supported by outbound peers.
@@ -2730,6 +2730,9 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string,
	if cfg.NoCFilters {
		services &^= wire.SFNodeCF
	}
	if cfg.Prune != 0 {
		services &^= wire.SFNodeNetwork
	}

	amgr := addrmgr.New(cfg.DataDir, btcdLookup)
@@ -2831,6 +2834,7 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string,
		SigCache:     s.sigCache,
		IndexManager: indexManager,
		HashCache:    s.hashCache,
		Prune:        cfg.Prune * 1024 * 1024,
	})
	if err != nil {
		return nil, err
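The &^ used above is Go's bit-clear operator: a pruned node keeps SFNodeNetworkLimited from the new defaults but drops the full-history SFNodeNetwork bit. A standalone illustration (the printed string follows the stringer ordering added in wire below):

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/wire"
)

func main() {
	services := wire.SFNodeNetwork | wire.SFNodeNetworkLimited |
		wire.SFNodeBloom | wire.SFNodeWitness | wire.SFNodeCF

	// Mirrors newServer when cfg.Prune != 0: clear only SFNodeNetwork.
	services &^= wire.SFNodeNetwork

	fmt.Println(services)
	// SFNodeBloom|SFNodeWitness|SFNodeCF|SFNodeNetworkLimited
}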


@@ -93,18 +93,23 @@ const (
	// SFNode2X is a flag used to indicate a peer is running the Segwit2X
	// software.
	SFNode2X

	// SFNodeNetworkLimited is a flag used to indicate a peer supports serving
	// the last 288 blocks.
	SFNodeNetworkLimited = 1 << 10
)

// Map of service flags back to their constant names for pretty printing.
var sfStrings = map[ServiceFlag]string{
	SFNodeNetwork:        "SFNodeNetwork",
	SFNodeGetUTXO:        "SFNodeGetUTXO",
	SFNodeBloom:          "SFNodeBloom",
	SFNodeWitness:        "SFNodeWitness",
	SFNodeXthin:          "SFNodeXthin",
	SFNodeBit5:           "SFNodeBit5",
	SFNodeCF:             "SFNodeCF",
	SFNode2X:             "SFNode2X",
	SFNodeNetworkLimited: "SFNodeNetworkLimited",
}

// orderedSFStrings is an ordered list of service flags from highest to
@@ -118,6 +123,7 @@ var orderedSFStrings = []ServiceFlag{
	SFNodeBit5,
	SFNodeCF,
	SFNode2X,
	SFNodeNetworkLimited,
}

// String returns the ServiceFlag in human-readable form.
// String returns the ServiceFlag in human-readable form. // String returns the ServiceFlag in human-readable form.


@@ -21,7 +21,8 @@ func TestServiceFlagStringer(t *testing.T) {
	{SFNodeBit5, "SFNodeBit5"},
	{SFNodeCF, "SFNodeCF"},
	{SFNode2X, "SFNode2X"},
-	{0xffffffff, "SFNodeNetwork|SFNodeGetUTXO|SFNodeBloom|SFNodeWitness|SFNodeXthin|SFNodeBit5|SFNodeCF|SFNode2X|0xffffff00"},
+	{SFNodeNetworkLimited, "SFNodeNetworkLimited"},
+	{0xffffffff, "SFNodeNetwork|SFNodeGetUTXO|SFNodeBloom|SFNodeWitness|SFNodeXthin|SFNodeBit5|SFNodeCF|SFNode2X|SFNodeNetworkLimited|0xfffffb00"},
}

	t.Logf("Running %d tests", len(tests))