Implement infrastructure for BIP0009.

This commit adds all of the infrastructure needed to support BIP0009
soft forks.

The following is an overview of the changes:

- Add new configuration options to the chaincfg package which allow the
  rule deployments to be defined per chain (see the sketch after this list)
- Implement code to calculate the threshold state as required by BIP0009
  - Use threshold state caches that are stored to the database in order
    to accelerate startup time
  - Remove caches that are invalid due to definition changes in the
    params including additions, deletions, and changes to existing
    entries
- Detect and warn when a new unknown rule is about to activate or has
  been activated in the block connection code
- Detect and warn when 50% of the last 100 blocks have unexpected
  versions.
- Remove the latest block version from wire since it no longer applies
- Add a version parameter to the wire.NewBlockHeader function since the
  default is no longer available
- Update the miner block template generation code to use the calculated
  block version based on the currently defined rule deployments and
  their threshold states as of the previous block
- Add tests for new error type
- Add tests for threshold state cache
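
The first bullet above refers to the chaincfg additions later in this diff
(the ConsensusDeployment type, the DeploymentTestDummy and DefinedDeployments
constants, and the Deployments field referenced by the blockchain code). A
minimal sketch of how a chain might define a rule deployment with them
follows; it assumes Deployments is a fixed-size array indexed by deployment
ID (consistent with the Deployments[:] usage below), and the concrete bit
number and times are illustrative only, not values from this commit.

package main

import "github.com/btcsuite/btcd/chaincfg"

// exampleParams copies the mainnet parameters and overrides the test dummy
// deployment. The helper name is introduced here for illustration only.
func exampleParams() chaincfg.Params {
    params := chaincfg.MainNetParams
    params.Deployments[chaincfg.DeploymentTestDummy] = chaincfg.ConsensusDeployment{
        BitNumber:  28,         // block version bit used to signal the vote
        StartTime:  1462060800, // median block time after which voting starts
        ExpireTime: 1493596800, // median block time after which the deployment expires
    }
    return params
}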
Dave Collins 2016-08-28 17:51:55 -05:00
parent 95e6de00b8
commit c440584efc
17 changed files with 1672 additions and 36 deletions


@ -223,6 +223,34 @@ type BlockChain struct {
// chain state can be quickly reconstructed on load.
stateLock sync.RWMutex
stateSnapshot *BestState
// The following caches are used to efficiently keep track of the
// current deployment threshold state of each rule change deployment.
//
// This information is stored in the database so it can be quickly
// reconstructed on load.
//
// warningCaches caches the current deployment threshold state for blocks
// in each of the **possible** deployments. This is used in order to
// detect when new unrecognized rule changes are being voted on and/or
// have been activated such as will be the case when older versions of
// the software are being used
//
// deploymentCaches caches the current deployment threshold state for
// blocks in each of the actively defined deployments.
warningCaches []thresholdStateCache
deploymentCaches []thresholdStateCache
// The following fields are used to determine if certain warnings have
// already been shown.
//
// unknownRulesWarned refers to warnings due to unknown rules being
// activated.
//
// unknownVersionsWarned refers to warnings due to unknown versions
// being mined.
unknownRulesWarned bool
unknownVersionsWarned bool
}
// DisableVerify provides a mechanism to disable transaction script validation
@ -556,6 +584,38 @@ func (b *BlockChain) relativeNode(anchor *blockNode, distance uint32) (*blockNod
return iterNode, nil
}
// ancestorNode returns the ancestor block node at the provided height by
// following the chain backwards from the given node while dynamically loading
// any pruned nodes from the database and updating the memory block chain as
// needed. The returned block will be nil when a height is requested that is
// after the height of the passed node or is less than zero.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) ancestorNode(node *blockNode, height int32) (*blockNode, error) {
// Nothing to do if the requested height is outside of the valid range.
if height > node.height || height < 0 {
return nil, nil
}
// Iterate backwards until the requested height is reached.
iterNode := node
for iterNode != nil && iterNode.height > height {
// Get the previous block node. This function is used over
// simply accessing iterNode.parent directly as it will
// dynamically create previous block nodes as needed. This
// helps allow only the pieces of the chain that are needed
// to remain in memory.
var err error
iterNode, err = b.getPrevNodeFromNode(iterNode)
if err != nil {
log.Errorf("getPrevNodeFromNode: %v", err)
return nil, err
}
}
return iterNode, nil
}
// removeBlockNode removes the passed block node from the memory chain by
// unlinking all of its children and removing it from the node and
// dependency indices.
@ -934,6 +994,22 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, view *U
"spent transaction out information") "spent transaction out information")
} }
// No warnings about unknown rules or versions until the chain is
// current.
if b.isCurrent() {
// Warn if any unknown new rules are either about to activate or
// have already been activated.
if err := b.warnUnknownRuleActivations(node); err != nil {
return err
}
// Warn if a high enough percentage of the last blocks have
// unexpected versions.
if err := b.warnUnknownVersions(node); err != nil {
return err
}
}
// Calculate the median time for the block.
medianTime, err := b.calcPastMedianTime(node)
if err != nil {
@ -996,12 +1072,17 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, view *U
}
}
// Update the cached threshold states in the database as needed.
return b.putThresholdCaches(dbTx)
})
if err != nil {
return err
}
// Mark all modified entries in the threshold caches as flushed now that
// they have been committed to the database.
b.markThresholdCachesFlushed()
// Prune fully spent entries and mark all entries in the view unmodified // Prune fully spent entries and mark all entries in the view unmodified
// now that the modifications have been committed to the database. // now that the modifications have been committed to the database.
view.commit() view.commit()
@ -1507,17 +1588,14 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla
return true, nil
}
// isCurrent returns whether or not the chain believes it is current. Several
// factors are used to guess, but the key factors that allow the chain to
// believe it is current are:
// - Latest block height is after the latest checkpoint (if enabled)
// - Latest block has a timestamp newer than 24 hours ago
//
// This function MUST be called with the chain state lock held (for reads).
func (b *BlockChain) isCurrent() bool {
// Not current if the latest main (best) chain height is before the
// latest known good checkpoint (when checkpoints are enabled).
checkpoint := b.latestCheckpoint()
@ -1534,6 +1612,20 @@ func (b *BlockChain) IsCurrent() bool {
return !b.bestNode.timestamp.Before(minus24Hours)
}
// IsCurrent returns whether or not the chain believes it is current. Several
// factors are used to guess, but the key factors that allow the chain to
// believe it is current are:
// - Latest block height is after the latest checkpoint (if enabled)
// - Latest block has a timestamp newer than 24 hours ago
//
// This function is safe for concurrent access.
func (b *BlockChain) IsCurrent() bool {
b.chainLock.RLock()
defer b.chainLock.RUnlock()
return b.isCurrent()
}
// BestSnapshot returns information about the current best chain block and
// related state as of the current point in time. The returned instance must be
// treated as immutable since it is shared by all callers.
@ -1653,6 +1745,8 @@ func New(config *Config) (*BlockChain, error) {
orphans: make(map[chainhash.Hash]*orphanBlock),
prevOrphans: make(map[chainhash.Hash][]*orphanBlock),
blockCache: make(map[chainhash.Hash]*btcutil.Block),
warningCaches: newThresholdCaches(vbNumBits),
deploymentCaches: newThresholdCaches(chaincfg.DefinedDeployments),
}
// Initialize the chain state from the passed database. When the db
@ -1670,6 +1764,14 @@ func New(config *Config) (*BlockChain, error) {
}
}
// Initialize rule change threshold state caches from the passed
// database. When the db does not yet contain any cached information
// for a given threshold cache, the threshold states will be calculated
// using the chain state.
if err := b.initThresholdCaches(); err != nil {
return nil, err
}
log.Infof("Chain state (height %d, hash %v, totaltx %d, work %v)", log.Infof("Chain state (height %d, hash %v, totaltx %d, work %v)",
b.bestNode.height, b.bestNode.hash, b.stateSnapshot.TotalTxns, b.bestNode.height, b.bestNode.hash, b.stateSnapshot.TotalTxns,
b.bestNode.workSum) b.bestNode.workSum)


@ -11,6 +11,7 @@ import (
"math/big" "math/big"
"sort" "sort"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
@ -38,6 +39,27 @@ var (
// unspent transaction output set.
utxoSetBucketName = []byte("utxoset")
// thresholdBucketName is the name of the db bucket used to house cached
// threshold states.
thresholdBucketName = []byte("thresholdstate")
// numDeploymentsKeyName is the name of the db key used to store the
// number of saved deployment caches.
numDeploymentsKeyName = []byte("numdeployments")
// deploymentBucketName is the name of the db bucket used to house the
// cached threshold states for the actively defined rule deployments.
deploymentBucketName = []byte("deploymentcache")
// deploymentStateKeyName is the name of the db key used to store the
// deployment state associated with the threshold cache for a given rule
// deployment.
deploymentStateKeyName = []byte("deploymentstate")
// warningBucketName is the name of the db bucket used to house the
// cached threshold states for unknown rule deployments.
warningBucketName = []byte("warningcache")
// byteOrder is the preferred byte order used for serializing numeric
// fields for storage in the database.
byteOrder = binary.LittleEndian
@ -75,6 +97,13 @@ func isDeserializeErr(err error) bool {
return ok
}
// isDbBucketNotFoundErr returns whether or not the passed error is a
// database.Error with an error code of database.ErrBucketNotFound.
func isDbBucketNotFoundErr(err error) bool {
dbErr, ok := err.(database.Error)
return ok && dbErr.ErrorCode == database.ErrBucketNotFound
}
// -----------------------------------------------------------------------------
// The transaction spend journal consists of an entry for each block connected
// to the main chain which contains the transaction outputs the block spends
@ -1403,3 +1432,528 @@ func (b *BlockChain) HeightRange(startHeight, endHeight int32) ([]chainhash.Hash
})
return hashList, err
}
// -----------------------------------------------------------------------------
// The threshold state consists of individual threshold cache buckets for each
// cache id under one main threshold state bucket. Each threshold cache bucket
// contains entries keyed by the block hash for the final block in each window
// and their associated threshold states as well as the associated deployment
// parameters.
//
// The serialized value format for each cache entry keyed by hash is:
//
// <thresholdstate>
//
// Field Type Size
// threshold state uint8 1 byte
//
//
// In addition, the threshold cache buckets for deployments contain the specific
// deployment parameters they were created with. This allows the cache
// invalidation when there are any changes to their definitions.
//
// The serialized value format for the deployment parameters is:
//
// <bit number><start time><expire time>
//
// Field Type Size
// bit number uint8 1 byte
// start time uint64 8 bytes
// expire time uint64 8 bytes
//
//
// Finally, the main threshold bucket also contains the number of stored
// deployment buckets as described above.
//
// The serialized value format for the number of stored deployment buckets is:
//
// <num deployments>
//
// Field Type Size
// num deployments uint32 4 bytes
// -----------------------------------------------------------------------------
// serializeDeploymentCacheParams serializes the parameters for the passed
// deployment into a single byte slice according to the format described in
// detail above.
func serializeDeploymentCacheParams(deployment *chaincfg.ConsensusDeployment) []byte {
serialized := make([]byte, 1+8+8)
serialized[0] = deployment.BitNumber
byteOrder.PutUint64(serialized[1:], deployment.StartTime)
byteOrder.PutUint64(serialized[9:], deployment.ExpireTime)
return serialized
}
// deserializeDeploymentCacheParams deserializes the passed serialized
// deployment cache parameters into a deployment struct.
func deserializeDeploymentCacheParams(serialized []byte) (chaincfg.ConsensusDeployment, error) {
// Ensure the serialized data has enough bytes to properly deserialize
// the bit number, start time, and expire time.
if len(serialized) != 1+8+8 {
return chaincfg.ConsensusDeployment{}, database.Error{
ErrorCode: database.ErrCorruption,
Description: "corrupt deployment cache state",
}
}
var deployment chaincfg.ConsensusDeployment
deployment.BitNumber = serialized[0]
deployment.StartTime = byteOrder.Uint64(serialized[1:])
deployment.ExpireTime = byteOrder.Uint64(serialized[9:])
return deployment, nil
}
// dbPutDeploymentCacheParams uses an existing database transaction to update
// the deployment cache params with the given values.
func dbPutDeploymentCacheParams(bucket database.Bucket, deployment *chaincfg.ConsensusDeployment) error {
serialized := serializeDeploymentCacheParams(deployment)
return bucket.Put(deploymentStateKeyName, serialized)
}
// dbFetchDeploymentCacheParams uses an existing database transaction to
// retrieve the deployment parameters from the given bucket, deserialize them,
// and return the resulting deployment struct.
func dbFetchDeploymentCacheParams(bucket database.Bucket) (chaincfg.ConsensusDeployment, error) {
serialized := bucket.Get(deploymentStateKeyName)
return deserializeDeploymentCacheParams(serialized)
}
// serializeNumDeployments serializes the parameters for the passed number of
// deployments into a single byte slice according to the format described in
// detail above.
func serializeNumDeployments(numDeployments uint32) []byte {
serialized := make([]byte, 4)
byteOrder.PutUint32(serialized, numDeployments)
return serialized
}
// deserializeNumDeployments deserializes the passed serialized
// number of deployments.
func deserializeNumDeployments(serialized []byte) (uint32, error) {
if len(serialized) != 4 {
return 0, database.Error{
ErrorCode: database.ErrCorruption,
Description: "corrupt stored number of deployments",
}
}
return byteOrder.Uint32(serialized), nil
}
// dbPutNumDeployments uses an existing database transaction to update the
// number of deployments to the given value.
func dbPutNumDeployments(bucket database.Bucket, numDeployments uint32) error {
serialized := serializeNumDeployments(numDeployments)
return bucket.Put(numDeploymentsKeyName, serialized)
}
// dbFetchNumDeployments uses an existing database transaction to retrieve the
// number of deployments, deserialize it, and return the result.
func dbFetchNumDeployments(bucket database.Bucket) (uint32, error) {
// Ensure the serialized data has enough bytes to properly deserialize
// the number of stored deployments.
serialized := bucket.Get(numDeploymentsKeyName)
return deserializeNumDeployments(serialized)
}
// thresholdCacheBucket returns the serialized bucket name to use for a
// threshold cache given a prefix and an ID.
func thresholdCacheBucket(prefix []byte, id uint32) []byte {
bucketName := make([]byte, len(prefix)+4)
copy(bucketName, prefix)
byteOrder.PutUint32(bucketName[len(bucketName)-4:], id)
return bucketName
}
// dbPutThresholdState uses an existing database transaction to update or add
// the rule change threshold state for the provided block hash.
func dbPutThresholdState(bucket database.Bucket, hash chainhash.Hash, state ThresholdState) error {
// Add the block hash to threshold state mapping.
var serializedState [1]byte
serializedState[0] = byte(state)
return bucket.Put(hash[:], serializedState[:])
}
// dbPutThresholdCaches uses an existing database transaction to update the
// provided threshold state caches using the given bucket prefix.
func dbPutThresholdCaches(dbTx database.Tx, caches []thresholdStateCache, bucketPrefix []byte) error {
// Loop through each of the defined cache IDs in the provided cache and
// populate the associated bucket with all of the block hash to
// threshold state mappings for it.
cachesBucket := dbTx.Metadata().Bucket(thresholdBucketName)
for i := uint32(0); i < uint32(len(caches)); i++ {
cache := &caches[i]
if len(cache.dbUpdates) == 0 {
continue
}
cacheIDBucketName := thresholdCacheBucket(bucketPrefix, i)
bucket := cachesBucket.Bucket(cacheIDBucketName)
for blockHash, state := range cache.dbUpdates {
err := dbPutThresholdState(bucket, blockHash, state)
if err != nil {
return err
}
}
}
return nil
}
// putThresholdCaches uses an existing database transaction to update the
// threshold state caches.
func (b *BlockChain) putThresholdCaches(dbTx database.Tx) error {
err := dbPutThresholdCaches(dbTx, b.deploymentCaches,
deploymentBucketName)
if err != nil {
return err
}
return dbPutThresholdCaches(dbTx, b.warningCaches, warningBucketName)
}
// markThresholdCachesFlushed clears any pending updates to be written from
// threshold state caches. Callers are intended to call this after the pending
// updates have been successfully written to the database via the
// putThresholdCaches function and its associated database transaction is closed.
// This approach is taken to ensure the memory state is not updated until after
// the atomic database update was successful.
func (b *BlockChain) markThresholdCachesFlushed() {
for i := 0; i < len(b.deploymentCaches); i++ {
b.deploymentCaches[i].MarkFlushed()
}
for i := 0; i < len(b.warningCaches); i++ {
b.warningCaches[i].MarkFlushed()
}
}
// dbFetchThresholdCaches uses an existing database transaction to retrieve
// the threshold state caches from the provided bucket prefix into the given
// cache parameter. When the db does not contain any information for a specific
// id within that cache, that entry will simply be empty.
func dbFetchThresholdCaches(dbTx database.Tx, caches []thresholdStateCache, bucketPrefix []byte) error {
// Nothing to load if the main threshold state caches bucket
// doesn't exist.
cachesBucket := dbTx.Metadata().Bucket(thresholdBucketName)
if cachesBucket == nil {
return nil
}
// Loop through each of the cache IDs and load any saved threshold
// states.
for i := 0; i < len(caches); i++ {
// Nothing to do for this cache ID if there is no bucket for it.
cacheIDBucketName := thresholdCacheBucket(bucketPrefix, uint32(i))
cacheIDBucket := cachesBucket.Bucket(cacheIDBucketName[:])
if cacheIDBucket == nil {
continue
}
// Load all of the cached block hash to threshold state mappings
// from the bucket.
err := cacheIDBucket.ForEach(func(k, v []byte) error {
// Skip non-hash entries.
if len(k) != chainhash.HashSize {
return nil
}
var hash chainhash.Hash
copy(hash[:], k)
caches[i].entries[hash] = ThresholdState(v[0])
return nil
})
if err != nil {
return err
}
}
return nil
}
// invalidateThresholdCaches removes any threshold state caches that are no
// longer valid. This can happen if a deployment ID is changed such as when it
// is reused, or if it is reordered in the parameter definitions. It is also
// necessary for specific bits in the warning cache when deployment definitions
// are added and removed since it could change the expected block versions and
// hence potentially change the result of the warning states for that bit.
func (b *BlockChain) invalidateThresholdCaches(cachesBucket database.Bucket) error {
deployments := b.chainParams.Deployments[:]
// Remove any stored deployments that are no longer defined along with
// the warning cache associated with their bits.
numStoredDeployments, err := dbFetchNumDeployments(cachesBucket)
if err != nil {
return err
}
definedDeployments := uint32(len(deployments))
for i := definedDeployments; i < numStoredDeployments; i++ {
// Nothing to do when nothing is stored for the deployment.
deployBucketKey := thresholdCacheBucket(deploymentBucketName, i)
deployBucket := cachesBucket.Bucket(deployBucketKey)
if deployBucket == nil {
continue
}
// Load the deployment details the cache was created for from
// the database.
stored, err := dbFetchDeploymentCacheParams(deployBucket)
if err != nil {
return err
}
// Remove the warning cache for the bit associated with the old
// deployment definition.
oldBit := uint32(stored.BitNumber)
bn := thresholdCacheBucket(warningBucketName, oldBit)
err = cachesBucket.DeleteBucket(bn)
if err != nil && !isDbBucketNotFoundErr(err) {
return err
}
// Remove deployment state and cache.
err = cachesBucket.DeleteBucket(deployBucketKey)
if err != nil && !isDbBucketNotFoundErr(err) {
return err
}
log.Debugf("Removed threshold state caches for deployment %d "+
"and warning bit %d", i, oldBit)
}
// Remove any deployment caches that no longer match the associated
// deployment definition.
for i := uint32(0); i < uint32(len(deployments)); i++ {
// Remove the warning cache for the bit associated with the new
// deployment definition if nothing is already stored for the
// deployment.
deployBucketKey := thresholdCacheBucket(deploymentBucketName, i)
deployBucket := cachesBucket.Bucket(deployBucketKey)
if deployBucket == nil {
// Remove the warning cache for the bit associated with
// the new deployment definition.
newBit := uint32(deployments[i].BitNumber)
bn := thresholdCacheBucket(warningBucketName, newBit)
err = cachesBucket.DeleteBucket(bn)
if err != nil && !isDbBucketNotFoundErr(err) {
return err
}
log.Debugf("Removed threshold state cache for warning "+
"bit %d ", newBit)
continue
}
// Load the deployment details the cache was created for from
// the database, compare them against the currently defined
// deployment, and invalidate the relevant caches if they don't
// match.
stored, err := dbFetchDeploymentCacheParams(deployBucket)
if err != nil {
return err
}
if stored != deployments[i] {
// Remove deployment state and cache.
err := cachesBucket.DeleteBucket(deployBucketKey)
if err != nil && !isDbBucketNotFoundErr(err) {
return err
}
// Remove the warning cache for the bit associated with
// the new deployment definition.
newBit := uint32(deployments[i].BitNumber)
bn := thresholdCacheBucket(warningBucketName, newBit)
err = cachesBucket.DeleteBucket(bn)
if err != nil && !isDbBucketNotFoundErr(err) {
return err
}
// Remove the warning cache for the bit associated with
// the old deployment definition if it is different than
// the new one.
oldBit := uint32(stored.BitNumber)
if oldBit == newBit {
log.Debugf("Removed threshold state caches for "+
"deployment %d and warning bit %d", i,
newBit)
continue
}
bn = thresholdCacheBucket(warningBucketName, oldBit)
err = cachesBucket.DeleteBucket(bn)
if err != nil && !isDbBucketNotFoundErr(err) {
return err
}
log.Debugf("Removed threshold state caches for "+
"deployment %d and warning bits %d and %d", i,
oldBit, newBit)
}
}
return nil
}
// initThresholdCacheBuckets creates any missing buckets needed for the defined
// threshold caches and populates them with state-related details so they can
// be invalidated as needed.
func (b *BlockChain) initThresholdCacheBuckets(meta database.Bucket) error {
// Create overall bucket that houses all of the threshold caches and
// their related state as needed.
cachesBucket, err := meta.CreateBucketIfNotExists(thresholdBucketName)
if err != nil {
return err
}
// Update the number of stored deployments as needed.
definedDeployments := uint32(len(b.deploymentCaches))
storedDeployments, err := dbFetchNumDeployments(cachesBucket)
if err != nil || storedDeployments != definedDeployments {
err := dbPutNumDeployments(cachesBucket, definedDeployments)
if err != nil {
return err
}
}
// Create buckets for each of the deployment caches as needed, and
// populate the created buckets with the specific deployment details so
// that the cache(s) can be invalidated properly with future updates.
for i := uint32(0); i < definedDeployments; i++ {
name := thresholdCacheBucket(deploymentBucketName, i)
if bucket := cachesBucket.Bucket(name); bucket != nil {
continue
}
deployBucket, err := cachesBucket.CreateBucket(name)
if err != nil {
return err
}
deployment := &b.chainParams.Deployments[i]
err = dbPutDeploymentCacheParams(deployBucket, deployment)
if err != nil {
return err
}
}
// Create buckets for each of the warning caches as needed.
for i := uint32(0); i < uint32(len(b.warningCaches)); i++ {
name := thresholdCacheBucket(warningBucketName, i)
_, err := cachesBucket.CreateBucketIfNotExists(name)
if err != nil {
return err
}
}
return nil
}
// initThresholdCaches initializes the threshold state caches from the database.
// When the db does not yet contain any information for a specific threshold
// cache or a given id within that cache, it will simply be empty which will
// lead to it being calculated as needed.
func (b *BlockChain) initThresholdCaches() error {
// Create and initialize missing threshold state cache buckets and
// remove any that are no longer valid.
err := b.db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
cachesBucket := meta.Bucket(thresholdBucketName)
if cachesBucket != nil {
err := b.invalidateThresholdCaches(cachesBucket)
if err != nil {
return err
}
}
// Create all cache buckets as needed.
return b.initThresholdCacheBuckets(meta)
})
if err != nil {
return err
}
// Load the deployment caches.
err = b.db.View(func(dbTx database.Tx) error {
// Load the deployment threshold states.
err := dbFetchThresholdCaches(dbTx, b.deploymentCaches,
deploymentBucketName)
if err != nil {
return err
}
// Load the warning threshold states.
return dbFetchThresholdCaches(dbTx, b.warningCaches,
warningBucketName)
})
if err != nil {
return err
}
// Inform the user the states might take a while to recalculate if any
// of the threshold state caches aren't populated.
var showMsg bool
for i := 0; i < len(b.warningCaches); i++ {
if len(b.warningCaches[i].entries) == 0 {
showMsg = true
break
}
}
if !showMsg {
for i := 0; i < len(b.deploymentCaches); i++ {
if len(b.deploymentCaches[i].entries) == 0 {
showMsg = true
break
}
}
}
if showMsg {
log.Info("Recalculating threshold states due to definition " +
"change. This might take a while...")
}
// Initialize the warning and deployment caches by calculating the
// threshold state for each of them. This will ensure the caches are
// populated and any states that needed to be recalculated due to
// definition changes are done now.
for bit := uint32(0); bit < vbNumBits; bit++ {
checker := bitConditionChecker{bit: bit, chain: b}
cache := &b.warningCaches[bit]
_, err := b.thresholdState(b.bestNode, checker, cache)
if err != nil {
return err
}
}
for id := 0; id < len(b.chainParams.Deployments); id++ {
deployment := &b.chainParams.Deployments[id]
cache := &b.deploymentCaches[id]
checker := deploymentChecker{deployment: deployment, chain: b}
_, err := b.thresholdState(b.bestNode, checker, cache)
if err != nil {
return err
}
}
// No warnings about unknown rules or versions until the chain is
// current.
if b.isCurrent() {
// Warn if a high enough percentage of the last blocks have
// unexpected versions.
if err := b.warnUnknownVersions(b.bestNode); err != nil {
return err
}
// Warn if any unknown new rules are either about to activate or
// have already been activated.
if err := b.warnUnknownRuleActivations(b.bestNode); err != nil {
return err
}
}
// Update the cached threshold states in the database as needed.
err = b.db.Update(func(dbTx database.Tx) error {
return b.putThresholdCaches(dbTx)
})
if err != nil {
return err
}
// Mark all modified entries in the threshold caches as flushed now that
// they have been committed to the database.
b.markThresholdCachesFlushed()
return nil
}
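
As a small illustration of the bucket layout documented above, the per-cache
bucket names produced by thresholdCacheBucket are simply a prefix followed by
the cache ID encoded as a 4-byte little-endian integer (matching byteOrder).
The standalone sketch below mirrors that composition; the helper name and the
example values are for illustration only, not part of the diff.

package main

import (
    "encoding/binary"
    "fmt"
)

// exampleBucketName mirrors thresholdCacheBucket: the prefix followed by the
// cache ID serialized as a 4-byte little-endian integer.
func exampleBucketName(prefix []byte, id uint32) []byte {
    name := make([]byte, len(prefix)+4)
    copy(name, prefix)
    binary.LittleEndian.PutUint32(name[len(prefix):], id)
    return name
}

func main() {
    // Deployment cache 1 is stored under "deploymentcache" plus 0x01000000.
    fmt.Printf("%q\n", exampleBucketName([]byte("deploymentcache"), 1))
}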


@ -8,11 +8,21 @@ import (
"fmt" "fmt"
) )
// DeploymentError identifies an error that indicates a deployment ID was
// specified that does not exist.
type DeploymentError uint32
// Error returns the deployment error as a human-readable string and satisfies
// the error interface.
func (e DeploymentError) Error() string {
return fmt.Sprintf("deployment ID %d does not exist", uint32(e))
}
// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string
// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
return "assertion failed: " + string(e)


@ -95,3 +95,37 @@ func TestRuleError(t *testing.T) {
}
}
}
// TestDeploymentError tests the stringized output for the DeploymentError type.
func TestDeploymentError(t *testing.T) {
t.Parallel()
tests := []struct {
in blockchain.DeploymentError
want string
}{
{
blockchain.DeploymentError(0),
"deployment ID 0 does not exist",
},
{
blockchain.DeploymentError(10),
"deployment ID 10 does not exist",
},
{
blockchain.DeploymentError(123),
"deployment ID 123 does not exist",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.Error()
if result != test.want {
t.Errorf("Error #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}


@ -0,0 +1,322 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockchain
import (
"fmt"
"github.com/btcsuite/btcd/chaincfg/chainhash"
)
// ThresholdState defines the various threshold states used when voting on
// consensus changes.
type ThresholdState byte
// These constants are used to identify specific threshold states.
//
// NOTE: This section specifically does not use iota for the individual states
// since these values are serialized and must be stable for long-term storage.
const (
// ThresholdDefined is the first state for each deployment and is the
// state for the genesis block has by definition for all deployments.
ThresholdDefined ThresholdState = 0
// ThresholdStarted is the state for a deployment once its start time
// has been reached.
ThresholdStarted ThresholdState = 1
// ThresholdLockedIn is the state for a deployment during the retarget
// period which is after the ThresholdStarted state period and the
// number of blocks that have voted for the deployment equal or exceed
// the required number of votes for the deployment.
ThresholdLockedIn ThresholdState = 2
// ThresholdActive is the state for a deployment for all blocks after a
// retarget period in which the deployment was in the ThresholdLockedIn
// state.
ThresholdActive ThresholdState = 3
// ThresholdFailed is the state for a deployment once its expiration
// time has been reached and it did not reach the ThresholdLockedIn
// state.
ThresholdFailed ThresholdState = 4
// numThresholdsStates is the maximum number of threshold states used in
// tests.
numThresholdsStates = iota
)
// thresholdStateStrings is a map of ThresholdState values back to their
// constant names for pretty printing.
var thresholdStateStrings = map[ThresholdState]string{
ThresholdDefined: "ThresholdDefined",
ThresholdStarted: "ThresholdStarted",
ThresholdLockedIn: "ThresholdLockedIn",
ThresholdActive: "ThresholdActive",
ThresholdFailed: "ThresholdFailed",
}
// String returns the ThresholdState as a human-readable name.
func (t ThresholdState) String() string {
if s := thresholdStateStrings[t]; s != "" {
return s
}
return fmt.Sprintf("Unknown ThresholdState (%d)", int(t))
}
// thresholdConditionChecker provides a generic interface that is invoked to
// determine when a consensus rule change threshold should be changed.
type thresholdConditionChecker interface {
// BeginTime returns the unix timestamp for the median block time after
// which voting on a rule change starts (at the next window).
BeginTime() uint64
// EndTime returns the unix timestamp for the median block time after
// which an attempted rule change fails if it has not already been
// locked in or activated.
EndTime() uint64
// RuleChangeActivationThreshold is the number of blocks for which the
// condition must be true in order to lock in a rule change.
RuleChangeActivationThreshold() uint32
// MinerConfirmationWindow is the number of blocks in each threshold
// state retarget window.
MinerConfirmationWindow() uint32
// Condition returns whether or not the rule change activation condition
// has been met. This typically involves checking whether or not the
// bit associated with the condition is set, but can be more complex as
// needed.
Condition(*blockNode) (bool, error)
}
// thresholdStateCache provides a type to cache the threshold states of each
// threshold window for a set of IDs. It also keeps track of which entries have
// been modified and therefore need to be written to the database.
type thresholdStateCache struct {
dbUpdates map[chainhash.Hash]ThresholdState
entries map[chainhash.Hash]ThresholdState
}
// Lookup returns the threshold state associated with the given hash along with
// a boolean that indicates whether or not it is valid.
func (c *thresholdStateCache) Lookup(hash chainhash.Hash) (ThresholdState, bool) {
state, ok := c.entries[hash]
return state, ok
}
// Update updates the cache to contain the provided hash to threshold state
// mapping while properly tracking needed updates to flush changes to the database.
func (c *thresholdStateCache) Update(hash chainhash.Hash, state ThresholdState) {
if existing, ok := c.entries[hash]; ok && existing == state {
return
}
c.dbUpdates[hash] = state
c.entries[hash] = state
}
// MarkFlushed marks all of the current updates as flushed to the database.
// This is useful so the caller can ensure the needed database updates are not
// lost until they have successfully been written to the database.
func (c *thresholdStateCache) MarkFlushed() {
for hash := range c.dbUpdates {
delete(c.dbUpdates, hash)
}
}
// newThresholdCaches returns a new array of caches to be used when calculating
// threshold states.
func newThresholdCaches(numCaches uint32) []thresholdStateCache {
caches := make([]thresholdStateCache, numCaches)
for i := 0; i < len(caches); i++ {
caches[i] = thresholdStateCache{
entries: make(map[chainhash.Hash]ThresholdState),
dbUpdates: make(map[chainhash.Hash]ThresholdState),
}
}
return caches
}
// thresholdState returns the current rule change threshold state for the given
// node and deployment ID. The cache is used to ensure the threshold states for
// previous windows are only calculated once.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) thresholdState(prevNode *blockNode, checker thresholdConditionChecker, cache *thresholdStateCache) (ThresholdState, error) {
// The threshold state for the window that contains the genesis block is
// defined by definition.
confirmationWindow := int32(checker.MinerConfirmationWindow())
if prevNode == nil || (prevNode.height+1) < confirmationWindow {
return ThresholdDefined, nil
}
// Get the ancestor that is the last block of the previous confirmation
// window in order to get its threshold state. This can be done because
// the state is the same for all blocks within a given window.
var err error
prevNode, err = b.ancestorNode(prevNode, prevNode.height-
(prevNode.height+1)%confirmationWindow)
if err != nil {
return ThresholdFailed, err
}
// Iterate backwards through each of the previous confirmation windows
// to find the most recently cached threshold state.
var neededStates []*blockNode
for prevNode != nil {
// Nothing more to do if the state of the block is already
// cached.
if _, ok := cache.Lookup(*prevNode.hash); ok {
break
}
// The start and expiration times are based on the median block
// time, so calculate it now.
medianTime, err := b.calcPastMedianTime(prevNode)
if err != nil {
return ThresholdFailed, err
}
// The state is simply defined if the start time hasn't
// been reached yet.
if uint64(medianTime.Unix()) < checker.BeginTime() {
cache.Update(*prevNode.hash, ThresholdDefined)
break
}
// Add this node to the list of nodes that need the state
// calculated and cached.
neededStates = append(neededStates, prevNode)
// Get the ancestor that is the last block of the previous
// confirmation window.
prevNode, err = b.ancestorNode(prevNode, prevNode.height-
confirmationWindow)
if err != nil {
return ThresholdFailed, err
}
}
// Start with the threshold state for the most recent confirmation
// window that has a cached state.
state := ThresholdDefined
if prevNode != nil {
var ok bool
state, ok = cache.Lookup(*prevNode.hash)
if !ok {
return ThresholdFailed, AssertError(fmt.Sprintf(
"thresholdState: cache lookup failed for %v",
prevNode.hash))
}
}
// Since each threshold state depends on the state of the previous
// window, iterate starting from the oldest unknown window.
for neededNum := len(neededStates) - 1; neededNum >= 0; neededNum-- {
prevNode := neededStates[neededNum]
switch state {
case ThresholdDefined:
// The deployment of the rule change fails if it expires
// before it is accepted and locked in.
medianTime, err := b.calcPastMedianTime(prevNode)
if err != nil {
return ThresholdFailed, err
}
medianTimeUnix := uint64(medianTime.Unix())
if medianTimeUnix >= checker.EndTime() {
state = ThresholdFailed
break
}
// The state for the rule moves to the started state
// once its start time has been reached (and it hasn't
// already expired per the above).
if medianTimeUnix >= checker.BeginTime() {
state = ThresholdStarted
}
case ThresholdStarted:
// The deployment of the rule change fails if it expires
// before it is accepted and locked in.
medianTime, err := b.calcPastMedianTime(prevNode)
if err != nil {
return ThresholdFailed, err
}
if uint64(medianTime.Unix()) >= checker.EndTime() {
state = ThresholdFailed
break
}
// At this point, the rule change is still being voted
// on by the miners, so iterate backwards through the
// confirmation window to count all of the votes in it.
var count uint32
countNode := prevNode
for i := int32(0); i < confirmationWindow; i++ {
condition, err := checker.Condition(countNode)
if err != nil {
return ThresholdFailed, err
}
if condition {
count++
}
// Get the previous block node. This function
// is used over simply accessing countNode.parent
// directly as it will dynamically create
// previous block nodes as needed. This helps
// allow only the pieces of the chain that are
// needed to remain in memory.
countNode, err = b.getPrevNodeFromNode(countNode)
if err != nil {
return ThresholdFailed, err
}
}
// The state is locked in if the number of blocks in the
// period that voted for the rule change meets the
// activation threshold.
if count >= checker.RuleChangeActivationThreshold() {
state = ThresholdLockedIn
}
case ThresholdLockedIn:
// The new rule becomes active when its previous state
// was locked in.
state = ThresholdActive
// Nothing to do if the previous state is active or failed since
// they are both terminal states.
case ThresholdActive:
case ThresholdFailed:
}
// Update the cache to avoid recalculating the state in the
// future.
cache.Update(*prevNode.hash, state)
}
return state, nil
}
// ThresholdState returns the current rule change threshold state of the given
// deployment ID for the end of the current best chain.
//
// This function is safe for concurrent access.
func (b *BlockChain) ThresholdState(deploymentID uint32) (ThresholdState, error) {
if deploymentID >= uint32(len(b.chainParams.Deployments)) {
return ThresholdFailed, DeploymentError(deploymentID)
}
deployment := &b.chainParams.Deployments[deploymentID]
checker := deploymentChecker{deployment: deployment, chain: b}
cache := &b.deploymentCaches[deploymentID]
b.chainLock.Lock()
state, err := b.thresholdState(b.bestNode, checker, cache)
b.chainLock.Unlock()
return state, err
}
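
A brief usage sketch of the exported ThresholdState method added above. It
assumes a *blockchain.BlockChain has already been created via blockchain.New
(the setup is not shown), and the helper name is introduced for illustration
only.

package main

import (
    "fmt"

    "github.com/btcsuite/btcd/blockchain"
    "github.com/btcsuite/btcd/chaincfg"
)

// logTestDummyState queries the rule change threshold state of the test
// dummy deployment as of the end of the current best chain.
func logTestDummyState(chain *blockchain.BlockChain) {
    state, err := chain.ThresholdState(chaincfg.DeploymentTestDummy)
    if err != nil {
        fmt.Printf("unable to obtain threshold state: %v\n", err)
        return
    }
    // The String method added in this file prints names such as
    // "ThresholdDefined" or "ThresholdActive".
    fmt.Printf("test dummy deployment state: %v\n", state)
}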


@ -0,0 +1,195 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockchain
import (
"testing"
"github.com/btcsuite/btcd/chaincfg/chainhash"
)
// TestThresholdStateStringer tests the stringized output for the
// ThresholdState type.
func TestThresholdStateStringer(t *testing.T) {
t.Parallel()
tests := []struct {
in ThresholdState
want string
}{
{ThresholdDefined, "ThresholdDefined"},
{ThresholdStarted, "ThresholdStarted"},
{ThresholdLockedIn, "ThresholdLockedIn"},
{ThresholdActive, "ThresholdActive"},
{ThresholdFailed, "ThresholdFailed"},
{0xff, "Unknown ThresholdState (255)"},
}
// Detect additional threshold states that don't have the stringer added.
if len(tests)-1 != int(numThresholdsStates) {
t.Errorf("It appears a threshold statewas added without " +
"adding an associated stringer test")
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.String()
if result != test.want {
t.Errorf("String #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}
// TestThresholdStateCache ensures the threshold state cache works as intended
// including adding entries, updating existing entries, and flushing.
func TestThresholdStateCache(t *testing.T) {
t.Parallel()
tests := []struct {
name string
numEntries int
state ThresholdState
}{
{name: "2 entries defined", numEntries: 2, state: ThresholdDefined},
{name: "7 entries started", numEntries: 7, state: ThresholdStarted},
{name: "10 entries active", numEntries: 10, state: ThresholdActive},
{name: "5 entries locked in", numEntries: 5, state: ThresholdLockedIn},
{name: "3 entries failed", numEntries: 3, state: ThresholdFailed},
}
nextTest:
for _, test := range tests {
cache := &newThresholdCaches(1)[0]
for i := 0; i < test.numEntries; i++ {
var hash chainhash.Hash
hash[0] = uint8(i + 1)
// Ensure the hash isn't available in the cache already.
_, ok := cache.Lookup(hash)
if ok {
t.Errorf("Lookup (%s): has entry for hash %v",
test.name, hash)
continue nextTest
}
// Ensure hash that was added to the cache reports it's
// available and the state is the expected value.
cache.Update(hash, test.state)
state, ok := cache.Lookup(hash)
if !ok {
t.Errorf("Lookup (%s): missing entry for hash "+
"%v", test.name, hash)
continue nextTest
}
if state != test.state {
t.Errorf("Lookup (%s): state mismatch - got "+
"%v, want %v", test.name, state,
test.state)
continue nextTest
}
// Ensure the update is also added to the internal
// database updates map and its state matches.
state, ok = cache.dbUpdates[hash]
if !ok {
t.Errorf("dbUpdates (%s): missing entry for "+
"hash %v", test.name, hash)
continue nextTest
}
if state != test.state {
t.Errorf("dbUpdates (%s): state mismatch - "+
"got %v, want %v", test.name, state,
test.state)
continue nextTest
}
// Ensure flushing the cache removes all entries from
// the internal database updates map.
cache.MarkFlushed()
if len(cache.dbUpdates) != 0 {
t.Errorf("dbUpdates (%s): unflushed entries",
test.name)
continue nextTest
}
// Ensure hash is still available in the cache and the
// state is the expected value.
state, ok = cache.Lookup(hash)
if !ok {
t.Errorf("Lookup (%s): missing entry after "+
"flush for hash %v", test.name, hash)
continue nextTest
}
if state != test.state {
t.Errorf("Lookup (%s): state mismatch after "+
"flush - got %v, want %v", test.name,
state, test.state)
continue nextTest
}
// Ensure adding an existing hash with the same state
// doesn't break the existing entry and it is NOT added
// to the database updates map.
cache.Update(hash, test.state)
state, ok = cache.Lookup(hash)
if !ok {
t.Errorf("Lookup (%s): missing entry after "+
"second add for hash %v", test.name,
hash)
continue nextTest
}
if state != test.state {
t.Errorf("Lookup (%s): state mismatch after "+
"second add - got %v, want %v",
test.name, state, test.state)
continue nextTest
}
if len(cache.dbUpdates) != 0 {
t.Errorf("dbUpdates (%s): unflushed entries "+
"after duplicate add", test.name)
continue nextTest
}
// Ensure adding an existing hash with a different state
// updates the existing entry.
newState := ThresholdFailed
if newState == test.state {
newState = ThresholdStarted
}
cache.Update(hash, newState)
state, ok = cache.Lookup(hash)
if !ok {
t.Errorf("Lookup (%s): missing entry after "+
"state change for hash %v", test.name,
hash)
continue nextTest
}
if state != newState {
t.Errorf("Lookup (%s): state mismatch after "+
"state change - got %v, want %v",
test.name, state, newState)
continue nextTest
}
// Ensure the update is also added to the internal
// database updates map and its state matches.
state, ok = cache.dbUpdates[hash]
if !ok {
t.Errorf("dbUpdates (%s): missing entry after "+
"state change for hash %v", test.name,
hash)
continue nextTest
}
if state != newState {
t.Errorf("dbUpdates (%s): state mismatch "+
"after state change - got %v, want %v",
test.name, state, newState)
continue nextTest
}
}
}
}

blockchain/versionbits.go

@ -0,0 +1,316 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockchain
import (
"math"
"github.com/btcsuite/btcd/chaincfg"
)
const (
// vbLegacyBlockVersion is the highest legacy block version before the
// version bits scheme became active.
vbLegacyBlockVersion = 4
// vbTopBits defines the bits to set in the version to signal that the
// version bits scheme is being used.
vbTopBits = 0x20000000
// vbTopMask is the bitmask to use to determine whether or not the
// version bits scheme is in use.
vbTopMask = 0xe0000000
// vbNumBits is the total number of bits available for use with the
// version bits scheme.
vbNumBits = 29
// unknownVerNumToCheck is the number of previous blocks to consider
// when checking for a threshold of unknown block versions for the
// purposes of warning the user.
unknownVerNumToCheck = 100
// unknownVerWarnNum is the threshold of previous blocks that have an
// unknown version to use for the purposes of warning the user.
unknownVerWarnNum = unknownVerNumToCheck / 2
)
// bitConditionChecker provides a thresholdConditionChecker which can be used to
// test whether or not a specific bit is set when it's not supposed to be
// according to the expected version based on the known deployments and the
// current state of the chain. This is useful for detecting and warning about
// unknown rule activations.
type bitConditionChecker struct {
bit uint32
chain *BlockChain
}
// Ensure the bitConditionChecker type implements the thresholdConditionChecker
// interface.
var _ thresholdConditionChecker = bitConditionChecker{}
// BeginTime returns the unix timestamp for the median block time after which
// voting on a rule change starts (at the next window).
//
// Since this implementation checks for unknown rules, it returns 0 so the rule
// is always treated as active.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) BeginTime() uint64 {
return 0
}
// EndTime returns the unix timestamp for the median block time after which an
// attempted rule change fails if it has not already been locked in or
// activated.
//
// Since this implementation checks for unknown rules, it returns the maximum
// possible timestamp so the rule is always treated as active.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) EndTime() uint64 {
return math.MaxUint64
}
// RuleChangeActivationThreshold is the number of blocks for which the condition
// must be true in order to lock in a rule change.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) RuleChangeActivationThreshold() uint32 {
return c.chain.chainParams.RuleChangeActivationThreshold
}
// MinerConfirmationWindow is the number of blocks in each threshold state
// retarget window.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) MinerConfirmationWindow() uint32 {
return c.chain.chainParams.MinerConfirmationWindow
}
// Condition returns true when the specific bit associated with the checker is
// set and it's not supposed to be according to the expected version based on
// the known deployments and the current state of the chain.
//
// This function MUST be called with the chain state lock held (for writes).
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) Condition(node *blockNode) (bool, error) {
conditionMask := uint32(1) << c.bit
version := uint32(node.version)
if version&vbTopMask != vbTopBits {
return false, nil
}
if version&conditionMask == 0 {
return false, nil
}
// Get the previous block node. This function is used over simply
// accessing node.parent directly as it will dynamically create previous
// block nodes as needed. This helps allow only the pieces of the chain
// that are needed to remain in memory.
prevNode, err := c.chain.getPrevNodeFromNode(node)
if err != nil {
return false, err
}
expectedVersion, err := c.chain.calcNextBlockVersion(prevNode)
if err != nil {
return false, err
}
return uint32(expectedVersion)&conditionMask == 0, nil
}
// deploymentChecker provides a thresholdConditionChecker which can be used to
// test a specific deployment rule. This is required for properly detecting
// and activating consensus rule changes.
type deploymentChecker struct {
deployment *chaincfg.ConsensusDeployment
chain *BlockChain
}
// Ensure the deploymentChecker type implements the thresholdConditionChecker
// interface.
var _ thresholdConditionChecker = deploymentChecker{}
// BeginTime returns the unix timestamp for the median block time after which
// voting on a rule change starts (at the next window).
//
// This implementation returns the value defined by the specific deployment the
// checker is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) BeginTime() uint64 {
return c.deployment.StartTime
}
// EndTime returns the unix timestamp for the median block time after which an
// attempted rule change fails if it has not already been locked in or
// activated.
//
// This implementation returns the value defined by the specific deployment the
// checker is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) EndTime() uint64 {
return c.deployment.ExpireTime
}
// RuleChangeActivationThreshold is the number of blocks for which the condition
// must be true in order to lock in a rule change.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) RuleChangeActivationThreshold() uint32 {
return c.chain.chainParams.RuleChangeActivationThreshold
}
// MinerConfirmationWindow is the number of blocks in each threshold state
// retarget window.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) MinerConfirmationWindow() uint32 {
return c.chain.chainParams.MinerConfirmationWindow
}
// Condition returns true when the specific bit defined by the deployment
// associated with the checker is set.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) Condition(node *blockNode) (bool, error) {
conditionMask := uint32(1) << c.deployment.BitNumber
version := uint32(node.version)
return (version&vbTopMask == vbTopBits) && (version&conditionMask != 0),
nil
}
// calcNextBlockVersion calculates the expected version of the block after the
// passed previous block node based on the state of started and locked in
// rule change deployments.
//
// This function differs from the exported CalcNextBlockVersion in that the
// exported version uses the current best chain as the previous block node
// while this function accepts any block node.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) calcNextBlockVersion(prevNode *blockNode) (int32, error) {
// Set the appropriate bits for each actively defined rule deployment
// that is either in the process of being voted on, or locked in for the
// activation at the next threshold window change.
expectedVersion := uint32(vbTopBits)
for id := 0; id < len(b.chainParams.Deployments); id++ {
deployment := &b.chainParams.Deployments[id]
cache := &b.deploymentCaches[id]
checker := deploymentChecker{deployment: deployment, chain: b}
state, err := b.thresholdState(prevNode, checker, cache)
if err != nil {
return 0, err
}
if state == ThresholdStarted || state == ThresholdLockedIn {
expectedVersion |= uint32(1) << deployment.BitNumber
}
}
return int32(expectedVersion), nil
}
// CalcNextBlockVersion calculates the expected version of the block after the
// end of the current best chain based on the state of started and locked in
// rule change deployments.
//
// This function is safe for concurrent access.
func (b *BlockChain) CalcNextBlockVersion() (int32, error) {
b.chainLock.Lock()
version, err := b.calcNextBlockVersion(b.bestNode)
b.chainLock.Unlock()
return version, err
}
// warnUnknownRuleActivations displays a warning when any unknown new rules are
// either about to activate or have been activated. This will only happen once
// when new rules have been activated and every block for those about to be
// activated.
//
// This function MUST be called with the chain state lock held (for writes)
func (b *BlockChain) warnUnknownRuleActivations(node *blockNode) error {
// Warn if any unknown new rules are either about to activate or have
// already been activated.
for bit := uint32(0); bit < vbNumBits; bit++ {
checker := bitConditionChecker{bit: bit, chain: b}
cache := &b.warningCaches[bit]
state, err := b.thresholdState(node, checker, cache)
if err != nil {
return err
}
switch state {
case ThresholdActive:
if !b.unknownRulesWarned {
log.Warnf("Unknown new rules activated (bit %d)",
bit)
b.unknownRulesWarned = true
}
case ThresholdLockedIn:
window := int32(checker.MinerConfirmationWindow())
activationHeight := window - (node.height % window)
log.Warnf("Unknown new rules are about to activate in "+
"%d blocks (bit %d)", bit, activationHeight)
}
}
return nil
}
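For intuition on the activationHeight calculation in the locked-in case, a small sketch with an arbitrary example height (the 2016-block window matches the mainnet parameters added later in this commit):

package main

import "fmt"

func main() {
	const window = int32(2016) // mainnet miner confirmation window
	height := int32(403225)    // arbitrary example height

	// Blocks remaining until the next retarget window boundary, which is
	// where a locked-in deployment becomes active.
	activationHeight := window - (height % window)
	fmt.Println(activationHeight) // 1991
}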
// warnUnknownVersions logs a warning if a high enough percentage of the last
// unknownVerNumToCheck blocks have unexpected versions.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) warnUnknownVersions(node *blockNode) error {
// Nothing to do if already warned.
if b.unknownVersionsWarned {
return nil
}
// Warn if enough previous blocks have unexpected versions.
numUpgraded := uint32(0)
for i := uint32(0); i < unknownVerNumToCheck && node != nil; i++ {
expectedVersion, err := b.calcNextBlockVersion(node.parent)
if err != nil {
return err
}
if expectedVersion > vbLegacyBlockVersion &&
(node.version & ^expectedVersion) != 0 {
numUpgraded++
}
// Get the previous block node. This function is used over
// simply accessing node.parent directly as it will dynamically
// create previous block nodes as needed. This helps allow only
// the pieces of the chain that are needed to remain in memory.
node, err = b.getPrevNodeFromNode(node)
if err != nil {
return err
}
}
if numUpgraded > unknownVerWarnNum {
log.Warn("Unknown block versions are being mined, so new " +
"rules might be in effect. Are you running the " +
"latest version of the software?")
b.unknownVersionsWarned = true
}
return nil
}
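A sketch of the unexpected-version test used in the loop above, with illustrative values only; vbLegacyBlockVersion is assumed to be 4, the final pre-BIP0009 block version:

package main

import "fmt"

const vbLegacyBlockVersion = 4 // assumed value of the legacy block version

// unexpected reports whether a block's version sets any bit that the
// expected version (as computed by calcNextBlockVersion) does not.
func unexpected(version, expectedVersion int32) bool {
	return expectedVersion > vbLegacyBlockVersion &&
		(version & ^expectedVersion) != 0
}

func main() {
	// Expected version signals only bit 28 on top of the 001 top bits.
	expected := int32(0x30000000)

	fmt.Println(unexpected(0x30000000, expected)) // false: exactly what is expected
	fmt.Println(unexpected(0x30000002, expected)) // true: unknown bit 1 is also set
}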

View file

@ -6,6 +6,7 @@ package chaincfg
import (
"errors"
"math"
"math/big"
"time"
@ -60,6 +61,37 @@ type DNSSeed struct {
HasFiltering bool
}
// ConsensusDeployment defines details related to a specific consensus rule
// change that is voted in. This is part of BIP0009.
type ConsensusDeployment struct {
// BitNumber defines the specific bit number within the block version
// this particular soft-fork deployment refers to.
BitNumber uint8
// StartTime is the median block time after which voting on the
// deployment starts.
StartTime uint64
// ExpireTime is the median block time after which the attempted
// deployment expires.
ExpireTime uint64
}
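A hedged illustration of how a deployment definition might look; the bit number and times here are made up and only the field layout comes from the struct above (this assumes the chaincfg package as modified by this commit):

package main

import (
	"fmt"
	"math"

	"github.com/btcsuite/btcd/chaincfg"
)

func main() {
	// Hypothetical deployment on bit 1 that is always available for vote
	// and never expires, in the style of the test network parameters below.
	dep := chaincfg.ConsensusDeployment{
		BitNumber:  1,
		StartTime:  0,             // always available for vote
		ExpireTime: math.MaxInt64, // never expires
	}
	fmt.Printf("bit=%d start=%d expire=%d\n", dep.BitNumber, dep.StartTime, dep.ExpireTime)
}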
// Constants that define the deployment offset in the deployments field of the
// parameters for each deployment. This is useful to be able to get the details
// of a specific deployment by name.
const (
// DeploymentTestDummy defines the rule change deployment ID for testing
// purposes.
DeploymentTestDummy = iota
// NOTE: DefinedDeployments must always come last since it is used to
// determine how many defined deployments there currently are.
// DefinedDeployments is the number of currently defined deployments.
DefinedDeployments
)
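The offset constants are intended to index the Deployments array directly; a short sketch of that lookup, again assuming the chaincfg package as modified by this commit:

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg"
)

func main() {
	// Fetch the test dummy deployment from the main network parameters by
	// its well-known offset.
	dep := chaincfg.MainNetParams.Deployments[chaincfg.DeploymentTestDummy]
	fmt.Printf("bit=%d start=%d expire=%d\n", dep.BitNumber, dep.StartTime, dep.ExpireTime)
}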
// Params defines a Bitcoin network by its parameters. These parameters may be
// used by Bitcoin applications to differentiate networks as well as addresses
// and keys for one network from those intended for use on another network.
@ -143,6 +175,23 @@ type Params struct {
// The number of nodes to check. This is part of BIP0034.
BlockUpgradeNumToCheck uint64
// These fields are related to voting on consensus rule changes as
// defined by BIP0009.
//
// RuleChangeActivationThreshold is the number of blocks in a threshold
// state retarget window for which a positive vote for a rule change
// must be cast in order to lock in a rule change. It should typically
// be 95% for the main network and 75% for test networks.
//
// MinerConfirmationWindow is the number of blocks in each threshold
// state retarget window.
//
// Deployments define the specific consensus rule changes to be voted
// on.
RuleChangeActivationThreshold uint32
MinerConfirmationWindow uint32
Deployments [DefinedDeployments]ConsensusDeployment
// Mempool parameters
RelayNonStdTxs bool
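The comments on RuleChangeActivationThreshold describe it as a percentage of the window; a quick sanity check of the threshold/window pairs used for each network below:

package main

import "fmt"

func main() {
	// threshold, window, and the fraction of the window the threshold represents
	fmt.Println(1916, 2016, float64(1916)/2016) // mainnet: ~0.95
	fmt.Println(108, 144, float64(108)/144)     // regression test: 0.75
	fmt.Println(1512, 2016, float64(1512)/2016) // testnet3: 0.75
	fmt.Println(75, 100, float64(75)/100)       // simnet: 0.75
}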
@ -221,6 +270,20 @@ var MainNetParams = Params{
BlockRejectNumRequired: 950,
BlockUpgradeNumToCheck: 1000,
// Consensus rule change deployments.
//
// The miner confirmation window is defined as:
// target proof of work timespan / target proof of work spacing
RuleChangeActivationThreshold: 1916, // 95% of MinerConfirmationWindow
MinerConfirmationWindow: 2016, //
Deployments: [DefinedDeployments]ConsensusDeployment{
DeploymentTestDummy: {
BitNumber: 28,
StartTime: 1199145601, // January 1, 2008 UTC
ExpireTime: 1230767999, // December 31, 2008 UTC
},
},
// Mempool parameters
RelayNonStdTxs: false,
@ -274,6 +337,20 @@ var RegressionNetParams = Params{
BlockRejectNumRequired: 950,
BlockUpgradeNumToCheck: 1000,
// Consensus rule change deployments.
//
// The miner confirmation window is defined as:
// target proof of work timespan / target proof of work spacing
RuleChangeActivationThreshold: 108, // 75% of MinerConfirmationWindow
MinerConfirmationWindow: 144,
Deployments: [DefinedDeployments]ConsensusDeployment{
DeploymentTestDummy: {
BitNumber: 28,
StartTime: 0, // Always available for vote
ExpireTime: math.MaxInt64, // Never expires
},
},
// Mempool parameters
RelayNonStdTxs: true,
@ -334,6 +411,20 @@ var TestNet3Params = Params{
BlockRejectNumRequired: 75,
BlockUpgradeNumToCheck: 100,
// Consensus rule change deployments.
//
// The miner confirmation window is defined as:
// target proof of work timespan / target proof of work spacing
RuleChangeActivationThreshold: 1512, // 75% of MinerConfirmationWindow
MinerConfirmationWindow: 2016,
Deployments: [DefinedDeployments]ConsensusDeployment{
DeploymentTestDummy: {
BitNumber: 28,
StartTime: 1199145601, // January 1, 2008 UTC
ExpireTime: 1230767999, // December 31, 2008 UTC
},
},
// Mempool parameters
RelayNonStdTxs: true,
@ -391,6 +482,20 @@ var SimNetParams = Params{
BlockRejectNumRequired: 75,
BlockUpgradeNumToCheck: 100,
// Consensus rule change deployments.
//
// The miner confirmation window is defined as:
// target proof of work timespan / target proof of work spacing
RuleChangeActivationThreshold: 75, // 75% of MinerConfirmationWindow
MinerConfirmationWindow: 100,
Deployments: [DefinedDeployments]ConsensusDeployment{
DeploymentTestDummy: {
BitNumber: 28,
StartTime: 0, // Always available for vote
ExpireTime: math.MaxInt64, // Never expires
},
},
// Mempool parameters
RelayNonStdTxs: true,

View file

@ -22,14 +22,6 @@ const (
// transaction to be considered high priority.
MinHighPriority = btcutil.SatoshiPerBitcoin * 144.0 / 250
// generatedBlockVersion is the version of the block being generated.
// It is defined as a constant here rather than using the
// wire.BlockVersion constant since a change in the block version
// will require changes to the generated block. Using the wire constant
// for generated block version could allow creation of invalid blocks
// for the updated version.
generatedBlockVersion = 4
// blockHeaderOverhead is the max number of bytes it takes to serialize
// a block header and max possible transaction count.
blockHeaderOverhead = wire.MaxBlockHeaderPayload + wire.MaxVarIntPayload
@ -766,11 +758,18 @@ mempoolLoop:
return nil, err
}
// Calculate the next expected block version based on the state of the
// rule change deployments.
nextBlockVersion, err := g.chain.CalcNextBlockVersion()
if err != nil {
return nil, err
}
// Create a new block ready to be solved.
merkles := blockchain.BuildMerkleTreeStore(blockTxns)
var msgBlock wire.MsgBlock
msgBlock.Header = wire.BlockHeader{
- Version: generatedBlockVersion,
+ Version: nextBlockVersion,
PrevBlock: *prevHash,
MerkleRoot: *merkles[len(merkles)-1],
Timestamp: ts,

View file

@ -454,7 +454,8 @@ func TestPeerListeners(t *testing.T) {
},
{
"OnBlock",
- wire.NewMsgBlock(wire.NewBlockHeader(&chainhash.Hash{}, &chainhash.Hash{}, 1, 1)),
+ wire.NewMsgBlock(wire.NewBlockHeader(1,
+ &chainhash.Hash{}, &chainhash.Hash{}, 1, 1)),
},
{
"OnInv",
@ -498,7 +499,8 @@ func TestPeerListeners(t *testing.T) {
},
{
"OnMerkleBlock",
- wire.NewMsgMerkleBlock(wire.NewBlockHeader(&chainhash.Hash{}, &chainhash.Hash{}, 1, 1)),
+ wire.NewMsgMerkleBlock(wire.NewBlockHeader(1,
+ &chainhash.Hash{}, &chainhash.Hash{}, 1, 1)),
},
{
// only one version message is allowed
// only one verack message is allowed

View file

@ -419,7 +419,7 @@ func BenchmarkDecodeHeaders(b *testing.B) {
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
- m.AddBlockHeader(NewBlockHeader(hash, hash, 0, uint32(i)))
+ m.AddBlockHeader(NewBlockHeader(1, hash, hash, 0, uint32(i)))
}
// Serialize it so the bytes are available to test the decode below.
@ -565,7 +565,7 @@ func BenchmarkDecodeMerkleBlock(b *testing.B) {
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
- m.Header = *NewBlockHeader(hash, hash, 0, uint32(10000))
+ m.Header = *NewBlockHeader(1, hash, hash, 0, uint32(10000))
for i := 0; i < 105; i++ {
hash, err := chainhash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {

View file

@ -12,9 +12,6 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/chaincfg/chainhash"
) )
// BlockVersion is the current latest supported block version.
const BlockVersion = 4
// MaxBlockHeaderPayload is the maximum number of bytes a block header can be.
// Version 4 bytes + Timestamp 4 bytes + Bits 4 bytes + Nonce 4 bytes +
// PrevBlock and MerkleRoot hashes.
@ -95,16 +92,16 @@ func (h *BlockHeader) Serialize(w io.Writer) error {
return writeBlockHeader(w, 0, h)
}
- // NewBlockHeader returns a new BlockHeader using the provided previous block
- // hash, merkle root hash, difficulty bits, and nonce used to generate the
+ // NewBlockHeader returns a new BlockHeader using the provided version, previous
+ // block hash, merkle root hash, difficulty bits, and nonce used to generate the
// block with defaults for the remaining fields.
- func NewBlockHeader(prevHash *chainhash.Hash, merkleRootHash *chainhash.Hash,
+ func NewBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash,
bits uint32, nonce uint32) *BlockHeader {
// Limit the timestamp to one second precision since the protocol
// doesn't support better.
return &BlockHeader{
- Version: BlockVersion,
+ Version: version,
PrevBlock: *prevHash,
MerkleRoot: *merkleRootHash,
Timestamp: time.Unix(time.Now().Unix(), 0),
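With the version now an explicit parameter, callers construct headers along these lines (a sketch assuming the updated wire API from this commit; the values are arbitrary):

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// The block version must now be supplied explicitly; the old
	// wire.BlockVersion default no longer exists.
	prevHash := &chainhash.Hash{}
	merkleRoot := &chainhash.Hash{}
	bh := wire.NewBlockHeader(4, prevHash, merkleRoot, 0x1d00ffff, 0)
	fmt.Println(bh.Version, bh.Bits, bh.Nonce)
}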

View file

@ -24,7 +24,7 @@ func TestBlockHeader(t *testing.T) {
hash := mainNetGenesisHash
merkleHash := mainNetGenesisMerkleRoot
bits := uint32(0x1d00ffff)
- bh := NewBlockHeader(&hash, &merkleHash, bits, nonce)
+ bh := NewBlockHeader(1, &hash, &merkleHash, bits, nonce)
// Ensure we get the same data back out.
if !bh.PrevBlock.IsEqual(&hash) {

View file

@ -66,7 +66,7 @@ func TestMessage(t *testing.T) {
msgFilterAdd := NewMsgFilterAdd([]byte{0x01})
msgFilterClear := NewMsgFilterClear()
msgFilterLoad := NewMsgFilterLoad([]byte{0x01}, 10, 0, BloomUpdateNone)
- bh := NewBlockHeader(&chainhash.Hash{}, &chainhash.Hash{}, 0, 0)
+ bh := NewBlockHeader(1, &chainhash.Hash{}, &chainhash.Hash{}, 0, 0)
msgMerkleBlock := NewMsgMerkleBlock(bh)
msgReject := NewMsgReject("block", RejectDuplicate, "duplicate block")

View file

@ -24,7 +24,7 @@ func TestBlock(t *testing.T) {
merkleHash := &blockOne.Header.MerkleRoot
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
- bh := NewBlockHeader(prevHash, merkleHash, bits, nonce)
+ bh := NewBlockHeader(1, prevHash, merkleHash, bits, nonce)
// Ensure the command is expected value.
wantCmd := "block"

View file

@ -66,7 +66,7 @@ func TestHeadersWire(t *testing.T) {
merkleHash := blockOne.Header.MerkleRoot
bits := uint32(0x1d00ffff)
nonce := uint32(0x9962e301)
- bh := NewBlockHeader(&hash, &merkleHash, bits, nonce)
+ bh := NewBlockHeader(1, &hash, &merkleHash, bits, nonce)
bh.Version = blockOne.Header.Version
bh.Timestamp = blockOne.Header.Timestamp
@ -223,7 +223,7 @@ func TestHeadersWireErrors(t *testing.T) {
merkleHash := blockOne.Header.MerkleRoot
bits := uint32(0x1d00ffff)
nonce := uint32(0x9962e301)
- bh := NewBlockHeader(&hash, &merkleHash, bits, nonce)
+ bh := NewBlockHeader(1, &hash, &merkleHash, bits, nonce)
bh.Version = blockOne.Header.Version
bh.Timestamp = blockOne.Header.Timestamp
@ -260,7 +260,7 @@ func TestHeadersWireErrors(t *testing.T) {
// Intentionally invalid block header that has a transaction count used
// to force errors.
- bhTrans := NewBlockHeader(&hash, &merkleHash, bits, nonce)
+ bhTrans := NewBlockHeader(1, &hash, &merkleHash, bits, nonce)
bhTrans.Version = blockOne.Header.Version
bhTrans.Timestamp = blockOne.Header.Timestamp

View file

@ -25,7 +25,7 @@ func TestMerkleBlock(t *testing.T) {
merkleHash := &blockOne.Header.MerkleRoot
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
- bh := NewBlockHeader(prevHash, merkleHash, bits, nonce)
+ bh := NewBlockHeader(1, prevHash, merkleHash, bits, nonce)
// Ensure the command is expected value.
wantCmd := "merkleblock"
@ -117,7 +117,7 @@ func TestMerkleBlockCrossProtocol(t *testing.T) {
merkleHash := &blockOne.Header.MerkleRoot
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
- bh := NewBlockHeader(prevHash, merkleHash, bits, nonce)
+ bh := NewBlockHeader(1, prevHash, merkleHash, bits, nonce)
msg := NewMsgMerkleBlock(bh)