btcd/blockchain/difficulty.go

// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockchain

import (
	"math/big"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
)

var (
	// bigOne is 1 represented as a big.Int. It is defined here to avoid
	// the overhead of creating it multiple times.
	bigOne = big.NewInt(1)

	// oneLsh256 is 1 shifted left 256 bits. It is defined here to avoid
	// the overhead of creating it multiple times.
	oneLsh256 = new(big.Int).Lsh(bigOne, 256)
)

// HashToBig converts a chainhash.Hash into a big.Int that can be used to
// perform math comparisons.
func HashToBig(hash *chainhash.Hash) *big.Int {
	// A Hash is in little-endian, but the big package wants the bytes in
	// big-endian, so reverse them.
	buf := *hash
	blen := len(buf)
	for i := 0; i < blen/2; i++ {
		buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i]
	}

	return new(big.Int).SetBytes(buf[:])
}
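
// powCheckSketch is an illustrative sketch of how HashToBig is typically
// paired with CompactToBig: a hash meets the proof-of-work requirement when,
// interpreted as a big-endian integer, it is less than or equal to the target
// encoded by the difficulty bits. The helper name and signature here are
// illustrative assumptions only.
func powCheckSketch(blockHash *chainhash.Hash, bits uint32) bool {
	// Decode the compact difficulty bits into the full 256-bit target.
	target := CompactToBig(bits)

	// The hash satisfies the target when it is numerically no greater
	// than the target value.
	return HashToBig(blockHash).Cmp(target) <= 0
}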

// CompactToBig converts a compact representation of a whole number N, encoded
// in an unsigned 32-bit number, to a big.Int. The representation is similar
// to IEEE754 floating point numbers.
//
// Like IEEE754 floating point, there are three basic components: the sign,
// the exponent, and the mantissa. They are broken out as follows:
//
//	* the most significant 8 bits represent the unsigned base 256 exponent
//	* bit 23 (the 24th bit) represents the sign bit
//	* the least significant 23 bits represent the mantissa
//
//	-------------------------------------------------
//	|   Exponent     |    Sign    |    Mantissa     |
//	-------------------------------------------------
//	| 8 bits [31-24] | 1 bit [23] | 23 bits [22-00] |
//	-------------------------------------------------
//
// The formula to calculate N is:
//
//	N = (-1)^sign * mantissa * 256^(exponent-3)
//
// This compact form is only used in bitcoin to encode unsigned 256-bit numbers
// which represent difficulty targets, thus there really is not a need for a
// sign bit, but it is implemented here to stay consistent with bitcoind.
func CompactToBig(compact uint32) *big.Int {
	// Extract the mantissa, sign bit, and exponent.
	mantissa := compact & 0x007fffff
	isNegative := compact&0x00800000 != 0
	exponent := uint(compact >> 24)

	// Since the base for the exponent is 256, the exponent can be treated
	// as the number of bytes to represent the full 256-bit number. So,
	// treat the exponent as the number of bytes and shift the mantissa
	// right or left accordingly. This is equivalent to:
	// N = mantissa * 256^(exponent-3)
	var bn *big.Int
	if exponent <= 3 {
		mantissa >>= 8 * (3 - exponent)
		bn = big.NewInt(int64(mantissa))
	} else {
		bn = big.NewInt(int64(mantissa))
		bn.Lsh(bn, 8*(exponent-3))
	}

	// Make it negative if the sign bit is set.
	if isNegative {
		bn = bn.Neg(bn)
	}

	return bn
}
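
// genesisTargetSketch is an illustrative sketch (helper name assumed, not
// part of the package API) that decodes the well-known mainnet difficulty
// bits 0x1d00ffff: the exponent is 0x1d (29), the sign bit is clear, and the
// mantissa is 0x00ffff, so the resulting target is 0xffff * 256^(29-3).
func genesisTargetSketch() *big.Int {
	return CompactToBig(0x1d00ffff)
}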

// BigToCompact converts a whole number N to a compact representation using
// an unsigned 32-bit number. The compact representation only provides 23 bits
// of precision, so values larger than (2^23 - 1) only encode the most
// significant digits of the number. See CompactToBig for details.
func BigToCompact(n *big.Int) uint32 {
	// No need to do any work if it's zero.
	if n.Sign() == 0 {
		return 0
	}

	// Since the base for the exponent is 256, the exponent can be treated
	// as the number of bytes. So, shift the number right or left
	// accordingly. This is equivalent to:
	// mantissa = mantissa / 256^(exponent-3)
	var mantissa uint32
	exponent := uint(len(n.Bytes()))
	if exponent <= 3 {
		mantissa = uint32(n.Bits()[0])
		mantissa <<= 8 * (3 - exponent)
	} else {
		// Use a copy to avoid modifying the caller's original number.
		tn := new(big.Int).Set(n)
		mantissa = uint32(tn.Rsh(tn, 8*(exponent-3)).Bits()[0])
	}

	// When the mantissa already has the sign bit set, the number is too
	// large to fit into the available 23-bits, so divide the number by 256
	// and increment the exponent accordingly.
	if mantissa&0x00800000 != 0 {
		mantissa >>= 8
		exponent++
	}

	// Pack the exponent, sign bit, and mantissa into an unsigned 32-bit
	// int and return it.
	compact := uint32(exponent<<24) | mantissa
	if n.Sign() < 0 {
		compact |= 0x00800000
	}
	return compact
}
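
// compactRoundTripSketch is an illustrative sketch (helper name assumed) of
// the sign-bit normalization above: encoding 128 (0x80) would initially
// yield a mantissa of 0x800000 with bit 23 set, so BigToCompact shifts the
// mantissa down to 0x008000 and bumps the exponent, producing 0x02008000,
// which CompactToBig decodes back to 128.
func compactRoundTripSketch() bool {
	compact := BigToCompact(big.NewInt(128)) // 0x02008000
	return CompactToBig(compact).Int64() == 128
}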

// CalcWork calculates a work value from difficulty bits. Bitcoin increases
// the difficulty for generating a block by decreasing the value which the
// generated hash must be less than. This difficulty target is stored in each
// block header using a compact representation as described in the
// documentation for CompactToBig. The main chain is selected by choosing the
// chain that has the most proof of work (highest difficulty). Since a lower
// target difficulty value equates to higher actual difficulty, the work value
// which will be accumulated must be the inverse of the difficulty. Also, in
// order to avoid potential division by zero and really small floating point
// numbers, the result adds 1 to the denominator and multiplies the numerator
// by 2^256.
func CalcWork(bits uint32) *big.Int {
	// Return a work value of zero if the passed difficulty bits represent
	// a negative number. Note this should not happen in practice with
	// valid blocks, but an invalid block could trigger it.
	difficultyNum := CompactToBig(bits)
	if difficultyNum.Sign() <= 0 {
		return big.NewInt(0)
	}

	// (1 << 256) / (difficultyNum + 1)
	denominator := new(big.Int).Add(difficultyNum, bigOne)
	return new(big.Int).Div(oneLsh256, denominator)
}
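
// chainWorkSketch is an illustrative sketch (helper name and parameter are
// assumed) of how per-block work values from CalcWork could be accumulated:
// summing the work contributed by every header in a chain yields the total
// proof of work used to compare competing chains.
func chainWorkSketch(headerBits []uint32) *big.Int {
	totalWork := big.NewInt(0)
	for _, bits := range headerBits {
		// Each header contributes 2^256 / (target + 1) units of work.
		totalWork.Add(totalWork, CalcWork(bits))
	}
	return totalWork
}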

// calcEasiestDifficulty calculates the easiest possible difficulty that a block
// can have given starting difficulty bits and a duration. It is mainly used to
// verify that claimed proof of work by a block is sane as compared to a
// known good checkpoint.
func (b *BlockChain) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 {
	// Convert types used in the calculations below.
	durationVal := int64(duration / time.Second)
	adjustmentFactor := big.NewInt(b.chainParams.RetargetAdjustmentFactor)

	// The test network rules allow minimum difficulty blocks after more
	// than twice the desired amount of time needed to generate a block has
	// elapsed.
	if b.chainParams.ReduceMinDifficulty {
		reductionTime := int64(b.chainParams.MinDiffReductionTime /
			time.Second)
		if durationVal > reductionTime {
			return b.chainParams.PowLimitBits
		}
	}

	// Since easier difficulty equates to higher numbers, the easiest
	// difficulty for a given duration is the largest value possible given
	// the number of retargets for the duration and starting difficulty
	// multiplied by the max adjustment factor.
	newTarget := CompactToBig(bits)
	for durationVal > 0 && newTarget.Cmp(b.chainParams.PowLimit) < 0 {
		newTarget.Mul(newTarget, adjustmentFactor)
		durationVal -= b.maxRetargetTimespan
	}

	// Limit new value to the proof of work limit.
	if newTarget.Cmp(b.chainParams.PowLimit) > 0 {
		newTarget.Set(b.chainParams.PowLimit)
	}

	return BigToCompact(newTarget)
}

// findPrevTestNetDifficulty returns the difficulty of the previous block which
// did not have the special testnet minimum difficulty rule applied.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) findPrevTestNetDifficulty(startNode *blockNode) uint32 {
	// Search backwards through the chain for the last block without
	// the special rule applied.
	iterNode := startNode
	for iterNode != nil && iterNode.height%b.blocksPerRetarget != 0 &&
		iterNode.bits == b.chainParams.PowLimitBits {

		iterNode = iterNode.parent
	}

	// Return the found difficulty or the minimum difficulty if no
	// appropriate block was found.
	lastBits := b.chainParams.PowLimitBits
	if iterNode != nil {
		lastBits = iterNode.bits
	}
	return lastBits
}

// calcNextRequiredDifficulty calculates the required difficulty for the block
// after the passed previous block node based on the difficulty retarget rules.
// This function differs from the exported CalcNextRequiredDifficulty in that
// the exported version uses the current best chain as the previous block node
// while this function accepts any block node.
func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTime time.Time) (uint32, error) {
	// Genesis block.
	if lastNode == nil {
		return b.chainParams.PowLimitBits, nil
	}

	// Return the previous block's difficulty requirements if this block
	// is not at a difficulty retarget interval.
	if (lastNode.height+1)%b.blocksPerRetarget != 0 {
		// For networks that support it, allow special reduction of the
		// required difficulty once too much time has elapsed without
		// mining a block.
		if b.chainParams.ReduceMinDifficulty {
			// Return minimum difficulty when more than the desired
			// amount of time has elapsed without mining a block.
			reductionTime := int64(b.chainParams.MinDiffReductionTime /
				time.Second)
			allowMinTime := lastNode.timestamp + reductionTime
			if newBlockTime.Unix() > allowMinTime {
				return b.chainParams.PowLimitBits, nil
			}

			// The block was mined within the desired timeframe, so
			// return the difficulty for the last block which did
			// not have the special minimum difficulty rule applied.
			return b.findPrevTestNetDifficulty(lastNode), nil
		}

		// For the main network (or any unrecognized networks), simply
		// return the previous block's difficulty requirements.
		return lastNode.bits, nil
	}

	// Get the block node at the previous retarget (targetTimespan days
	// worth of blocks).
	firstNode := lastNode.RelativeAncestor(b.blocksPerRetarget - 1)
	if firstNode == nil {
		return 0, AssertError("unable to obtain previous retarget block")
	}

	// Limit the amount of adjustment that can occur to the previous
	// difficulty.
	actualTimespan := lastNode.timestamp - firstNode.timestamp
	adjustedTimespan := actualTimespan
	if actualTimespan < b.minRetargetTimespan {
		adjustedTimespan = b.minRetargetTimespan
	} else if actualTimespan > b.maxRetargetTimespan {
		adjustedTimespan = b.maxRetargetTimespan
	}

	// Calculate new target difficulty as:
	//  currentDifficulty * (adjustedTimespan / targetTimespan)
	// The result uses integer division which means it will be slightly
	// rounded down. Bitcoind also uses integer division to calculate this
	// result.
	oldTarget := CompactToBig(lastNode.bits)
	newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan))
	targetTimeSpan := int64(b.chainParams.TargetTimespan / time.Second)
	newTarget.Div(newTarget, big.NewInt(targetTimeSpan))

	// Limit new value to the proof of work limit.
	if newTarget.Cmp(b.chainParams.PowLimit) > 0 {
		newTarget.Set(b.chainParams.PowLimit)
	}

	// Log new target difficulty and return it. The new target logging is
	// intentionally converting the bits back to a number instead of using
	// newTarget since conversion to the compact representation loses
	// precision.
	newTargetBits := BigToCompact(newTarget)
	log.Debugf("Difficulty retarget at block height %d", lastNode.height+1)
	log.Debugf("Old target %08x (%064x)", lastNode.bits, oldTarget)
	log.Debugf("New target %08x (%064x)", newTargetBits,
		CompactToBig(newTargetBits))
	log.Debugf("Actual timespan %v, adjusted timespan %v, target timespan %v",
		time.Duration(actualTimespan)*time.Second,
		time.Duration(adjustedTimespan)*time.Second,
		b.chainParams.TargetTimespan)

	return newTargetBits, nil
}
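
// retargetSketch is an illustrative sketch (helper name and constants are
// assumed, and the clamping against the min/max retarget timespans is
// omitted) of the arithmetic above: with a two-week target timespan and an
// actual timespan of one week, the new target is half the old one, which
// doubles the difficulty.
func retargetSketch(oldBits uint32) uint32 {
	targetTimespan := int64(14 * 24 * 60 * 60) // two weeks, in seconds
	adjustedTimespan := targetTimespan / 2     // hypothetical: blocks arrived twice as fast

	// newTarget = oldTarget * adjustedTimespan / targetTimespan
	newTarget := new(big.Int).Mul(CompactToBig(oldBits), big.NewInt(adjustedTimespan))
	newTarget.Div(newTarget, big.NewInt(targetTimespan))
	return BigToCompact(newTarget)
}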

// CalcNextRequiredDifficulty calculates the required difficulty for the block
// after the end of the current best chain based on the difficulty retarget
// rules.
//
// This function is safe for concurrent access.
func (b *BlockChain) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) {
	b.chainLock.Lock()
	difficulty, err := b.calcNextRequiredDifficulty(b.bestChain.Tip(), timestamp)
	b.chainLock.Unlock()
	return difficulty, err
}
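
// nextDifficultySketch is an illustrative usage sketch (helper name assumed,
// and chain is assumed to be a fully initialized *BlockChain): a caller such
// as block template generation can query the difficulty required for the
// next block by passing the timestamp of the block being built, which lets
// the testnet minimum-difficulty rule be evaluated against it.
func nextDifficultySketch(chain *BlockChain) (uint32, error) {
	return chain.CalcNextRequiredDifficulty(time.Now())
}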