Merge pull request #2196 from Crypt-iQ/2181_eugene

blockchain: Add ReconsiderBlock()
Olaoluwa Osuntokun 2024-06-19 16:07:14 -07:00 committed by GitHub
commit cd5e5bab63
2 changed files with 510 additions and 180 deletions
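For orientation before the diff, here is a minimal usage sketch of the API this commit adds. It is not part of the change itself: the reconsiderExample helper is hypothetical, and it assumes an already-constructed *blockchain.BlockChain (chain) plus the hash of a block that was previously marked invalid with InvalidateBlock.

package example

import (
	"fmt"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
)

// reconsiderExample is a hypothetical helper: it clears the invalid status
// of the block identified by hash (and of its descendants) and lets the
// chain reorganize onto that branch if it now has the most cumulative work.
func reconsiderExample(chain *blockchain.BlockChain, hash *chainhash.Hash) error {
	if err := chain.ReconsiderBlock(hash); err != nil {
		return err
	}

	// If the reconsidered branch still has less work than the current tip,
	// the tip is unchanged and the branch simply remains a side chain.
	best := chain.BestSnapshot()
	fmt.Printf("best chain tip is now %v at height %d\n", best.Hash, best.Height)
	return nil
}

The new TestReconsiderBlock below drives the same round trip by pairing InvalidateBlock with ReconsiderBlock and comparing ChainTips before and after.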


@@ -870,192 +870,24 @@ func countSpentOutputs(block *btcutil.Block) int {
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error {
// Nothing to do if no reorganize nodes were provided.
if detachNodes.Len() == 0 && attachNodes.Len() == 0 {
return nil
}
// Ensure the provided nodes match the current best chain.
tip := b.bestChain.Tip()
if detachNodes.Len() != 0 {
firstDetachNode := detachNodes.Front().Value.(*blockNode)
if firstDetachNode.hash != tip.hash {
return AssertError(fmt.Sprintf("reorganize nodes to detach are "+
"not for the current best chain -- first detach node %v, "+
"current chain %v", &firstDetachNode.hash, &tip.hash))
}
}
// Ensure the provided nodes are for the same fork point.
if attachNodes.Len() != 0 && detachNodes.Len() != 0 {
firstAttachNode := attachNodes.Front().Value.(*blockNode)
lastDetachNode := detachNodes.Back().Value.(*blockNode)
if firstAttachNode.parent.hash != lastDetachNode.parent.hash {
return AssertError(fmt.Sprintf("reorganize nodes do not have the "+
"same fork point -- first attach parent %v, last detach "+
"parent %v", &firstAttachNode.parent.hash,
&lastDetachNode.parent.hash))
}
}
// Track the old and new best chains heads.
oldBest := tip
newBest := tip
// All of the blocks to detach and related spend journal entries needed
// to unspend transaction outputs in the blocks being disconnected must
// be loaded from the database during the reorg check phase below and
// then they are needed again when doing the actual database updates.
// Rather than doing two loads, cache the loaded data into these slices.
detachBlocks := make([]*btcutil.Block, 0, detachNodes.Len())
detachSpentTxOuts := make([][]SpentTxOut, 0, detachNodes.Len())
attachBlocks := make([]*btcutil.Block, 0, attachNodes.Len())
// Disconnect all of the blocks back to the point of the fork. This
// entails loading the blocks and their associated spent txos from the
// database and using that information to unspend all of the spent txos
// and remove the utxos created by the blocks.
view := NewUtxoViewpoint()
view.SetBestHash(&oldBest.hash)
for e := detachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
var block *btcutil.Block
err := b.db.View(func(dbTx database.Tx) error {
var err error
block, err = dbFetchBlockByNode(dbTx, n)
return err
})
if err != nil {
return err
}
if n.hash != *block.Hash() {
return AssertError(fmt.Sprintf("detach block node hash %v (height "+
"%v) does not match previous parent block hash %v", &n.hash,
n.height, block.Hash()))
}
// Load all of the utxos referenced by the block that aren't
// already in the view.
err = view.fetchInputUtxos(b.utxoCache, block)
if err != nil {
return err
}
// Load all of the spent txos for the block from the spend
// journal.
var stxos []SpentTxOut
err = b.db.View(func(dbTx database.Tx) error {
stxos, err = dbFetchSpendJournalEntry(dbTx, block)
return err
})
if err != nil {
return err
}
// Store the loaded block and spend journal entry for later.
detachBlocks = append(detachBlocks, block)
detachSpentTxOuts = append(detachSpentTxOuts, stxos)
err = view.disconnectTransactions(b.db, block, stxos)
if err != nil {
return err
}
newBest = n.parent
}
// Set the fork point only if there are nodes to attach since otherwise
// blocks are only being disconnected and thus there is no fork point.
var forkNode *blockNode
if attachNodes.Len() > 0 {
forkNode = newBest
}
// Perform several checks to verify each block that needs to be attached
// to the main chain can be connected without violating any rules and
// without actually connecting the block.
//
// NOTE: These checks could be done directly when connecting a block,
// however the downside to that approach is that if any of these checks
// fail after disconnecting some blocks or attaching others, all of the
// operations have to be rolled back to get the chain back into the
// state it was before the rule violation (or other failure). There are
// at least a couple of ways to accomplish that rollback, but both involve
// tweaking the chain and/or database. This approach catches these
// issues before ever modifying the chain.
for e := attachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
var block *btcutil.Block
err := b.db.View(func(dbTx database.Tx) error {
var err error
block, err = dbFetchBlockByNode(dbTx, n)
return err
})
if err != nil {
return err
}
// Store the loaded block for later.
attachBlocks = append(attachBlocks, block)
// Skip checks if node has already been fully validated. Although
// checkConnectBlock gets skipped, we still need to update the UTXO
// view.
if b.index.NodeStatus(n).KnownValid() {
err = view.fetchInputUtxos(b.utxoCache, block)
if err != nil {
return err
}
err = view.connectTransactions(block, nil)
if err != nil {
return err
}
newBest = n
continue
}
// Notice the spent txout details are not requested here and
// thus will not be generated. This is done because the state
// is not being immediately written to the database, so it is
// not needed.
//
// In the case the block is determined to be invalid due to a
// rule violation, mark it as invalid and mark all of its
// descendants as having an invalid ancestor.
err = b.checkConnectBlock(n, block, view, nil)
if err != nil {
if _, ok := err.(RuleError); ok {
b.index.SetStatusFlags(n, statusValidateFailed)
for de := e.Next(); de != nil; de = de.Next() {
dn := de.Value.(*blockNode)
b.index.SetStatusFlags(dn, statusInvalidAncestor)
}
}
return err
}
b.index.SetStatusFlags(n, statusValid)
newBest = n
}
// Flush the utxo cache for the block disconnect below. The disconnect
// code assumes that it's directly modifying the database so the cache
// will be left in an inconsistent state. It needs to be flushed beforehand
// in order for that to not happen.
err := b.db.Update(func(dbTx database.Tx) error {
return b.utxoCache.flush(dbTx, FlushRequired, b.BestSnapshot())
})
// Check first that the detach and the attach nodes are valid and they
// pass verification.
detachBlocks, attachBlocks, detachSpentTxOuts, err :=
b.verifyReorganizationValidity(detachNodes, attachNodes)
if err != nil {
return err
}
// Track the old and new best chains heads.
tip := b.bestChain.Tip()
oldBest := tip
newBest := tip
// Reset the view for the actual connection code below. This is
// required because the view was previously modified when checking if
// the reorg would be successful and the connection code requires the
// view to be valid from the viewpoint of each block being disconnected.
view = NewUtxoViewpoint()
view := NewUtxoViewpoint()
view.SetBestHash(&b.bestChain.Tip().hash)
// Disconnect blocks from the main chain.
@@ -1072,8 +904,9 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error
// Update the view to unspend all of the spent txos and remove
// the utxos created by the block.
err = view.disconnectTransactions(b.db, block,
detachSpentTxOuts[i])
err = view.disconnectTransactions(
b.db, block, detachSpentTxOuts[i],
)
if err != nil {
return err
}
@@ -1084,6 +917,15 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error
if err != nil {
return err
}
newBest = n.parent
}
// Set the fork point only if there are nodes to attach since otherwise
// blocks are only being disconnected and thus there is no fork point.
var forkNode *blockNode
if attachNodes.Len() > 0 {
forkNode = newBest
}
// Connect the new best chain blocks using the utxocache directly. It's more
@@ -1109,6 +951,8 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error
if err != nil {
return err
}
newBest = n
}
// Log the point where the chain forked and old and new best chain
@@ -1125,6 +969,176 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error
return nil
}
// verifyReorganizationValidity will verify that the disconnects and the connects
// in the list can be processed without mutating the chain.
//
// For the attach nodes, it'll check that each of the blocks is valid and will
// change the status of the block node in the list to invalid if the block fails
// to pass verification. For the detach nodes, it'll check that the blocks being
// detached and their spend journals are present in the database.
func (b *BlockChain) verifyReorganizationValidity(detachNodes, attachNodes *list.List) (
[]*btcutil.Block, []*btcutil.Block, [][]SpentTxOut, error) {
// Nothing to do if no reorganize nodes were provided.
if detachNodes.Len() == 0 && attachNodes.Len() == 0 {
return nil, nil, nil, nil
}
// Ensure the provided nodes match the current best chain.
tip := b.bestChain.Tip()
if detachNodes.Len() != 0 {
firstDetachNode := detachNodes.Front().Value.(*blockNode)
if firstDetachNode.hash != tip.hash {
return nil, nil, nil,
AssertError(fmt.Sprintf("reorganize nodes to detach are "+
"not for the current best chain -- first detach node %v, "+
"current chain %v", &firstDetachNode.hash, &tip.hash))
}
}
// Ensure the provided nodes are for the same fork point.
if attachNodes.Len() != 0 && detachNodes.Len() != 0 {
firstAttachNode := attachNodes.Front().Value.(*blockNode)
lastDetachNode := detachNodes.Back().Value.(*blockNode)
if firstAttachNode.parent.hash != lastDetachNode.parent.hash {
return nil, nil, nil,
AssertError(fmt.Sprintf("reorganize nodes do not have the "+
"same fork point -- first attach parent %v, last detach "+
"parent %v", &firstAttachNode.parent.hash,
&lastDetachNode.parent.hash))
}
}
// All of the blocks to detach and related spend journal entries needed
// to unspend transaction outputs in the blocks being disconnected must
// be loaded from the database during the reorg check phase below and
// then they are needed again when doing the actual database updates.
// Rather than doing two loads, cache the loaded data into these slices.
detachBlocks := make([]*btcutil.Block, 0, detachNodes.Len())
detachSpentTxOuts := make([][]SpentTxOut, 0, detachNodes.Len())
attachBlocks := make([]*btcutil.Block, 0, attachNodes.Len())
// Disconnect all of the blocks back to the point of the fork. This
// entails loading the blocks and their associated spent txos from the
// database and using that information to unspend all of the spent txos
// and remove the utxos created by the blocks.
view := NewUtxoViewpoint()
view.SetBestHash(&tip.hash)
for e := detachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
var block *btcutil.Block
err := b.db.View(func(dbTx database.Tx) error {
var err error
block, err = dbFetchBlockByNode(dbTx, n)
return err
})
if err != nil {
return nil, nil, nil, err
}
if n.hash != *block.Hash() {
return nil, nil, nil, AssertError(
fmt.Sprintf("detach block node hash %v (height "+
"%v) does not match previous parent block hash %v",
&n.hash, n.height, block.Hash()))
}
// Load all of the utxos referenced by the block that aren't
// already in the view.
err = view.fetchInputUtxos(b.utxoCache, block)
if err != nil {
return nil, nil, nil, err
}
// Load all of the spent txos for the block from the spend
// journal.
var stxos []SpentTxOut
err = b.db.View(func(dbTx database.Tx) error {
stxos, err = dbFetchSpendJournalEntry(dbTx, block)
return err
})
if err != nil {
return nil, nil, nil, err
}
// Store the loaded block and spend journal entry for later.
detachBlocks = append(detachBlocks, block)
detachSpentTxOuts = append(detachSpentTxOuts, stxos)
err = view.disconnectTransactions(b.db, block, stxos)
if err != nil {
return nil, nil, nil, err
}
}
// Perform several checks to verify each block that needs to be attached
// to the main chain can be connected without violating any rules and
// without actually connecting the block.
//
// NOTE: These checks could be done directly when connecting a block,
// however the downside to that approach is that if any of these checks
// fail after disconnecting some blocks or attaching others, all of the
// operations have to be rolled back to get the chain back into the
// state it was before the rule violation (or other failure). There are
// at least a couple of ways to accomplish that rollback, but both involve
// tweaking the chain and/or database. This approach catches these
// issues before ever modifying the chain.
for e := attachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
var block *btcutil.Block
err := b.db.View(func(dbTx database.Tx) error {
var err error
block, err = dbFetchBlockByNode(dbTx, n)
return err
})
if err != nil {
return nil, nil, nil, err
}
// Store the loaded block for later.
attachBlocks = append(attachBlocks, block)
// Skip checks if node has already been fully validated. Although
// checkConnectBlock gets skipped, we still need to update the UTXO
// view.
if b.index.NodeStatus(n).KnownValid() {
err = view.fetchInputUtxos(b.utxoCache, block)
if err != nil {
return nil, nil, nil, err
}
err = view.connectTransactions(block, nil)
if err != nil {
return nil, nil, nil, err
}
continue
}
// Notice the spent txout details are not requested here and
// thus will not be generated. This is done because the state
// is not being immediately written to the database, so it is
// not needed.
//
// In the case the block is determined to be invalid due to a
// rule violation, mark it as invalid and mark all of its
// descendants as having an invalid ancestor.
err = b.checkConnectBlock(n, block, view, nil)
if err != nil {
if _, ok := err.(RuleError); ok {
b.index.SetStatusFlags(n, statusValidateFailed)
for de := e.Next(); de != nil; de = de.Next() {
dn := de.Value.(*blockNode)
b.index.SetStatusFlags(dn, statusInvalidAncestor)
}
}
return nil, nil, nil, err
}
b.index.SetStatusFlags(n, statusValid)
}
return detachBlocks, attachBlocks, detachSpentTxOuts, nil
}
// connectBestChain handles connecting the passed block to the chain while
// respecting proper chain selection according to the chain with the most
// proof of work. In the typical case, the new block simply extends the main
@@ -1936,6 +1950,96 @@ func (b *BlockChain) InvalidateBlock(hash *chainhash.Hash) error {
return err
}
// ReconsiderBlock reconsiders the validity of the block with the given hash.
//
// This function is safe for concurrent access.
func (b *BlockChain) ReconsiderBlock(hash *chainhash.Hash) error {
b.chainLock.Lock()
defer b.chainLock.Unlock()
log.Infof("Reconsidering block_hash=%v", hash[:])
reconsiderNode := b.index.LookupNode(hash)
if reconsiderNode == nil {
// Return an error if the block doesn't exist.
return fmt.Errorf("requested block hash of %s is not found "+
"and thus cannot be reconsidered", hash)
}
// Nothing to do if the given block is already valid.
if reconsiderNode.status.KnownValid() {
log.Infof("block_hash=%x is valid, nothing to reconsider", hash[:])
return nil
}
// Clear the status of the block being reconsidered.
b.index.UnsetStatusFlags(reconsiderNode, statusInvalidAncestor)
b.index.UnsetStatusFlags(reconsiderNode, statusValidateFailed)
// Grab all the tips.
tips := b.index.InactiveTips(b.bestChain)
tips = append(tips, b.bestChain.Tip())
log.Debugf("Examining %v chain tips for reconsideration", len(tips))
// Go through all the tips and unset the status for all the descendants of the
// block being reconsidered.
var reconsiderTip *blockNode
for _, tip := range tips {
// Continue if the given inactive tip is not a descendant of the block
// being reconsidered.
if !tip.IsAncestor(reconsiderNode) {
// Set as the reconsider tip if the block node being reconsidered
// is a tip.
if tip == reconsiderNode {
reconsiderTip = reconsiderNode
}
continue
}
// Mark the current tip as the tip being reconsidered.
reconsiderTip = tip
// Unset the status of all the parents up until it reaches the block
// being reconsidered.
for n := tip; n != nil && n != reconsiderNode; n = n.parent {
b.index.UnsetStatusFlags(n, statusInvalidAncestor)
}
}
// Compare the cumulative work for the branch being reconsidered.
bestTipWork := b.bestChain.Tip().workSum
if reconsiderTip.workSum.Cmp(bestTipWork) <= 0 {
log.Debugf("Tip to reconsider has less cumulative work than current "+
"chain tip: %v vs %v", reconsiderTip.workSum, bestTipWork)
return nil
}
// If the reconsider tip has a higher cumulative work, then reorganize
// to it after checking the validity of the nodes.
detachNodes, attachNodes := b.getReorganizeNodes(reconsiderTip)
// We're checking if the reorganization that'll happen is actually valid.
// While this is called in reorganizeChain, we call it beforehand as the error
// returned from reorganizeChain doesn't differentiate between actual disconnect/
// connect errors or whether the branch we're trying to fork to is invalid.
//
// The block status changes here without being flushed so we immediately flush
// the blockindex after we call this function.
_, _, _, err := b.verifyReorganizationValidity(detachNodes, attachNodes)
if writeErr := b.index.flushToDB(); writeErr != nil {
log.Warnf("Error flushing block index changes to disk: %v", writeErr)
}
if err != nil {
// If we errored out during the verification of the reorg branch,
// it's ok to return nil as we reconsidered the block and determined
// that it's invalid.
return nil
}
return b.reorganizeChain(detachNodes, attachNodes)
}
// IndexManager provides a generic interface that is called when blocks are
// connected and disconnected to and from the tip of the main chain for the
// purpose of supporting optional indexes.


@@ -1622,3 +1622,229 @@ func TestInvalidateBlock(t *testing.T) {
}()
}
}
func TestReconsiderBlock(t *testing.T) {
tests := []struct {
name string
chainGen func() (*BlockChain, []*chainhash.Hash, func())
}{
{
name: "one branch, invalidate once and revalidate",
chainGen: func() (*BlockChain, []*chainhash.Hash, func()) {
chain, params, tearDown := utxoCacheTestChain("TestInvalidateBlock-one-branch-invalidate-once")
// Create a chain with 101 blocks.
tip := btcutil.NewBlock(params.GenesisBlock)
_, _, err := addBlocks(101, chain, tip, []*testhelper.SpendableOut{})
if err != nil {
t.Fatal(err)
}
// Invalidate block 5.
block, err := chain.BlockByHeight(5)
if err != nil {
t.Fatal(err)
}
invalidateHash := block.Hash()
return chain, []*chainhash.Hash{invalidateHash}, tearDown
},
},
{
name: "invalidate the active branch with a side branch present and revalidate",
chainGen: func() (*BlockChain, []*chainhash.Hash, func()) {
chain, params, tearDown := utxoCacheTestChain("TestReconsiderBlock-invalidate-with-side-branch")
// Create a chain with 101 blocks.
tip := btcutil.NewBlock(params.GenesisBlock)
_, spendableOuts, err := addBlocks(101, chain, tip, []*testhelper.SpendableOut{})
if err != nil {
t.Fatal(err)
}
// Invalidate block 5.
block, err := chain.BlockByHeight(5)
if err != nil {
t.Fatal(err)
}
invalidateHash := block.Hash()
// Create a side chain with 7 blocks that builds on block 1.
b1, err := chain.BlockByHeight(1)
if err != nil {
t.Fatal(err)
}
_, _, err = addBlocks(6, chain, b1, spendableOuts[0])
if err != nil {
t.Fatal(err)
}
return chain, []*chainhash.Hash{invalidateHash}, tearDown
},
},
{
name: "invalidate a side branch and revalidate it",
chainGen: func() (*BlockChain, []*chainhash.Hash, func()) {
chain, params, tearDown := utxoCacheTestChain("TestReconsiderBlock-invalidate-a-side-branch")
// Create a chain with 101 blocks.
tip := btcutil.NewBlock(params.GenesisBlock)
_, spendableOuts, err := addBlocks(101, chain, tip, []*testhelper.SpendableOut{})
if err != nil {
t.Fatal(err)
}
// Create a side chain with 7 blocks that builds on block 1.
b1, err := chain.BlockByHeight(1)
if err != nil {
t.Fatal(err)
}
altBlockHashes, _, err := addBlocks(6, chain, b1, spendableOuts[0])
if err != nil {
t.Fatal(err)
}
// Grab block at height 4:
//
// b2, b3, b4, b5
// 0, 1, 2, 3
invalidateHash := altBlockHashes[2]
return chain, []*chainhash.Hash{invalidateHash}, tearDown
},
},
{
name: "reconsider an invalid side branch with a higher work",
chainGen: func() (*BlockChain, []*chainhash.Hash, func()) {
chain, params, tearDown := utxoCacheTestChain("TestReconsiderBlock-reconsider-an-invalid-side-branch-higher")
tip := btcutil.NewBlock(params.GenesisBlock)
_, spendableOuts, err := addBlocks(6, chain, tip, []*testhelper.SpendableOut{})
if err != nil {
t.Fatal(err)
}
// Select utxos to be spent from the best block and
// modify the amount so that the block will be invalid.
nextSpends, _ := randomSelect(spendableOuts[len(spendableOuts)-1])
nextSpends[0].Amount += testhelper.LowFee
// Make an invalid block that builds on top of the current tip.
bestBlock, err := chain.BlockByHash(&chain.BestSnapshot().Hash)
if err != nil {
t.Fatal(err)
}
invalidBlock, _, _ := newBlock(chain, bestBlock, nextSpends)
invalidateHash := invalidBlock.Hash()
// The block validation will fail here and we'll mark the
// block as invalid in the block index.
chain.ProcessBlock(invalidBlock, BFNone)
// Modify the amount again so it's valid.
nextSpends[0].Amount -= testhelper.LowFee
return chain, []*chainhash.Hash{invalidateHash}, tearDown
},
},
{
name: "reconsider an invalid side branch with a lower work",
chainGen: func() (*BlockChain, []*chainhash.Hash, func()) {
chain, params, tearDown := utxoCacheTestChain("TestReconsiderBlock-reconsider-an-invalid-side-branch-lower")
tip := btcutil.NewBlock(params.GenesisBlock)
_, spendableOuts, err := addBlocks(6, chain, tip, []*testhelper.SpendableOut{})
if err != nil {
t.Fatal(err)
}
// Select utxos to be spent from the best block and
// modify the amount so that the block will be invalid.
nextSpends, _ := randomSelect(spendableOuts[len(spendableOuts)-1])
nextSpends[0].Amount += testhelper.LowFee
// Make an invalid block that builds on top of the current tip.
bestBlock, err := chain.BlockByHash(&chain.BestSnapshot().Hash)
if err != nil {
t.Fatal(err)
}
invalidBlock, _, _ := newBlock(chain, bestBlock, nextSpends)
invalidateHash := invalidBlock.Hash()
// The block validation will fail here and we'll mark the
// block as invalid in the block index.
chain.ProcessBlock(invalidBlock, BFNone)
// Modify the amount again so it's valid.
nextSpends[0].Amount -= testhelper.LowFee
// Add more blocks so the invalid block ends up on a
// side chain that doesn't have the most PoW.
_, _, err = addBlocks(3, chain, bestBlock, []*testhelper.SpendableOut{})
if err != nil {
t.Fatal(err)
}
return chain, []*chainhash.Hash{invalidateHash}, tearDown
},
},
}
for _, test := range tests {
chain, invalidateHashes, tearDown := test.chainGen()
func() {
defer tearDown()
for _, invalidateHash := range invalidateHashes {
// Cache the chain tips before the invalidate. Since we'll reconsider
// the invalidated block, we should come back to these tips in the end.
tips := chain.ChainTips()
expectedChainTips := make(map[chainhash.Hash]ChainTip, len(tips))
for _, tip := range tips {
expectedChainTips[tip.BlockHash] = tip
}
// Invalidation.
err := chain.InvalidateBlock(invalidateHash)
if err != nil {
t.Fatal(err)
}
// Reconsideration.
err = chain.ReconsiderBlock(invalidateHash)
if err != nil {
t.Fatal(err)
}
// Compare the tips against the tips we've cached.
gotChainTips := chain.ChainTips()
for _, gotChainTip := range gotChainTips {
testChainTip, found := expectedChainTips[gotChainTip.BlockHash]
if !found {
t.Errorf("TestReconsiderBlock Failed test \"%s\". Couldn't find an expected "+
"chain tip with height %d, hash %s, branchlen %d, status \"%s\"",
test.name, testChainTip.Height, testChainTip.BlockHash.String(),
testChainTip.BranchLen, testChainTip.Status.String())
}
// If the invalid side branch has lower work, we'll never
// actually process the block again until the branch becomes
// a greater work chain, so it'll show up as valid-fork.
if test.name == "reconsider an invalid side branch with a lower work" &&
testChainTip.BlockHash == *invalidateHash {
testChainTip.Status = StatusValidFork
}
if !reflect.DeepEqual(testChainTip, gotChainTip) {
t.Errorf("TestReconsiderBlock Failed test \"%s\". Expected chain tip with "+
"height %d, hash %s, branchlen %d, status \"%s\" but got "+
"height %d, hash %s, branchlen %d, status \"%s\"", test.name,
testChainTip.Height, testChainTip.BlockHash.String(),
testChainTip.BranchLen, testChainTip.Status.String(),
gotChainTip.Height, gotChainTip.BlockHash.String(),
gotChainTip.BranchLen, gotChainTip.Status.String())
}
}
}
}()
}
}