package uspv

import (
	"fmt"
	"log"
	"net"
	"os"
	"sync"

	"github.com/roasbeef/btcd/wire"
)

const (
	keyFileName    = "testseed.hex"
	headerFileName = "headers.bin"

	// version hardcoded for now, probably ok...?
	// 70012 is for segnet... make this an init var?
	VERSION = 70012
)

type SPVCon struct {
	con net.Conn // the (probably tcp) connection to the node

	// Enhanced SPV modes for users who have outgrown easy mode SPV
	// but have not yet graduated to full nodes.
	HardMode bool // hard mode doesn't use filters.
	Ironman  bool // ironman only gets blocks, never requests txs.

	headerMutex sync.Mutex
	headerFile  *os.File // file for SPV headers

	//[doesn't work without fancy mutexes, nevermind, just use header file]
	// localHeight int32 // block height we're on
	remoteHeight int32 // block height they're on

	localVersion  uint32 // version we report
	remoteVersion uint32 // version of the remote node

	// what's the point of the input queue? remove? leave for now...
	inMsgQueue  chan wire.Message // Messages coming in from remote node
	outMsgQueue chan wire.Message // Messages going out to remote node

	WBytes uint64 // total bytes written
	RBytes uint64 // total bytes read

	TS *TxStore // transaction store to write to

	// blockQueue is for keeping track of what height we've requested.
	blockQueue chan HashAndHeight

	// fPositives is a channel to keep track of bloom filter false positives.
	fPositives chan int32

	// inWaitState is a channel that is empty while in the header and block
	// sync modes, but when in the idle state has a "true" in it.
	inWaitState chan bool
}

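// Sync flow, roughly, as implemented below: AskForHeaders sends a getheaders
// message built from the last stored header; IngestHeaders appends and checks
// the response; AskForBlocks then requests a (merkle)block for every header
// past the db sync height, pushing a HashAndHeight onto blockQueue for each
// request; IngestMerkleBlock pops that entry to match responses to requests,
// store txids, and advance the sync height.
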
// AskForTx requests a tx we heard about from an inv message.
// It's one at a time but should be fast enough.
// I don't like this function because SPV shouldn't even ask...
func (s *SPVCon) AskForTx(txid wire.ShaHash) {
	gdata := wire.NewMsgGetData()
	inv := wire.NewInvVect(wire.InvTypeTx, &txid)
	gdata.AddInvVect(inv)
	s.outMsgQueue <- gdata
}

// HashAndHeight is needed instead of just height in case a fullnode
// responds abnormally (?) by sending out-of-order merkleblocks.
// We cache a merkleroot:height pair in the queue so we don't have to
// look them up from the disk.
// Also used when inv messages indicate blocks so we can add the header
// and parse the txs in one request instead of requesting headers first.
type HashAndHeight struct {
	blockhash wire.ShaHash
	height    int32
	final     bool // indicates this is the last merkleblock requested
}

// NewRootAndHeight saves like 2 lines.
func NewRootAndHeight(b wire.ShaHash, h int32) (hah HashAndHeight) {
	hah.blockhash = b
	hah.height = h
	return
}

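// RemoveHeaders rolls the header file back by r headers (80 bytes each),
// e.g. to unwind a reorg.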
func (s *SPVCon) RemoveHeaders(r int32) error {
	endPos, err := s.headerFile.Seek(0, os.SEEK_END)
	if err != nil {
		return err
	}
	err = s.headerFile.Truncate(endPos - int64(r*80))
	if err != nil {
		return fmt.Errorf("couldn't truncate header file")
	}
	return nil
}

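// IngestMerkleBlock checks a merkleblock we requested, matches it against the
// HashAndHeight queued for it, stores any txids that made it through the
// filter, and records the new db sync height. If it was the final requested
// block, it asks for headers again.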
func (s *SPVCon) IngestMerkleBlock(m *wire.MsgMerkleBlock) {
	txids, err := checkMBlock(m) // check self-consistency
	if err != nil {
		log.Printf("Merkle block error: %s\n", err.Error())
		return
	}
	var hah HashAndHeight
	select { // select here so we don't block on an unrequested mblock
	case hah = <-s.blockQueue: // pop height off mblock queue
		break
	default:
		log.Printf("Unrequested merkle block")
		return
	}

	// this verifies order, and also that the returned header fits
	// into our SPV header file
	newMerkBlockSha := m.Header.BlockSha()
	if !hah.blockhash.IsEqual(&newMerkBlockSha) {
		log.Printf("merkle block out of order got %s expect %s",
			m.Header.BlockSha().String(), hah.blockhash.String())
		log.Printf("has %d hashes %d txs flags: %x",
			len(m.Hashes), m.Transactions, m.Flags)
		return
	}

	for _, txid := range txids {
		err := s.TS.AddTxid(txid, hah.height)
		if err != nil {
			log.Printf("Txid store error: %s\n", err.Error())
			return
		}
	}
	// write to db that we've sync'd to the height indicated in the
	// merkle block. This isn't QUITE true since we haven't actually gotten
	// the txs yet but if there are problems with the txs we should backtrack.
	err = s.TS.SetDBSyncHeight(hah.height)
	if err != nil {
		log.Printf("Merkle block error: %s\n", err.Error())
		return
	}
	if hah.final {
		// don't set waitstate; instead, ask for headers again!
		// this way the only thing that triggers waitstate is asking for headers,
		// getting 0, calling AskForBlocks(), and seeing you don't need any.
		// that way you are pretty sure you're synced up.
		err = s.AskForHeaders()
		if err != nil {
			log.Printf("Merkle block error: %s\n", err.Error())
			return
		}
	}
	return
}

// IngestHeaders takes in a bunch of headers and appends them to the
// local header file, checking that they fit. If there are no headers,
// it assumes we're done and returns false. If it worked it assumes there's
// more to request and returns true.
func (s *SPVCon) IngestHeaders(m *wire.MsgHeaders) (bool, error) {
	gotNum := int64(len(m.Headers))
	if gotNum > 0 {
		fmt.Printf("got %d headers. Range:\n%s - %s\n",
			gotNum, m.Headers[0].BlockSha().String(),
			m.Headers[len(m.Headers)-1].BlockSha().String())
	} else {
		log.Printf("got 0 headers, we're probably synced up")
		return false, nil
	}

	s.headerMutex.Lock()
	defer s.headerMutex.Unlock()

	var err error
	// seek to last header
	_, err = s.headerFile.Seek(-80, os.SEEK_END)
	if err != nil {
		return false, err
	}
	var last wire.BlockHeader
	err = last.Deserialize(s.headerFile)
	if err != nil {
		return false, err
	}
	prevHash := last.BlockSha()

	endPos, err := s.headerFile.Seek(0, os.SEEK_END)
	if err != nil {
		return false, err
	}
	tip := int32(endPos/80) - 1 // move back 1 header length to read

	// check first header returned to make sure it fits on the end
	// of our header file
	if !m.Headers[0].PrevBlock.IsEqual(&prevHash) {
		// delete 100 headers if this happens! Dumb reorg.
		log.Printf("reorg? header msg doesn't fit. points to %s, expect %s",
			m.Headers[0].PrevBlock.String(), prevHash.String())
		if endPos < 8080 {
			// jeez I give up, back to genesis
			s.headerFile.Truncate(80)
		} else {
			// drop the last 100 headers (100 * 80 bytes) and try again
			err = s.headerFile.Truncate(endPos - 8000)
			if err != nil {
				return false, fmt.Errorf("couldn't truncate header file")
			}
		}
		return true, fmt.Errorf("Truncated header file to try again")
	}

	for _, resphdr := range m.Headers {
		// write to end of file
		err = resphdr.Serialize(s.headerFile)
		if err != nil {
			return false, err
		}
		// advance chain tip
		tip++
		// check last header
		worked := CheckHeader(s.headerFile, tip, s.TS.Param)
		if !worked {
			if endPos < 8080 {
				// jeez I give up, back to genesis
				s.headerFile.Truncate(80)
			} else {
				// drop the last 100 headers (100 * 80 bytes)
				err = s.headerFile.Truncate(endPos - 8000)
				if err != nil {
					return false, fmt.Errorf("couldn't truncate header file")
				}
			}
			// probably should disconnect from spv node at this point,
			// since they're giving us invalid headers.
			return true, fmt.Errorf(
				"Header %d - %s doesn't fit, dropping 100 headers.",
				tip, resphdr.BlockSha().String())
		}
	}
	log.Printf("Headers to height %d OK.", tip)
	return true, nil
}

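// AskForHeaders sends a getheaders message to the remote node, using the
// last header in the local header file as the block locator.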
func (s *SPVCon) AskForHeaders() error {
	var hdr wire.BlockHeader
	ghdr := wire.NewMsgGetHeaders()
	ghdr.ProtocolVersion = s.localVersion

	s.headerMutex.Lock() // start header file ops
	info, err := s.headerFile.Stat()
	if err != nil {
		s.headerMutex.Unlock() // release the header file before bailing out
		return err
	}
	headerFileSize := info.Size()
	if headerFileSize == 0 || headerFileSize%80 != 0 { // header file broken
		s.headerMutex.Unlock()
		return fmt.Errorf("Header file not a multiple of 80 bytes")
	}

	// seek to 80 bytes from end of file
	ns, err := s.headerFile.Seek(-80, os.SEEK_END)
	if err != nil {
		s.headerMutex.Unlock()
		log.Printf("can't seek\n")
		return err
	}

	log.Printf("sought to offset %d (should be near the end)\n", ns)

	// get header from last 80 bytes of file
	err = hdr.Deserialize(s.headerFile)
	if err != nil {
		s.headerMutex.Unlock()
		log.Printf("can't Deserialize")
		return err
	}
	s.headerMutex.Unlock() // done with header file

	cHash := hdr.BlockSha()
	err = ghdr.AddBlockLocatorHash(&cHash)
	if err != nil {
		return err
	}

	fmt.Printf("get headers message has %d header hashes, first one is %s\n",
		len(ghdr.BlockLocatorHashes), ghdr.BlockLocatorHashes[0].String())

	s.outMsgQueue <- ghdr

	return nil
}

// AskForOneBlock is for testing only, so you can ask for a specific block height
// and see what goes wrong
func (s *SPVCon) AskForOneBlock(h int32) error {
	var hdr wire.BlockHeader
	var err error

	dbTip := int32(h)
	s.headerMutex.Lock() // seek to header we need
	_, err = s.headerFile.Seek(int64((dbTip)*80), os.SEEK_SET)
	if err != nil {
		s.headerMutex.Unlock() // release the header file before bailing out
		return err
	}
	err = hdr.Deserialize(s.headerFile) // read header, done w/ file for now
	s.headerMutex.Unlock()              // unlock after reading 1 header
	if err != nil {
		log.Printf("header deserialize error!\n")
		return err
	}

	bHash := hdr.BlockSha()
	// create inventory we're asking for
	iv1 := wire.NewInvVect(wire.InvTypeWitnessBlock, &bHash)
	gdataMsg := wire.NewMsgGetData()
	// add inventory
	err = gdataMsg.AddInvVect(iv1)
	if err != nil {
		return err
	}
	hah := NewRootAndHeight(bHash, h)
	s.outMsgQueue <- gdataMsg
	s.blockQueue <- hah // push height and mroot of requested block on queue
	return nil
}

// AskForBlocks requests merkleblocks from the db sync height up to the header
// tip. Right now this asks for 1 block per getData message.
// Maybe it's faster to ask for many in each message?
func (s *SPVCon) AskForBlocks() error {
	var hdr wire.BlockHeader

	s.headerMutex.Lock() // lock just to check filesize
	stat, err := os.Stat(headerFileName)
	s.headerMutex.Unlock() // checked, unlock
	if err != nil {
		return err
	}
	endPos := stat.Size()

	headerTip := int32(endPos/80) - 1 // move back 1 header length to read

	dbTip, err := s.TS.GetDBSyncHeight()
	if err != nil {
		return err
	}
	fmt.Printf("dbTip %d headerTip %d\n", dbTip, headerTip)
	if dbTip > headerTip {
		return fmt.Errorf("error- db longer than headers! shouldn't happen.")
	}
	if dbTip == headerTip {
		// nothing to ask for; set wait state and return
		fmt.Printf("no blocks to request, entering wait state\n")
		fmt.Printf("%d bytes received\n", s.RBytes)
		s.inWaitState <- true
		// also advertise any unconfirmed txs here
		s.Rebroadcast()
		return nil
	}

	fmt.Printf("will request blocks %d to %d\n", dbTip+1, headerTip)

	if !s.HardMode { // don't send this in hardmode! that's the whole point
		// create initial filter
		filt, err := s.TS.GimmeFilter()
		if err != nil {
			return err
		}
		// send filter
		s.SendFilter(filt)
		fmt.Printf("sent filter %x\n", filt.MsgFilterLoad().Filter)
	}
	// loop through all heights where we want merkleblocks.
	for dbTip < headerTip {
		dbTip++ // we're requesting the next header

		// load header from file
		s.headerMutex.Lock() // seek to header we need
		_, err = s.headerFile.Seek(int64((dbTip)*80), os.SEEK_SET)
		if err != nil {
			s.headerMutex.Unlock() // release the header file before bailing out
			return err
		}
		err = hdr.Deserialize(s.headerFile) // read header, done w/ file for now
		s.headerMutex.Unlock()              // unlock after reading 1 header
		if err != nil {
			log.Printf("header deserialize error!\n")
			return err
		}

		bHash := hdr.BlockSha()
		// create inventory we're asking for
		iv1 := new(wire.InvVect)
		// if hardmode, ask for legit blocks, none of this ralphy stuff
		if s.HardMode {
			iv1 = wire.NewInvVect(wire.InvTypeWitnessBlock, &bHash)
		} else { // ah well
			iv1 = wire.NewInvVect(wire.InvTypeFilteredWitnessBlock, &bHash)
		}
		gdataMsg := wire.NewMsgGetData()
		// add inventory
		err = gdataMsg.AddInvVect(iv1)
		if err != nil {
			return err
		}

		hah := NewRootAndHeight(hdr.BlockSha(), dbTip)
		if dbTip == headerTip { // if this is the last block, indicate finality
			hah.final = true
		}
		// waits here most of the time for the queue to empty out
		s.blockQueue <- hah // push height and mroot of requested block on queue
		s.outMsgQueue <- gdataMsg
	}
	return nil
}