Mirror of https://github.com/btcsuite/btcd.git (synced 2025-03-10 09:19:28 +01:00)
btcutil: move btcutil into new sub-module
In this commit, we move `btcutil`, as well as its sub-module, the `psbt` package, into the `btcd` repo itself.
Parent: a1f43e4d84
Commit: 5cd3533e2b
101 changed files with 18198 additions and 0 deletions

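For downstream code, the practical effect of this move is an import-path change. The sketch below is illustrative only and is not part of this diff; it assumes the old standalone module path `github.com/btcsuite/btcutil` and shows the new `github.com/btcsuite/btcd/btcutil` path that the files added here use.

```go
// Illustrative only: code that previously imported the standalone btcutil
// module switches to the btcd sub-module path.
//
//   before: import "github.com/btcsuite/btcutil"
//   after:  import "github.com/btcsuite/btcd/btcutil"
//
// The psbt package moves with it:
//
//   before: import "github.com/btcsuite/btcutil/psbt"
//   after:  import "github.com/btcsuite/btcd/btcutil/psbt"
package main

import (
    "fmt"

    "github.com/btcsuite/btcd/btcutil"
)

func main() {
    // Amount is an integer number of satoshis; its String method formats
    // the value in BTC.
    fmt.Println(btcutil.Amount(100000000)) // 1 BTC
}
```
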
btcutil/LICENSE (new file, 16 lines)
@@ -0,0 +1,16 @@
ISC License

Copyright (c) 2013-2017 The btcsuite developers
Copyright (c) 2016-2017 The Lightning Network Developers

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

btcutil/README.md (new file, 49 lines)
@@ -0,0 +1,49 @@
btcutil
=======

[](https://github.com/btcsuite/btcd/btcutil/actions)
[](http://copyfree.org)
[](https://godoc.org/github.com/btcsuite/btcd/btcutil)

Package btcutil provides bitcoin-specific convenience functions and types.
A comprehensive suite of tests is provided to ensure proper functionality. See
`test_coverage.txt` for the gocov coverage report. Alternatively, if you are
running a POSIX OS, you can run the `cov_report.sh` script for a real-time
report.

This package was developed for btcd, an alternative full-node implementation of
bitcoin which is under active development by Conformal. Although it was
primarily written for btcd, this package has intentionally been designed so it
can be used as a standalone package for any projects needing the functionality
provided.

## Installation and Updating

```bash
$ go get -u github.com/btcsuite/btcd/btcutil
```

## GPG Verification Key

All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:

- Download the public key from the Conformal website at
  https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt

- Import the public key into your GPG keyring:
  ```bash
  gpg --import GIT-GPG-KEY-conformal.txt
  ```

- Verify the release tag with the following command where `TAG_NAME` is a
  placeholder for the specific tag:
  ```bash
  git tag -v TAG_NAME
  ```

## License

Package btcutil is licensed under the [copyfree](http://copyfree.org) ISC
License.

btcutil/address.go (new file, 720 lines)
@@ -0,0 +1,720 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcutil

import (
    "bytes"
    "encoding/hex"
    "errors"
    "fmt"
    "strings"

    "github.com/btcsuite/btcd/btcec"
    "github.com/btcsuite/btcd/chaincfg"
    "github.com/btcsuite/btcd/btcutil/base58"
    "github.com/btcsuite/btcd/btcutil/bech32"
    "golang.org/x/crypto/ripemd160"
)

// UnsupportedWitnessVerError describes an error where a segwit address being
// decoded has an unsupported witness version.
type UnsupportedWitnessVerError byte

func (e UnsupportedWitnessVerError) Error() string {
    return fmt.Sprintf("unsupported witness version: %#x", byte(e))
}

// UnsupportedWitnessProgLenError describes an error where a segwit address
// being decoded has an unsupported witness program length.
type UnsupportedWitnessProgLenError int

func (e UnsupportedWitnessProgLenError) Error() string {
    return fmt.Sprintf("unsupported witness program length: %d", int(e))
}

var (
    // ErrChecksumMismatch describes an error where decoding failed due
    // to a bad checksum.
    ErrChecksumMismatch = errors.New("checksum mismatch")

    // ErrUnknownAddressType describes an error where an address can not
    // be decoded as a specific address type due to the string encoding
    // beginning with an identifier byte unknown to any standard or
    // registered (via chaincfg.Register) network.
    ErrUnknownAddressType = errors.New("unknown address type")

    // ErrAddressCollision describes an error where an address can not
    // be uniquely determined as either a pay-to-pubkey-hash or
    // pay-to-script-hash address since the leading identifier is used for
    // describing both address kinds, but for different networks. Rather
    // than assuming or defaulting to one or the other, this error is
    // returned and the caller must decide how to decode the address.
    ErrAddressCollision = errors.New("address collision")
)

// encodeAddress returns a human-readable payment address given a ripemd160 hash
// and netID which encodes the bitcoin network and address type. It is used
// in both pay-to-pubkey-hash (P2PKH) and pay-to-script-hash (P2SH) address
// encoding.
func encodeAddress(hash160 []byte, netID byte) string {
    // Format is 1 byte for a network and address class (i.e. P2PKH vs
    // P2SH), 20 bytes for a RIPEMD160 hash, and 4 bytes of checksum.
    return base58.CheckEncode(hash160[:ripemd160.Size], netID)
}

// encodeSegWitAddress creates a bech32 (or bech32m for SegWit v1) encoded
// address string representation from witness version and witness program.
func encodeSegWitAddress(hrp string, witnessVersion byte, witnessProgram []byte) (string, error) {
    // Group the address bytes into 5 bit groups, as this is what is used to
    // encode each character in the address string.
    converted, err := bech32.ConvertBits(witnessProgram, 8, 5, true)
    if err != nil {
        return "", err
    }

    // Concatenate the witness version and program, and encode the resulting
    // bytes using bech32 encoding.
    combined := make([]byte, len(converted)+1)
    combined[0] = witnessVersion
    copy(combined[1:], converted)

    var bech string
    switch witnessVersion {
    case 0:
        bech, err = bech32.Encode(hrp, combined)

    case 1:
        bech, err = bech32.EncodeM(hrp, combined)

    default:
        return "", fmt.Errorf("unsupported witness version %d",
            witnessVersion)
    }
    if err != nil {
        return "", err
    }

    // Check validity by decoding the created address.
    version, program, err := decodeSegWitAddress(bech)
    if err != nil {
        return "", fmt.Errorf("invalid segwit address: %v", err)
    }

    if version != witnessVersion || !bytes.Equal(program, witnessProgram) {
        return "", fmt.Errorf("invalid segwit address")
    }

    return bech, nil
}

// Address is an interface type for any type of destination a transaction
// output may spend to. This includes pay-to-pubkey (P2PK), pay-to-pubkey-hash
// (P2PKH), and pay-to-script-hash (P2SH). Address is designed to be generic
// enough that other kinds of addresses may be added in the future without
// changing the decoding and encoding API.
type Address interface {
    // String returns the string encoding of the transaction output
    // destination.
    //
    // Please note that String differs subtly from EncodeAddress: String
    // will return the value as a string without any conversion, while
    // EncodeAddress may convert destination types (for example,
    // converting pubkeys to P2PKH addresses) before encoding as a
    // payment address string.
    String() string

    // EncodeAddress returns the string encoding of the payment address
    // associated with the Address value. See the comment on String
    // for how this method differs from String.
    EncodeAddress() string

    // ScriptAddress returns the raw bytes of the address to be used
    // when inserting the address into a txout's script.
    ScriptAddress() []byte

    // IsForNet returns whether or not the address is associated with the
    // passed bitcoin network.
    IsForNet(*chaincfg.Params) bool
}

// DecodeAddress decodes the string encoding of an address and returns
// the Address if addr is a valid encoding for a known address type.
//
// The bitcoin network the address is associated with is extracted if possible.
// When the address does not encode the network, such as in the case of a raw
// public key, the address will be associated with the passed defaultNet.
func DecodeAddress(addr string, defaultNet *chaincfg.Params) (Address, error) {
    // Bech32 encoded segwit addresses start with a human-readable part
    // (hrp) followed by '1'. For Bitcoin mainnet the hrp is "bc", and for
    // testnet it is "tb". If the address string has a prefix that matches
    // one of the prefixes for the known networks, we try to decode it as
    // a segwit address.
    oneIndex := strings.LastIndexByte(addr, '1')
    if oneIndex > 1 {
        prefix := addr[:oneIndex+1]
        if chaincfg.IsBech32SegwitPrefix(prefix) {
            witnessVer, witnessProg, err := decodeSegWitAddress(addr)
            if err != nil {
                return nil, err
            }

            // We currently only support P2WPKH and P2WSH, which is
            // witness version 0 and P2TR which is witness version
            // 1.
            if witnessVer != 0 && witnessVer != 1 {
                return nil, UnsupportedWitnessVerError(witnessVer)
            }

            // The HRP is everything before the found '1'.
            hrp := prefix[:len(prefix)-1]

            switch len(witnessProg) {
            case 20:
                return newAddressWitnessPubKeyHash(hrp, witnessProg)
            case 32:
                if witnessVer == 1 {
                    return newAddressTaproot(hrp, witnessProg)
                }

                return newAddressWitnessScriptHash(hrp, witnessProg)
            default:
                return nil, UnsupportedWitnessProgLenError(len(witnessProg))
            }
        }
    }

    // Serialized public keys are either 65 bytes (130 hex chars) if
    // uncompressed/hybrid or 33 bytes (66 hex chars) if compressed.
    if len(addr) == 130 || len(addr) == 66 {
        serializedPubKey, err := hex.DecodeString(addr)
        if err != nil {
            return nil, err
        }
        return NewAddressPubKey(serializedPubKey, defaultNet)
    }

    // Switch on decoded length to determine the type.
    decoded, netID, err := base58.CheckDecode(addr)
    if err != nil {
        if err == base58.ErrChecksum {
            return nil, ErrChecksumMismatch
        }
        return nil, errors.New("decoded address is of unknown format")
    }
    switch len(decoded) {
    case ripemd160.Size: // P2PKH or P2SH
        isP2PKH := netID == defaultNet.PubKeyHashAddrID
        isP2SH := netID == defaultNet.ScriptHashAddrID
        switch hash160 := decoded; {
        case isP2PKH && isP2SH:
            return nil, ErrAddressCollision
        case isP2PKH:
            return newAddressPubKeyHash(hash160, netID)
        case isP2SH:
            return newAddressScriptHashFromHash(hash160, netID)
        default:
            return nil, ErrUnknownAddressType
        }

    default:
        return nil, errors.New("decoded address is of unknown size")
    }
}

// decodeSegWitAddress parses a bech32 encoded segwit address string and
// returns the witness version and witness program byte representation.
func decodeSegWitAddress(address string) (byte, []byte, error) {
    // Decode the bech32 encoded address.
    _, data, bech32version, err := bech32.DecodeGeneric(address)
    if err != nil {
        return 0, nil, err
    }

    // The first byte of the decoded address is the witness version, it must
    // exist.
    if len(data) < 1 {
        return 0, nil, fmt.Errorf("no witness version")
    }

    // ...and be <= 16.
    version := data[0]
    if version > 16 {
        return 0, nil, fmt.Errorf("invalid witness version: %v", version)
    }

    // The remaining characters of the address returned are grouped into
    // words of 5 bits. In order to restore the original witness program
    // bytes, we'll need to regroup into 8 bit words.
    regrouped, err := bech32.ConvertBits(data[1:], 5, 8, false)
    if err != nil {
        return 0, nil, err
    }

    // The regrouped data must be between 2 and 40 bytes.
    if len(regrouped) < 2 || len(regrouped) > 40 {
        return 0, nil, fmt.Errorf("invalid data length")
    }

    // For witness version 0, address MUST be exactly 20 or 32 bytes.
    if version == 0 && len(regrouped) != 20 && len(regrouped) != 32 {
        return 0, nil, fmt.Errorf("invalid data length for witness "+
            "version 0: %v", len(regrouped))
    }

    // For witness version 0, the bech32 encoding must be used.
    if version == 0 && bech32version != bech32.Version0 {
        return 0, nil, fmt.Errorf("invalid checksum expected bech32 " +
            "encoding for address with witness version 0")
    }

    // For witness version 1, the bech32m encoding must be used.
    if version == 1 && bech32version != bech32.VersionM {
        return 0, nil, fmt.Errorf("invalid checksum expected bech32m " +
            "encoding for address with witness version 1")
    }

    return version, regrouped, nil
}

// AddressPubKeyHash is an Address for a pay-to-pubkey-hash (P2PKH)
// transaction.
type AddressPubKeyHash struct {
    hash  [ripemd160.Size]byte
    netID byte
}

// NewAddressPubKeyHash returns a new AddressPubKeyHash. pkHash must be 20
// bytes.
func NewAddressPubKeyHash(pkHash []byte, net *chaincfg.Params) (*AddressPubKeyHash, error) {
    return newAddressPubKeyHash(pkHash, net.PubKeyHashAddrID)
}

// newAddressPubKeyHash is the internal API to create a pubkey hash address
// with a known leading identifier byte for a network, rather than looking
// it up through its parameters. This is useful when creating a new address
// structure from a string encoding where the identifier byte is already
// known.
func newAddressPubKeyHash(pkHash []byte, netID byte) (*AddressPubKeyHash, error) {
    // Check for a valid pubkey hash length.
    if len(pkHash) != ripemd160.Size {
        return nil, errors.New("pkHash must be 20 bytes")
    }

    addr := &AddressPubKeyHash{netID: netID}
    copy(addr.hash[:], pkHash)
    return addr, nil
}

// EncodeAddress returns the string encoding of a pay-to-pubkey-hash
// address. Part of the Address interface.
func (a *AddressPubKeyHash) EncodeAddress() string {
    return encodeAddress(a.hash[:], a.netID)
}

// ScriptAddress returns the bytes to be included in a txout script to pay
// to a pubkey hash. Part of the Address interface.
func (a *AddressPubKeyHash) ScriptAddress() []byte {
    return a.hash[:]
}

// IsForNet returns whether or not the pay-to-pubkey-hash address is associated
// with the passed bitcoin network.
func (a *AddressPubKeyHash) IsForNet(net *chaincfg.Params) bool {
    return a.netID == net.PubKeyHashAddrID
}

// String returns a human-readable string for the pay-to-pubkey-hash address.
// This is equivalent to calling EncodeAddress, but is provided so the type can
// be used as a fmt.Stringer.
func (a *AddressPubKeyHash) String() string {
    return a.EncodeAddress()
}

// Hash160 returns the underlying array of the pubkey hash. This can be useful
// when an array is more appropriate than a slice (for example, when used as map
// keys).
func (a *AddressPubKeyHash) Hash160() *[ripemd160.Size]byte {
    return &a.hash
}

// AddressScriptHash is an Address for a pay-to-script-hash (P2SH)
// transaction.
type AddressScriptHash struct {
    hash  [ripemd160.Size]byte
    netID byte
}

// NewAddressScriptHash returns a new AddressScriptHash.
func NewAddressScriptHash(serializedScript []byte, net *chaincfg.Params) (*AddressScriptHash, error) {
    scriptHash := Hash160(serializedScript)
    return newAddressScriptHashFromHash(scriptHash, net.ScriptHashAddrID)
}

// NewAddressScriptHashFromHash returns a new AddressScriptHash. scriptHash
// must be 20 bytes.
func NewAddressScriptHashFromHash(scriptHash []byte, net *chaincfg.Params) (*AddressScriptHash, error) {
    return newAddressScriptHashFromHash(scriptHash, net.ScriptHashAddrID)
}

// newAddressScriptHashFromHash is the internal API to create a script hash
// address with a known leading identifier byte for a network, rather than
// looking it up through its parameters. This is useful when creating a new
// address structure from a string encoding where the identifier byte is already
// known.
func newAddressScriptHashFromHash(scriptHash []byte, netID byte) (*AddressScriptHash, error) {
    // Check for a valid script hash length.
    if len(scriptHash) != ripemd160.Size {
        return nil, errors.New("scriptHash must be 20 bytes")
    }

    addr := &AddressScriptHash{netID: netID}
    copy(addr.hash[:], scriptHash)
    return addr, nil
}

// EncodeAddress returns the string encoding of a pay-to-script-hash
// address. Part of the Address interface.
func (a *AddressScriptHash) EncodeAddress() string {
    return encodeAddress(a.hash[:], a.netID)
}

// ScriptAddress returns the bytes to be included in a txout script to pay
// to a script hash. Part of the Address interface.
func (a *AddressScriptHash) ScriptAddress() []byte {
    return a.hash[:]
}

// IsForNet returns whether or not the pay-to-script-hash address is associated
// with the passed bitcoin network.
func (a *AddressScriptHash) IsForNet(net *chaincfg.Params) bool {
    return a.netID == net.ScriptHashAddrID
}

// String returns a human-readable string for the pay-to-script-hash address.
// This is equivalent to calling EncodeAddress, but is provided so the type can
// be used as a fmt.Stringer.
func (a *AddressScriptHash) String() string {
    return a.EncodeAddress()
}

// Hash160 returns the underlying array of the script hash. This can be useful
// when an array is more appropriate than a slice (for example, when used as map
// keys).
func (a *AddressScriptHash) Hash160() *[ripemd160.Size]byte {
    return &a.hash
}

// PubKeyFormat describes what format to use for a pay-to-pubkey address.
type PubKeyFormat int

const (
    // PKFUncompressed indicates the pay-to-pubkey address format is an
    // uncompressed public key.
    PKFUncompressed PubKeyFormat = iota

    // PKFCompressed indicates the pay-to-pubkey address format is a
    // compressed public key.
    PKFCompressed

    // PKFHybrid indicates the pay-to-pubkey address format is a hybrid
    // public key.
    PKFHybrid
)

// AddressPubKey is an Address for a pay-to-pubkey transaction.
type AddressPubKey struct {
    pubKeyFormat PubKeyFormat
    pubKey       *btcec.PublicKey
    pubKeyHashID byte
}

// NewAddressPubKey returns a new AddressPubKey which represents a pay-to-pubkey
// address. The serializedPubKey parameter must be a valid pubkey and can be
// uncompressed, compressed, or hybrid.
func NewAddressPubKey(serializedPubKey []byte, net *chaincfg.Params) (*AddressPubKey, error) {
    pubKey, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
    if err != nil {
        return nil, err
    }

    // Set the format of the pubkey. This probably should be returned
    // from btcec, but do it here to avoid API churn. We already know the
    // pubkey is valid since it parsed above, so it's safe to simply examine
    // the leading byte to get the format.
    pkFormat := PKFUncompressed
    switch serializedPubKey[0] {
    case 0x02, 0x03:
        pkFormat = PKFCompressed
    case 0x06, 0x07:
        pkFormat = PKFHybrid
    }

    return &AddressPubKey{
        pubKeyFormat: pkFormat,
        pubKey:       pubKey,
        pubKeyHashID: net.PubKeyHashAddrID,
    }, nil
}

// serialize returns the serialization of the public key according to the
// format associated with the address.
func (a *AddressPubKey) serialize() []byte {
    switch a.pubKeyFormat {
    default:
        fallthrough
    case PKFUncompressed:
        return a.pubKey.SerializeUncompressed()

    case PKFCompressed:
        return a.pubKey.SerializeCompressed()

    case PKFHybrid:
        return a.pubKey.SerializeHybrid()
    }
}

// EncodeAddress returns the string encoding of the public key as a
// pay-to-pubkey-hash. Note that the public key format (uncompressed,
// compressed, etc) will change the resulting address. This is expected since
// pay-to-pubkey-hash is a hash of the serialized public key which obviously
// differs with the format. At the time of this writing, most Bitcoin addresses
// are pay-to-pubkey-hash constructed from the uncompressed public key.
//
// Part of the Address interface.
func (a *AddressPubKey) EncodeAddress() string {
    return encodeAddress(Hash160(a.serialize()), a.pubKeyHashID)
}

// ScriptAddress returns the bytes to be included in a txout script to pay
// to a public key. Setting the public key format will affect the output of
// this function accordingly. Part of the Address interface.
func (a *AddressPubKey) ScriptAddress() []byte {
    return a.serialize()
}

// IsForNet returns whether or not the pay-to-pubkey address is associated
// with the passed bitcoin network.
func (a *AddressPubKey) IsForNet(net *chaincfg.Params) bool {
    return a.pubKeyHashID == net.PubKeyHashAddrID
}

// String returns the hex-encoded human-readable string for the pay-to-pubkey
// address. This is not the same as calling EncodeAddress.
func (a *AddressPubKey) String() string {
    return hex.EncodeToString(a.serialize())
}

// Format returns the format (uncompressed, compressed, etc) of the
// pay-to-pubkey address.
func (a *AddressPubKey) Format() PubKeyFormat {
    return a.pubKeyFormat
}

// SetFormat sets the format (uncompressed, compressed, etc) of the
// pay-to-pubkey address.
func (a *AddressPubKey) SetFormat(pkFormat PubKeyFormat) {
    a.pubKeyFormat = pkFormat
}

// AddressPubKeyHash returns the pay-to-pubkey address converted to a
// pay-to-pubkey-hash address. Note that the public key format (uncompressed,
// compressed, etc) will change the resulting address. This is expected since
// pay-to-pubkey-hash is a hash of the serialized public key which obviously
// differs with the format. At the time of this writing, most Bitcoin addresses
// are pay-to-pubkey-hash constructed from the uncompressed public key.
func (a *AddressPubKey) AddressPubKeyHash() *AddressPubKeyHash {
    addr := &AddressPubKeyHash{netID: a.pubKeyHashID}
    copy(addr.hash[:], Hash160(a.serialize()))
    return addr
}

// PubKey returns the underlying public key for the address.
func (a *AddressPubKey) PubKey() *btcec.PublicKey {
    return a.pubKey
}

// AddressSegWit is the base address type for all SegWit addresses.
type AddressSegWit struct {
    hrp            string
    witnessVersion byte
    witnessProgram []byte
}

// EncodeAddress returns the bech32 (or bech32m for SegWit v1) string encoding
// of an AddressSegWit.
//
// NOTE: This method is part of the Address interface.
func (a *AddressSegWit) EncodeAddress() string {
    str, err := encodeSegWitAddress(
        a.hrp, a.witnessVersion, a.witnessProgram[:],
    )
    if err != nil {
        return ""
    }
    return str
}

// ScriptAddress returns the witness program for this address.
//
// NOTE: This method is part of the Address interface.
func (a *AddressSegWit) ScriptAddress() []byte {
    return a.witnessProgram[:]
}

// IsForNet returns whether the AddressSegWit is associated with the passed
// bitcoin network.
//
// NOTE: This method is part of the Address interface.
func (a *AddressSegWit) IsForNet(net *chaincfg.Params) bool {
    return a.hrp == net.Bech32HRPSegwit
}

// String returns a human-readable string for the AddressWitnessPubKeyHash.
// This is equivalent to calling EncodeAddress, but is provided so the type
// can be used as a fmt.Stringer.
//
// NOTE: This method is part of the Address interface.
func (a *AddressSegWit) String() string {
    return a.EncodeAddress()
}

// Hrp returns the human-readable part of the bech32 (or bech32m for SegWit v1)
// encoded AddressSegWit.
func (a *AddressSegWit) Hrp() string {
    return a.hrp
}

// WitnessVersion returns the witness version of the AddressSegWit.
func (a *AddressSegWit) WitnessVersion() byte {
    return a.witnessVersion
}

// WitnessProgram returns the witness program of the AddressSegWit.
func (a *AddressSegWit) WitnessProgram() []byte {
    return a.witnessProgram[:]
}

// AddressWitnessPubKeyHash is an Address for a pay-to-witness-pubkey-hash
// (P2WPKH) output. See BIP 173 for further details regarding native segregated
// witness address encoding:
// https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki
type AddressWitnessPubKeyHash struct {
    AddressSegWit
}

// NewAddressWitnessPubKeyHash returns a new AddressWitnessPubKeyHash.
func NewAddressWitnessPubKeyHash(witnessProg []byte,
    net *chaincfg.Params) (*AddressWitnessPubKeyHash, error) {

    return newAddressWitnessPubKeyHash(net.Bech32HRPSegwit, witnessProg)
}

// newAddressWitnessPubKeyHash is an internal helper function to create an
// AddressWitnessPubKeyHash with a known human-readable part, rather than
// looking it up through its parameters.
func newAddressWitnessPubKeyHash(hrp string,
    witnessProg []byte) (*AddressWitnessPubKeyHash, error) {

    // Check for valid program length for witness version 0, which is 20
    // for P2WPKH.
    if len(witnessProg) != 20 {
        return nil, errors.New("witness program must be 20 " +
            "bytes for p2wpkh")
    }

    addr := &AddressWitnessPubKeyHash{
        AddressSegWit{
            hrp:            strings.ToLower(hrp),
            witnessVersion: 0x00,
            witnessProgram: witnessProg,
        },
    }

    return addr, nil
}

// Hash160 returns the witness program of the AddressWitnessPubKeyHash as a
// byte array.
func (a *AddressWitnessPubKeyHash) Hash160() *[20]byte {
    var pubKeyHashWitnessProgram [20]byte
    copy(pubKeyHashWitnessProgram[:], a.witnessProgram)
    return &pubKeyHashWitnessProgram
}

// AddressWitnessScriptHash is an Address for a pay-to-witness-script-hash
// (P2WSH) output. See BIP 173 for further details regarding native segregated
// witness address encoding:
// https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki
type AddressWitnessScriptHash struct {
    AddressSegWit
}

// NewAddressWitnessScriptHash returns a new AddressWitnessScriptHash.
func NewAddressWitnessScriptHash(witnessProg []byte,
    net *chaincfg.Params) (*AddressWitnessScriptHash, error) {

    return newAddressWitnessScriptHash(net.Bech32HRPSegwit, witnessProg)
}

// newAddressWitnessScriptHash is an internal helper function to create an
// AddressWitnessScriptHash with a known human-readable part, rather than
// looking it up through its parameters.
func newAddressWitnessScriptHash(hrp string,
    witnessProg []byte) (*AddressWitnessScriptHash, error) {

    // Check for valid program length for witness version 0, which is 32
    // for P2WSH.
    if len(witnessProg) != 32 {
        return nil, errors.New("witness program must be 32 " +
            "bytes for p2wsh")
    }

    addr := &AddressWitnessScriptHash{
        AddressSegWit{
            hrp:            strings.ToLower(hrp),
            witnessVersion: 0x00,
            witnessProgram: witnessProg,
        },
    }

    return addr, nil
}

// AddressTaproot is an Address for a pay-to-taproot (P2TR) output. See BIP 341
// for further details.
type AddressTaproot struct {
    AddressSegWit
}

// NewAddressTaproot returns a new AddressTaproot.
func NewAddressTaproot(witnessProg []byte,
    net *chaincfg.Params) (*AddressTaproot, error) {

    return newAddressTaproot(net.Bech32HRPSegwit, witnessProg)
}

// newAddressTaproot is an internal helper function to create an
// AddressTaproot with a known human-readable part, rather than
// looking it up through its parameters.
func newAddressTaproot(hrp string,
    witnessProg []byte) (*AddressTaproot, error) {

    // Check for valid program length for witness version 1, which is 32
    // for P2TR.
    if len(witnessProg) != 32 {
        return nil, errors.New("witness program must be 32 bytes for " +
            "p2tr")
    }

    addr := &AddressTaproot{
        AddressSegWit{
            hrp:            strings.ToLower(hrp),
            witnessVersion: 0x01,
            witnessProgram: witnessProg,
        },
    }

    return addr, nil
}

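As a quick orientation for the address API added above (illustrative only, not part of this diff), here is a minimal sketch that decodes a base58 P2PKH address and a bech32 P2WPKH address through `DecodeAddress` and inspects them via the `Address` interface; the sample address strings are well-known public test vectors.

```go
package main

import (
    "fmt"
    "log"

    "github.com/btcsuite/btcd/btcutil"
    "github.com/btcsuite/btcd/chaincfg"
)

func main() {
    // One legacy base58check (P2PKH) address and one bech32 (P2WPKH)
    // address; both go through the same DecodeAddress entry point.
    for _, s := range []string{
        "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa",
        "bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4",
    } {
        addr, err := btcutil.DecodeAddress(s, &chaincfg.MainNetParams)
        if err != nil {
            log.Fatal(err)
        }

        // EncodeAddress round-trips the payment address string, while
        // ScriptAddress exposes the raw hash160/witness program bytes.
        fmt.Printf("%T %s (%d script bytes, mainnet: %v)\n",
            addr, addr.EncodeAddress(), len(addr.ScriptAddress()),
            addr.IsForNet(&chaincfg.MainNetParams))
    }
}
```
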
btcutil/address_test.go (new file, 1012 lines)
File diff suppressed because it is too large.

btcutil/amount.go (new file, 122 lines)
@@ -0,0 +1,122 @@
// Copyright (c) 2013, 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcutil

import (
    "errors"
    "math"
    "strconv"
)

// AmountUnit describes a method of converting an Amount to something
// other than the base unit of a bitcoin. The value of the AmountUnit
// is the exponent component of the decadic multiple to convert from
// an amount in bitcoin to an amount counted in units.
type AmountUnit int

// These constants define various units used when describing a bitcoin
// monetary amount.
const (
    AmountMegaBTC  AmountUnit = 6
    AmountKiloBTC  AmountUnit = 3
    AmountBTC      AmountUnit = 0
    AmountMilliBTC AmountUnit = -3
    AmountMicroBTC AmountUnit = -6
    AmountSatoshi  AmountUnit = -8
)

// String returns the unit as a string. For recognized units, the SI
// prefix is used, or "Satoshi" for the base unit. For all unrecognized
// units, "1eN BTC" is returned, where N is the AmountUnit.
func (u AmountUnit) String() string {
    switch u {
    case AmountMegaBTC:
        return "MBTC"
    case AmountKiloBTC:
        return "kBTC"
    case AmountBTC:
        return "BTC"
    case AmountMilliBTC:
        return "mBTC"
    case AmountMicroBTC:
        return "μBTC"
    case AmountSatoshi:
        return "Satoshi"
    default:
        return "1e" + strconv.FormatInt(int64(u), 10) + " BTC"
    }
}

// Amount represents the base bitcoin monetary unit (colloquially referred
// to as a `Satoshi'). A single Amount is equal to 1e-8 of a bitcoin.
type Amount int64

// round converts a floating point number, which may or may not be representable
// as an integer, to the Amount integer type by rounding to the nearest integer.
// This is performed by adding or subtracting 0.5 depending on the sign, and
// relying on integer truncation to round the value to the nearest Amount.
func round(f float64) Amount {
    if f < 0 {
        return Amount(f - 0.5)
    }
    return Amount(f + 0.5)
}

// NewAmount creates an Amount from a floating point value representing
// some value in bitcoin. NewAmount errors if f is NaN or +-Infinity, but
// does not check that the amount is within the total amount of bitcoin
// producible as f may not refer to an amount at a single moment in time.
//
// NewAmount is specifically for converting BTC to Satoshi.
// For creating a new Amount with an int64 value which denotes a quantity of Satoshi,
// do a simple type conversion from type int64 to Amount.
// See GoDoc for example: http://godoc.org/github.com/btcsuite/btcd/btcutil#example-Amount
func NewAmount(f float64) (Amount, error) {
    // The amount is only considered invalid if it cannot be represented
    // as an integer type. This may happen if f is NaN or +-Infinity.
    switch {
    case math.IsNaN(f):
        fallthrough
    case math.IsInf(f, 1):
        fallthrough
    case math.IsInf(f, -1):
        return 0, errors.New("invalid bitcoin amount")
    }

    return round(f * SatoshiPerBitcoin), nil
}

// ToUnit converts a monetary amount counted in bitcoin base units to a
// floating point value representing an amount of bitcoin.
func (a Amount) ToUnit(u AmountUnit) float64 {
    return float64(a) / math.Pow10(int(u+8))
}

// ToBTC is the equivalent of calling ToUnit with AmountBTC.
func (a Amount) ToBTC() float64 {
    return a.ToUnit(AmountBTC)
}

// Format formats a monetary amount counted in bitcoin base units as a
// string for a given unit. The conversion will succeed for any unit,
// however, known units will be formatted with an appended label describing
// the units with SI notation, or "Satoshi" for the base unit.
func (a Amount) Format(u AmountUnit) string {
    units := " " + u.String()
    return strconv.FormatFloat(a.ToUnit(u), 'f', -int(u+8), 64) + units
}

// String is the equivalent of calling Format with AmountBTC.
func (a Amount) String() string {
    return a.Format(AmountBTC)
}

// MulF64 multiplies an Amount by a floating point value. While this is not
// an operation that must typically be done by a full node or wallet, it is
// useful for services that build on top of bitcoin (for example, calculating
// a fee by multiplying by a percentage).
func (a Amount) MulF64(f float64) Amount {
    return round(float64(a) * f)
}

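To make the unit handling above concrete (illustrative only, not part of this diff): an `Amount` is an integer number of satoshis, `NewAmount` converts a BTC-denominated float, and `ToUnit`, `Format`, and `MulF64` handle conversion and fee-style arithmetic. The expected outputs in the comments follow directly from the definitions in the file above.

```go
package main

import (
    "fmt"
    "log"

    "github.com/btcsuite/btcd/btcutil"
)

func main() {
    // NewAmount converts a BTC-denominated float to integer satoshis.
    amt, err := btcutil.NewAmount(0.01234567)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(int64(amt)) // 1234567

    // ToUnit/Format convert back out to any unit.
    fmt.Println(amt.ToUnit(btcutil.AmountMilliBTC)) // 12.34567
    fmt.Println(amt.Format(btcutil.AmountSatoshi))  // 1234567 Satoshi
    fmt.Println(amt)                                // 0.01234567 BTC

    // MulF64 is handy for percentage-style fee math; it rounds to the
    // nearest satoshi.
    fmt.Println(amt.MulF64(0.001)) // 0.00001235 BTC
}
```
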
btcutil/amount_test.go (new file, 309 lines)
@@ -0,0 +1,309 @@
// Copyright (c) 2013, 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcutil_test

import (
    "math"
    "testing"

    . "github.com/btcsuite/btcd/btcutil"
)

func TestAmountCreation(t *testing.T) {
    tests := []struct {
        name     string
        amount   float64
        valid    bool
        expected Amount
    }{
        // Positive tests.
        {
            name:     "zero",
            amount:   0,
            valid:    true,
            expected: 0,
        },
        {
            name:     "max producible",
            amount:   21e6,
            valid:    true,
            expected: MaxSatoshi,
        },
        {
            name:     "min producible",
            amount:   -21e6,
            valid:    true,
            expected: -MaxSatoshi,
        },
        {
            name:     "exceeds max producible",
            amount:   21e6 + 1e-8,
            valid:    true,
            expected: MaxSatoshi + 1,
        },
        {
            name:     "exceeds min producible",
            amount:   -21e6 - 1e-8,
            valid:    true,
            expected: -MaxSatoshi - 1,
        },
        {
            name:     "one hundred",
            amount:   100,
            valid:    true,
            expected: 100 * SatoshiPerBitcoin,
        },
        {
            name:     "fraction",
            amount:   0.01234567,
            valid:    true,
            expected: 1234567,
        },
        {
            name:     "rounding up",
            amount:   54.999999999999943157,
            valid:    true,
            expected: 55 * SatoshiPerBitcoin,
        },
        {
            name:     "rounding down",
            amount:   55.000000000000056843,
            valid:    true,
            expected: 55 * SatoshiPerBitcoin,
        },

        // Negative tests.
        {
            name:   "not-a-number",
            amount: math.NaN(),
            valid:  false,
        },
        {
            name:   "-infinity",
            amount: math.Inf(-1),
            valid:  false,
        },
        {
            name:   "+infinity",
            amount: math.Inf(1),
            valid:  false,
        },
    }

    for _, test := range tests {
        a, err := NewAmount(test.amount)
        switch {
        case test.valid && err != nil:
            t.Errorf("%v: Positive test Amount creation failed with: %v", test.name, err)
            continue
        case !test.valid && err == nil:
            t.Errorf("%v: Negative test Amount creation succeeded (value %v) when should fail", test.name, a)
            continue
        }

        if a != test.expected {
            t.Errorf("%v: Created amount %v does not match expected %v", test.name, a, test.expected)
            continue
        }
    }
}

func TestAmountUnitConversions(t *testing.T) {
    tests := []struct {
        name      string
        amount    Amount
        unit      AmountUnit
        converted float64
        s         string
    }{
        {
            name:      "MBTC",
            amount:    MaxSatoshi,
            unit:      AmountMegaBTC,
            converted: 21,
            s:         "21 MBTC",
        },
        {
            name:      "kBTC",
            amount:    44433322211100,
            unit:      AmountKiloBTC,
            converted: 444.33322211100,
            s:         "444.333222111 kBTC",
        },
        {
            name:      "BTC",
            amount:    44433322211100,
            unit:      AmountBTC,
            converted: 444333.22211100,
            s:         "444333.222111 BTC",
        },
        {
            name:      "mBTC",
            amount:    44433322211100,
            unit:      AmountMilliBTC,
            converted: 444333222.11100,
            s:         "444333222.111 mBTC",
        },
        {
            name:      "μBTC",
            amount:    44433322211100,
            unit:      AmountMicroBTC,
            converted: 444333222111.00,
            s:         "444333222111 μBTC",
        },
        {
            name:      "satoshi",
            amount:    44433322211100,
            unit:      AmountSatoshi,
            converted: 44433322211100,
            s:         "44433322211100 Satoshi",
        },
        {
            name:      "non-standard unit",
            amount:    44433322211100,
            unit:      AmountUnit(-1),
            converted: 4443332.2211100,
            s:         "4443332.22111 1e-1 BTC",
        },
    }

    for _, test := range tests {
        f := test.amount.ToUnit(test.unit)
        if f != test.converted {
            t.Errorf("%v: converted value %v does not match expected %v", test.name, f, test.converted)
            continue
        }

        s := test.amount.Format(test.unit)
        if s != test.s {
            t.Errorf("%v: format '%v' does not match expected '%v'", test.name, s, test.s)
            continue
        }

        // Verify that Amount.ToBTC works as advertised.
        f1 := test.amount.ToUnit(AmountBTC)
        f2 := test.amount.ToBTC()
        if f1 != f2 {
            t.Errorf("%v: ToBTC does not match ToUnit(AmountBTC): %v != %v", test.name, f1, f2)
        }

        // Verify that Amount.String works as advertised.
        s1 := test.amount.Format(AmountBTC)
        s2 := test.amount.String()
        if s1 != s2 {
            t.Errorf("%v: String does not match Format(AmountBTC): %v != %v", test.name, s1, s2)
        }
    }
}

func TestAmountMulF64(t *testing.T) {
    tests := []struct {
        name string
        amt  Amount
        mul  float64
        res  Amount
    }{
        {
            name: "Multiply 0.1 BTC by 2",
            amt:  100e5, // 0.1 BTC
            mul:  2,
            res:  200e5, // 0.2 BTC
        },
        {
            name: "Multiply 0.2 BTC by 1.02",
            amt:  200e5, // 0.2 BTC
            mul:  1.02,
            res:  204e5, // 0.204 BTC
        },
        {
            name: "Multiply 0.1 BTC by -2",
            amt:  100e5, // 0.1 BTC
            mul:  -2,
            res:  -200e5, // -0.2 BTC
        },
        {
            name: "Multiply 0.2 BTC by -1.02",
            amt:  200e5, // 0.2 BTC
            mul:  -1.02,
            res:  -204e5, // -0.204 BTC
        },
        {
            name: "Multiply -0.1 BTC by 2",
            amt:  -100e5, // -0.1 BTC
            mul:  2,
            res:  -200e5, // -0.2 BTC
        },
        {
            name: "Multiply -0.2 BTC by 1.02",
            amt:  -200e5, // -0.2 BTC
            mul:  1.02,
            res:  -204e5, // -0.204 BTC
        },
        {
            name: "Multiply -0.1 BTC by -2",
            amt:  -100e5, // -0.1 BTC
            mul:  -2,
            res:  200e5, // 0.2 BTC
        },
        {
            name: "Multiply -0.2 BTC by -1.02",
            amt:  -200e5, // -0.2 BTC
            mul:  -1.02,
            res:  204e5, // 0.204 BTC
        },
        {
            name: "Round down",
            amt:  49, // 49 Satoshis
            mul:  0.01,
            res:  0,
        },
        {
            name: "Round up",
            amt:  50, // 50 Satoshis
            mul:  0.01,
            res:  1, // 1 Satoshi
        },
        {
            name: "Multiply by 0.",
            amt:  1e8, // 1 BTC
            mul:  0,
            res:  0, // 0 BTC
        },
        {
            name: "Multiply 1 by 0.5.",
            amt:  1, // 1 Satoshi
            mul:  0.5,
            res:  1, // 1 Satoshi
        },
        {
            name: "Multiply 100 by 66%.",
            amt:  100, // 100 Satoshis
            mul:  0.66,
            res:  66, // 66 Satoshis
        },
        {
            name: "Multiply 100 by 66.6%.",
            amt:  100, // 100 Satoshis
            mul:  0.666,
            res:  67, // 67 Satoshis
        },
        {
            name: "Multiply 100 by 2/3.",
            amt:  100, // 100 Satoshis
            mul:  2.0 / 3,
            res:  67, // 67 Satoshis
        },
    }

    for _, test := range tests {
        a := test.amt.MulF64(test.mul)
        if a != test.res {
            t.Errorf("%v: expected %v got %v", test.name, test.res, a)
        }
    }
}

btcutil/appdata.go (new file, 105 lines)
@@ -0,0 +1,105 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcutil

import (
    "os"
    "os/user"
    "path/filepath"
    "runtime"
    "strings"
    "unicode"
)

// appDataDir returns an operating system specific directory to be used for
// storing application data for an application. See AppDataDir for more
// details. This unexported version takes an operating system argument
// primarily to enable the testing package to properly test the function by
// forcing an operating system that is not the current one.
func appDataDir(goos, appName string, roaming bool) string {
    if appName == "" || appName == "." {
        return "."
    }

    // The caller really shouldn't prepend the appName with a period, but
    // if they do, handle it gracefully by trimming it.
    appName = strings.TrimPrefix(appName, ".")
    appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
    appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]

    // Get the OS specific home directory via the Go standard lib.
    var homeDir string
    usr, err := user.Current()
    if err == nil {
        homeDir = usr.HomeDir
    }

    // Fall back to standard HOME environment variable that works
    // for most POSIX OSes if the directory from the Go standard
    // lib failed.
    if err != nil || homeDir == "" {
        homeDir = os.Getenv("HOME")
    }

    switch goos {
    // Attempt to use the LOCALAPPDATA or APPDATA environment variable on
    // Windows.
    case "windows":
        // Windows XP and before didn't have a LOCALAPPDATA, so fallback
        // to regular APPDATA when LOCALAPPDATA is not set.
        appData := os.Getenv("LOCALAPPDATA")
        if roaming || appData == "" {
            appData = os.Getenv("APPDATA")
        }

        if appData != "" {
            return filepath.Join(appData, appNameUpper)
        }

    case "darwin":
        if homeDir != "" {
            return filepath.Join(homeDir, "Library",
                "Application Support", appNameUpper)
        }

    case "plan9":
        if homeDir != "" {
            return filepath.Join(homeDir, appNameLower)
        }

    default:
        if homeDir != "" {
            return filepath.Join(homeDir, "."+appNameLower)
        }
    }

    // Fall back to the current directory if all else fails.
    return "."
}

// AppDataDir returns an operating system specific directory to be used for
// storing application data for an application.
//
// The appName parameter is the name of the application the data directory is
// being requested for. This function will prepend a period to the appName for
// POSIX style operating systems since that is standard practice. An empty
// appName or one with a single dot is treated as requesting the current
// directory so only "." will be returned. Further, the first character
// of appName will be made lowercase for POSIX style operating systems and
// uppercase for Mac and Windows since that is standard practice.
//
// The roaming parameter only applies to Windows where it specifies the roaming
// application data profile (%APPDATA%) should be used instead of the local one
// (%LOCALAPPDATA%) that is used by default.
//
// Example results:
//   dir := AppDataDir("myapp", false)
//    POSIX (Linux/BSD): ~/.myapp
//    Mac OS: $HOME/Library/Application Support/Myapp
//    Windows: %LOCALAPPDATA%\Myapp
//    Plan 9: $home/myapp
func AppDataDir(appName string, roaming bool) string {
    return appDataDir(runtime.GOOS, appName, roaming)
}

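A small illustrative sketch (not part of this diff) of calling `AppDataDir`; the path printed depends on the operating system, as listed in the doc comment above.

```go
package main

import (
    "fmt"

    "github.com/btcsuite/btcd/btcutil"
)

func main() {
    // Local (non-roaming) per-user application data directory, e.g.
    // ~/.myapp on Linux/BSD or %LOCALAPPDATA%\Myapp on Windows.
    fmt.Println(btcutil.AppDataDir("myapp", false))

    // On Windows, passing roaming=true selects %APPDATA% instead.
    fmt.Println(btcutil.AppDataDir("myapp", true))
}
```
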
btcutil/appdata_test.go (new file, 133 lines)
@@ -0,0 +1,133 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcutil_test

import (
    "os"
    "os/user"
    "path/filepath"
    "runtime"
    "testing"
    "unicode"

    "github.com/btcsuite/btcd/btcutil"
)

// TestAppDataDir tests the API for AppDataDir to ensure it gives expected
// results for various operating systems.
func TestAppDataDir(t *testing.T) {
    // App name plus upper and lowercase variants.
    appName := "myapp"
    appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
    appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]

    // When we're on Windows, set the expected local and roaming directories
    // per the environment vars. When we aren't on Windows, the function
    // should return the current directory when forced to provide the
    // Windows path since the environment variables won't exist.
    winLocal := "."
    winRoaming := "."
    if runtime.GOOS == "windows" {
        localAppData := os.Getenv("LOCALAPPDATA")
        roamingAppData := os.Getenv("APPDATA")
        if localAppData == "" {
            localAppData = roamingAppData
        }
        winLocal = filepath.Join(localAppData, appNameUpper)
        winRoaming = filepath.Join(roamingAppData, appNameUpper)
    }

    // Get the home directory to use for testing expected results.
    var homeDir string
    usr, err := user.Current()
    if err != nil {
        t.Errorf("user.Current: %v", err)
        return
    }
    homeDir = usr.HomeDir

    // Mac app data directory.
    macAppData := filepath.Join(homeDir, "Library", "Application Support")

    tests := []struct {
        goos    string
        appName string
        roaming bool
        want    string
    }{
        // Various combinations of application name casing, leading
        // period, operating system, and roaming flags.
        {"windows", appNameLower, false, winLocal},
        {"windows", appNameUpper, false, winLocal},
        {"windows", "." + appNameLower, false, winLocal},
        {"windows", "." + appNameUpper, false, winLocal},
        {"windows", appNameLower, true, winRoaming},
        {"windows", appNameUpper, true, winRoaming},
        {"windows", "." + appNameLower, true, winRoaming},
        {"windows", "." + appNameUpper, true, winRoaming},
        {"linux", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
        {"linux", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
        {"linux", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
        {"linux", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
        {"darwin", appNameLower, false, filepath.Join(macAppData, appNameUpper)},
        {"darwin", appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
        {"darwin", "." + appNameLower, false, filepath.Join(macAppData, appNameUpper)},
        {"darwin", "." + appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
        {"openbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
        {"openbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
        {"openbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
        {"openbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
        {"freebsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
        {"freebsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
        {"freebsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
        {"freebsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
        {"netbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
        {"netbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
        {"netbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
        {"netbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
        {"plan9", appNameLower, false, filepath.Join(homeDir, appNameLower)},
        {"plan9", appNameUpper, false, filepath.Join(homeDir, appNameLower)},
        {"plan9", "." + appNameLower, false, filepath.Join(homeDir, appNameLower)},
        {"plan9", "." + appNameUpper, false, filepath.Join(homeDir, appNameLower)},
        {"unrecognized", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
        {"unrecognized", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
        {"unrecognized", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
        {"unrecognized", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},

        // No application name provided, so expect current directory.
        {"windows", "", false, "."},
        {"windows", "", true, "."},
        {"linux", "", false, "."},
        {"darwin", "", false, "."},
        {"openbsd", "", false, "."},
        {"freebsd", "", false, "."},
        {"netbsd", "", false, "."},
        {"plan9", "", false, "."},
        {"unrecognized", "", false, "."},

        // Single dot provided for application name, so expect current
        // directory.
        {"windows", ".", false, "."},
        {"windows", ".", true, "."},
        {"linux", ".", false, "."},
        {"darwin", ".", false, "."},
        {"openbsd", ".", false, "."},
        {"freebsd", ".", false, "."},
        {"netbsd", ".", false, "."},
        {"plan9", ".", false, "."},
        {"unrecognized", ".", false, "."},
    }

    t.Logf("Running %d tests", len(tests))
    for i, test := range tests {
        ret := btcutil.TstAppDataDir(test.goos, test.appName, test.roaming)
        if ret != test.want {
            t.Errorf("appDataDir #%d (%s) does not match - "+
                "got %s, want %s", i, test.goos, ret,
                test.want)
            continue
        }
    }
}

btcutil/base58/README.md (new file, 34 lines)
@@ -0,0 +1,34 @@
base58
==========

[](https://travis-ci.org/btcsuite/btcutil)
[](http://copyfree.org)
[](http://godoc.org/github.com/btcsuite/btcd/btcutil/base58)

Package base58 provides an API for encoding and decoding to and from the
modified base58 encoding. It also provides an API to do Base58Check encoding,
as described [here](https://en.bitcoin.it/wiki/Base58Check_encoding).

A comprehensive suite of tests is provided to ensure proper functionality.

## Installation and Updating

```bash
$ go get -u github.com/btcsuite/btcd/btcutil/base58
```

## Examples

* [Decode Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/base58#example-Decode)
  Demonstrates how to decode modified base58 encoded data.
* [Encode Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/base58#example-Encode)
  Demonstrates how to encode data using the modified base58 encoding scheme.
* [CheckDecode Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/base58#example-CheckDecode)
  Demonstrates how to decode Base58Check encoded data.
* [CheckEncode Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/base58#example-CheckEncode)
  Demonstrates how to encode data using the Base58Check encoding scheme.

## License

Package base58 is licensed under the [copyfree](http://copyfree.org) ISC
License.

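For orientation (illustrative only, not part of this diff), a minimal Base58Check round trip with this package; the version byte `0x00` used here is the Bitcoin mainnet P2PKH prefix, and the payload bytes are arbitrary example data.

```go
package main

import (
    "fmt"
    "log"

    "github.com/btcsuite/btcd/btcutil/base58"
)

func main() {
    payload := []byte{0x01, 0x02, 0x03, 0x04}

    // CheckEncode prepends the version byte and appends a 4-byte
    // double-SHA256 checksum before base58 encoding.
    encoded := base58.CheckEncode(payload, 0x00)
    fmt.Println(encoded)

    // CheckDecode verifies the checksum and returns the payload and
    // version byte.
    decoded, version, err := base58.CheckDecode(encoded)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("payload=%x version=%#x\n", decoded, version)

    // Plain Encode/Decode skip the version byte and checksum.
    fmt.Printf("%x\n", base58.Decode(base58.Encode(payload)))
}
```
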
49
btcutil/base58/alphabet.go
Normal file
49
btcutil/base58/alphabet.go
Normal file
|
@ -0,0 +1,49 @@
|
|||
// Copyright (c) 2015 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// AUTOGENERATED by genalphabet.go; do not edit.
|
||||
|
||||
package base58
|
||||
|
||||
const (
|
||||
// alphabet is the modified base58 alphabet used by Bitcoin.
|
||||
alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
|
||||
|
||||
alphabetIdx0 = '1'
|
||||
)
|
||||
|
||||
var b58 = [256]byte{
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 0, 1, 2, 3, 4, 5, 6,
|
||||
7, 8, 255, 255, 255, 255, 255, 255,
|
||||
255, 9, 10, 11, 12, 13, 14, 15,
|
||||
16, 255, 17, 18, 19, 20, 21, 255,
|
||||
22, 23, 24, 25, 26, 27, 28, 29,
|
||||
30, 31, 32, 255, 255, 255, 255, 255,
|
||||
255, 33, 34, 35, 36, 37, 38, 39,
|
||||
40, 41, 42, 43, 255, 44, 45, 46,
|
||||
47, 48, 49, 50, 51, 52, 53, 54,
|
||||
55, 56, 57, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
}
138
btcutil/base58/base58.go
Normal file
@@ -0,0 +1,138 @@
// Copyright (c) 2013-2015 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package base58
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
)
|
||||
|
||||
//go:generate go run genalphabet.go
|
||||
|
||||
var bigRadix = [...]*big.Int{
|
||||
big.NewInt(0),
|
||||
big.NewInt(58),
|
||||
big.NewInt(58 * 58),
|
||||
big.NewInt(58 * 58 * 58),
|
||||
big.NewInt(58 * 58 * 58 * 58),
|
||||
big.NewInt(58 * 58 * 58 * 58 * 58),
|
||||
big.NewInt(58 * 58 * 58 * 58 * 58 * 58),
|
||||
big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58),
|
||||
big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58 * 58),
|
||||
big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58 * 58 * 58),
|
||||
bigRadix10,
|
||||
}
|
||||
|
||||
var bigRadix10 = big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58 * 58 * 58 * 58) // 58^10
|
||||
|
||||
// Decode decodes a modified base58 string to a byte slice.
|
||||
func Decode(b string) []byte {
|
||||
answer := big.NewInt(0)
|
||||
scratch := new(big.Int)
|
||||
|
||||
// Calculating with big.Int is slow for each iteration.
|
||||
// x += b58[b[i]] * j
|
||||
// j *= 58
|
||||
//
|
||||
// Instead we can try to do as much of the calculation as possible on int64.
|
||||
// We can represent a 10 digit base58 number using an int64.
|
||||
//
|
||||
// Hence we'll try to convert 10 base58 digits at a time.
|
||||
// The rough idea is to calculate `t`, such that:
|
||||
//
|
||||
// t := b58[b[i+9]] * 58^9 ... + b58[b[i+1]] * 58^1 + b58[b[i]] * 58^0
|
||||
// x *= 58^10
|
||||
// x += t
|
||||
//
|
||||
// Of course, in addition, we'll need to handle the boundary condition when the length of `b` is not a multiple of 10 digits.
|
||||
// In that case we'll use the bigRadix[n] lookup for the appropriate power.
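// Note that 58^10 ≈ 4.31e17, which is comfortably below the maximum
// uint64 value of ≈1.8e19, so accumulating up to 10 digits into `total`
// below cannot overflow.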
|
||||
for t := b; len(t) > 0; {
|
||||
n := len(t)
|
||||
if n > 10 {
|
||||
n = 10
|
||||
}
|
||||
|
||||
total := uint64(0)
|
||||
for _, v := range t[:n] {
|
||||
tmp := b58[v]
|
||||
if tmp == 255 {
|
||||
return []byte("")
|
||||
}
|
||||
total = total*58 + uint64(tmp)
|
||||
}
|
||||
|
||||
answer.Mul(answer, bigRadix[n])
|
||||
scratch.SetUint64(total)
|
||||
answer.Add(answer, scratch)
|
||||
|
||||
t = t[n:]
|
||||
}
|
||||
|
||||
tmpval := answer.Bytes()
|
||||
|
||||
var numZeros int
|
||||
for numZeros = 0; numZeros < len(b); numZeros++ {
|
||||
if b[numZeros] != alphabetIdx0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
flen := numZeros + len(tmpval)
|
||||
val := make([]byte, flen)
|
||||
copy(val[numZeros:], tmpval)
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
// Encode encodes a byte slice to a modified base58 string.
|
||||
func Encode(b []byte) string {
|
||||
x := new(big.Int)
|
||||
x.SetBytes(b)
|
||||
|
||||
// maximum length of output is log58(2^(8*len(b))) == len(b) * 8 / log2(58) ≈ len(b) * 1.36566
|
||||
maxlen := int(float64(len(b))*1.365658237309761) + 1
|
||||
answer := make([]byte, 0, maxlen)
|
||||
mod := new(big.Int)
|
||||
for x.Sign() > 0 {
|
||||
// Calculating with big.Int is slow for each iteration.
|
||||
// x, mod = x / 58, x % 58
|
||||
//
|
||||
// Instead we can try to do as much of the calculation as possible on int64.
|
||||
// x, mod = x / 58^10, x % 58^10
|
||||
//
|
||||
// Which will give us mod, which is a 10 digit base58 number.
|
||||
// We'll loop that 10 times to convert to the answer.
|
||||
|
||||
x.DivMod(x, bigRadix10, mod)
|
||||
if x.Sign() == 0 {
|
||||
// When x = 0, we need to ensure we don't add any extra zeros.
|
||||
m := mod.Int64()
|
||||
for m > 0 {
|
||||
answer = append(answer, alphabet[m%58])
|
||||
m /= 58
|
||||
}
|
||||
} else {
|
||||
m := mod.Int64()
|
||||
for i := 0; i < 10; i++ {
|
||||
answer = append(answer, alphabet[m%58])
|
||||
m /= 58
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// leading zero bytes
|
||||
for _, i := range b {
|
||||
if i != 0 {
|
||||
break
|
||||
}
|
||||
answer = append(answer, alphabetIdx0)
|
||||
}
|
||||
|
||||
// reverse
|
||||
alen := len(answer)
|
||||
for i := 0; i < alen/2; i++ {
|
||||
answer[i], answer[alen-1-i] = answer[alen-1-i], answer[i]
|
||||
}
|
||||
|
||||
return string(answer)
|
||||
}
101
btcutil/base58/base58_test.go
Normal file
@@ -0,0 +1,101 @@
// Copyright (c) 2013-2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package base58_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil/base58"
|
||||
)
|
||||
|
||||
var stringTests = []struct {
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{"", ""},
|
||||
{" ", "Z"},
|
||||
{"-", "n"},
|
||||
{"0", "q"},
|
||||
{"1", "r"},
|
||||
{"-1", "4SU"},
|
||||
{"11", "4k8"},
|
||||
{"abc", "ZiCa"},
|
||||
{"1234598760", "3mJr7AoUXx2Wqd"},
|
||||
{"abcdefghijklmnopqrstuvwxyz", "3yxU3u1igY8WkgtjK92fbJQCd4BZiiT1v25f"},
|
||||
{"00000000000000000000000000000000000000000000000000000000000000", "3sN2THZeE9Eh9eYrwkvZqNstbHGvrxSAM7gXUXvyFQP8XvQLUqNCS27icwUeDT7ckHm4FUHM2mTVh1vbLmk7y"},
|
||||
}
|
||||
|
||||
var invalidStringTests = []struct {
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{"0", ""},
|
||||
{"O", ""},
|
||||
{"I", ""},
|
||||
{"l", ""},
|
||||
{"3mJr0", ""},
|
||||
{"O3yxU", ""},
|
||||
{"3sNI", ""},
|
||||
{"4kl8", ""},
|
||||
{"0OIl", ""},
|
||||
{"!@#$%^&*()-_=+~`", ""},
|
||||
}
|
||||
|
||||
var hexTests = []struct {
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{"", ""},
|
||||
{"61", "2g"},
|
||||
{"626262", "a3gV"},
|
||||
{"636363", "aPEr"},
|
||||
{"73696d706c792061206c6f6e6720737472696e67", "2cFupjhnEsSn59qHXstmK2ffpLv2"},
|
||||
{"00eb15231dfceb60925886b67d065299925915aeb172c06647", "1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L"},
|
||||
{"516b6fcd0f", "ABnLTmg"},
|
||||
{"bf4f89001e670274dd", "3SEo3LWLoPntC"},
|
||||
{"572e4794", "3EFU7m"},
|
||||
{"ecac89cad93923c02321", "EJDM8drfXA6uyA"},
|
||||
{"10c8511e", "Rt5zm"},
|
||||
{"00000000000000000000", "1111111111"},
|
||||
{"000111d38e5fc9071ffcd20b4a763cc9ae4f252bb4e48fd66a835e252ada93ff480d6dd43dc62a641155a5", "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"},
|
||||
{"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", "1cWB5HCBdLjAuqGGReWE3R3CguuwSjw6RHn39s2yuDRTS5NsBgNiFpWgAnEx6VQi8csexkgYw3mdYrMHr8x9i7aEwP8kZ7vccXWqKDvGv3u1GxFKPuAkn8JCPPGDMf3vMMnbzm6Nh9zh1gcNsMvH3ZNLmP5fSG6DGbbi2tuwMWPthr4boWwCxf7ewSgNQeacyozhKDDQQ1qL5fQFUW52QKUZDZ5fw3KXNQJMcNTcaB723LchjeKun7MuGW5qyCBZYzA1KjofN1gYBV3NqyhQJ3Ns746GNuf9N2pQPmHz4xpnSrrfCvy6TVVz5d4PdrjeshsWQwpZsZGzvbdAdN8MKV5QsBDY"},
|
||||
}
|
||||
|
||||
func TestBase58(t *testing.T) {
|
||||
// Encode tests
|
||||
for x, test := range stringTests {
|
||||
tmp := []byte(test.in)
|
||||
if res := base58.Encode(tmp); res != test.out {
|
||||
t.Errorf("Encode test #%d failed: got: %s want: %s",
|
||||
x, res, test.out)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Decode tests
|
||||
for x, test := range hexTests {
|
||||
b, err := hex.DecodeString(test.in)
|
||||
if err != nil {
|
||||
t.Errorf("hex.DecodeString failed failed #%d: got: %s", x, test.in)
|
||||
continue
|
||||
}
|
||||
if res := base58.Decode(test.out); !bytes.Equal(res, b) {
|
||||
t.Errorf("Decode test #%d failed: got: %q want: %q",
|
||||
x, res, test.in)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Decode with invalid input
|
||||
for x, test := range invalidStringTests {
|
||||
if res := base58.Decode(test.in); string(res) != test.out {
|
||||
t.Errorf("Decode invalidString test #%d failed: got: %q want: %q",
|
||||
x, res, test.out)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
47
btcutil/base58/base58bench_test.go
Normal file
@@ -0,0 +1,47 @@
// Copyright (c) 2013-2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package base58_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil/base58"
|
||||
)
|
||||
|
||||
var (
|
||||
raw5k = bytes.Repeat([]byte{0xff}, 5000)
|
||||
raw100k = bytes.Repeat([]byte{0xff}, 100*1000)
|
||||
encoded5k = base58.Encode(raw5k)
|
||||
encoded100k = base58.Encode(raw100k)
|
||||
)
|
||||
|
||||
func BenchmarkBase58Encode_5K(b *testing.B) {
|
||||
b.SetBytes(int64(len(raw5k)))
|
||||
for i := 0; i < b.N; i++ {
|
||||
base58.Encode(raw5k)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkBase58Encode_100K(b *testing.B) {
|
||||
b.SetBytes(int64(len(raw100k)))
|
||||
for i := 0; i < b.N; i++ {
|
||||
base58.Encode(raw100k)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkBase58Decode_5K(b *testing.B) {
|
||||
b.SetBytes(int64(len(encoded5k)))
|
||||
for i := 0; i < b.N; i++ {
|
||||
base58.Decode(encoded5k)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkBase58Decode_100K(b *testing.B) {
|
||||
b.SetBytes(int64(len(encoded100k)))
|
||||
for i := 0; i < b.N; i++ {
|
||||
base58.Decode(encoded100k)
|
||||
}
|
||||
}
52
btcutil/base58/base58check.go
Normal file
@@ -0,0 +1,52 @@
// Copyright (c) 2013-2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package base58
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// ErrChecksum indicates that the checksum of a check-encoded string does not verify against
|
||||
// the checksum.
|
||||
var ErrChecksum = errors.New("checksum error")
|
||||
|
||||
// ErrInvalidFormat indicates that the check-encoded string has an invalid format.
|
||||
var ErrInvalidFormat = errors.New("invalid format: version and/or checksum bytes missing")
|
||||
|
||||
// checksum: first four bytes of sha256^2
|
||||
func checksum(input []byte) (cksum [4]byte) {
|
||||
h := sha256.Sum256(input)
|
||||
h2 := sha256.Sum256(h[:])
|
||||
copy(cksum[:], h2[:4])
|
||||
return
|
||||
}
|
||||
|
||||
// CheckEncode prepends a version byte and appends a four byte checksum.
|
||||
func CheckEncode(input []byte, version byte) string {
|
||||
b := make([]byte, 0, 1+len(input)+4)
|
||||
b = append(b, version)
|
||||
b = append(b, input...)
|
||||
cksum := checksum(b)
|
||||
b = append(b, cksum[:]...)
|
||||
return Encode(b)
|
||||
}
|
||||
|
||||
// CheckDecode decodes a string that was encoded with CheckEncode and verifies the checksum.
|
||||
func CheckDecode(input string) (result []byte, version byte, err error) {
|
||||
decoded := Decode(input)
|
||||
if len(decoded) < 5 {
|
||||
return nil, 0, ErrInvalidFormat
|
||||
}
|
||||
version = decoded[0]
|
||||
var cksum [4]byte
|
||||
copy(cksum[:], decoded[len(decoded)-4:])
|
||||
if checksum(decoded[:len(decoded)-4]) != cksum {
|
||||
return nil, 0, ErrChecksum
|
||||
}
|
||||
payload := decoded[1 : len(decoded)-4]
|
||||
result = append(result, payload...)
|
||||
return
|
||||
}
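To illustrate the version-byte and checksum handling implemented above, a minimal round-trip sketch (the expected output string comes from the package's example tests; the version value 0 is an arbitrary choice):

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcutil/base58"
)

func main() {
	// Prepend version byte 0 and append the 4-byte double-SHA256 checksum.
	encoded := base58.CheckEncode([]byte("Test data"), 0)
	fmt.Println(encoded) // 182iP79GRURMp7oMHDU

	payload, version, err := base58.CheckDecode(encoded)
	if err != nil {
		// base58.ErrChecksum or base58.ErrInvalidFormat on corrupted input.
		panic(err)
	}
	fmt.Println(string(payload), version) // Test data 0
}
```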
69
btcutil/base58/base58check_test.go
Normal file
@@ -0,0 +1,69 @@
// Copyright (c) 2013-2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package base58_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil/base58"
|
||||
)
|
||||
|
||||
var checkEncodingStringTests = []struct {
|
||||
version byte
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{20, "", "3MNQE1X"},
|
||||
{20, " ", "B2Kr6dBE"},
|
||||
{20, "-", "B3jv1Aft"},
|
||||
{20, "0", "B482yuaX"},
|
||||
{20, "1", "B4CmeGAC"},
|
||||
{20, "-1", "mM7eUf6kB"},
|
||||
{20, "11", "mP7BMTDVH"},
|
||||
{20, "abc", "4QiVtDjUdeq"},
|
||||
{20, "1234598760", "ZmNb8uQn5zvnUohNCEPP"},
|
||||
{20, "abcdefghijklmnopqrstuvwxyz", "K2RYDcKfupxwXdWhSAxQPCeiULntKm63UXyx5MvEH2"},
|
||||
{20, "00000000000000000000000000000000000000000000000000000000000000", "bi1EWXwJay2udZVxLJozuTb8Meg4W9c6xnmJaRDjg6pri5MBAxb9XwrpQXbtnqEoRV5U2pixnFfwyXC8tRAVC8XxnjK"},
|
||||
}
|
||||
|
||||
func TestBase58Check(t *testing.T) {
|
||||
for x, test := range checkEncodingStringTests {
|
||||
// test encoding
|
||||
if res := base58.CheckEncode([]byte(test.in), test.version); res != test.out {
|
||||
t.Errorf("CheckEncode test #%d failed: got %s, want: %s", x, res, test.out)
|
||||
}
|
||||
|
||||
// test decoding
|
||||
res, version, err := base58.CheckDecode(test.out)
|
||||
switch {
|
||||
case err != nil:
|
||||
t.Errorf("CheckDecode test #%d failed with err: %v", x, err)
|
||||
|
||||
case version != test.version:
|
||||
t.Errorf("CheckDecode test #%d failed: got version: %d want: %d", x, version, test.version)
|
||||
|
||||
case string(res) != test.in:
|
||||
t.Errorf("CheckDecode test #%d failed: got: %s want: %s", x, res, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
// test the two decoding failure cases
|
||||
// case 1: checksum error
|
||||
_, _, err := base58.CheckDecode("3MNQE1Y")
|
||||
if err != base58.ErrChecksum {
|
||||
t.Error("Checkdecode test failed, expected ErrChecksum")
|
||||
}
|
||||
// case 2: invalid formats (string lengths below 5 mean the version byte and/or the checksum
|
||||
// bytes are missing).
|
||||
testString := ""
|
||||
for len := 0; len < 4; len++ {
|
||||
testString += "x"
|
||||
_, _, err = base58.CheckDecode(testString)
|
||||
if err != base58.ErrInvalidFormat {
|
||||
t.Error("Checkdecode test failed, expected ErrInvalidFormat")
|
||||
}
|
||||
}
|
||||
|
||||
}
17
btcutil/base58/cov_report.sh
Normal file
@@ -0,0 +1,17 @@
#!/bin/sh
|
||||
|
||||
# This script uses gocov to generate a test coverage report.
|
||||
# The gocov tool may be obtained with the following command:
|
||||
# go get github.com/axw/gocov/gocov
|
||||
#
|
||||
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
|
||||
|
||||
# Check for gocov.
|
||||
type gocov >/dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
echo >&2 "This script requires the gocov tool."
|
||||
echo >&2 "You may obtain it with the following command:"
|
||||
echo >&2 "go get github.com/axw/gocov/gocov"
|
||||
exit 1
|
||||
fi
|
||||
gocov test | gocov report
29
btcutil/base58/doc.go
Normal file
@@ -0,0 +1,29 @@
// Copyright (c) 2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package base58 provides an API for working with modified base58 and Base58Check
|
||||
encodings.
|
||||
|
||||
Modified Base58 Encoding
|
||||
|
||||
Standard base58 encoding is similar to standard base64 encoding except, as the
|
||||
name implies, it uses a 58 character alphabet which results in an alphanumeric
|
||||
string and allows some characters which are problematic for humans to be
|
||||
excluded. Due to this, there can be various base58 alphabets.
|
||||
|
||||
The modified base58 alphabet used by Bitcoin, and hence this package, omits the
|
||||
0, O, I, and l characters that look the same in many fonts and are therefore
|
||||
hard for humans to distinguish.
|
||||
|
||||
Base58Check Encoding Scheme
|
||||
|
||||
The Base58Check encoding scheme is primarily used for Bitcoin addresses at the
|
||||
time of this writing, however it can be used to generically encode arbitrary
|
||||
byte arrays into human-readable strings along with a version byte that can be
|
||||
used to differentiate the same payload. For Bitcoin addresses, the extra
|
||||
version is used to differentiate the network of otherwise identical public keys
|
||||
which helps prevent using an address intended for one network on another.
|
||||
*/
|
||||
package base58
71
btcutil/base58/example_test.go
Normal file
@@ -0,0 +1,71 @@
// Copyright (c) 2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package base58_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil/base58"
|
||||
)
|
||||
|
||||
// This example demonstrates how to decode modified base58 encoded data.
|
||||
func ExampleDecode() {
|
||||
// Decode example modified base58 encoded data.
|
||||
encoded := "25JnwSn7XKfNQ"
|
||||
decoded := base58.Decode(encoded)
|
||||
|
||||
// Show the decoded data.
|
||||
fmt.Println("Decoded Data:", string(decoded))
|
||||
|
||||
// Output:
|
||||
// Decoded Data: Test data
|
||||
}
|
||||
|
||||
// This example demonstrates how to encode data using the modified base58
|
||||
// encoding scheme.
|
||||
func ExampleEncode() {
|
||||
// Encode example data with the modified base58 encoding scheme.
|
||||
data := []byte("Test data")
|
||||
encoded := base58.Encode(data)
|
||||
|
||||
// Show the encoded data.
|
||||
fmt.Println("Encoded Data:", encoded)
|
||||
|
||||
// Output:
|
||||
// Encoded Data: 25JnwSn7XKfNQ
|
||||
}
|
||||
|
||||
// This example demonstrates how to decode Base58Check encoded data.
|
||||
func ExampleCheckDecode() {
|
||||
// Decode example Base58Check encoded data.
|
||||
encoded := "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"
|
||||
decoded, version, err := base58.CheckDecode(encoded)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Show the decoded data.
|
||||
fmt.Printf("Decoded data: %x\n", decoded)
|
||||
fmt.Println("Version Byte:", version)
|
||||
|
||||
// Output:
|
||||
// Decoded data: 62e907b15cbf27d5425399ebf6f0fb50ebb88f18
|
||||
// Version Byte: 0
|
||||
}
|
||||
|
||||
// This example demonstrates how to encode data using the Base58Check encoding
|
||||
// scheme.
|
||||
func ExampleCheckEncode() {
|
||||
// Encode example data with the Base58Check encoding scheme.
|
||||
data := []byte("Test data")
|
||||
encoded := base58.CheckEncode(data, 0)
|
||||
|
||||
// Show the encoded data.
|
||||
fmt.Println("Encoded Data:", encoded)
|
||||
|
||||
// Output:
|
||||
// Encoded Data: 182iP79GRURMp7oMHDU
|
||||
}
79
btcutil/base58/genalphabet.go
Normal file
@@ -0,0 +1,79 @@
// Copyright (c) 2015 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//+build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var (
|
||||
start = []byte(`// Copyright (c) 2015 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// AUTOGENERATED by genalphabet.go; do not edit.
|
||||
|
||||
package base58
|
||||
|
||||
const (
|
||||
// alphabet is the modified base58 alphabet used by Bitcoin.
|
||||
alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
|
||||
|
||||
alphabetIdx0 = '1'
|
||||
)
|
||||
|
||||
var b58 = [256]byte{`)
|
||||
|
||||
end = []byte(`}`)
|
||||
|
||||
alphabet = []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz")
|
||||
tab = []byte("\t")
|
||||
invalid = []byte("255")
|
||||
comma = []byte(",")
|
||||
space = []byte(" ")
|
||||
nl = []byte("\n")
|
||||
)
|
||||
|
||||
func write(w io.Writer, b []byte) {
|
||||
_, err := w.Write(b)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
fi, err := os.Create("alphabet.go")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer fi.Close()
|
||||
|
||||
write(fi, start)
|
||||
write(fi, nl)
|
||||
for i := byte(0); i < 32; i++ {
|
||||
write(fi, tab)
|
||||
for j := byte(0); j < 8; j++ {
|
||||
idx := bytes.IndexByte(alphabet, i*8+j)
|
||||
if idx == -1 {
|
||||
write(fi, invalid)
|
||||
} else {
|
||||
write(fi, strconv.AppendInt(nil, int64(idx), 10))
|
||||
}
|
||||
write(fi, comma)
|
||||
if j != 7 {
|
||||
write(fi, space)
|
||||
}
|
||||
}
|
||||
write(fi, nl)
|
||||
}
|
||||
write(fi, end)
|
||||
write(fi, nl)
|
||||
}
29
btcutil/bech32/README.md
Normal file
@@ -0,0 +1,29 @@
bech32
|
||||
==========
|
||||
|
||||
[](https://travis-ci.org/btcsuite/btcutil)
|
||||
[](http://copyfree.org)
|
||||
[](http://godoc.org/github.com/btcsuite/btcd/btcutil/bech32)
|
||||
|
||||
Package bech32 provides a Go implementation of the bech32 format specified in
|
||||
[BIP 173](https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki).
|
||||
|
||||
Test vectors from BIP 173 are added to ensure compatibility with the BIP.
|
||||
|
||||
## Installation and Updating
|
||||
|
||||
```bash
|
||||
$ go get -u github.com/btcsuite/btcd/btcutil/bech32
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
* [Bech32 decode Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/bech32#example-Bech32Decode)
|
||||
Demonstrates how to decode a bech32 encoded string.
|
||||
* [Bech32 encode Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/bech32#example-BechEncode)
|
||||
Demonstrates how to encode data into a bech32 string.
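For quick reference, a minimal encode/decode sketch (the HRP "bc" and the payload bytes are illustrative; note that data passed to Encode must already be in 5-bit groups, hence the ConvertBits call):

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcutil/bech32"
)

func main() {
	// Regroup arbitrary bytes into 5-bit values before encoding.
	conv, err := bech32.ConvertBits([]byte{0x00, 0x01, 0x02}, 8, 5, true)
	if err != nil {
		panic(err)
	}

	encoded, err := bech32.Encode("bc", conv)
	if err != nil {
		panic(err)
	}
	fmt.Println("Encoded:", encoded)

	hrp, decoded, err := bech32.Decode(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println("HRP:", hrp, "Data (5-bit groups):", decoded)
}
```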
|
||||
|
||||
## License
|
||||
|
||||
Package bech32 is licensed under the [copyfree](http://copyfree.org) ISC
|
||||
License.
442
btcutil/bech32/bech32.go
Normal file
@@ -0,0 +1,442 @@
// Copyright (c) 2017 The btcsuite developers
|
||||
// Copyright (c) 2019 The Decred developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bech32
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// charset is the set of characters used in the data section of bech32 strings.
|
||||
// Note that this is ordered, such that for a given charset[i], i is the binary
|
||||
// value of the character.
|
||||
const charset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
|
||||
|
||||
// gen encodes the generator polynomial for the bech32 BCH checksum.
|
||||
var gen = []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}
|
||||
|
||||
// toBytes converts each character in the string 'chars' to the value of the
|
||||
// index of the corresponding character in 'charset'.
|
||||
func toBytes(chars string) ([]byte, error) {
|
||||
decoded := make([]byte, 0, len(chars))
|
||||
for i := 0; i < len(chars); i++ {
|
||||
index := strings.IndexByte(charset, chars[i])
|
||||
if index < 0 {
|
||||
return nil, ErrNonCharsetChar(chars[i])
|
||||
}
|
||||
decoded = append(decoded, byte(index))
|
||||
}
|
||||
return decoded, nil
|
||||
}
|
||||
|
||||
// bech32Polymod calculates the BCH checksum for a given hrp, values and
|
||||
// checksum data. Checksum is optional, and if nil a 0 checksum is assumed.
|
||||
//
|
||||
// Values and checksum (if provided) MUST be encoded as 5 bits per element (base
|
||||
// 32), otherwise the results are undefined.
|
||||
//
|
||||
// For more details on the polymod calculation, please refer to BIP 173.
|
||||
func bech32Polymod(hrp string, values, checksum []byte) int {
|
||||
chk := 1
|
||||
|
||||
// Account for the high bits of the HRP in the checksum.
|
||||
for i := 0; i < len(hrp); i++ {
|
||||
b := chk >> 25
|
||||
hiBits := int(hrp[i]) >> 5
|
||||
chk = (chk&0x1ffffff)<<5 ^ hiBits
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Account for the separator (0) between high and low bits of the HRP.
|
||||
// x^0 == x, so we eliminate the redundant xor used in the other rounds.
|
||||
b := chk >> 25
|
||||
chk = (chk & 0x1ffffff) << 5
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Account for the low bits of the HRP.
|
||||
for i := 0; i < len(hrp); i++ {
|
||||
b := chk >> 25
|
||||
loBits := int(hrp[i]) & 31
|
||||
chk = (chk&0x1ffffff)<<5 ^ loBits
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Account for the values.
|
||||
for _, v := range values {
|
||||
b := chk >> 25
|
||||
chk = (chk&0x1ffffff)<<5 ^ int(v)
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if checksum == nil {
|
||||
// A nil checksum is used during encoding, so assume all bytes are zero.
|
||||
// x^0 == x, so we eliminate the redundant xor used in the other rounds.
|
||||
for v := 0; v < 6; v++ {
|
||||
b := chk >> 25
|
||||
chk = (chk & 0x1ffffff) << 5
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Checksum is provided during decoding, so use it.
|
||||
for _, v := range checksum {
|
||||
b := chk >> 25
|
||||
chk = (chk&0x1ffffff)<<5 ^ int(v)
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return chk
|
||||
}
|
||||
|
||||
// writeBech32Checksum calculates the checksum data expected for a string that
|
||||
// will have the given hrp and payload data and writes it to the provided string
|
||||
// builder.
|
||||
//
|
||||
// The payload data MUST be encoded as a base 32 (5 bits per element) byte slice
|
||||
// and the hrp MUST only use the allowed character set (ascii chars between 33
|
||||
// and 126), otherwise the results are undefined.
|
||||
//
|
||||
// For more details on the checksum calculation, please refer to BIP 173.
|
||||
func writeBech32Checksum(hrp string, data []byte, bldr *strings.Builder,
|
||||
version Version) {
|
||||
|
||||
bech32Const := int(VersionToConsts[version])
|
||||
polymod := bech32Polymod(hrp, data, nil) ^ bech32Const
|
||||
for i := 0; i < 6; i++ {
|
||||
b := byte((polymod >> uint(5*(5-i))) & 31)
|
||||
|
||||
// This can't fail, given we explicitly cap the previous b byte by the
|
||||
// first 31 bits.
|
||||
c := charset[b]
|
||||
bldr.WriteByte(c)
|
||||
}
|
||||
}
|
||||
|
||||
// bech32VerifyChecksum verifies whether the bech32 string specified by the
|
||||
// provided hrp and payload data (encoded as 5 bits per element byte slice) has
|
||||
// the correct checksum suffix. The version of bech32 used (bech32 OG, or
|
||||
// bech32m) is also returned to allow the caller to perform proper address
|
||||
// validation (segwitv0 should use bech32, v1+ should use bech32m).
|
||||
//
|
||||
// Data MUST have more than 6 elements, otherwise this function panics.
|
||||
//
|
||||
// For more details on the checksum verification, please refer to BIP 173.
|
||||
func bech32VerifyChecksum(hrp string, data []byte) (Version, bool) {
|
||||
checksum := data[len(data)-6:]
|
||||
values := data[:len(data)-6]
|
||||
polymod := bech32Polymod(hrp, values, checksum)
|
||||
|
||||
// Before BIP-350, we'd always check this against a static constant of
|
||||
// 1 to know if the checksum was computed properly. As we want to
|
||||
// generically support decoding for bech32m as well as bech32, we'll
|
||||
// look up the returned value and compare it to the set of defined
|
||||
// constants.
|
||||
bech32Version, ok := ConstsToVersion[ChecksumConst(polymod)]
|
||||
if ok {
|
||||
return bech32Version, true
|
||||
}
|
||||
|
||||
return VersionUnknown, false
|
||||
}
|
||||
|
||||
// decodeNoLimit is a bech32 checksum version aware arbitrary string length
|
||||
// decoder. This function will return the version of the decoded checksum
|
||||
// constant so higher level validation can be performed to ensure the correct
|
||||
// version of bech32 was used when encoding.
|
||||
func decodeNoLimit(bech string) (string, []byte, Version, error) {
|
||||
// The minimum allowed size of a bech32 string is 8 characters, since it
|
||||
// needs a non-empty HRP, a separator, and a 6 character checksum.
|
||||
if len(bech) < 8 {
|
||||
return "", nil, VersionUnknown, ErrInvalidLength(len(bech))
|
||||
}
|
||||
|
||||
// Only ASCII characters between 33 and 126 are allowed.
|
||||
var hasLower, hasUpper bool
|
||||
for i := 0; i < len(bech); i++ {
|
||||
if bech[i] < 33 || bech[i] > 126 {
|
||||
return "", nil, VersionUnknown, ErrInvalidCharacter(bech[i])
|
||||
}
|
||||
|
||||
// The characters must be either all lowercase or all uppercase. Testing
|
||||
// directly with ascii codes is safe here, given the previous test.
|
||||
hasLower = hasLower || (bech[i] >= 97 && bech[i] <= 122)
|
||||
hasUpper = hasUpper || (bech[i] >= 65 && bech[i] <= 90)
|
||||
if hasLower && hasUpper {
|
||||
return "", nil, VersionUnknown, ErrMixedCase{}
|
||||
}
|
||||
}
|
||||
|
||||
// The bech32 standard uses only the lowercase form of strings for checksum
|
||||
// calculation.
|
||||
if hasUpper {
|
||||
bech = strings.ToLower(bech)
|
||||
}
|
||||
|
||||
// The string is invalid if the last '1' is non-existent, it is the
|
||||
// first character of the string (no human-readable part) or one of the
|
||||
// last 6 characters of the string (since checksum cannot contain '1').
|
||||
one := strings.LastIndexByte(bech, '1')
|
||||
if one < 1 || one+7 > len(bech) {
|
||||
return "", nil, VersionUnknown, ErrInvalidSeparatorIndex(one)
|
||||
}
|
||||
|
||||
// The human-readable part is everything before the last '1'.
|
||||
hrp := bech[:one]
|
||||
data := bech[one+1:]
|
||||
|
||||
// Each character corresponds to the byte with value of the index in
|
||||
// 'charset'.
|
||||
decoded, err := toBytes(data)
|
||||
if err != nil {
|
||||
return "", nil, VersionUnknown, err
|
||||
}
|
||||
|
||||
// Verify if the checksum (stored inside decoded[:]) is valid, given the
|
||||
// previously decoded hrp.
|
||||
bech32Version, ok := bech32VerifyChecksum(hrp, decoded)
|
||||
if !ok {
|
||||
// Invalid checksum. Calculate what it should have been, so that the
|
||||
// error contains this information.
|
||||
|
||||
// Extract the payload bytes and actual checksum in the string.
|
||||
actual := bech[len(bech)-6:]
|
||||
payload := decoded[:len(decoded)-6]
|
||||
|
||||
// Calculate the expected checksum, given the hrp and payload
|
||||
// data. We'll actually compute _both_ possibly valid checksums
|
||||
// to further aid in debugging.
|
||||
var expectedBldr strings.Builder
|
||||
expectedBldr.Grow(6)
|
||||
writeBech32Checksum(hrp, payload, &expectedBldr, Version0)
|
||||
expectedVersion0 := expectedBldr.String()
|
||||
|
||||
var b strings.Builder
|
||||
b.Grow(6)
|
||||
writeBech32Checksum(hrp, payload, &expectedBldr, VersionM)
|
||||
expectedVersionM := expectedBldr.String()
|
||||
|
||||
err = ErrInvalidChecksum{
|
||||
Expected: expectedVersion0,
|
||||
ExpectedM: expectedVersionM,
|
||||
Actual: actual,
|
||||
}
|
||||
return "", nil, VersionUnknown, err
|
||||
}
|
||||
|
||||
// We exclude the last 6 bytes, which is the checksum.
|
||||
return hrp, decoded[:len(decoded)-6], bech32Version, nil
|
||||
}
|
||||
|
||||
// DecodeNoLimit decodes a bech32 encoded string, returning the human-readable
|
||||
// part and the data part excluding the checksum. This function does NOT
|
||||
// validate against the BIP-173 maximum length allowed for bech32 strings and
|
||||
// is meant for use in custom applications (such as lightning network payment
|
||||
// requests), NOT on-chain addresses.
|
||||
//
|
||||
// Note that the returned data is 5-bit (base32) encoded and the human-readable
|
||||
// part will be lowercase.
|
||||
func DecodeNoLimit(bech string) (string, []byte, error) {
|
||||
hrp, data, _, err := decodeNoLimit(bech)
|
||||
return hrp, data, err
|
||||
}
|
||||
|
||||
// Decode decodes a bech32 encoded string, returning the human-readable part and
|
||||
// the data part excluding the checksum.
|
||||
//
|
||||
// Note that the returned data is 5-bit (base32) encoded and the human-readable
|
||||
// part will be lowercase.
|
||||
func Decode(bech string) (string, []byte, error) {
|
||||
// The maximum allowed length for a bech32 string is 90.
|
||||
if len(bech) > 90 {
|
||||
return "", nil, ErrInvalidLength(len(bech))
|
||||
}
|
||||
|
||||
hrp, data, _, err := decodeNoLimit(bech)
|
||||
return hrp, data, err
|
||||
}
|
||||
|
||||
// DecodeGeneric is identical to the existing Decode method, but will also
|
||||
// return bech32 version that matches the decoded checksum. This method should
|
||||
// be used when decoding segwit addresses, as it enables additional
|
||||
// verification to ensure the proper checksum is used.
|
||||
func DecodeGeneric(bech string) (string, []byte, Version, error) {
|
||||
// The maximum allowed length for a bech32 string is 90.
|
||||
if len(bech) > 90 {
|
||||
return "", nil, VersionUnknown, ErrInvalidLength(len(bech))
|
||||
}
|
||||
|
||||
return decodeNoLimit(bech)
|
||||
}
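A brief sketch of how a caller might use the returned checksum version to validate a segwit address (the address is one of the BIP-350 test vectors that appears later in this diff; the witness-version check is the caller's responsibility, not something DecodeGeneric performs itself):

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcutil/bech32"
)

func main() {
	addr := "bc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqzk5jj0"

	hrp, data, version, err := bech32.DecodeGeneric(addr)
	if err != nil {
		panic(err)
	}

	// The first 5-bit group of a segwit address is the witness version.
	// Per BIP-350, v0 must use the bech32 checksum and v1+ bech32m.
	witnessVer := data[0]
	switch {
	case witnessVer == 0 && version != bech32.Version0:
		panic("segwit v0 address must use bech32")
	case witnessVer > 0 && version != bech32.VersionM:
		panic("segwit v1+ address must use bech32m")
	}

	fmt.Println(hrp, witnessVer, version)
}
```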
|
||||
|
||||
// encodeGeneric is the base bech32 encoding function that is aware of the
|
||||
// existence of the checksum versions. This method is private, as the Encode
|
||||
// and EncodeM methods are intended to be used instead.
|
||||
func encodeGeneric(hrp string, data []byte,
|
||||
version Version) (string, error) {
|
||||
|
||||
// The resulting bech32 string is the concatenation of the lowercase
|
||||
// hrp, the separator 1, data and the 6-byte checksum.
|
||||
hrp = strings.ToLower(hrp)
|
||||
var bldr strings.Builder
|
||||
bldr.Grow(len(hrp) + 1 + len(data) + 6)
|
||||
bldr.WriteString(hrp)
|
||||
bldr.WriteString("1")
|
||||
|
||||
// Write the data part, using the bech32 charset.
|
||||
for _, b := range data {
|
||||
if int(b) >= len(charset) {
|
||||
return "", ErrInvalidDataByte(b)
|
||||
}
|
||||
bldr.WriteByte(charset[b])
|
||||
}
|
||||
|
||||
// Calculate and write the checksum of the data.
|
||||
writeBech32Checksum(hrp, data, &bldr, version)
|
||||
|
||||
return bldr.String(), nil
|
||||
}
|
||||
|
||||
// Encode encodes a byte slice into a bech32 string with the given
|
||||
// human-readable part (HRP). The HRP will be converted to lowercase if needed
|
||||
// since mixed cased encodings are not permitted and lowercase is used for
|
||||
// checksum purposes. Note that the bytes must each encode 5 bits (base32).
|
||||
func Encode(hrp string, data []byte) (string, error) {
|
||||
return encodeGeneric(hrp, data, Version0)
|
||||
}
|
||||
|
||||
// EncodeM is exactly the same as the Encode method, but it uses the new
|
||||
// bech32m constant instead of the original one. It should be used whenever one
|
||||
// attempts to encode a segwit address of v1 and beyond.
|
||||
func EncodeM(hrp string, data []byte) (string, error) {
|
||||
return encodeGeneric(hrp, data, VersionM)
|
||||
}
|
||||
|
||||
// ConvertBits converts a byte slice where each byte is encoding fromBits bits,
|
||||
// to a byte slice where each byte is encoding toBits bits.
|
||||
func ConvertBits(data []byte, fromBits, toBits uint8, pad bool) ([]byte, error) {
|
||||
if fromBits < 1 || fromBits > 8 || toBits < 1 || toBits > 8 {
|
||||
return nil, ErrInvalidBitGroups{}
|
||||
}
|
||||
|
||||
// Determine the maximum size the resulting array can have after base
|
||||
// conversion, so that we can size it a single time. This might be off
|
||||
// by a byte depending on whether padding is used or not and if the input
|
||||
// data is a multiple of both fromBits and toBits, but we ignore that and
|
||||
// just size it to the maximum possible.
|
||||
maxSize := len(data)*int(fromBits)/int(toBits) + 1
|
||||
|
||||
// The final bytes, each byte encoding toBits bits.
|
||||
regrouped := make([]byte, 0, maxSize)
|
||||
|
||||
// Keep track of the next byte we create and how many bits we have
|
||||
// added to it out of the toBits goal.
|
||||
nextByte := byte(0)
|
||||
filledBits := uint8(0)
|
||||
|
||||
for _, b := range data {
|
||||
|
||||
// Discard unused bits.
|
||||
b <<= 8 - fromBits
|
||||
|
||||
// How many bits remaining to extract from the input data.
|
||||
remFromBits := fromBits
|
||||
for remFromBits > 0 {
|
||||
// How many bits remaining to be added to the next byte.
|
||||
remToBits := toBits - filledBits
|
||||
|
||||
// The number of bytes to next extract is the minimum of
|
||||
// remFromBits and remToBits.
|
||||
toExtract := remFromBits
|
||||
if remToBits < toExtract {
|
||||
toExtract = remToBits
|
||||
}
|
||||
|
||||
// Add the next bits to nextByte, shifting the already
|
||||
// added bits to the left.
|
||||
nextByte = (nextByte << toExtract) | (b >> (8 - toExtract))
|
||||
|
||||
// Discard the bits we just extracted and get ready for
|
||||
// next iteration.
|
||||
b <<= toExtract
|
||||
remFromBits -= toExtract
|
||||
filledBits += toExtract
|
||||
|
||||
// If the nextByte is completely filled, we add it to
|
||||
// our regrouped bytes and start on the next byte.
|
||||
if filledBits == toBits {
|
||||
regrouped = append(regrouped, nextByte)
|
||||
filledBits = 0
|
||||
nextByte = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We pad any unfinished group if specified.
|
||||
if pad && filledBits > 0 {
|
||||
nextByte <<= toBits - filledBits
|
||||
regrouped = append(regrouped, nextByte)
|
||||
filledBits = 0
|
||||
nextByte = 0
|
||||
}
|
||||
|
||||
// Any incomplete group must be <= 4 bits, and all zeroes.
|
||||
if filledBits > 0 && (filledBits > 4 || nextByte != 0) {
|
||||
return nil, ErrInvalidIncompleteGroup{}
|
||||
}
|
||||
|
||||
return regrouped, nil
|
||||
}
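A small illustration of the regrouping performed above, converting three 8-bit bytes into 5-bit groups and back (the input bytes are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcutil/bech32"
)

func main() {
	// 24 input bits regroup into five 5-bit values; pad=true fills the
	// final group with zero bits on the right.
	grouped, err := bech32.ConvertBits([]byte{0xFF, 0x00, 0xAA}, 8, 5, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(grouped) // [31 28 0 10 20]

	// Converting back with pad=false drops the padding bits again.
	orig, err := bech32.ConvertBits(grouped, 5, 8, false)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", orig) // ff00aa
}
```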
|
||||
|
||||
// EncodeFromBase256 converts a base256-encoded byte slice into a base32-encoded
|
||||
// byte slice and then encodes it into a bech32 string with the given
|
||||
// human-readable part (HRP). The HRP will be converted to lowercase if needed
|
||||
// since mixed cased encodings are not permitted and lowercase is used for
|
||||
// checksum purposes.
|
||||
func EncodeFromBase256(hrp string, data []byte) (string, error) {
|
||||
converted, err := ConvertBits(data, 8, 5, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return Encode(hrp, converted)
|
||||
}
|
||||
|
||||
// DecodeToBase256 decodes a bech32-encoded string into its associated
|
||||
// human-readable part (HRP) and base32-encoded data, converts that data to a
|
||||
// base256-encoded byte slice and returns it along with the lowercase HRP.
|
||||
func DecodeToBase256(bech string) (string, []byte, error) {
|
||||
hrp, data, err := Decode(bech)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
converted, err := ConvertBits(data, 5, 8, false)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return hrp, converted, nil
|
||||
}
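A round-trip sketch for the two convenience wrappers above (the HRP "test" and the payload bytes are arbitrary illustrative values):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/btcsuite/btcd/btcutil/bech32"
)

func main() {
	payload := []byte{0xde, 0xad, 0xbe, 0xef}

	encoded, err := bech32.EncodeFromBase256("test", payload)
	if err != nil {
		panic(err)
	}
	fmt.Println("Encoded:", encoded)

	hrp, decoded, err := bech32.DecodeToBase256(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println("HRP:", hrp, "Round trip ok:", bytes.Equal(decoded, payload))
}
```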
691
btcutil/bech32/bech32_test.go
Normal file
@@ -0,0 +1,691 @@
// Copyright (c) 2017-2020 The btcsuite developers
|
||||
// Copyright (c) 2019 The Decred developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bech32
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestBech32 tests whether decoding and re-encoding the valid BIP-173 test
|
||||
// vectors works and if decoding invalid test vectors fails for the correct
|
||||
// reason.
|
||||
func TestBech32(t *testing.T) {
|
||||
tests := []struct {
|
||||
str string
|
||||
expectedError error
|
||||
}{
|
||||
{"A12UEL5L", nil},
|
||||
{"a12uel5l", nil},
|
||||
{"an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs", nil},
|
||||
{"abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", nil},
|
||||
{"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", nil},
|
||||
{"split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", nil},
|
||||
{"split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w", ErrInvalidChecksum{"2y9e3w", "2y9e3wlc445v", "2y9e2w"}}, // invalid checksum
|
||||
{"s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p", ErrInvalidCharacter(' ')}, // invalid character (space) in hrp
|
||||
{"spl\x7Ft1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", ErrInvalidCharacter(127)}, // invalid character (DEL) in hrp
|
||||
{"split1cheo2y9e2w", ErrNonCharsetChar('o')}, // invalid character (o) in data part
|
||||
{"split1a2y9w", ErrInvalidSeparatorIndex(5)}, // too short data part
|
||||
{"1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", ErrInvalidSeparatorIndex(0)}, // empty hrp
|
||||
{"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", ErrInvalidLength(91)}, // too long
|
||||
|
||||
// Additional test vectors used in bitcoin core
|
||||
{" 1nwldj5", ErrInvalidCharacter(' ')},
|
||||
{"\x7f" + "1axkwrx", ErrInvalidCharacter(0x7f)},
|
||||
{"\x801eym55h", ErrInvalidCharacter(0x80)},
|
||||
{"an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx", ErrInvalidLength(91)},
|
||||
{"pzry9x0s0muk", ErrInvalidSeparatorIndex(-1)},
|
||||
{"1pzry9x0s0muk", ErrInvalidSeparatorIndex(0)},
|
||||
{"x1b4n0q5v", ErrNonCharsetChar(98)},
|
||||
{"li1dgmt3", ErrInvalidSeparatorIndex(2)},
|
||||
{"de1lg7wt\xff", ErrInvalidCharacter(0xff)},
|
||||
{"A1G7SGD8", ErrInvalidChecksum{"2uel5l", "2uel5llqfn3a", "g7sgd8"}},
|
||||
{"10a06t8", ErrInvalidLength(7)},
|
||||
{"1qzzfhee", ErrInvalidSeparatorIndex(0)},
|
||||
{"a12UEL5L", ErrMixedCase{}},
|
||||
{"A12uEL5L", ErrMixedCase{}},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
str := test.str
|
||||
hrp, decoded, err := Decode(str)
|
||||
if test.expectedError != err {
|
||||
t.Errorf("%d: expected decoding error %v "+
|
||||
"instead got %v", i, test.expectedError, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// End test case here if a decoding error was expected.
|
||||
continue
|
||||
}
|
||||
|
||||
// Check that it encodes to the same string
|
||||
encoded, err := Encode(hrp, decoded)
|
||||
if err != nil {
|
||||
t.Errorf("encoding failed: %v", err)
|
||||
}
|
||||
|
||||
if encoded != strings.ToLower(str) {
|
||||
t.Errorf("expected data to encode to %v, but got %v",
|
||||
str, encoded)
|
||||
}
|
||||
|
||||
// Flip a bit in the string and make sure it is caught.
|
||||
pos := strings.LastIndexAny(str, "1")
|
||||
flipped := str[:pos+1] + string((str[pos+1] ^ 1)) + str[pos+2:]
|
||||
_, _, err = Decode(flipped)
|
||||
if err == nil {
|
||||
t.Error("expected decoding to fail")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestBech32M tests that the following set of strings, based on the test
|
||||
// vectors in BIP-350, are either valid or invalid using the new bech32m
|
||||
// checksum algo. Some of these strings are similar to the set of above test
|
||||
// vectors, but end up with different checksums.
|
||||
func TestBech32M(t *testing.T) {
|
||||
tests := []struct {
|
||||
str string
|
||||
expectedError error
|
||||
}{
|
||||
{"A1LQFN3A", nil},
|
||||
{"a1lqfn3a", nil},
|
||||
{"an83characterlonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11sg7hg6", nil},
|
||||
{"abcdef1l7aum6echk45nj3s0wdvt2fg8x9yrzpqzd3ryx", nil},
|
||||
{"11llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllludsr8", nil},
|
||||
{"split1checkupstagehandshakeupstreamerranterredcaperredlc445v", nil},
|
||||
{"?1v759aa", nil},
|
||||
|
||||
// Additional test vectors used in bitcoin core
|
||||
{"\x201xj0phk", ErrInvalidCharacter('\x20')},
|
||||
{"\x7f1g6xzxy", ErrInvalidCharacter('\x7f')},
|
||||
{"\x801vctc34", ErrInvalidCharacter('\x80')},
|
||||
{"an84characterslonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11d6pts4", ErrInvalidLength(91)},
|
||||
{"qyrz8wqd2c9m", ErrInvalidSeparatorIndex(-1)},
|
||||
{"1qyrz8wqd2c9m", ErrInvalidSeparatorIndex(0)},
|
||||
{"y1b0jsk6g", ErrNonCharsetChar(98)},
|
||||
{"lt1igcx5c0", ErrNonCharsetChar(105)},
|
||||
{"in1muywd", ErrInvalidSeparatorIndex(2)},
|
||||
{"mm1crxm3i", ErrNonCharsetChar(105)},
|
||||
{"au1s5cgom", ErrNonCharsetChar(111)},
|
||||
{"M1VUXWEZ", ErrInvalidChecksum{"mzl49c", "mzl49cw70eq6", "vuxwez"}},
|
||||
{"16plkw9", ErrInvalidLength(7)},
|
||||
{"1p2gdwpf", ErrInvalidSeparatorIndex(0)},
|
||||
|
||||
{" 1nwldj5", ErrInvalidCharacter(' ')},
|
||||
{"\x7f" + "1axkwrx", ErrInvalidCharacter(0x7f)},
|
||||
{"\x801eym55h", ErrInvalidCharacter(0x80)},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
str := test.str
|
||||
hrp, decoded, err := Decode(str)
|
||||
if test.expectedError != err {
|
||||
t.Errorf("%d: (%v) expected decoding error %v "+
|
||||
"instead got %v", i, str, test.expectedError,
|
||||
err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// End test case here if a decoding error was expected.
|
||||
continue
|
||||
}
|
||||
|
||||
// Check that it encodes to the same string, using bech32 m.
|
||||
encoded, err := EncodeM(hrp, decoded)
|
||||
if err != nil {
|
||||
t.Errorf("encoding failed: %v", err)
|
||||
}
|
||||
|
||||
if encoded != strings.ToLower(str) {
|
||||
t.Errorf("expected data to encode to %v, but got %v",
|
||||
str, encoded)
|
||||
}
|
||||
|
||||
// Flip a bit in the string and make sure it is caught.
|
||||
pos := strings.LastIndexAny(str, "1")
|
||||
flipped := str[:pos+1] + string((str[pos+1] ^ 1)) + str[pos+2:]
|
||||
_, _, err = Decode(flipped)
|
||||
if err == nil {
|
||||
t.Error("expected decoding to fail")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestBech32DecodeGeneric tests that given a bech32 string, or a bech32m
|
||||
// string, the proper checksum version is returned so that callers can perform
|
||||
// segwit addr validation.
|
||||
func TestBech32DecodeGeneric(t *testing.T) {
|
||||
tests := []struct {
|
||||
str string
|
||||
version Version
|
||||
}{
|
||||
{"A1LQFN3A", VersionM},
|
||||
{"a1lqfn3a", VersionM},
|
||||
{"an83characterlonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11sg7hg6", VersionM},
|
||||
{"abcdef1l7aum6echk45nj3s0wdvt2fg8x9yrzpqzd3ryx", VersionM},
|
||||
{"11llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllludsr8", VersionM},
|
||||
{"split1checkupstagehandshakeupstreamerranterredcaperredlc445v", VersionM},
|
||||
{"?1v759aa", VersionM},
|
||||
|
||||
{"A12UEL5L", Version0},
|
||||
{"a12uel5l", Version0},
|
||||
{"an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs", Version0},
|
||||
{"abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", Version0},
|
||||
{"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", Version0},
|
||||
{"split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", Version0},
|
||||
|
||||
{"BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4", Version0},
|
||||
{"tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7", Version0},
|
||||
{"bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kt5nd6y", VersionM},
|
||||
{"BC1SW50QGDZ25J", VersionM},
|
||||
{"bc1zw508d6qejxtdg4y5r3zarvaryvaxxpcs", VersionM},
|
||||
{"tb1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesrxh6hy", Version0},
|
||||
{"tb1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesf3hn0c", VersionM},
|
||||
{"bc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqzk5jj0", VersionM},
|
||||
}
|
||||
for i, test := range tests {
|
||||
_, _, version, err := DecodeGeneric(test.str)
|
||||
if err != nil {
|
||||
t.Errorf("%d: (%v) unexpected error during "+
|
||||
"decoding: %v", i, test.str, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if version != test.version {
|
||||
t.Errorf("(%v): invalid version: expected %v, got %v",
|
||||
test.str, test.version, version)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMixedCaseEncode ensures mixed case HRPs are converted to lowercase as
|
||||
// expected when encoding and that decoding the produced encoding when converted
|
||||
// to all uppercase produces the lowercase HRP and original data.
|
||||
func TestMixedCaseEncode(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
hrp string
|
||||
data string
|
||||
encoded string
|
||||
}{{
|
||||
name: "all uppercase HRP with no data",
|
||||
hrp: "A",
|
||||
data: "",
|
||||
encoded: "a12uel5l",
|
||||
}, {
|
||||
name: "all uppercase HRP with data",
|
||||
hrp: "UPPERCASE",
|
||||
data: "787878",
|
||||
encoded: "uppercase10pu8sss7kmp",
|
||||
}, {
|
||||
name: "mixed case HRP even offsets uppercase",
|
||||
hrp: "AbCdEf",
|
||||
data: "00443214c74254b635cf84653a56d7c675be77df",
|
||||
encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
|
||||
}, {
|
||||
name: "mixed case HRP odd offsets uppercase ",
|
||||
hrp: "aBcDeF",
|
||||
data: "00443214c74254b635cf84653a56d7c675be77df",
|
||||
encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
|
||||
}, {
|
||||
name: "all lowercase HRP",
|
||||
hrp: "abcdef",
|
||||
data: "00443214c74254b635cf84653a56d7c675be77df",
|
||||
encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
|
||||
}}
|
||||
|
||||
for _, test := range tests {
|
||||
// Convert the text hex to bytes, convert those bytes from base256 to
|
||||
// base32, then ensure the encoded result with the HRP provided in the
|
||||
// test data is as expected.
|
||||
data, err := hex.DecodeString(test.data)
|
||||
if err != nil {
|
||||
t.Errorf("%q: invalid hex %q: %v", test.name, test.data, err)
|
||||
continue
|
||||
}
|
||||
convertedData, err := ConvertBits(data, 8, 5, true)
|
||||
if err != nil {
|
||||
t.Errorf("%q: unexpected convert bits error: %v", test.name,
|
||||
err)
|
||||
continue
|
||||
}
|
||||
gotEncoded, err := Encode(test.hrp, convertedData)
|
||||
if err != nil {
|
||||
t.Errorf("%q: unexpected encode error: %v", test.name, err)
|
||||
continue
|
||||
}
|
||||
if gotEncoded != test.encoded {
|
||||
t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name,
|
||||
gotEncoded, test.encoded)
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure that decoding the expected lowercase encoding converted to all
|
||||
// uppercase produces the lowercase HRP and original data.
|
||||
gotHRP, gotData, err := Decode(strings.ToUpper(test.encoded))
|
||||
if err != nil {
|
||||
t.Errorf("%q: unexpected decode error: %v", test.name, err)
|
||||
continue
|
||||
}
|
||||
wantHRP := strings.ToLower(test.hrp)
|
||||
if gotHRP != wantHRP {
|
||||
t.Errorf("%q: mismatched decoded HRP -- got %q, want %q", test.name,
|
||||
gotHRP, wantHRP)
|
||||
continue
|
||||
}
|
||||
convertedGotData, err := ConvertBits(gotData, 5, 8, false)
|
||||
if err != nil {
|
||||
t.Errorf("%q: unexpected convert bits error: %v", test.name,
|
||||
err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(convertedGotData, data) {
|
||||
t.Errorf("%q: mismatched data -- got %x, want %x", test.name,
|
||||
convertedGotData, data)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestCanDecodeUnlimtedBech32 tests whether decoding a large bech32 string works
|
||||
// when using the DecodeNoLimit version
|
||||
func TestCanDecodeUnlimtedBech32(t *testing.T) {
|
||||
input := "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq5kx0yd"
|
||||
|
||||
// Sanity check that an input of this length errors on regular Decode()
|
||||
_, _, err := Decode(input)
|
||||
if err == nil {
|
||||
t.Fatalf("Test vector not appropriate")
|
||||
}
|
||||
|
||||
// Try and decode it.
|
||||
hrp, data, err := DecodeNoLimit(input)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected decoding of large string to work. Got error: %v", err)
|
||||
}
|
||||
|
||||
// Verify data for correctness.
|
||||
if hrp != "1" {
|
||||
t.Fatalf("Unexpected hrp: %v", hrp)
|
||||
}
|
||||
decodedHex := fmt.Sprintf("%x", data)
|
||||
expected := "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000"
|
||||
if decodedHex != expected {
|
||||
t.Fatalf("Unexpected decoded data: %s", decodedHex)
|
||||
}
|
||||
}
|
||||
|
||||
// TestBech32Base256 ensures decoding and encoding various bech32, HRPs, and
|
||||
// data produces the expected results when using EncodeFromBase256 and
|
||||
// DecodeToBase256. It includes tests for proper handling of case
|
||||
// manipulations.
|
||||
func TestBech32Base256(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string // test name
|
||||
encoded string // bech32 string to decode
|
||||
hrp string // expected human-readable part
|
||||
data string // expected hex-encoded data
|
||||
err error // expected error
|
||||
}{{
|
||||
name: "all uppercase, no data",
|
||||
encoded: "A12UEL5L",
|
||||
hrp: "a",
|
||||
data: "",
|
||||
}, {
|
||||
name: "long hrp with separator and excluded chars, no data",
|
||||
encoded: "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs",
|
||||
hrp: "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio",
|
||||
data: "",
|
||||
}, {
|
||||
name: "6 char hrp with data with leading zero",
|
||||
encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
|
||||
hrp: "abcdef",
|
||||
data: "00443214c74254b635cf84653a56d7c675be77df",
|
||||
}, {
|
||||
name: "hrp same as separator and max length encoded string",
|
||||
encoded: "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
|
||||
hrp: "1",
|
||||
data: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
}, {
|
||||
name: "5 char hrp with data chosen to produce human-readable data part",
|
||||
encoded: "split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
|
||||
hrp: "split",
|
||||
data: "c5f38b70305f519bf66d85fb6cf03058f3dde463ecd7918f2dc743918f2d",
|
||||
}, {
|
||||
name: "same as previous but with checksum invalidated",
|
||||
encoded: "split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w",
|
||||
err: ErrInvalidChecksum{"2y9e3w", "2y9e3wlc445v", "2y9e2w"},
|
||||
}, {
|
||||
name: "hrp with invalid character (space)",
|
||||
encoded: "s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p",
|
||||
err: ErrInvalidCharacter(' '),
|
||||
}, {
|
||||
name: "hrp with invalid character (DEL)",
|
||||
encoded: "spl\x7ft1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
|
||||
err: ErrInvalidCharacter(127),
|
||||
}, {
|
||||
name: "data part with invalid character (o)",
|
||||
encoded: "split1cheo2y9e2w",
|
||||
err: ErrNonCharsetChar('o'),
|
||||
}, {
|
||||
name: "data part too short",
|
||||
encoded: "split1a2y9w",
|
||||
err: ErrInvalidSeparatorIndex(5),
|
||||
}, {
|
||||
name: "empty hrp",
|
||||
encoded: "1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
|
||||
err: ErrInvalidSeparatorIndex(0),
|
||||
}, {
|
||||
name: "no separator",
|
||||
encoded: "pzry9x0s0muk",
|
||||
err: ErrInvalidSeparatorIndex(-1),
|
||||
}, {
|
||||
name: "too long by one char",
|
||||
encoded: "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
|
||||
err: ErrInvalidLength(91),
|
||||
}, {
|
||||
name: "invalid due to mixed case in hrp",
|
||||
encoded: "aBcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
|
||||
err: ErrMixedCase{},
|
||||
}, {
|
||||
name: "invalid due to mixed case in data part",
|
||||
encoded: "abcdef1Qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
|
||||
err: ErrMixedCase{},
|
||||
}}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the decode either produces an error or not as expected.
|
||||
str := test.encoded
|
||||
gotHRP, gotData, err := DecodeToBase256(str)
|
||||
if test.err != err {
|
||||
t.Errorf("%q: unexpected decode error -- got %v, want %v",
|
||||
test.name, err, test.err)
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
// End test case here if a decoding error was expected.
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the expected HRP and original data are as expected.
|
||||
if gotHRP != test.hrp {
|
||||
t.Errorf("%q: mismatched decoded HRP -- got %q, want %q", test.name,
|
||||
gotHRP, test.hrp)
|
||||
continue
|
||||
}
|
||||
data, err := hex.DecodeString(test.data)
|
||||
if err != nil {
|
||||
t.Errorf("%q: invalid hex %q: %v", test.name, test.data, err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(gotData, data) {
|
||||
t.Errorf("%q: mismatched data -- got %x, want %x", test.name,
|
||||
gotData, data)
|
||||
continue
|
||||
}
|
||||
|
||||
// Encode the same data with the HRP converted to all uppercase and
|
||||
// ensure the result is the lowercase version of the original encoded
|
||||
// bech32 string.
|
||||
gotEncoded, err := EncodeFromBase256(strings.ToUpper(test.hrp), data)
|
||||
if err != nil {
|
||||
t.Errorf("%q: unexpected uppercase HRP encode error: %v", test.name,
|
||||
err)
|
||||
}
|
||||
wantEncoded := strings.ToLower(str)
|
||||
if gotEncoded != wantEncoded {
|
||||
t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name,
|
||||
gotEncoded, wantEncoded)
|
||||
}
|
||||
|
||||
// Encode the same data with the HRP converted to all lowercase and
|
||||
// ensure the result is the lowercase version of the original encoded
|
||||
// bech32 string.
|
||||
gotEncoded, err = EncodeFromBase256(strings.ToLower(test.hrp), data)
|
||||
if err != nil {
|
||||
t.Errorf("%q: unexpected lowercase HRP encode error: %v", test.name,
|
||||
err)
|
||||
}
|
||||
if gotEncoded != wantEncoded {
|
||||
t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name,
|
||||
gotEncoded, wantEncoded)
|
||||
}
|
||||
|
||||
// Encode the same data with the HRP converted to mixed upper and
|
||||
// lowercase and ensure the result is the lowercase version of the
|
||||
// original encoded bech32 string.
|
||||
var mixedHRPBuilder strings.Builder
|
||||
for i, r := range test.hrp {
|
||||
if i%2 == 0 {
|
||||
mixedHRPBuilder.WriteString(strings.ToUpper(string(r)))
|
||||
continue
|
||||
}
|
||||
mixedHRPBuilder.WriteRune(r)
|
||||
}
|
||||
gotEncoded, err = EncodeFromBase256(mixedHRPBuilder.String(), data)
|
||||
if err != nil {
|
||||
t.Errorf("%q: unexpected lowercase HRP encode error: %v", test.name,
|
||||
err)
|
||||
}
|
||||
if gotEncoded != wantEncoded {
|
||||
t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name,
|
||||
gotEncoded, wantEncoded)
|
||||
}
|
||||
|
||||
// Ensure a bit flip in the string is caught.
|
||||
pos := strings.LastIndexAny(test.encoded, "1")
|
||||
flipped := str[:pos+1] + string((str[pos+1] ^ 1)) + str[pos+2:]
|
||||
_, _, err = DecodeToBase256(flipped)
|
||||
if err == nil {
|
||||
t.Error("expected decoding to fail")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkEncodeDecodeCycle performs a benchmark for a full encode/decode
|
||||
// cycle of a bech32 string. It also reports the allocation count, which we
|
||||
// expect to be 2 for a fully optimized cycle.
|
||||
func BenchmarkEncodeDecodeCycle(b *testing.B) {
|
||||
// Use a fixed, 49-byte raw data for testing.
|
||||
inputData, err := hex.DecodeString("cbe6365ddbcda9a9915422c3f091c13f8c7b2f263b8d34067bd12c274408473fa764871c9dd51b1bb34873b3473b633ed1")
|
||||
if err != nil {
|
||||
b.Fatalf("failed to initialize input data: %v", err)
|
||||
}
|
||||
|
||||
// Convert this into a 79-byte, base 32 byte slice.
|
||||
base32Input, err := ConvertBits(inputData, 8, 5, true)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to convert input to 32 bits-per-element: %v", err)
|
||||
}
|
||||
|
||||
// Use a fixed hrp for the tests. This should generate an encoded bech32
|
||||
// string of size 90 (the maximum allowed by BIP-173).
|
||||
hrp := "bc"
|
||||
|
||||
// Begin the benchmark. Given that we test one roundtrip per iteration
|
||||
// (that is, one Encode() and one Decode() operation), we expect at most
|
||||
// 2 allocations per reported test op.
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
str, err := Encode(hrp, base32Input)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to encode input: %v", err)
|
||||
}
|
||||
|
||||
_, _, err = Decode(str)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to decode string: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestConvertBits tests that base conversion works as expected using ConvertBits().
|
||||
func TestConvertBits(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
output string
|
||||
fromBits uint8
|
||||
toBits uint8
|
||||
pad bool
|
||||
}{
|
||||
// Trivial empty conversions.
|
||||
{"", "", 8, 5, false},
|
||||
{"", "", 8, 5, true},
|
||||
{"", "", 5, 8, false},
|
||||
{"", "", 5, 8, true},
|
||||
|
||||
// Conversions of 0 value with/without padding.
|
||||
{"00", "00", 8, 5, false},
|
||||
{"00", "0000", 8, 5, true},
|
||||
{"0000", "00", 5, 8, false},
|
||||
{"0000", "0000", 5, 8, true},
|
||||
|
||||
// Testing when conversion ends exactly at the byte edge. This makes
|
||||
// both padded and unpadded versions the same.
|
||||
{"0000000000", "0000000000000000", 8, 5, false},
|
||||
{"0000000000", "0000000000000000", 8, 5, true},
|
||||
{"0000000000000000", "0000000000", 5, 8, false},
|
||||
{"0000000000000000", "0000000000", 5, 8, true},
|
||||
|
||||
// Conversions of full byte sequences.
|
||||
{"ffffff", "1f1f1f1f1e", 8, 5, true},
|
||||
{"1f1f1f1f1e", "ffffff", 5, 8, false},
|
||||
{"1f1f1f1f1e", "ffffff00", 5, 8, true},
|
||||
|
||||
// Sample random conversions.
|
||||
{"c9ca", "190705", 8, 5, false},
|
||||
{"c9ca", "19070500", 8, 5, true},
|
||||
{"19070500", "c9ca", 5, 8, false},
|
||||
{"19070500", "c9ca00", 5, 8, true},
|
||||
|
||||
// Test cases tested on TestConvertBitsFailures with their corresponding
|
||||
// fixes.
|
||||
{"ff", "1f1c", 8, 5, true},
|
||||
{"1f1c10", "ff20", 5, 8, true},
|
||||
|
||||
// Large conversions.
|
||||
{
|
||||
"cbe6365ddbcda9a9915422c3f091c13f8c7b2f263b8d34067bd12c274408473fa764871c9dd51b1bb34873b3473b633ed1",
|
||||
"190f13030c170e1b1916141a13040a14040b011f01040e01071e0607160b1906070e06130801131b1a0416020e110008081c1f1a0e19040703120e1d0a06181b160d0407070c1a07070d11131d1408",
|
||||
8, 5, true,
|
||||
},
|
||||
{
|
||||
"190f13030c170e1b1916141a13040a14040b011f01040e01071e0607160b1906070e06130801131b1a0416020e110008081c1f1a0e19040703120e1d0a06181b160d0407070c1a07070d11131d1408",
|
||||
"cbe6365ddbcda9a9915422c3f091c13f8c7b2f263b8d34067bd12c274408473fa764871c9dd51b1bb34873b3473b633ed100",
|
||||
5, 8, true,
|
||||
},
|
||||
}
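// Worked example for the padded "ff" case above (an explanatory note added
// here, not an original test comment): 0xff is 1111 1111, which regroups
// into the 5-bit groups 11111 and 111; the trailing partial group is
// right-padded with zeros to 11100, yielding 0x1f 0x1c.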
|
||||
|
||||
for i, tc := range tests {
|
||||
input, err := hex.DecodeString(tc.input)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid test input data: %v", err)
|
||||
}
|
||||
|
||||
expected, err := hex.DecodeString(tc.output)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid test output data: %v", err)
|
||||
}
|
||||
|
||||
actual, err := ConvertBits(input, tc.fromBits, tc.toBits, tc.pad)
|
||||
if err != nil {
|
||||
t.Fatalf("test case %d failed: %v", i, err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(actual, expected) {
|
||||
t.Fatalf("test case %d has wrong output; expected=%x actual=%x",
|
||||
i, expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestConvertBitsFailures tests for the expected conversion failures of
|
||||
// ConvertBits().
|
||||
func TestConvertBitsFailures(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
fromBits uint8
|
||||
toBits uint8
|
||||
pad bool
|
||||
err error
|
||||
}{
|
||||
// Not enough output bytes when not using padding.
|
||||
{"ff", 8, 5, false, ErrInvalidIncompleteGroup{}},
|
||||
{"1f1c10", 5, 8, false, ErrInvalidIncompleteGroup{}},
|
||||
|
||||
// Unsupported bit conversions.
|
||||
{"", 0, 5, false, ErrInvalidBitGroups{}},
|
||||
{"", 10, 5, false, ErrInvalidBitGroups{}},
|
||||
{"", 5, 0, false, ErrInvalidBitGroups{}},
|
||||
{"", 5, 10, false, ErrInvalidBitGroups{}},
|
||||
}
|
||||
|
||||
for i, tc := range tests {
|
||||
input, err := hex.DecodeString(tc.input)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid test input data: %v", err)
|
||||
}
|
||||
|
||||
_, err = ConvertBits(input, tc.fromBits, tc.toBits, tc.pad)
|
||||
if err != tc.err {
|
||||
t.Fatalf("test case %d failure: expected '%v' got '%v'", i,
|
||||
tc.err, err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// BenchmarkConvertBitsDown benchmarks the speed and memory allocation behavior
|
||||
// of ConvertBits when converting from a higher base into a lower base (e.g. 8
|
||||
// => 5).
|
||||
//
|
||||
// Only a single allocation is expected, which is used for the output array.
|
||||
func BenchmarkConvertBitsDown(b *testing.B) {
|
||||
// Use a fixed, 49-byte raw data for testing.
|
||||
inputData, err := hex.DecodeString("cbe6365ddbcda9a9915422c3f091c13f8c7b2f263b8d34067bd12c274408473fa764871c9dd51b1bb34873b3473b633ed1")
|
||||
if err != nil {
|
||||
b.Fatalf("failed to initialize input data: %v", err)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := ConvertBits(inputData, 8, 5, true)
|
||||
if err != nil {
|
||||
b.Fatalf("error converting bits: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkConvertBitsUp benchmarks the speed and memory allocation behavior
|
||||
// of ConvertBits when converting from a lower base into a higher base (e.g. 5
|
||||
// => 8).
|
||||
//
|
||||
// Only a single allocation is expected, which is used for the output array.
|
||||
func BenchmarkConvertBitsUp(b *testing.B) {
|
||||
// Use a fixed, 79-byte raw data for testing.
|
||||
inputData, err := hex.DecodeString("190f13030c170e1b1916141a13040a14040b011f01040e01071e0607160b1906070e06130801131b1a0416020e110008081c1f1a0e19040703120e1d0a06181b160d0407070c1a07070d11131d1408")
|
||||
if err != nil {
|
||||
b.Fatalf("failed to initialize input data: %v", err)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := ConvertBits(inputData, 5, 8, true)
|
||||
if err != nil {
|
||||
b.Fatalf("error converting bits: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
15
btcutil/bech32/doc.go
Normal file
|
@ -0,0 +1,15 @@
|
|||
// Copyright (c) 2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package bech32 provides a Go implementation of the bech32 format specified in
|
||||
BIP 173.
|
||||
|
||||
Bech32 strings consist of a human-readable part (hrp), followed by the
|
||||
separator 1, then a checksummed data part encoded using the 32 characters
|
||||
"qpzry9x8gf2tvdw0s3jn54khce6mua7l".
|
||||
|
||||
More info: https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki
|
||||
*/
|
||||
package bech32
|
87
btcutil/bech32/error.go
Normal file
|
@ -0,0 +1,87 @@
|
|||
// Copyright (c) 2019 The Decred developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bech32
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrMixedCase is returned when the bech32 string has both lower and uppercase
|
||||
// characters.
|
||||
type ErrMixedCase struct{}
|
||||
|
||||
func (e ErrMixedCase) Error() string {
|
||||
return "string not all lowercase or all uppercase"
|
||||
}
|
||||
|
||||
// ErrInvalidBitGroups is returned when a conversion is attempted between
// byte slices using an unsupported number of bits per element.
|
||||
type ErrInvalidBitGroups struct{}
|
||||
|
||||
func (e ErrInvalidBitGroups) Error() string {
|
||||
return "only bit groups between 1 and 8 allowed"
|
||||
}
|
||||
|
||||
// ErrInvalidIncompleteGroup is returned when the byte slice used as input
// has data of the wrong length.
|
||||
type ErrInvalidIncompleteGroup struct{}
|
||||
|
||||
func (e ErrInvalidIncompleteGroup) Error() string {
|
||||
return "invalid incomplete group"
|
||||
}
|
||||
|
||||
// ErrInvalidLength is returned when the bech32 string has an invalid length
|
||||
// given the BIP-173 defined restrictions.
|
||||
type ErrInvalidLength int
|
||||
|
||||
func (e ErrInvalidLength) Error() string {
|
||||
return fmt.Sprintf("invalid bech32 string length %d", int(e))
|
||||
}
|
||||
|
||||
// ErrInvalidCharacter is returned when the bech32 string has a character
|
||||
// outside the range of the supported charset.
|
||||
type ErrInvalidCharacter rune
|
||||
|
||||
func (e ErrInvalidCharacter) Error() string {
|
||||
return fmt.Sprintf("invalid character in string: '%c'", rune(e))
|
||||
}
|
||||
|
||||
// ErrInvalidSeparatorIndex is returned when the separator character '1' is
|
||||
// in an invalid position in the bech32 string.
|
||||
type ErrInvalidSeparatorIndex int
|
||||
|
||||
func (e ErrInvalidSeparatorIndex) Error() string {
|
||||
return fmt.Sprintf("invalid separator index %d", int(e))
|
||||
}
|
||||
|
||||
// ErrNonCharsetChar is returned when a character outside of the specific
|
||||
// bech32 charset is used in the string.
|
||||
type ErrNonCharsetChar rune
|
||||
|
||||
func (e ErrNonCharsetChar) Error() string {
|
||||
return fmt.Sprintf("invalid character not part of charset: %v", int(e))
|
||||
}
|
||||
|
||||
// ErrInvalidChecksum is returned when the extracted checksum of the string
|
||||
// is different than what was expected. Both the original version, as well as
|
||||
// the new bech32m checksum may be specified.
|
||||
type ErrInvalidChecksum struct {
|
||||
Expected string
|
||||
ExpectedM string
|
||||
Actual string
|
||||
}
|
||||
|
||||
func (e ErrInvalidChecksum) Error() string {
|
||||
return fmt.Sprintf("invalid checksum (expected (bech32=%v, "+
|
||||
"bech32m=%v), got %v)", e.Expected, e.ExpectedM, e.Actual)
|
||||
}
|
||||
|
||||
// ErrInvalidDataByte is returned when a byte outside the range required for
|
||||
// conversion into a string was found.
|
||||
type ErrInvalidDataByte byte
|
||||
|
||||
func (e ErrInvalidDataByte) Error() string {
|
||||
return fmt.Sprintf("invalid data byte: %v", byte(e))
|
||||
}
|
49
btcutil/bech32/example_test.go
Normal file
|
@ -0,0 +1,49 @@
|
|||
// Copyright (c) 2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bech32_test
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil/bech32"
|
||||
)
|
||||
|
||||
// This example demonstrates how to decode a bech32 encoded string.
|
||||
func ExampleDecode() {
|
||||
encoded := "bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx"
|
||||
hrp, decoded, err := bech32.Decode(encoded)
|
||||
if err != nil {
|
||||
fmt.Println("Error:", err)
|
||||
}
|
||||
|
||||
// Show the decoded data.
|
||||
fmt.Println("Decoded human-readable part:", hrp)
|
||||
fmt.Println("Decoded Data:", hex.EncodeToString(decoded))
|
||||
|
||||
// Output:
|
||||
// Decoded human-readable part: bc
|
||||
// Decoded Data: 010e140f070d1a001912060b0d081504140311021d030c1d03040f1814060e1e160e140f070d1a001912060b0d081504140311021d030c1d03040f1814060e1e16
|
||||
}
|
||||
|
||||
// This example demonstrates how to encode data into a bech32 string.
|
||||
func ExampleEncode() {
|
||||
data := []byte("Test data")
|
||||
// Convert test data to base32:
|
||||
conv, err := bech32.ConvertBits(data, 8, 5, true)
|
||||
if err != nil {
|
||||
fmt.Println("Error:", err)
|
||||
}
|
||||
encoded, err := bech32.Encode("customHrp!11111q", conv)
|
||||
if err != nil {
|
||||
fmt.Println("Error:", err)
|
||||
}
|
||||
|
||||
// Show the encoded data.
|
||||
fmt.Println("Encoded Data:", encoded)
|
||||
|
||||
// Output:
|
||||
// Encoded Data: customhrp!11111q123jhxapqv3shgcgkxpuhe
|
||||
}
|
43
btcutil/bech32/version.go
Normal file
|
@ -0,0 +1,43 @@
|
|||
package bech32
|
||||
|
||||
// ChecksumConst is a type that represents the currently defined bech32
|
||||
// checksum constants.
|
||||
type ChecksumConst int
|
||||
|
||||
const (
|
||||
// Version0Const is the original constant used in the checksum
|
||||
// verification for bech32.
|
||||
Version0Const ChecksumConst = 1
|
||||
|
||||
// VersionMConst is the new constant used for bech32m checksum
|
||||
// verification.
|
||||
VersionMConst ChecksumConst = 0x2bc830a3
|
||||
)
|
||||
|
||||
// Version defines the current set of bech32 versions.
|
||||
type Version uint8
|
||||
|
||||
const (
|
||||
// Version0 defines the original bech32 version.
|
||||
Version0 Version = iota
|
||||
|
||||
// VersionM is the new bech32 version defined in BIP-350, also known as
|
||||
// bech32m.
|
||||
VersionM
|
||||
|
||||
// VersionUnknown denotes an unknown bech version.
|
||||
VersionUnknown
|
||||
)
|
||||
|
||||
// VersionToConsts maps bech32 versions to the checksum constant to be used
|
||||
// when encoding, and asserting a particular version when decoding.
|
||||
var VersionToConsts = map[Version]ChecksumConst{
|
||||
Version0: Version0Const,
|
||||
VersionM: VersionMConst,
|
||||
}
|
||||
|
||||
// ConstsToVersion maps a bech32 constant to the version it's associated with.
|
||||
var ConstsToVersion = map[ChecksumConst]Version{
|
||||
Version0Const: Version0,
|
||||
VersionMConst: VersionM,
|
||||
}
|
265
btcutil/block.go
Normal file
|
@ -0,0 +1,265 @@
|
|||
// Copyright (c) 2013-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package btcutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
)
|
||||
|
||||
// OutOfRangeError describes an error due to accessing an element that is out
|
||||
// of range.
|
||||
type OutOfRangeError string
|
||||
|
||||
// BlockHeightUnknown is the value returned for a block height that is unknown.
|
||||
// This is typically because the block has not been inserted into the main chain
|
||||
// yet.
|
||||
const BlockHeightUnknown = int32(-1)
|
||||
|
||||
// Error satisfies the error interface and prints human-readable errors.
|
||||
func (e OutOfRangeError) Error() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
// Block defines a bitcoin block that provides easier and more efficient
|
||||
// manipulation of raw blocks. It also memoizes hashes for the block and its
|
||||
// transactions on their first access so subsequent accesses don't have to
|
||||
// repeat the relatively expensive hashing operations.
|
||||
type Block struct {
|
||||
msgBlock *wire.MsgBlock // Underlying MsgBlock
|
||||
serializedBlock []byte // Serialized bytes for the block
|
||||
serializedBlockNoWitness []byte // Serialized bytes for block w/o witness data
|
||||
blockHash *chainhash.Hash // Cached block hash
|
||||
blockHeight int32 // Height in the main block chain
|
||||
transactions []*Tx // Transactions
|
||||
txnsGenerated bool // ALL wrapped transactions generated
|
||||
}
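// Typical usage (an illustrative sketch, not part of the original file;
// msgBlock stands for any wire.MsgBlock value):
//
//	blk := NewBlock(&msgBlock)
//	hash := blk.Hash()      // computed on first access
//	hash = blk.Hash()       // served from the cache afterwards
//	raw, err := blk.Bytes() // serialized bytes, also cached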
|
||||
|
||||
// MsgBlock returns the underlying wire.MsgBlock for the Block.
|
||||
func (b *Block) MsgBlock() *wire.MsgBlock {
|
||||
// Return the cached block.
|
||||
return b.msgBlock
|
||||
}
|
||||
|
||||
// Bytes returns the serialized bytes for the Block. This is equivalent to
|
||||
// calling Serialize on the underlying wire.MsgBlock, however it caches the
|
||||
// result so subsequent calls are more efficient.
|
||||
func (b *Block) Bytes() ([]byte, error) {
|
||||
// Return the cached serialized bytes if it has already been generated.
|
||||
if len(b.serializedBlock) != 0 {
|
||||
return b.serializedBlock, nil
|
||||
}
|
||||
|
||||
// Serialize the MsgBlock.
|
||||
w := bytes.NewBuffer(make([]byte, 0, b.msgBlock.SerializeSize()))
|
||||
err := b.msgBlock.Serialize(w)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serializedBlock := w.Bytes()
|
||||
|
||||
// Cache the serialized bytes and return them.
|
||||
b.serializedBlock = serializedBlock
|
||||
return serializedBlock, nil
|
||||
}
|
||||
|
||||
// BytesNoWitness returns the serialized bytes for the block with transactions
|
||||
// encoded without any witness data.
|
||||
func (b *Block) BytesNoWitness() ([]byte, error) {
|
||||
// Return the cached serialized bytes if it has already been generated.
|
||||
if len(b.serializedBlockNoWitness) != 0 {
|
||||
return b.serializedBlockNoWitness, nil
|
||||
}
|
||||
|
||||
// Serialize the MsgBlock.
|
||||
var w bytes.Buffer
|
||||
err := b.msgBlock.SerializeNoWitness(&w)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serializedBlock := w.Bytes()
|
||||
|
||||
// Cache the serialized bytes and return them.
|
||||
b.serializedBlockNoWitness = serializedBlock
|
||||
return serializedBlock, nil
|
||||
}
|
||||
|
||||
// Hash returns the block identifier hash for the Block. This is equivalent to
|
||||
// calling BlockHash on the underlying wire.MsgBlock, however it caches the
|
||||
// result so subsequent calls are more efficient.
|
||||
func (b *Block) Hash() *chainhash.Hash {
|
||||
// Return the cached block hash if it has already been generated.
|
||||
if b.blockHash != nil {
|
||||
return b.blockHash
|
||||
}
|
||||
|
||||
// Cache the block hash and return it.
|
||||
hash := b.msgBlock.BlockHash()
|
||||
b.blockHash = &hash
|
||||
return &hash
|
||||
}
|
||||
|
||||
// Tx returns a wrapped transaction (btcutil.Tx) for the transaction at the
|
||||
// specified index in the Block. The supplied index is 0 based. That is to
|
||||
// say, the first transaction in the block is txNum 0. This is nearly
|
||||
// equivalent to accessing the raw transaction (wire.MsgTx) from the
|
||||
// underlying wire.MsgBlock, however the wrapped transaction has some helpful
|
||||
// properties such as caching the hash so subsequent calls are more efficient.
|
||||
func (b *Block) Tx(txNum int) (*Tx, error) {
|
||||
// Ensure the requested transaction is in range.
|
||||
numTx := uint64(len(b.msgBlock.Transactions))
|
||||
if txNum < 0 || uint64(txNum) >= numTx {
|
||||
str := fmt.Sprintf("transaction index %d is out of range - max %d",
|
||||
txNum, numTx-1)
|
||||
return nil, OutOfRangeError(str)
|
||||
}
|
||||
|
||||
// Generate slice to hold all of the wrapped transactions if needed.
|
||||
if len(b.transactions) == 0 {
|
||||
b.transactions = make([]*Tx, numTx)
|
||||
}
|
||||
|
||||
// Return the wrapped transaction if it has already been generated.
|
||||
if b.transactions[txNum] != nil {
|
||||
return b.transactions[txNum], nil
|
||||
}
|
||||
|
||||
// Generate and cache the wrapped transaction and return it.
|
||||
newTx := NewTx(b.msgBlock.Transactions[txNum])
|
||||
newTx.SetIndex(txNum)
|
||||
b.transactions[txNum] = newTx
|
||||
return newTx, nil
|
||||
}
|
||||
|
||||
// Transactions returns a slice of wrapped transactions (btcutil.Tx) for all
|
||||
// transactions in the Block. This is nearly equivalent to accessing the raw
|
||||
// transactions (wire.MsgTx) in the underlying wire.MsgBlock, however it
|
||||
// instead provides easy access to wrapped versions (btcutil.Tx) of them.
|
||||
func (b *Block) Transactions() []*Tx {
|
||||
// Return transactions if they have ALL already been generated. This
|
||||
// flag is necessary because the wrapped transactions are lazily
|
||||
// generated in a sparse fashion.
|
||||
if b.txnsGenerated {
|
||||
return b.transactions
|
||||
}
|
||||
|
||||
// Generate slice to hold all of the wrapped transactions if needed.
|
||||
if len(b.transactions) == 0 {
|
||||
b.transactions = make([]*Tx, len(b.msgBlock.Transactions))
|
||||
}
|
||||
|
||||
// Generate and cache the wrapped transactions for all that haven't
|
||||
// already been done.
|
||||
for i, tx := range b.transactions {
|
||||
if tx == nil {
|
||||
newTx := NewTx(b.msgBlock.Transactions[i])
|
||||
newTx.SetIndex(i)
|
||||
b.transactions[i] = newTx
|
||||
}
|
||||
}
|
||||
|
||||
b.txnsGenerated = true
|
||||
return b.transactions
|
||||
}
|
||||
|
||||
// TxHash returns the hash for the requested transaction number in the Block.
|
||||
// The supplied index is 0 based. That is to say, the first transaction in the
|
||||
// block is txNum 0. This is equivalent to calling TxHash on the underlying
|
||||
// wire.MsgTx, however it caches the result so subsequent calls are more
|
||||
// efficient.
|
||||
func (b *Block) TxHash(txNum int) (*chainhash.Hash, error) {
|
||||
// Attempt to get a wrapped transaction for the specified index. It
|
||||
// will be created lazily if needed or simply return the cached version
|
||||
// if it has already been generated.
|
||||
tx, err := b.Tx(txNum)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Defer to the wrapped transaction which will return the cached hash if
|
||||
// it has already been generated.
|
||||
return tx.Hash(), nil
|
||||
}
|
||||
|
||||
// TxLoc returns the offsets and lengths of each transaction in a raw block.
|
||||
// It is used to allow fast indexing into transactions within the raw byte
|
||||
// stream.
|
||||
func (b *Block) TxLoc() ([]wire.TxLoc, error) {
|
||||
rawMsg, err := b.Bytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rbuf := bytes.NewBuffer(rawMsg)
|
||||
|
||||
var mblock wire.MsgBlock
|
||||
txLocs, err := mblock.DeserializeTxLoc(rbuf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return txLocs, err
|
||||
}
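// Illustrative note (not part of the original file; blk stands for any
// *Block): the returned offsets can be used to slice an individual
// transaction straight out of the cached serialized block:
//
//	raw, _ := blk.Bytes()
//	locs, _ := blk.TxLoc()
//	firstTx := raw[locs[0].TxStart : locs[0].TxStart+locs[0].TxLen]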
|
||||
|
||||
// Height returns the saved height of the block in the block chain. This value
|
||||
// will be BlockHeightUnknown if it hasn't already explicitly been set.
|
||||
func (b *Block) Height() int32 {
|
||||
return b.blockHeight
|
||||
}
|
||||
|
||||
// SetHeight sets the height of the block in the block chain.
|
||||
func (b *Block) SetHeight(height int32) {
|
||||
b.blockHeight = height
|
||||
}
|
||||
|
||||
// NewBlock returns a new instance of a bitcoin block given an underlying
|
||||
// wire.MsgBlock. See Block.
|
||||
func NewBlock(msgBlock *wire.MsgBlock) *Block {
|
||||
return &Block{
|
||||
msgBlock: msgBlock,
|
||||
blockHeight: BlockHeightUnknown,
|
||||
}
|
||||
}
|
||||
|
||||
// NewBlockFromBytes returns a new instance of a bitcoin block given the
|
||||
// serialized bytes. See Block.
|
||||
func NewBlockFromBytes(serializedBlock []byte) (*Block, error) {
|
||||
br := bytes.NewReader(serializedBlock)
|
||||
b, err := NewBlockFromReader(br)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.serializedBlock = serializedBlock
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// NewBlockFromReader returns a new instance of a bitcoin block given a
|
||||
// Reader to deserialize the block. See Block.
|
||||
func NewBlockFromReader(r io.Reader) (*Block, error) {
|
||||
// Deserialize the bytes into a MsgBlock.
|
||||
var msgBlock wire.MsgBlock
|
||||
err := msgBlock.Deserialize(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b := Block{
|
||||
msgBlock: &msgBlock,
|
||||
blockHeight: BlockHeightUnknown,
|
||||
}
|
||||
return &b, nil
|
||||
}
|
||||
|
||||
// NewBlockFromBlockAndBytes returns a new instance of a bitcoin block given
|
||||
// an underlying wire.MsgBlock and the serialized bytes for it. See Block.
|
||||
func NewBlockFromBlockAndBytes(msgBlock *wire.MsgBlock, serializedBlock []byte) *Block {
|
||||
return &Block{
|
||||
msgBlock: msgBlock,
|
||||
serializedBlock: serializedBlock,
|
||||
blockHeight: BlockHeightUnknown,
|
||||
}
|
||||
}
|
556
btcutil/block_test.go
Normal file
|
@ -0,0 +1,556 @@
|
|||
// Copyright (c) 2013-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package btcutil_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
// TestBlock tests the API for Block.
|
||||
func TestBlock(t *testing.T) {
|
||||
b := btcutil.NewBlock(&Block100000)
|
||||
|
||||
// Ensure we get the same data back out.
|
||||
if msgBlock := b.MsgBlock(); !reflect.DeepEqual(msgBlock, &Block100000) {
|
||||
t.Errorf("MsgBlock: mismatched MsgBlock - got %v, want %v",
|
||||
spew.Sdump(msgBlock), spew.Sdump(&Block100000))
|
||||
}
|
||||
|
||||
// Ensure block height set and get work properly.
|
||||
wantHeight := int32(100000)
|
||||
b.SetHeight(wantHeight)
|
||||
if gotHeight := b.Height(); gotHeight != wantHeight {
|
||||
t.Errorf("Height: mismatched height - got %v, want %v",
|
||||
gotHeight, wantHeight)
|
||||
}
|
||||
|
||||
// Hash for block 100,000.
|
||||
wantHashStr := "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
|
||||
wantHash, err := chainhash.NewHashFromStr(wantHashStr)
|
||||
if err != nil {
|
||||
t.Errorf("NewHashFromStr: %v", err)
|
||||
}
|
||||
|
||||
// Request the hash multiple times to test generation and caching.
|
||||
for i := 0; i < 2; i++ {
|
||||
hash := b.Hash()
|
||||
if !hash.IsEqual(wantHash) {
|
||||
t.Errorf("Hash #%d mismatched hash - got %v, want %v",
|
||||
i, hash, wantHash)
|
||||
}
|
||||
}
|
||||
|
||||
// Hashes for the transactions in Block100000.
|
||||
wantTxHashes := []string{
|
||||
"8c14f0db3df150123e6f3dbbf30f8b955a8249b62ac1d1ff16284aefa3d06d87",
|
||||
"fff2525b8931402dd09222c50775608f75787bd2b87e56995a7bdd30f79702c4",
|
||||
"6359f0868171b1d194cbee1af2f16ea598ae8fad666d9b012c8ed2b79a236ec4",
|
||||
"e9a66845e05d5abc0ad04ec80f774a7e585c6e8db975962d069a522137b80c1d",
|
||||
}
|
||||
|
||||
// Create a new block to nuke all cached data.
|
||||
b = btcutil.NewBlock(&Block100000)
|
||||
|
||||
// Request hash for all transactions one at a time via Tx.
|
||||
for i, txHash := range wantTxHashes {
|
||||
wantHash, err := chainhash.NewHashFromStr(txHash)
|
||||
if err != nil {
|
||||
t.Errorf("NewHashFromStr: %v", err)
|
||||
}
|
||||
|
||||
// Request the hash multiple times to test generation and
|
||||
// caching.
|
||||
for j := 0; j < 2; j++ {
|
||||
tx, err := b.Tx(i)
|
||||
if err != nil {
|
||||
t.Errorf("Tx #%d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
hash := tx.Hash()
|
||||
if !hash.IsEqual(wantHash) {
|
||||
t.Errorf("Hash #%d mismatched hash - got %v, "+
|
||||
"want %v", j, hash, wantHash)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create a new block to nuke all cached data.
|
||||
b = btcutil.NewBlock(&Block100000)
|
||||
|
||||
// Request slice of all transactions multiple times to test generation
|
||||
// and caching.
|
||||
for i := 0; i < 2; i++ {
|
||||
transactions := b.Transactions()
|
||||
|
||||
// Ensure we get the expected number of transactions.
|
||||
if len(transactions) != len(wantTxHashes) {
|
||||
t.Errorf("Transactions #%d mismatched number of "+
|
||||
"transactions - got %d, want %d", i,
|
||||
len(transactions), len(wantTxHashes))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure all of the hashes match.
|
||||
for j, tx := range transactions {
|
||||
wantHash, err := chainhash.NewHashFromStr(wantTxHashes[j])
|
||||
if err != nil {
|
||||
t.Errorf("NewHashFromStr: %v", err)
|
||||
}
|
||||
|
||||
hash := tx.Hash()
|
||||
if !hash.IsEqual(wantHash) {
|
||||
t.Errorf("Transactions #%d mismatched hashes "+
|
||||
"- got %v, want %v", j, hash, wantHash)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize the test block.
|
||||
var block100000Buf bytes.Buffer
|
||||
err = Block100000.Serialize(&block100000Buf)
|
||||
if err != nil {
|
||||
t.Errorf("Serialize: %v", err)
|
||||
}
|
||||
block100000Bytes := block100000Buf.Bytes()
|
||||
|
||||
// Request serialized bytes multiple times to test generation and
|
||||
// caching.
|
||||
for i := 0; i < 2; i++ {
|
||||
serializedBytes, err := b.Bytes()
|
||||
if err != nil {
|
||||
t.Errorf("Bytes: %v", err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(serializedBytes, block100000Bytes) {
|
||||
t.Errorf("Bytes #%d wrong bytes - got %v, want %v", i,
|
||||
spew.Sdump(serializedBytes),
|
||||
spew.Sdump(block100000Bytes))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Transaction offsets and length for the transaction in Block100000.
|
||||
wantTxLocs := []wire.TxLoc{
|
||||
{TxStart: 81, TxLen: 144},
|
||||
{TxStart: 225, TxLen: 259},
|
||||
{TxStart: 484, TxLen: 257},
|
||||
{TxStart: 741, TxLen: 225},
|
||||
}
|
||||
|
||||
// Ensure the transaction location information is accurate.
|
||||
txLocs, err := b.TxLoc()
|
||||
if err != nil {
|
||||
t.Errorf("TxLoc: %v", err)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(txLocs, wantTxLocs) {
|
||||
t.Errorf("TxLoc: mismatched transaction location information "+
|
||||
"- got %v, want %v", spew.Sdump(txLocs),
|
||||
spew.Sdump(wantTxLocs))
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewBlockFromBytes tests creation of a Block from serialized bytes.
|
||||
func TestNewBlockFromBytes(t *testing.T) {
|
||||
// Serialize the test block.
|
||||
var block100000Buf bytes.Buffer
|
||||
err := Block100000.Serialize(&block100000Buf)
|
||||
if err != nil {
|
||||
t.Errorf("Serialize: %v", err)
|
||||
}
|
||||
block100000Bytes := block100000Buf.Bytes()
|
||||
|
||||
// Create a new block from the serialized bytes.
|
||||
b, err := btcutil.NewBlockFromBytes(block100000Bytes)
|
||||
if err != nil {
|
||||
t.Errorf("NewBlockFromBytes: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure we get the same data back out.
|
||||
serializedBytes, err := b.Bytes()
|
||||
if err != nil {
|
||||
t.Errorf("Bytes: %v", err)
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(serializedBytes, block100000Bytes) {
|
||||
t.Errorf("Bytes: wrong bytes - got %v, want %v",
|
||||
spew.Sdump(serializedBytes),
|
||||
spew.Sdump(block100000Bytes))
|
||||
}
|
||||
|
||||
// Ensure the generated MsgBlock is correct.
|
||||
if msgBlock := b.MsgBlock(); !reflect.DeepEqual(msgBlock, &Block100000) {
|
||||
t.Errorf("MsgBlock: mismatched MsgBlock - got %v, want %v",
|
||||
spew.Sdump(msgBlock), spew.Sdump(&Block100000))
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewBlockFromBlockAndBytes tests creation of a Block from a MsgBlock and
|
||||
// raw bytes.
|
||||
func TestNewBlockFromBlockAndBytes(t *testing.T) {
|
||||
// Serialize the test block.
|
||||
var block100000Buf bytes.Buffer
|
||||
err := Block100000.Serialize(&block100000Buf)
|
||||
if err != nil {
|
||||
t.Errorf("Serialize: %v", err)
|
||||
}
|
||||
block100000Bytes := block100000Buf.Bytes()
|
||||
|
||||
// Create a new block from the serialized bytes.
|
||||
b := btcutil.NewBlockFromBlockAndBytes(&Block100000, block100000Bytes)
|
||||
|
||||
// Ensure we get the same data back out.
|
||||
serializedBytes, err := b.Bytes()
|
||||
if err != nil {
|
||||
t.Errorf("Bytes: %v", err)
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(serializedBytes, block100000Bytes) {
|
||||
t.Errorf("Bytes: wrong bytes - got %v, want %v",
|
||||
spew.Sdump(serializedBytes),
|
||||
spew.Sdump(block100000Bytes))
|
||||
}
|
||||
if msgBlock := b.MsgBlock(); !reflect.DeepEqual(msgBlock, &Block100000) {
|
||||
t.Errorf("MsgBlock: mismatched MsgBlock - got %v, want %v",
|
||||
spew.Sdump(msgBlock), spew.Sdump(&Block100000))
|
||||
}
|
||||
}
|
||||
|
||||
// TestBlockErrors tests the error paths for the Block API.
|
||||
func TestBlockErrors(t *testing.T) {
|
||||
// Ensure out of range errors are as expected.
|
||||
wantErr := "transaction index -1 is out of range - max 3"
|
||||
testErr := btcutil.OutOfRangeError(wantErr)
|
||||
if testErr.Error() != wantErr {
|
||||
t.Errorf("OutOfRangeError: wrong error - got %v, want %v",
|
||||
testErr.Error(), wantErr)
|
||||
}
|
||||
|
||||
// Serialize the test block.
|
||||
var block100000Buf bytes.Buffer
|
||||
err := Block100000.Serialize(&block100000Buf)
|
||||
if err != nil {
|
||||
t.Errorf("Serialize: %v", err)
|
||||
}
|
||||
block100000Bytes := block100000Buf.Bytes()
|
||||
|
||||
// Create a new block from the serialized bytes.
|
||||
b, err := btcutil.NewBlockFromBytes(block100000Bytes)
|
||||
if err != nil {
|
||||
t.Errorf("NewBlockFromBytes: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Truncate the block byte buffer to force errors.
|
||||
shortBytes := block100000Bytes[:80]
|
||||
_, err = btcutil.NewBlockFromBytes(shortBytes)
|
||||
if err != io.EOF {
|
||||
t.Errorf("NewBlockFromBytes: did not get expected error - "+
|
||||
"got %v, want %v", err, io.EOF)
|
||||
}
|
||||
|
||||
// Ensure TxHash returns expected error on invalid indices.
|
||||
_, err = b.TxHash(-1)
|
||||
if _, ok := err.(btcutil.OutOfRangeError); !ok {
|
||||
t.Errorf("TxHash: wrong error - got: %v <%T>, "+
|
||||
"want: <%T>", err, err, btcutil.OutOfRangeError(""))
|
||||
}
|
||||
_, err = b.TxHash(len(Block100000.Transactions))
|
||||
if _, ok := err.(btcutil.OutOfRangeError); !ok {
|
||||
t.Errorf("TxHash: wrong error - got: %v <%T>, "+
|
||||
"want: <%T>", err, err, btcutil.OutOfRangeError(""))
|
||||
}
|
||||
|
||||
// Ensure Tx returns expected error on invalid indices.
|
||||
_, err = b.Tx(-1)
|
||||
if _, ok := err.(btcutil.OutOfRangeError); !ok {
|
||||
t.Errorf("Tx: wrong error - got: %v <%T>, "+
|
||||
"want: <%T>", err, err, btcutil.OutOfRangeError(""))
|
||||
}
|
||||
_, err = b.Tx(len(Block100000.Transactions))
|
||||
if _, ok := err.(btcutil.OutOfRangeError); !ok {
|
||||
t.Errorf("Tx: wrong error - got: %v <%T>, "+
|
||||
"want: <%T>", err, err, btcutil.OutOfRangeError(""))
|
||||
}
|
||||
|
||||
// Ensure TxLoc returns expected error with short byte buffer.
|
||||
// This makes use of the test package only function, SetBlockBytes, to
|
||||
// inject a short byte buffer.
|
||||
b.SetBlockBytes(shortBytes)
|
||||
_, err = b.TxLoc()
|
||||
if err != io.EOF {
|
||||
t.Errorf("TxLoc: did not get expected error - "+
|
||||
"got %v, want %v", err, io.EOF)
|
||||
}
|
||||
}
|
||||
|
||||
// Block100000 defines block 100,000 of the block chain. It is used to
|
||||
// test Block operations.
|
||||
var Block100000 = wire.MsgBlock{
|
||||
Header: wire.BlockHeader{
|
||||
Version: 1,
|
||||
PrevBlock: chainhash.Hash([32]byte{ // Make go vet happy.
|
||||
0x50, 0x12, 0x01, 0x19, 0x17, 0x2a, 0x61, 0x04,
|
||||
0x21, 0xa6, 0xc3, 0x01, 0x1d, 0xd3, 0x30, 0xd9,
|
||||
0xdf, 0x07, 0xb6, 0x36, 0x16, 0xc2, 0xcc, 0x1f,
|
||||
0x1c, 0xd0, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
}), // 000000000002d01c1fccc21636b607dfd930d31d01c3a62104612a1719011250
|
||||
MerkleRoot: chainhash.Hash([32]byte{ // Make go vet happy.
|
||||
0x66, 0x57, 0xa9, 0x25, 0x2a, 0xac, 0xd5, 0xc0,
|
||||
0xb2, 0x94, 0x09, 0x96, 0xec, 0xff, 0x95, 0x22,
|
||||
0x28, 0xc3, 0x06, 0x7c, 0xc3, 0x8d, 0x48, 0x85,
|
||||
0xef, 0xb5, 0xa4, 0xac, 0x42, 0x47, 0xe9, 0xf3,
|
||||
}), // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766
|
||||
Timestamp: time.Unix(1293623863, 0), // 2010-12-29 11:57:43 +0000 UTC
|
||||
Bits: 0x1b04864c, // 453281356
|
||||
Nonce: 0x10572b0f, // 274148111
|
||||
},
|
||||
Transactions: []*wire.MsgTx{
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
0x04, 0x4c, 0x86, 0x04, 0x1b, 0x02, 0x06, 0x02,
|
||||
},
|
||||
Sequence: 0xffffffff,
|
||||
Witness: [][]byte{
|
||||
{0x04, 0x31},
|
||||
{0x01, 0x43},
|
||||
},
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0x12a05f200, // 5000000000
|
||||
PkScript: []byte{
|
||||
0x41, // OP_DATA_65
|
||||
0x04, 0x1b, 0x0e, 0x8c, 0x25, 0x67, 0xc1, 0x25,
|
||||
0x36, 0xaa, 0x13, 0x35, 0x7b, 0x79, 0xa0, 0x73,
|
||||
0xdc, 0x44, 0x44, 0xac, 0xb8, 0x3c, 0x4e, 0xc7,
|
||||
0xa0, 0xe2, 0xf9, 0x9d, 0xd7, 0x45, 0x75, 0x16,
|
||||
0xc5, 0x81, 0x72, 0x42, 0xda, 0x79, 0x69, 0x24,
|
||||
0xca, 0x4e, 0x99, 0x94, 0x7d, 0x08, 0x7f, 0xed,
|
||||
0xf9, 0xce, 0x46, 0x7c, 0xb9, 0xf7, 0xc6, 0x28,
|
||||
0x70, 0x78, 0xf8, 0x01, 0xdf, 0x27, 0x6f, 0xdf,
|
||||
0x84, // 65-byte signature
|
||||
0xac, // OP_CHECKSIG
|
||||
},
|
||||
},
|
||||
},
|
||||
LockTime: 0,
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash([32]byte{ // Make go vet happy.
|
||||
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
|
||||
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
|
||||
0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07,
|
||||
0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87,
|
||||
}), // 87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03
|
||||
Index: 0,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
0x49, // OP_DATA_73
|
||||
0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3,
|
||||
0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6,
|
||||
0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94,
|
||||
0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58,
|
||||
0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00,
|
||||
0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62,
|
||||
0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c,
|
||||
0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60,
|
||||
0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48,
|
||||
0x01, // 73-byte signature
|
||||
0x41, // OP_DATA_65
|
||||
0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d,
|
||||
0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38,
|
||||
0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25,
|
||||
0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e,
|
||||
0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8,
|
||||
0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd,
|
||||
0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b,
|
||||
0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3,
|
||||
0xd3, // 65-byte pubkey
|
||||
},
|
||||
Sequence: 0xffffffff,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0x2123e300, // 556000000
|
||||
PkScript: []byte{
|
||||
0x76, // OP_DUP
|
||||
0xa9, // OP_HASH160
|
||||
0x14, // OP_DATA_20
|
||||
0xc3, 0x98, 0xef, 0xa9, 0xc3, 0x92, 0xba, 0x60,
|
||||
0x13, 0xc5, 0xe0, 0x4e, 0xe7, 0x29, 0x75, 0x5e,
|
||||
0xf7, 0xf5, 0x8b, 0x32,
|
||||
0x88, // OP_EQUALVERIFY
|
||||
0xac, // OP_CHECKSIG
|
||||
},
|
||||
},
|
||||
{
|
||||
Value: 0x108e20f00, // 4444000000
|
||||
PkScript: []byte{
|
||||
0x76, // OP_DUP
|
||||
0xa9, // OP_HASH160
|
||||
0x14, // OP_DATA_20
|
||||
0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f,
|
||||
0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b,
|
||||
0x52, 0xde, 0x3d, 0x7c,
|
||||
0x88, // OP_EQUALVERIFY
|
||||
0xac, // OP_CHECKSIG
|
||||
},
|
||||
},
|
||||
},
|
||||
LockTime: 0,
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash([32]byte{ // Make go vet happy.
|
||||
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
|
||||
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
|
||||
0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65,
|
||||
0xe4, 0x1c, 0x61, 0xd0, 0x78, 0x29, 0x4e, 0xcf,
|
||||
}), // cf4e2978d0611ce46592e02d7e7daf8627a316ab69759a9f3df109a7f2bf3ec3
|
||||
Index: 1,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
0x47, // OP_DATA_71
|
||||
0x30, 0x44, 0x02, 0x20, 0x03, 0x2d, 0x30, 0xdf,
|
||||
0x5e, 0xe6, 0xf5, 0x7f, 0xa4, 0x6c, 0xdd, 0xb5,
|
||||
0xeb, 0x8d, 0x0d, 0x9f, 0xe8, 0xde, 0x6b, 0x34,
|
||||
0x2d, 0x27, 0x94, 0x2a, 0xe9, 0x0a, 0x32, 0x31,
|
||||
0xe0, 0xba, 0x33, 0x3e, 0x02, 0x20, 0x3d, 0xee,
|
||||
0xe8, 0x06, 0x0f, 0xdc, 0x70, 0x23, 0x0a, 0x7f,
|
||||
0x5b, 0x4a, 0xd7, 0xd7, 0xbc, 0x3e, 0x62, 0x8c,
|
||||
0xbe, 0x21, 0x9a, 0x88, 0x6b, 0x84, 0x26, 0x9e,
|
||||
0xae, 0xb8, 0x1e, 0x26, 0xb4, 0xfe, 0x01,
|
||||
0x41, // OP_DATA_65
|
||||
0x04, 0xae, 0x31, 0xc3, 0x1b, 0xf9, 0x12, 0x78,
|
||||
0xd9, 0x9b, 0x83, 0x77, 0xa3, 0x5b, 0xbc, 0xe5,
|
||||
0xb2, 0x7d, 0x9f, 0xff, 0x15, 0x45, 0x68, 0x39,
|
||||
0xe9, 0x19, 0x45, 0x3f, 0xc7, 0xb3, 0xf7, 0x21,
|
||||
0xf0, 0xba, 0x40, 0x3f, 0xf9, 0x6c, 0x9d, 0xee,
|
||||
0xb6, 0x80, 0xe5, 0xfd, 0x34, 0x1c, 0x0f, 0xc3,
|
||||
0xa7, 0xb9, 0x0d, 0xa4, 0x63, 0x1e, 0xe3, 0x95,
|
||||
0x60, 0x63, 0x9d, 0xb4, 0x62, 0xe9, 0xcb, 0x85,
|
||||
0x0f, // 65-byte pubkey
|
||||
},
|
||||
Sequence: 0xffffffff,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
PkScript: []byte{
|
||||
0x76, // OP_DUP
|
||||
0xa9, // OP_HASH160
|
||||
0x14, // OP_DATA_20
|
||||
0xb0, 0xdc, 0xbf, 0x97, 0xea, 0xbf, 0x44, 0x04,
|
||||
0xe3, 0x1d, 0x95, 0x24, 0x77, 0xce, 0x82, 0x2d,
|
||||
0xad, 0xbe, 0x7e, 0x10,
|
||||
0x88, // OP_EQUALVERIFY
|
||||
0xac, // OP_CHECKSIG
|
||||
},
|
||||
},
|
||||
{
|
||||
Value: 0x11d260c0, // 299000000
|
||||
PkScript: []byte{
|
||||
0x76, // OP_DUP
|
||||
0xa9, // OP_HASH160
|
||||
0x14, // OP_DATA_20
|
||||
0x6b, 0x12, 0x81, 0xee, 0xc2, 0x5a, 0xb4, 0xe1,
|
||||
0xe0, 0x79, 0x3f, 0xf4, 0xe0, 0x8a, 0xb1, 0xab,
|
||||
0xb3, 0x40, 0x9c, 0xd9,
|
||||
0x88, // OP_EQUALVERIFY
|
||||
0xac, // OP_CHECKSIG
|
||||
},
|
||||
},
|
||||
},
|
||||
LockTime: 0,
|
||||
},
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash([32]byte{ // Make go vet happy.
|
||||
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73,
|
||||
0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac,
|
||||
0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90,
|
||||
0x9b, 0xa1, 0xc4, 0x3d, 0xed, 0x5f, 0x51, 0xf4,
|
||||
}), // f4515fed3dc4a19b90a317b9840c243bac26114cf637522373a7d486b372600b
|
||||
Index: 0,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
0x49, // OP_DATA_73
|
||||
0x30, 0x46, 0x02, 0x21, 0x00, 0xbb, 0x1a, 0xd2,
|
||||
0x6d, 0xf9, 0x30, 0xa5, 0x1c, 0xce, 0x11, 0x0c,
|
||||
0xf4, 0x4f, 0x7a, 0x48, 0xc3, 0xc5, 0x61, 0xfd,
|
||||
0x97, 0x75, 0x00, 0xb1, 0xae, 0x5d, 0x6b, 0x6f,
|
||||
0xd1, 0x3d, 0x0b, 0x3f, 0x4a, 0x02, 0x21, 0x00,
|
||||
0xc5, 0xb4, 0x29, 0x51, 0xac, 0xed, 0xff, 0x14,
|
||||
0xab, 0xba, 0x27, 0x36, 0xfd, 0x57, 0x4b, 0xdb,
|
||||
0x46, 0x5f, 0x3e, 0x6f, 0x8d, 0xa1, 0x2e, 0x2c,
|
||||
0x53, 0x03, 0x95, 0x4a, 0xca, 0x7f, 0x78, 0xf3,
|
||||
0x01, // 73-byte signature
|
||||
0x41, // OP_DATA_65
|
||||
0x04, 0xa7, 0x13, 0x5b, 0xfe, 0x82, 0x4c, 0x97,
|
||||
0xec, 0xc0, 0x1e, 0xc7, 0xd7, 0xe3, 0x36, 0x18,
|
||||
0x5c, 0x81, 0xe2, 0xaa, 0x2c, 0x41, 0xab, 0x17,
|
||||
0x54, 0x07, 0xc0, 0x94, 0x84, 0xce, 0x96, 0x94,
|
||||
0xb4, 0x49, 0x53, 0xfc, 0xb7, 0x51, 0x20, 0x65,
|
||||
0x64, 0xa9, 0xc2, 0x4d, 0xd0, 0x94, 0xd4, 0x2f,
|
||||
0xdb, 0xfd, 0xd5, 0xaa, 0xd3, 0xe0, 0x63, 0xce,
|
||||
0x6a, 0xf4, 0xcf, 0xaa, 0xea, 0x4e, 0xa1, 0x4f,
|
||||
0xbb, // 65-byte pubkey
|
||||
},
|
||||
Sequence: 0xffffffff,
|
||||
},
|
||||
},
|
||||
TxOut: []*wire.TxOut{
|
||||
{
|
||||
Value: 0xf4240, // 1000000
|
||||
PkScript: []byte{
|
||||
0x76, // OP_DUP
|
||||
0xa9, // OP_HASH160
|
||||
0x14, // OP_DATA_20
|
||||
0x39, 0xaa, 0x3d, 0x56, 0x9e, 0x06, 0xa1, 0xd7,
|
||||
0x92, 0x6d, 0xc4, 0xbe, 0x11, 0x93, 0xc9, 0x9b,
|
||||
0xf2, 0xeb, 0x9e, 0xe0,
|
||||
0x88, // OP_EQUALVERIFY
|
||||
0xac, // OP_CHECKSIG
|
||||
},
|
||||
},
|
||||
},
|
||||
LockTime: 0,
|
||||
},
|
||||
},
|
||||
}
|
30
btcutil/bloom/README.md
Normal file
|
@ -0,0 +1,30 @@
|
|||
bloom
|
||||
=====
|
||||
|
||||
[](https://travis-ci.org/btcsuite/btcutil)
|
||||
[](http://copyfree.org)
|
||||
[](http://godoc.org/github.com/btcsuite/btcd/btcutil/bloom)
|
||||
|
||||
Package bloom provides an API for dealing with bitcoin-specific bloom filters.
|
||||
|
||||
A comprehensive suite of tests is provided to ensure proper functionality. See
|
||||
`test_coverage.txt` for the gocov coverage report. Alternatively, if you are
|
||||
running a POSIX OS, you can run the `cov_report.sh` script for a real-time
|
||||
report.
|
||||
|
||||
## Installation and Updating
|
||||
|
||||
```bash
|
||||
$ go get -u github.com/btcsuite/btcd/btcutil/bloom
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
* [NewFilter Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/bloom#example-NewFilter)
|
||||
Demonstrates how to create a new bloom filter, add a transaction hash to it,
|
||||
and check if the filter matches the transaction.
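
The following is a minimal end-to-end sketch of the same flow (the transaction
hash is the one used in the package example; error handling is elided for
brevity):

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/btcsuite/btcd/btcutil/bloom"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// Create a filter sized for 10 elements with a 0.01% false positive
	// rate that never updates itself when matches occur.
	filter := bloom.NewFilter(10, rand.Uint32(), 0.0001, wire.BloomUpdateNone)

	// Add a transaction hash and test for (probable) membership.
	hash, _ := chainhash.NewHashFromStr("fd611c56ca0d378cdcd16244b45c2ba9588da3adac367c4ef43e808b280b8a45")
	filter.AddHash(hash)
	fmt.Println("Filter Matches?:", filter.Matches(hash[:]))
}
```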
|
||||
|
||||
## License
|
||||
|
||||
Package bloom is licensed under the [copyfree](http://copyfree.org) ISC
|
||||
License.
|
17
btcutil/bloom/cov_report.sh
Normal file
|
@ -0,0 +1,17 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script uses gocov to generate a test coverage report.
|
||||
# The gocov tool may be obtained with the following command:
|
||||
# go get github.com/axw/gocov/gocov
|
||||
#
|
||||
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
|
||||
|
||||
# Check for gocov.
|
||||
type gocov >/dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
echo >&2 "This script requires the gocov tool."
|
||||
echo >&2 "You may obtain it with the following command:"
|
||||
echo >&2 "go get github.com/axw/gocov/gocov"
|
||||
exit 1
|
||||
fi
|
||||
gocov test | gocov report
|
45
btcutil/bloom/example_test.go
Normal file
|
@ -0,0 +1,45 @@
|
|||
// Copyright (c) 2014-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bloom_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcd/btcutil/bloom"
|
||||
)
|
||||
|
||||
// This example demonstrates how to create a new bloom filter, add a transaction
|
||||
// hash to it, and check if the filter matches the transaction.
|
||||
func ExampleNewFilter() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
tweak := rand.Uint32()
|
||||
|
||||
// Create a new bloom filter intended to hold 10 elements with a 0.01%
|
||||
// false positive rate and does not include any automatic update
|
||||
// functionality when transactions are matched.
|
||||
filter := bloom.NewFilter(10, tweak, 0.0001, wire.BloomUpdateNone)
|
||||
|
||||
// Create a transaction hash and add it to the filter. This particular
|
||||
// transaction is the first transaction in block 310,000 of the main
|
||||
// bitcoin block chain.
|
||||
txHashStr := "fd611c56ca0d378cdcd16244b45c2ba9588da3adac367c4ef43e808b280b8a45"
|
||||
txHash, err := chainhash.NewHashFromStr(txHashStr)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
filter.AddHash(txHash)
|
||||
|
||||
// Show that the filter matches.
|
||||
matches := filter.Matches(txHash[:])
|
||||
fmt.Println("Filter Matches?:", matches)
|
||||
|
||||
// Output:
|
||||
// Filter Matches?: true
|
||||
}
|
354
btcutil/bloom/filter.go
Normal file
|
@ -0,0 +1,354 @@
|
|||
// Copyright (c) 2014-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bloom
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
)
|
||||
|
||||
// ln2Squared is simply the square of the natural log of 2.
|
||||
const ln2Squared = math.Ln2 * math.Ln2
|
||||
|
||||
// minUint32 is a convenience function to return the minimum value of the two
|
||||
// passed uint32 values.
|
||||
func minUint32(a, b uint32) uint32 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Filter defines a bitcoin bloom filter that provides easy manipulation of raw
|
||||
// filter data.
|
||||
type Filter struct {
|
||||
mtx sync.Mutex
|
||||
msgFilterLoad *wire.MsgFilterLoad
|
||||
}
|
||||
|
||||
// NewFilter creates a new bloom filter instance, mainly to be used by SPV
|
||||
// clients. The tweak parameter is a random value added to the seed value.
|
||||
// The false positive rate is the probability of a false positive where 1.0 is
|
||||
// "match everything" and zero is unachievable. Thus, providing any false
|
||||
// positive rates less than 0 or greater than 1 will be adjusted to the valid
|
||||
// range.
|
||||
//
|
||||
// For more information on what values to use for both elements and fprate,
|
||||
// see https://en.wikipedia.org/wiki/Bloom_filter.
|
||||
func NewFilter(elements, tweak uint32, fprate float64, flags wire.BloomUpdateType) *Filter {
|
||||
// Massage the false positive rate to sane values.
|
||||
if fprate > 1.0 {
|
||||
fprate = 1.0
|
||||
}
|
||||
if fprate < 1e-9 {
|
||||
fprate = 1e-9
|
||||
}
|
||||
|
||||
// Calculate the size of the filter in bytes for the given number of
|
||||
// elements and false positive rate.
|
||||
//
|
||||
// Equivalent to m = -(n*ln(p) / ln(2)^2), where m is in bits.
|
||||
// Then clamp it to the maximum filter size and convert to bytes.
|
||||
dataLen := uint32(-1 * float64(elements) * math.Log(fprate) / ln2Squared)
|
||||
dataLen = minUint32(dataLen, wire.MaxFilterLoadFilterSize*8) / 8
|
||||
|
||||
// Calculate the number of hash functions based on the size of the
|
||||
// filter calculated above and the number of elements.
|
||||
//
|
||||
// Equivalent to k = (m/n) * ln(2)
|
||||
// Then clamp it to the maximum allowed hash funcs.
|
||||
hashFuncs := uint32(float64(dataLen*8) / float64(elements) * math.Ln2)
|
||||
hashFuncs = minUint32(hashFuncs, wire.MaxFilterLoadHashFuncs)
|
||||
|
||||
data := make([]byte, dataLen)
|
||||
msg := wire.NewMsgFilterLoad(data, hashFuncs, tweak, flags)
|
||||
|
||||
return &Filter{
|
||||
msgFilterLoad: msg,
|
||||
}
|
||||
}
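// Worked sizing example (an explanatory note, not part of the original
// file): for elements=10 and fprate=0.0001, m = -10*ln(0.0001)/ln(2)^2 is
// roughly 191.7 bits, which truncates to a 23-byte filter after the divide
// by 8, and k = (23*8/10)*ln(2) is roughly 12.8, truncated to 12 hash
// functions; both values are also clamped by the wire package maximums.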
|
||||
|
||||
// LoadFilter creates a new Filter instance with the given underlying
|
||||
// wire.MsgFilterLoad.
|
||||
func LoadFilter(filter *wire.MsgFilterLoad) *Filter {
|
||||
return &Filter{
|
||||
msgFilterLoad: filter,
|
||||
}
|
||||
}
|
||||
|
||||
// IsLoaded returns true if a filter is loaded, otherwise false.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bf *Filter) IsLoaded() bool {
|
||||
bf.mtx.Lock()
|
||||
loaded := bf.msgFilterLoad != nil
|
||||
bf.mtx.Unlock()
|
||||
return loaded
|
||||
}
|
||||
|
||||
// Reload loads a new filter replacing any existing filter.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bf *Filter) Reload(filter *wire.MsgFilterLoad) {
|
||||
bf.mtx.Lock()
|
||||
bf.msgFilterLoad = filter
|
||||
bf.mtx.Unlock()
|
||||
}
|
||||
|
||||
// Unload unloads the bloom filter.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bf *Filter) Unload() {
|
||||
bf.mtx.Lock()
|
||||
bf.msgFilterLoad = nil
|
||||
bf.mtx.Unlock()
|
||||
}
|
||||
|
||||
// hash returns the bit offset in the bloom filter which corresponds to the
|
||||
// passed data for the given independent hash function number.
|
||||
func (bf *Filter) hash(hashNum uint32, data []byte) uint32 {
|
||||
// bitcoind: 0xfba4c795 chosen as it guarantees a reasonable bit
|
||||
// difference between hashNum values.
|
||||
//
|
||||
// Note that << 3 is equivalent to multiplying by 8, but is faster.
|
||||
// Thus the returned hash is brought into range of the number of bits
|
||||
// the filter has and returned.
|
||||
mm := MurmurHash3(hashNum*0xfba4c795+bf.msgFilterLoad.Tweak, data)
|
||||
return mm % (uint32(len(bf.msgFilterLoad.Filter)) << 3)
|
||||
}
|
||||
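The seeding scheme above is how BIP37 derives its independent hash functions: function i seeds MurmurHash3 with i*0xfba4c795 plus the filter's tweak. A small sketch using the exported MurmurHash3 from this package; the tweak, data, and filter size below are arbitrary example values.

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcutil/bloom"
)

func main() {
	const tweak = uint32(5)                // arbitrary example tweak
	data := []byte("some filter element")  // arbitrary example element
	filterBits := uint32(184)              // e.g. a 23-byte filter has 184 bits

	// Derive three bit positions the way Filter.hash does: seed
	// MurmurHash3 with hashNum*0xfba4c795+tweak, then reduce modulo the
	// number of bits in the filter.
	for hashNum := uint32(0); hashNum < 3; hashNum++ {
		mm := bloom.MurmurHash3(hashNum*0xfba4c795+tweak, data)
		fmt.Println(mm % filterBits)
	}
}
```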
|
||||
// matches returns true if the bloom filter might contain the passed data and
|
||||
// false if it definitely does not.
|
||||
//
|
||||
// This function MUST be called with the filter lock held.
|
||||
func (bf *Filter) matches(data []byte) bool {
|
||||
if bf.msgFilterLoad == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// The bloom filter does not contain the data if any of the bit offsets
|
||||
// which result from hashing the data using each independent hash
|
||||
// function are not set. The shifts and masks below are a faster
|
||||
// equivalent of:
|
||||
// arrayIndex := idx / 8 (idx >> 3)
|
||||
// bitOffset := idx % 8 (idx & 7)
|
||||
// if filter[arrayIndex] & 1<<bitOffset == 0 { ... }
|
||||
for i := uint32(0); i < bf.msgFilterLoad.HashFuncs; i++ {
|
||||
idx := bf.hash(i, data)
|
||||
if bf.msgFilterLoad.Filter[idx>>3]&(1<<(idx&7)) == 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
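The shift-and-mask forms used in matches (and in add further down) are just division and modulo by eight; a quick illustration with a hypothetical bit index:

```go
package main

import "fmt"

func main() {
	idx := uint32(37) // hypothetical bit index returned by Filter.hash

	// idx>>3 selects the byte and idx&7 the bit within it, exactly the
	// faster equivalents of idx/8 and idx%8 used by matches and add.
	fmt.Println(idx>>3, idx/8) // 4 4
	fmt.Println(idx&7, idx%8)  // 5 5

	// Setting and then testing bit 37 of a small byte-slice filter.
	filter := make([]byte, 8)
	filter[idx>>3] |= 1 << (idx & 7)
	fmt.Println(filter[idx>>3]&(1<<(idx&7)) != 0) // true
}
```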
|
||||
// Matches returns true if the bloom filter might contain the passed data and
|
||||
// false if it definitely does not.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bf *Filter) Matches(data []byte) bool {
|
||||
bf.mtx.Lock()
|
||||
match := bf.matches(data)
|
||||
bf.mtx.Unlock()
|
||||
return match
|
||||
}
|
||||
|
||||
// matchesOutPoint returns true if the bloom filter might contain the passed
|
||||
// outpoint and false if it definitely does not.
|
||||
//
|
||||
// This function MUST be called with the filter lock held.
|
||||
func (bf *Filter) matchesOutPoint(outpoint *wire.OutPoint) bool {
|
||||
// Serialize
|
||||
var buf [chainhash.HashSize + 4]byte
|
||||
copy(buf[:], outpoint.Hash[:])
|
||||
binary.LittleEndian.PutUint32(buf[chainhash.HashSize:], outpoint.Index)
|
||||
|
||||
return bf.matches(buf[:])
|
||||
}
|
||||
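Both matchesOutPoint here and addOutPoint below key the filter on the canonical 36-byte outpoint serialization: the 32-byte transaction hash followed by the 4-byte little-endian output index. A minimal sketch of that layout, where serializeOutPoint is a hypothetical helper and not part of the package:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
)

// serializeOutPoint is a hypothetical helper mirroring the buffer layout
// used by matchesOutPoint and addOutPoint: 32-byte hash followed by the
// 4-byte little-endian output index.
func serializeOutPoint(hash *chainhash.Hash, index uint32) []byte {
	var buf [chainhash.HashSize + 4]byte
	copy(buf[:], hash[:])
	binary.LittleEndian.PutUint32(buf[chainhash.HashSize:], index)
	return buf[:]
}

func main() {
	var zeroHash chainhash.Hash
	fmt.Printf("%x\n", serializeOutPoint(&zeroHash, 1))
}
```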
|
||||
// MatchesOutPoint returns true if the bloom filter might contain the passed
|
||||
// outpoint and false if it definitely does not.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bf *Filter) MatchesOutPoint(outpoint *wire.OutPoint) bool {
|
||||
bf.mtx.Lock()
|
||||
match := bf.matchesOutPoint(outpoint)
|
||||
bf.mtx.Unlock()
|
||||
return match
|
||||
}
|
||||
|
||||
// add adds the passed byte slice to the bloom filter.
|
||||
//
|
||||
// This function MUST be called with the filter lock held.
|
||||
func (bf *Filter) add(data []byte) {
|
||||
if bf.msgFilterLoad == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Adding data to a bloom filter consists of setting all of the bit
|
||||
// offsets which result from hashing the data using each independent
|
||||
// hash function. The shifts and masks below are a faster equivalent
|
||||
// of:
|
||||
// arrayIndex := idx / 8 (idx >> 3)
|
||||
// bitOffset := idx % 8 (idx & 7)
|
||||
// filter[arrayIndex] |= 1<<bitOffset
|
||||
for i := uint32(0); i < bf.msgFilterLoad.HashFuncs; i++ {
|
||||
idx := bf.hash(i, data)
|
||||
bf.msgFilterLoad.Filter[idx>>3] |= (1 << (7 & idx))
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds the passed byte slice to the bloom filter.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bf *Filter) Add(data []byte) {
|
||||
bf.mtx.Lock()
|
||||
bf.add(data)
|
||||
bf.mtx.Unlock()
|
||||
}
|
||||
|
||||
// AddHash adds the passed chainhash.Hash to the Filter.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bf *Filter) AddHash(hash *chainhash.Hash) {
|
||||
bf.mtx.Lock()
|
||||
bf.add(hash[:])
|
||||
bf.mtx.Unlock()
|
||||
}
|
||||
|
||||
// addOutPoint adds the passed transaction outpoint to the bloom filter.
|
||||
//
|
||||
// This function MUST be called with the filter lock held.
|
||||
func (bf *Filter) addOutPoint(outpoint *wire.OutPoint) {
|
||||
// Serialize
|
||||
var buf [chainhash.HashSize + 4]byte
|
||||
copy(buf[:], outpoint.Hash[:])
|
||||
binary.LittleEndian.PutUint32(buf[chainhash.HashSize:], outpoint.Index)
|
||||
|
||||
bf.add(buf[:])
|
||||
}
|
||||
|
||||
// AddOutPoint adds the passed transaction outpoint to the bloom filter.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bf *Filter) AddOutPoint(outpoint *wire.OutPoint) {
|
||||
bf.mtx.Lock()
|
||||
bf.addOutPoint(outpoint)
|
||||
bf.mtx.Unlock()
|
||||
}
|
||||
|
||||
// maybeAddOutpoint potentially adds the passed outpoint to the bloom filter
|
||||
// depending on the bloom update flags and the type of the passed public key
|
||||
// script.
|
||||
//
|
||||
// This function MUST be called with the filter lock held.
|
||||
func (bf *Filter) maybeAddOutpoint(pkScript []byte, outHash *chainhash.Hash, outIdx uint32) {
|
||||
switch bf.msgFilterLoad.Flags {
|
||||
case wire.BloomUpdateAll:
|
||||
outpoint := wire.NewOutPoint(outHash, outIdx)
|
||||
bf.addOutPoint(outpoint)
|
||||
case wire.BloomUpdateP2PubkeyOnly:
|
||||
class := txscript.GetScriptClass(pkScript)
|
||||
if class == txscript.PubKeyTy || class == txscript.MultiSigTy {
|
||||
outpoint := wire.NewOutPoint(outHash, outIdx)
|
||||
bf.addOutPoint(outpoint)
|
||||
}
|
||||
}
|
||||
}
|
||||
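maybeAddOutpoint is where the BIP37 update flags diverge: BloomUpdateNone never auto-inserts outpoints, BloomUpdateAll inserts every matched outpoint, and BloomUpdateP2PubkeyOnly inserts only when the output script is pay-to-pubkey or multisig. A hedged sketch of choosing a flag at construction time (element counts and rates are arbitrary):

```go
package main

import (
	"github.com/btcsuite/btcd/btcutil/bloom"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// A wallet tracking ordinary pay-to-pubkey-hash outputs usually wants
	// matched outpoints inserted automatically so later spends also match.
	full := bloom.NewFilter(100, 0, 0.0001, wire.BloomUpdateAll)

	// A client only interested in pay-to-pubkey/multisig outputs can limit
	// filter growth with the P2PubkeyOnly variant; BloomUpdateNone skips
	// the automatic insertion entirely.
	p2pkOnly := bloom.NewFilter(100, 0, 0.0001, wire.BloomUpdateP2PubkeyOnly)

	_, _ = full, p2pkOnly
}
```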
|
||||
// matchTxAndUpdate returns true if the bloom filter matches data within the
|
||||
// passed transaction, otherwise false is returned. If the filter does match
|
||||
// the passed transaction, it will also update the filter depending on the bloom
|
||||
// update flags set via the loaded filter if needed.
|
||||
//
|
||||
// This function MUST be called with the filter lock held.
|
||||
func (bf *Filter) matchTxAndUpdate(tx *btcutil.Tx) bool {
|
||||
// Check if the filter matches the hash of the transaction.
|
||||
// This is useful for finding transactions when they appear in a block.
|
||||
matched := bf.matches(tx.Hash()[:])
|
||||
|
||||
// Check if the filter matches any data elements in the public key
|
||||
// scripts of any of the outputs. When it does, add the outpoint that
|
||||
// matched so transactions which spend from the matched transaction are
|
||||
// also included in the filter. This removes the burden of updating the
|
||||
// filter for this scenario from the client. It is also more efficient
|
||||
// on the network since it avoids the need for another filteradd message
|
||||
// from the client and avoids some potential races that could otherwise
|
||||
// occur.
|
||||
for i, txOut := range tx.MsgTx().TxOut {
|
||||
pushedData, err := txscript.PushedData(txOut.PkScript)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, data := range pushedData {
|
||||
if !bf.matches(data) {
|
||||
continue
|
||||
}
|
||||
|
||||
matched = true
|
||||
bf.maybeAddOutpoint(txOut.PkScript, tx.Hash(), uint32(i))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Nothing more to do if a match has already been made.
|
||||
if matched {
|
||||
return true
|
||||
}
|
||||
|
||||
// At this point, the transaction and none of the data elements in the
|
||||
// public key scripts of its outputs matched.
|
||||
|
||||
// Check if the filter matches any outpoints this transaction spends or
|
||||
// any data elements in the signature scripts of any of the inputs.
|
||||
for _, txin := range tx.MsgTx().TxIn {
|
||||
if bf.matchesOutPoint(&txin.PreviousOutPoint) {
|
||||
return true
|
||||
}
|
||||
|
||||
pushedData, err := txscript.PushedData(txin.SignatureScript)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
for _, data := range pushedData {
|
||||
if bf.matches(data) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// MatchTxAndUpdate returns true if the bloom filter matches data within the
|
||||
// passed transaction, otherwise false is returned. If the filter does match
|
||||
// the passed transaction, it will also update the filter depending on the bloom
|
||||
// update flags set via the loaded filter if needed.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bf *Filter) MatchTxAndUpdate(tx *btcutil.Tx) bool {
|
||||
bf.mtx.Lock()
|
||||
match := bf.matchTxAndUpdate(tx)
|
||||
bf.mtx.Unlock()
|
||||
return match
|
||||
}
|
||||
|
||||
// MsgFilterLoad returns the underlying wire.MsgFilterLoad for the bloom
|
||||
// filter.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bf *Filter) MsgFilterLoad() *wire.MsgFilterLoad {
|
||||
bf.mtx.Lock()
|
||||
msg := bf.msgFilterLoad
|
||||
bf.mtx.Unlock()
|
||||
return msg
|
||||
}
|
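Tying the exported API together, the following sketch builds a filter, adds one element, and serializes the underlying filterload message the same way the tests below do; the element bytes are arbitrary example data.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/btcsuite/btcd/btcutil/bloom"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// Build a small filter, insert one element, and serialize the
	// underlying filterload message.
	f := bloom.NewFilter(3, 0, 0.01, wire.BloomUpdateAll)
	f.Add([]byte{0x99, 0x10, 0x8a, 0xd8})

	var buf bytes.Buffer
	err := f.MsgFilterLoad().BtcEncode(&buf, wire.ProtocolVersion, wire.LatestEncoding)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("filterload payload: %x\n", buf.Bytes())
}
```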
660
btcutil/bloom/filter_test.go
Normal file
660
btcutil/bloom/filter_test.go
Normal file
|
@ -0,0 +1,660 @@
|
|||
// Copyright (c) 2013-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bloom_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/btcutil/bloom"
|
||||
)
|
||||
|
||||
// TestFilterLarge ensures a maximum sized filter can be created.
|
||||
func TestFilterLarge(t *testing.T) {
|
||||
f := bloom.NewFilter(100000000, 0, 0.01, wire.BloomUpdateNone)
|
||||
if len(f.MsgFilterLoad().Filter) > wire.MaxFilterLoadFilterSize {
|
||||
t.Errorf("TestFilterLarge test failed: %d > %d",
|
||||
len(f.MsgFilterLoad().Filter), wire.MaxFilterLoadFilterSize)
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilterLoad ensures loading and unloading of a filter pass.
|
||||
func TestFilterLoad(t *testing.T) {
|
||||
merkle := wire.MsgFilterLoad{}
|
||||
|
||||
f := bloom.LoadFilter(&merkle)
|
||||
if !f.IsLoaded() {
|
||||
t.Errorf("TestFilterLoad IsLoaded test failed: want %v got %v",
|
||||
true, !f.IsLoaded())
|
||||
return
|
||||
}
|
||||
f.Unload()
|
||||
if f.IsLoaded() {
|
||||
t.Errorf("TestFilterLoad IsLoaded test failed: want %v got %v",
|
||||
f.IsLoaded(), false)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilterInsert ensures inserting data into the filter causes that data
|
||||
// to be matched and the resulting serialized MsgFilterLoad is the expected
|
||||
// value.
|
||||
func TestFilterInsert(t *testing.T) {
|
||||
var tests = []struct {
|
||||
hex string
|
||||
insert bool
|
||||
}{
|
||||
{"99108ad8ed9bb6274d3980bab5a85c048f0950c8", true},
|
||||
{"19108ad8ed9bb6274d3980bab5a85c048f0950c8", false},
|
||||
{"b5a2c786d9ef4658287ced5914b37a1b4aa32eee", true},
|
||||
{"b9300670b4c5366e95b2699e8b18bc75e5f729c5", true},
|
||||
}
|
||||
|
||||
f := bloom.NewFilter(3, 0, 0.01, wire.BloomUpdateAll)
|
||||
|
||||
for i, test := range tests {
|
||||
data, err := hex.DecodeString(test.hex)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsert DecodeString failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
if test.insert {
|
||||
f.Add(data)
|
||||
}
|
||||
|
||||
result := f.Matches(data)
|
||||
if test.insert != result {
|
||||
t.Errorf("TestFilterInsert Matches test #%d failure: got %v want %v\n",
|
||||
i, result, test.insert)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
want, err := hex.DecodeString("03614e9b050000000000000001")
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsert DecodeString failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
got := bytes.NewBuffer(nil)
|
||||
err = f.MsgFilterLoad().BtcEncode(got, wire.ProtocolVersion, wire.LatestEncoding)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsert BtcDecode failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
if !bytes.Equal(got.Bytes(), want) {
|
||||
t.Errorf("TestFilterInsert failure: got %v want %v\n",
|
||||
got.Bytes(), want)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilterFPRange checks that new filters made with out of range
|
||||
// false positive targets result in either max or min false positive rates.
|
||||
func TestFilterFPRange(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
hash string
|
||||
want string
|
||||
filter *bloom.Filter
|
||||
}{
|
||||
{
|
||||
name: "fprates > 1 should be clipped at 1",
|
||||
hash: "02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041",
|
||||
want: "00000000000000000001",
|
||||
filter: bloom.NewFilter(1, 0, 20.9999999769, wire.BloomUpdateAll),
|
||||
},
|
||||
{
|
||||
name: "fprates less than 1e-9 should be clipped at min",
|
||||
hash: "02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041",
|
||||
want: "0566d97a91a91b0000000000000001",
|
||||
filter: bloom.NewFilter(1, 0, 0, wire.BloomUpdateAll),
|
||||
},
|
||||
{
|
||||
name: "negative fprates should be clipped at min",
|
||||
hash: "02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041",
|
||||
want: "0566d97a91a91b0000000000000001",
|
||||
filter: bloom.NewFilter(1, 0, -1, wire.BloomUpdateAll),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Convert test input to appropriate types.
|
||||
hash, err := chainhash.NewHashFromStr(test.hash)
|
||||
if err != nil {
|
||||
t.Errorf("NewHashFromStr unexpected error: %v", err)
|
||||
continue
|
||||
}
|
||||
want, err := hex.DecodeString(test.want)
|
||||
if err != nil {
|
||||
t.Errorf("DecodeString unexpected error: %v\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Add the test hash to the bloom filter and ensure the
|
||||
// filter serializes to the expected bytes.
|
||||
f := test.filter
|
||||
f.AddHash(hash)
|
||||
got := bytes.NewBuffer(nil)
|
||||
err = f.MsgFilterLoad().BtcEncode(got, wire.ProtocolVersion, wire.LatestEncoding)
|
||||
if err != nil {
|
||||
t.Errorf("BtcDecode unexpected error: %v\n", err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(got.Bytes(), want) {
|
||||
t.Errorf("serialized filter mismatch: got %x want %x\n",
|
||||
got.Bytes(), want)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilterInsertWithTweak ensures inserting data into the filter with a tweak causes
|
||||
// that data to be matched and the resulting serialized MsgFilterLoad is the
|
||||
// expected value.
|
||||
func TestFilterInsertWithTweak(t *testing.T) {
|
||||
var tests = []struct {
|
||||
hex string
|
||||
insert bool
|
||||
}{
|
||||
{"99108ad8ed9bb6274d3980bab5a85c048f0950c8", true},
|
||||
{"19108ad8ed9bb6274d3980bab5a85c048f0950c8", false},
|
||||
{"b5a2c786d9ef4658287ced5914b37a1b4aa32eee", true},
|
||||
{"b9300670b4c5366e95b2699e8b18bc75e5f729c5", true},
|
||||
}
|
||||
|
||||
f := bloom.NewFilter(3, 2147483649, 0.01, wire.BloomUpdateAll)
|
||||
|
||||
for i, test := range tests {
|
||||
data, err := hex.DecodeString(test.hex)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertWithTweak DecodeString failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
if test.insert {
|
||||
f.Add(data)
|
||||
}
|
||||
|
||||
result := f.Matches(data)
|
||||
if test.insert != result {
|
||||
t.Errorf("TestFilterInsertWithTweak Matches test #%d failure: got %v want %v\n",
|
||||
i, result, test.insert)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
want, err := hex.DecodeString("03ce4299050000000100008001")
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertWithTweak DecodeString failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
got := bytes.NewBuffer(nil)
|
||||
err = f.MsgFilterLoad().BtcEncode(got, wire.ProtocolVersion, wire.LatestEncoding)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertWithTweak BtcDecode failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
if !bytes.Equal(got.Bytes(), want) {
|
||||
t.Errorf("TestFilterInsertWithTweak failure: got %v want %v\n",
|
||||
got.Bytes(), want)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilterInsertKey ensures inserting public keys and addresses works as
|
||||
// expected.
|
||||
func TestFilterInsertKey(t *testing.T) {
|
||||
secret := "5Kg1gnAjaLfKiwhhPpGS3QfRg2m6awQvaj98JCZBZQ5SuS2F15C"
|
||||
|
||||
wif, err := btcutil.DecodeWIF(secret)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertKey DecodeWIF failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
f := bloom.NewFilter(2, 0, 0.001, wire.BloomUpdateAll)
|
||||
f.Add(wif.SerializePubKey())
|
||||
f.Add(btcutil.Hash160(wif.SerializePubKey()))
|
||||
|
||||
want, err := hex.DecodeString("038fc16b080000000000000001")
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertWithTweak DecodeString failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
got := bytes.NewBuffer(nil)
|
||||
err = f.MsgFilterLoad().BtcEncode(got, wire.ProtocolVersion, wire.LatestEncoding)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertWithTweak BtcDecode failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
if !bytes.Equal(got.Bytes(), want) {
|
||||
t.Errorf("TestFilterInsertWithTweak failure: got %v want %v\n",
|
||||
got.Bytes(), want)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterBloomMatch(t *testing.T) {
|
||||
str := "01000000010b26e9b7735eb6aabdf358bab62f9816a21ba9ebdb719d5299e" +
|
||||
"88607d722c190000000008b4830450220070aca44506c5cef3a16ed519d7" +
|
||||
"c3c39f8aab192c4e1c90d065f37b8a4af6141022100a8e160b856c2d43d2" +
|
||||
"7d8fba71e5aef6405b8643ac4cb7cb3c462aced7f14711a0141046d11fee" +
|
||||
"51b0e60666d5049a9101a72741df480b96ee26488a4d3466b95c9a40ac5e" +
|
||||
"eef87e10a5cd336c19a84565f80fa6c547957b7700ff4dfbdefe76036c33" +
|
||||
"9ffffffff021bff3d11000000001976a91404943fdd508053c75000106d3" +
|
||||
"bc6e2754dbcff1988ac2f15de00000000001976a914a266436d296554760" +
|
||||
"8b9e15d9032a7b9d64fa43188ac00000000"
|
||||
strBytes, err := hex.DecodeString(str)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch DecodeString failure: %v", err)
|
||||
return
|
||||
}
|
||||
tx, err := btcutil.NewTxFromBytes(strBytes)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch NewTxFromBytes failure: %v", err)
|
||||
return
|
||||
}
|
||||
spendingTxBytes := []byte{0x01, 0x00, 0x00, 0x00, 0x01, 0x6b, 0xff, 0x7f,
|
||||
0xcd, 0x4f, 0x85, 0x65, 0xef, 0x40, 0x6d, 0xd5, 0xd6,
|
||||
0x3d, 0x4f, 0xf9, 0x4f, 0x31, 0x8f, 0xe8, 0x20, 0x27,
|
||||
0xfd, 0x4d, 0xc4, 0x51, 0xb0, 0x44, 0x74, 0x01, 0x9f,
|
||||
0x74, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x49, 0x30,
|
||||
0x46, 0x02, 0x21, 0x00, 0xda, 0x0d, 0xc6, 0xae, 0xce,
|
||||
0xfe, 0x1e, 0x06, 0xef, 0xdf, 0x05, 0x77, 0x37, 0x57,
|
||||
0xde, 0xb1, 0x68, 0x82, 0x09, 0x30, 0xe3, 0xb0, 0xd0,
|
||||
0x3f, 0x46, 0xf5, 0xfc, 0xf1, 0x50, 0xbf, 0x99, 0x0c,
|
||||
0x02, 0x21, 0x00, 0xd2, 0x5b, 0x5c, 0x87, 0x04, 0x00,
|
||||
0x76, 0xe4, 0xf2, 0x53, 0xf8, 0x26, 0x2e, 0x76, 0x3e,
|
||||
0x2d, 0xd5, 0x1e, 0x7f, 0xf0, 0xbe, 0x15, 0x77, 0x27,
|
||||
0xc4, 0xbc, 0x42, 0x80, 0x7f, 0x17, 0xbd, 0x39, 0x01,
|
||||
0x41, 0x04, 0xe6, 0xc2, 0x6e, 0xf6, 0x7d, 0xc6, 0x10,
|
||||
0xd2, 0xcd, 0x19, 0x24, 0x84, 0x78, 0x9a, 0x6c, 0xf9,
|
||||
0xae, 0xa9, 0x93, 0x0b, 0x94, 0x4b, 0x7e, 0x2d, 0xb5,
|
||||
0x34, 0x2b, 0x9d, 0x9e, 0x5b, 0x9f, 0xf7, 0x9a, 0xff,
|
||||
0x9a, 0x2e, 0xe1, 0x97, 0x8d, 0xd7, 0xfd, 0x01, 0xdf,
|
||||
0xc5, 0x22, 0xee, 0x02, 0x28, 0x3d, 0x3b, 0x06, 0xa9,
|
||||
0xd0, 0x3a, 0xcf, 0x80, 0x96, 0x96, 0x8d, 0x7d, 0xbb,
|
||||
0x0f, 0x91, 0x78, 0xff, 0xff, 0xff, 0xff, 0x02, 0x8b,
|
||||
0xa7, 0x94, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76,
|
||||
0xa9, 0x14, 0xba, 0xde, 0xec, 0xfd, 0xef, 0x05, 0x07,
|
||||
0x24, 0x7f, 0xc8, 0xf7, 0x42, 0x41, 0xd7, 0x3b, 0xc0,
|
||||
0x39, 0x97, 0x2d, 0x7b, 0x88, 0xac, 0x40, 0x94, 0xa8,
|
||||
0x02, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14,
|
||||
0xc1, 0x09, 0x32, 0x48, 0x3f, 0xec, 0x93, 0xed, 0x51,
|
||||
0xf5, 0xfe, 0x95, 0xe7, 0x25, 0x59, 0xf2, 0xcc, 0x70,
|
||||
0x43, 0xf9, 0x88, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
spendingTx, err := btcutil.NewTxFromBytes(spendingTxBytes)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch NewTxFromBytes failure: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
f := bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
inputStr := "b4749f017444b051c44dfd2720e88f314ff94f3dd6d56d40ef65854fcd7fff6b"
|
||||
hash, err := chainhash.NewHashFromStr(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch NewHashFromStr failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
f.AddHash(hash)
|
||||
if !f.MatchTxAndUpdate(tx) {
|
||||
t.Errorf("TestFilterBloomMatch didn't match hash %s", inputStr)
|
||||
}
|
||||
|
||||
f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
inputStr = "6bff7fcd4f8565ef406dd5d63d4ff94f318fe82027fd4dc451b04474019f74b4"
|
||||
hashBytes, err := hex.DecodeString(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch DecodeString failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
f.Add(hashBytes)
|
||||
if !f.MatchTxAndUpdate(tx) {
|
||||
t.Errorf("TestFilterBloomMatch didn't match hash %s", inputStr)
|
||||
}
|
||||
|
||||
f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
inputStr = "30450220070aca44506c5cef3a16ed519d7c3c39f8aab192c4e1c90d065" +
|
||||
"f37b8a4af6141022100a8e160b856c2d43d27d8fba71e5aef6405b8643" +
|
||||
"ac4cb7cb3c462aced7f14711a01"
|
||||
hashBytes, err = hex.DecodeString(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch DecodeString failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
f.Add(hashBytes)
|
||||
if !f.MatchTxAndUpdate(tx) {
|
||||
t.Errorf("TestFilterBloomMatch didn't match input signature %s", inputStr)
|
||||
}
|
||||
|
||||
f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
inputStr = "046d11fee51b0e60666d5049a9101a72741df480b96ee26488a4d3466b95" +
|
||||
"c9a40ac5eeef87e10a5cd336c19a84565f80fa6c547957b7700ff4dfbdefe" +
|
||||
"76036c339"
|
||||
hashBytes, err = hex.DecodeString(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch DecodeString failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
f.Add(hashBytes)
|
||||
if !f.MatchTxAndUpdate(tx) {
|
||||
t.Errorf("TestFilterBloomMatch didn't match input pubkey %s", inputStr)
|
||||
}
|
||||
|
||||
f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
inputStr = "04943fdd508053c75000106d3bc6e2754dbcff19"
|
||||
hashBytes, err = hex.DecodeString(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch DecodeString failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
f.Add(hashBytes)
|
||||
if !f.MatchTxAndUpdate(tx) {
|
||||
t.Errorf("TestFilterBloomMatch didn't match output address %s", inputStr)
|
||||
}
|
||||
if !f.MatchTxAndUpdate(spendingTx) {
|
||||
t.Errorf("TestFilterBloomMatch spendingTx didn't match output address %s", inputStr)
|
||||
}
|
||||
|
||||
f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
inputStr = "a266436d2965547608b9e15d9032a7b9d64fa431"
|
||||
hashBytes, err = hex.DecodeString(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch DecodeString failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
f.Add(hashBytes)
|
||||
if !f.MatchTxAndUpdate(tx) {
|
||||
t.Errorf("TestFilterBloomMatch didn't match output address %s", inputStr)
|
||||
}
|
||||
|
||||
f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
inputStr = "90c122d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"
|
||||
hash, err = chainhash.NewHashFromStr(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch NewHashFromStr failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
outpoint := wire.NewOutPoint(hash, 0)
|
||||
f.AddOutPoint(outpoint)
|
||||
if !f.MatchTxAndUpdate(tx) {
|
||||
t.Errorf("TestFilterBloomMatch didn't match outpoint %s", inputStr)
|
||||
}
|
||||
|
||||
f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
inputStr = "00000009e784f32f62ef849763d4f45b98e07ba658647343b915ff832b110436"
|
||||
hash, err = chainhash.NewHashFromStr(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch NewHashFromStr failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
f.AddHash(hash)
|
||||
if f.MatchTxAndUpdate(tx) {
|
||||
t.Errorf("TestFilterBloomMatch matched hash %s", inputStr)
|
||||
}
|
||||
|
||||
f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
inputStr = "0000006d2965547608b9e15d9032a7b9d64fa431"
|
||||
hashBytes, err = hex.DecodeString(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch DecodeString failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
f.Add(hashBytes)
|
||||
if f.MatchTxAndUpdate(tx) {
|
||||
t.Errorf("TestFilterBloomMatch matched address %s", inputStr)
|
||||
}
|
||||
|
||||
f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
inputStr = "90c122d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"
|
||||
hash, err = chainhash.NewHashFromStr(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch NewHashFromStr failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
outpoint = wire.NewOutPoint(hash, 1)
|
||||
f.AddOutPoint(outpoint)
|
||||
if f.MatchTxAndUpdate(tx) {
|
||||
t.Errorf("TestFilterBloomMatch matched outpoint %s", inputStr)
|
||||
}
|
||||
|
||||
f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
inputStr = "000000d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"
|
||||
hash, err = chainhash.NewHashFromStr(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterBloomMatch NewHashFromStr failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
outpoint = wire.NewOutPoint(hash, 0)
|
||||
f.AddOutPoint(outpoint)
|
||||
if f.MatchTxAndUpdate(tx) {
|
||||
t.Errorf("TestFilterBloomMatch matched outpoint %s", inputStr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterInsertUpdateNone(t *testing.T) {
|
||||
f := bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateNone)
|
||||
|
||||
// Add the generation pubkey
|
||||
inputStr := "04eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f134c" +
|
||||
"876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad1357231a" +
|
||||
"2252247d97a46a91"
|
||||
inputBytes, err := hex.DecodeString(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertUpdateNone DecodeString failed: %v", err)
|
||||
return
|
||||
}
|
||||
f.Add(inputBytes)
|
||||
|
||||
// Add the output address for the 4th transaction
|
||||
inputStr = "b6efd80d99179f4f4ff6f4dd0a007d018c385d21"
|
||||
inputBytes, err = hex.DecodeString(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertUpdateNone DecodeString failed: %v", err)
|
||||
return
|
||||
}
|
||||
f.Add(inputBytes)
|
||||
|
||||
inputStr = "147caa76786596590baa4e98f5d9f48b86c7765e489f7a6ff3360fe5c674360b"
|
||||
hash, err := chainhash.NewHashFromStr(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertUpdateNone NewHashFromStr failed: %v", err)
|
||||
return
|
||||
}
|
||||
outpoint := wire.NewOutPoint(hash, 0)
|
||||
|
||||
if f.MatchesOutPoint(outpoint) {
|
||||
t.Errorf("TestFilterInsertUpdateNone matched outpoint %s", inputStr)
|
||||
return
|
||||
}
|
||||
|
||||
inputStr = "02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041"
|
||||
hash, err = chainhash.NewHashFromStr(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertUpdateNone NewHashFromStr failed: %v", err)
|
||||
return
|
||||
}
|
||||
outpoint = wire.NewOutPoint(hash, 0)
|
||||
|
||||
if f.MatchesOutPoint(outpoint) {
|
||||
t.Errorf("TestFilterInsertUpdateNone matched outpoint %s", inputStr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterInsertP2PubKeyOnly(t *testing.T) {
|
||||
blockStr := "0100000082bb869cf3a793432a66e826e05a6fc37469f8efb7421dc" +
|
||||
"880670100000000007f16c5962e8bd963659c793ce370d95f093bc7e367" +
|
||||
"117b3c30c1f8fdd0d9728776381b4d4c86041b554b85290701000000010" +
|
||||
"00000000000000000000000000000000000000000000000000000000000" +
|
||||
"0000ffffffff07044c86041b0136ffffffff0100f2052a0100000043410" +
|
||||
"4eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f134c876f2" +
|
||||
"c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad1357231a22522" +
|
||||
"47d97a46a91ac000000000100000001bcad20a6a29827d1424f08989255" +
|
||||
"120bf7f3e9e3cdaaa6bb31b0737fe048724300000000494830450220356" +
|
||||
"e834b046cadc0f8ebb5a8a017b02de59c86305403dad52cd77b55af062e" +
|
||||
"a10221009253cd6c119d4729b77c978e1e2aa19f5ea6e0e52b3f16e32fa" +
|
||||
"608cd5bab753901ffffffff02008d380c010000001976a9142b4b8072ec" +
|
||||
"bba129b6453c63e129e643207249ca88ac0065cd1d000000001976a9141" +
|
||||
"b8dd13b994bcfc787b32aeadf58ccb3615cbd5488ac0000000001000000" +
|
||||
"03fdacf9b3eb077412e7a968d2e4f11b9a9dee312d666187ed77ee7d26a" +
|
||||
"f16cb0b000000008c493046022100ea1608e70911ca0de5af51ba57ad23" +
|
||||
"b9a51db8d28f82c53563c56a05c20f5a87022100a8bdc8b4a8acc8634c6" +
|
||||
"b420410150775eb7f2474f5615f7fccd65af30f310fbf01410465fdf49e" +
|
||||
"29b06b9a1582287b6279014f834edc317695d125ef623c1cc3aaece245b" +
|
||||
"d69fcad7508666e9c74a49dc9056d5fc14338ef38118dc4afae5fe2c585" +
|
||||
"caffffffff309e1913634ecb50f3c4f83e96e70b2df071b497b8973a3e7" +
|
||||
"5429df397b5af83000000004948304502202bdb79c596a9ffc24e96f438" +
|
||||
"6199aba386e9bc7b6071516e2b51dda942b3a1ed022100c53a857e76b72" +
|
||||
"4fc14d45311eac5019650d415c3abb5428f3aae16d8e69bec2301ffffff" +
|
||||
"ff2089e33491695080c9edc18a428f7d834db5b6d372df13ce2b1b0e0cb" +
|
||||
"cb1e6c10000000049483045022100d4ce67c5896ee251c810ac1ff9cecc" +
|
||||
"d328b497c8f553ab6e08431e7d40bad6b5022033119c0c2b7d792d31f11" +
|
||||
"87779c7bd95aefd93d90a715586d73801d9b47471c601ffffffff010071" +
|
||||
"4460030000001976a914c7b55141d097ea5df7a0ed330cf794376e53ec8" +
|
||||
"d88ac0000000001000000045bf0e214aa4069a3e792ecee1e1bf0c1d397" +
|
||||
"cde8dd08138f4b72a00681743447000000008b48304502200c45de8c4f3" +
|
||||
"e2c1821f2fc878cba97b1e6f8807d94930713aa1c86a67b9bf1e4022100" +
|
||||
"8581abfef2e30f957815fc89978423746b2086375ca8ecf359c85c2a5b7" +
|
||||
"c88ad01410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf64852" +
|
||||
"61c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d" +
|
||||
"3ae37079b794a92d7ec95ffffffffd669f7d7958d40fc59d2253d88e0f2" +
|
||||
"48e29b599c80bbcec344a83dda5f9aa72c000000008a473044022078124" +
|
||||
"c8beeaa825f9e0b30bff96e564dd859432f2d0cb3b72d3d5d93d38d7e93" +
|
||||
"0220691d233b6c0f995be5acb03d70a7f7a65b6bc9bdd426260f38a1346" +
|
||||
"669507a3601410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6" +
|
||||
"485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270e" +
|
||||
"fb1d3ae37079b794a92d7ec95fffffffff878af0d93f5229a68166cf051" +
|
||||
"fd372bb7a537232946e0a46f53636b4dafdaa4000000008c49304602210" +
|
||||
"0c717d1714551663f69c3c5759bdbb3a0fcd3fab023abc0e522fe6440de" +
|
||||
"35d8290221008d9cbe25bffc44af2b18e81c58eb37293fd7fe1c2e7b46f" +
|
||||
"c37ee8c96c50ab1e201410462bb73f76ca0994fcb8b4271e6fb7561f5c0" +
|
||||
"f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f" +
|
||||
"4d87270efb1d3ae37079b794a92d7ec95ffffffff27f2b668859cd7f2f8" +
|
||||
"94aa0fd2d9e60963bcd07c88973f425f999b8cbfd7a1e2000000008c493" +
|
||||
"046022100e00847147cbf517bcc2f502f3ddc6d284358d102ed20d47a8a" +
|
||||
"a788a62f0db780022100d17b2d6fa84dcaf1c95d88d7e7c30385aecf415" +
|
||||
"588d749afd3ec81f6022cecd701410462bb73f76ca0994fcb8b4271e6fb" +
|
||||
"7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018" +
|
||||
"ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffffffff0100c817a8" +
|
||||
"040000001976a914b6efd80d99179f4f4ff6f4dd0a007d018c385d2188a" +
|
||||
"c000000000100000001834537b2f1ce8ef9373a258e10545ce5a50b758d" +
|
||||
"f616cd4356e0032554ebd3c4000000008b483045022100e68f422dd7c34" +
|
||||
"fdce11eeb4509ddae38201773dd62f284e8aa9d96f85099d0b002202243" +
|
||||
"bd399ff96b649a0fad05fa759d6a882f0af8c90cf7632c2840c29070aec" +
|
||||
"20141045e58067e815c2f464c6a2a15f987758374203895710c2d452442" +
|
||||
"e28496ff38ba8f5fd901dc20e29e88477167fe4fc299bf818fd0d9e1632" +
|
||||
"d467b2a3d9503b1aaffffffff0280d7e636030000001976a914f34c3e10" +
|
||||
"eb387efe872acb614c89e78bfca7815d88ac404b4c00000000001976a91" +
|
||||
"4a84e272933aaf87e1715d7786c51dfaeb5b65a6f88ac00000000010000" +
|
||||
"000143ac81c8e6f6ef307dfe17f3d906d999e23e0189fda838c5510d850" +
|
||||
"927e03ae7000000008c4930460221009c87c344760a64cb8ae6685a3eec" +
|
||||
"2c1ac1bed5b88c87de51acd0e124f266c16602210082d07c037359c3a25" +
|
||||
"7b5c63ebd90f5a5edf97b2ac1c434b08ca998839f346dd40141040ba7e5" +
|
||||
"21fa7946d12edbb1d1e95a15c34bd4398195e86433c92b431cd315f455f" +
|
||||
"e30032ede69cad9d1e1ed6c3c4ec0dbfced53438c625462afb792dcb098" +
|
||||
"544bffffffff0240420f00000000001976a9144676d1b820d63ec272f19" +
|
||||
"00d59d43bc6463d96f888ac40420f00000000001976a914648d04341d00" +
|
||||
"d7968b3405c034adc38d4d8fb9bd88ac00000000010000000248cc91750" +
|
||||
"1ea5c55f4a8d2009c0567c40cfe037c2e71af017d0a452ff705e3f10000" +
|
||||
"00008b483045022100bf5fdc86dc5f08a5d5c8e43a8c9d5b1ed8c65562e" +
|
||||
"280007b52b133021acd9acc02205e325d613e555f772802bf413d36ba80" +
|
||||
"7892ed1a690a77811d3033b3de226e0a01410429fa713b124484cb2bd7b" +
|
||||
"5557b2c0b9df7b2b1fee61825eadc5ae6c37a9920d38bfccdc7dc3cb0c4" +
|
||||
"7d7b173dbc9db8d37db0a33ae487982c59c6f8606e9d1791ffffffff41e" +
|
||||
"d70551dd7e841883ab8f0b16bf04176b7d1480e4f0af9f3d4c3595768d0" +
|
||||
"68000000008b4830450221008513ad65187b903aed1102d1d0c47688127" +
|
||||
"658c51106753fed0151ce9c16b80902201432b9ebcb87bd04ceb2de6603" +
|
||||
"5fbbaf4bf8b00d1cfe41f1a1f7338f9ad79d210141049d4cf80125bf50b" +
|
||||
"e1709f718c07ad15d0fc612b7da1f5570dddc35f2a352f0f27c978b0682" +
|
||||
"0edca9ef982c35fda2d255afba340068c5035552368bc7200c1488fffff" +
|
||||
"fff0100093d00000000001976a9148edb68822f1ad580b043c7b3df2e40" +
|
||||
"0f8699eb4888ac00000000"
|
||||
blockBytes, err := hex.DecodeString(blockStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err)
|
||||
return
|
||||
}
|
||||
block, err := btcutil.NewBlockFromBytes(blockBytes)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertP2PubKeyOnly NewBlockFromBytes failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
f := bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateP2PubkeyOnly)
|
||||
|
||||
// Generation pubkey
|
||||
inputStr := "04eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f134c" +
|
||||
"876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad1357231a" +
|
||||
"2252247d97a46a91"
|
||||
inputBytes, err := hex.DecodeString(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err)
|
||||
return
|
||||
}
|
||||
f.Add(inputBytes)
|
||||
|
||||
// Output address of 4th transaction
|
||||
inputStr = "b6efd80d99179f4f4ff6f4dd0a007d018c385d21"
|
||||
inputBytes, err = hex.DecodeString(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err)
|
||||
return
|
||||
}
|
||||
f.Add(inputBytes)
|
||||
|
||||
// Ignore return value -- this is just used to update the filter.
|
||||
_, _ = bloom.NewMerkleBlock(block, f)
|
||||
|
||||
// We should match the generation pubkey
|
||||
inputStr = "147caa76786596590baa4e98f5d9f48b86c7765e489f7a6ff3360fe5c674360b"
|
||||
hash, err := chainhash.NewHashFromStr(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestMerkleBlockP2PubKeyOnly NewHashFromStr failed: %v", err)
|
||||
return
|
||||
}
|
||||
outpoint := wire.NewOutPoint(hash, 0)
|
||||
if !f.MatchesOutPoint(outpoint) {
|
||||
t.Errorf("TestMerkleBlockP2PubKeyOnly didn't match the generation "+
|
||||
"outpoint %s", inputStr)
|
||||
return
|
||||
}
|
||||
|
||||
// We should not match the 4th transaction, which is not p2pk
|
||||
inputStr = "02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041"
|
||||
hash, err = chainhash.NewHashFromStr(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestMerkleBlockP2PubKeyOnly NewHashFromStr failed: %v", err)
|
||||
return
|
||||
}
|
||||
outpoint = wire.NewOutPoint(hash, 0)
|
||||
if f.MatchesOutPoint(outpoint) {
|
||||
t.Errorf("TestMerkleBlockP2PubKeyOnly matched outpoint %s", inputStr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterReload(t *testing.T) {
|
||||
f := bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
|
||||
bFilter := bloom.LoadFilter(f.MsgFilterLoad())
|
||||
if bFilter.MsgFilterLoad() == nil {
|
||||
t.Errorf("TestFilterReload LoadFilter test failed")
|
||||
return
|
||||
}
|
||||
bFilter.Reload(nil)
|
||||
|
||||
if bFilter.MsgFilterLoad() != nil {
|
||||
t.Errorf("TestFilterReload Reload test failed")
|
||||
}
|
||||
}
|
125
btcutil/bloom/merkleblock.go
Normal file
125
btcutil/bloom/merkleblock.go
Normal file
|
@ -0,0 +1,125 @@
|
|||
// Copyright (c) 2013-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bloom
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/blockchain"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
)
|
||||
|
||||
// merkleBlock is used to house intermediate information needed to generate a
|
||||
// wire.MsgMerkleBlock according to a filter.
|
||||
type merkleBlock struct {
|
||||
numTx uint32
|
||||
allHashes []*chainhash.Hash
|
||||
finalHashes []*chainhash.Hash
|
||||
matchedBits []byte
|
||||
bits []byte
|
||||
}
|
||||
|
||||
// calcTreeWidth calculates and returns the number of nodes (width) of a
|
||||
// merkle tree at the given depth-first height.
|
||||
func (m *merkleBlock) calcTreeWidth(height uint32) uint32 {
|
||||
return (m.numTx + (1 << height) - 1) >> height
|
||||
}
|
||||
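calcTreeWidth is simply ceil(numTx / 2^height). A quick standalone check of the formula for a hypothetical 9-transaction block:

```go
package main

import "fmt"

func main() {
	const numTx = uint32(9) // hypothetical 9-transaction block

	// (numTx + 2^h - 1) >> h is ceil(numTx / 2^h): the node count at each
	// level of the partial merkle tree, from the leaves (h=0) upward.
	for height := uint32(0); ; height++ {
		width := (numTx + (1 << height) - 1) >> height
		fmt.Printf("height %d: width %d\n", height, width)
		if width == 1 {
			break // prints widths 9, 5, 3, 2, 1
		}
	}
}
```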
|
||||
// calcHash returns the hash for a sub-tree given a depth-first height and
|
||||
// node position.
|
||||
func (m *merkleBlock) calcHash(height, pos uint32) *chainhash.Hash {
|
||||
if height == 0 {
|
||||
return m.allHashes[pos]
|
||||
}
|
||||
|
||||
var right *chainhash.Hash
|
||||
left := m.calcHash(height-1, pos*2)
|
||||
if pos*2+1 < m.calcTreeWidth(height-1) {
|
||||
right = m.calcHash(height-1, pos*2+1)
|
||||
} else {
|
||||
right = left
|
||||
}
|
||||
return blockchain.HashMerkleBranches(left, right)
|
||||
}
|
||||
|
||||
// traverseAndBuild builds a partial merkle tree using a recursive depth-first
|
||||
// approach. As it calculates the hashes, it also saves whether or not each
|
||||
// node is a parent node and a list of final hashes to be included in the
|
||||
// merkle block.
|
||||
func (m *merkleBlock) traverseAndBuild(height, pos uint32) {
|
||||
// Determine whether this node is a parent of a matched node.
|
||||
var isParent byte
|
||||
for i := pos << height; i < (pos+1)<<height && i < m.numTx; i++ {
|
||||
isParent |= m.matchedBits[i]
|
||||
}
|
||||
m.bits = append(m.bits, isParent)
|
||||
|
||||
// When the node is a leaf node or not a parent of a matched node,
|
||||
// append the hash to the list that will be part of the final merkle
|
||||
// block.
|
||||
if height == 0 || isParent == 0x00 {
|
||||
m.finalHashes = append(m.finalHashes, m.calcHash(height, pos))
|
||||
return
|
||||
}
|
||||
|
||||
// At this point, the node is an internal node and it is the parent of
|
||||
// an included leaf node.
|
||||
|
||||
// Descend into the left child and process its sub-tree.
|
||||
m.traverseAndBuild(height-1, pos*2)
|
||||
|
||||
// Descend into the right child and process its sub-tree if
|
||||
// there is one.
|
||||
if pos*2+1 < m.calcTreeWidth(height-1) {
|
||||
m.traverseAndBuild(height-1, pos*2+1)
|
||||
}
|
||||
}
|
||||
|
||||
// NewMerkleBlock returns a new *wire.MsgMerkleBlock and an array of the matched
|
||||
// transaction index numbers based on the passed block and filter.
|
||||
func NewMerkleBlock(block *btcutil.Block, filter *Filter) (*wire.MsgMerkleBlock, []uint32) {
|
||||
numTx := uint32(len(block.Transactions()))
|
||||
mBlock := merkleBlock{
|
||||
numTx: numTx,
|
||||
allHashes: make([]*chainhash.Hash, 0, numTx),
|
||||
matchedBits: make([]byte, 0, numTx),
|
||||
}
|
||||
|
||||
// Find and keep track of any transactions that match the filter.
|
||||
var matchedIndices []uint32
|
||||
for txIndex, tx := range block.Transactions() {
|
||||
if filter.MatchTxAndUpdate(tx) {
|
||||
mBlock.matchedBits = append(mBlock.matchedBits, 0x01)
|
||||
matchedIndices = append(matchedIndices, uint32(txIndex))
|
||||
} else {
|
||||
mBlock.matchedBits = append(mBlock.matchedBits, 0x00)
|
||||
}
|
||||
mBlock.allHashes = append(mBlock.allHashes, tx.Hash())
|
||||
}
|
||||
|
||||
// Calculate the number of merkle branches (height) in the tree.
|
||||
height := uint32(0)
|
||||
for mBlock.calcTreeWidth(height) > 1 {
|
||||
height++
|
||||
}
|
||||
|
||||
// Build the depth-first partial merkle tree.
|
||||
mBlock.traverseAndBuild(height, 0)
|
||||
|
||||
// Create and return the merkle block.
|
||||
msgMerkleBlock := wire.MsgMerkleBlock{
|
||||
Header: block.MsgBlock().Header,
|
||||
Transactions: mBlock.numTx,
|
||||
Hashes: make([]*chainhash.Hash, 0, len(mBlock.finalHashes)),
|
||||
Flags: make([]byte, (len(mBlock.bits)+7)/8),
|
||||
}
|
||||
for _, hash := range mBlock.finalHashes {
|
||||
_ = msgMerkleBlock.AddTxHash(hash)
|
||||
}
|
||||
for i := uint32(0); i < uint32(len(mBlock.bits)); i++ {
|
||||
msgMerkleBlock.Flags[i/8] |= mBlock.bits[i] << (i % 8)
|
||||
}
|
||||
return &msgMerkleBlock, matchedIndices
|
||||
}
|
74
btcutil/bloom/merkleblock_test.go
Normal file
74
btcutil/bloom/merkleblock_test.go
Normal file
|
@ -0,0 +1,74 @@
|
|||
// Copyright (c) 2013-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bloom_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/btcutil/bloom"
|
||||
)
|
||||
|
||||
func TestMerkleBlock3(t *testing.T) {
|
||||
blockStr := "0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b" +
|
||||
"4b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdc" +
|
||||
"c96b2c3ff60abe184f196367291b4d4c86041b8fa45d630101000000010" +
|
||||
"00000000000000000000000000000000000000000000000000000000000" +
|
||||
"0000ffffffff08044c86041b020a02ffffffff0100f2052a01000000434" +
|
||||
"104ecd3229b0571c3be876feaac0442a9f13c5a572742927af1dc623353" +
|
||||
"ecf8c202225f64868137a18cdd85cbbb4c74fbccfd4f49639cf1bdc94a5" +
|
||||
"672bb15ad5d4cac00000000"
|
||||
blockBytes, err := hex.DecodeString(blockStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestMerkleBlock3 DecodeString failed: %v", err)
|
||||
return
|
||||
}
|
||||
blk, err := btcutil.NewBlockFromBytes(blockBytes)
|
||||
if err != nil {
|
||||
t.Errorf("TestMerkleBlock3 NewBlockFromBytes failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
f := bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
|
||||
|
||||
inputStr := "63194f18be0af63f2c6bc9dc0f777cbefed3d9415c4af83f3ee3a3d669c00cb5"
|
||||
hash, err := chainhash.NewHashFromStr(inputStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestMerkleBlock3 NewHashFromStr failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
f.AddHash(hash)
|
||||
|
||||
mBlock, _ := bloom.NewMerkleBlock(blk, f)
|
||||
|
||||
wantStr := "0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4" +
|
||||
"b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc" +
|
||||
"96b2c3ff60abe184f196367291b4d4c86041b8fa45d630100000001b50c" +
|
||||
"c069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196" +
|
||||
"30101"
|
||||
want, err := hex.DecodeString(wantStr)
|
||||
if err != nil {
|
||||
t.Errorf("TestMerkleBlock3 DecodeString failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
got := bytes.NewBuffer(nil)
|
||||
err = mBlock.BtcEncode(got, wire.ProtocolVersion, wire.LatestEncoding)
|
||||
if err != nil {
|
||||
t.Errorf("TestMerkleBlock3 BtcEncode failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if !bytes.Equal(want, got.Bytes()) {
|
||||
t.Errorf("TestMerkleBlock3 failed merkle block comparison: "+
|
||||
"got %v want %v", got.Bytes(), want)
|
||||
return
|
||||
}
|
||||
}
|
72
btcutil/bloom/murmurhash3.go
Normal file
72
btcutil/bloom/murmurhash3.go
Normal file
|
@ -0,0 +1,72 @@
|
|||
// Copyright (c) 2013, 2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bloom
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// The following constants are used by the MurmurHash3 algorithm.
|
||||
const (
|
||||
murmurC1 = 0xcc9e2d51
|
||||
murmurC2 = 0x1b873593
|
||||
murmurR1 = 15
|
||||
murmurR2 = 13
|
||||
murmurM = 5
|
||||
murmurN = 0xe6546b64
|
||||
)
|
||||
|
||||
// MurmurHash3 implements a non-cryptographic hash function using the
|
||||
// MurmurHash3 algorithm. This implementation yields a 32-bit hash value which
|
||||
// is suitable for general hash-based lookups. The seed can be used to
|
||||
// effectively randomize the hash function. This makes it ideal for use in
|
||||
// bloom filters which need multiple independent hash functions.
|
||||
func MurmurHash3(seed uint32, data []byte) uint32 {
|
||||
dataLen := uint32(len(data))
|
||||
hash := seed
|
||||
k := uint32(0)
|
||||
numBlocks := dataLen / 4
|
||||
|
||||
// Calculate the hash in 4-byte chunks.
|
||||
for i := uint32(0); i < numBlocks; i++ {
|
||||
k = binary.LittleEndian.Uint32(data[i*4:])
|
||||
k *= murmurC1
|
||||
k = (k << murmurR1) | (k >> (32 - murmurR1))
|
||||
k *= murmurC2
|
||||
|
||||
hash ^= k
|
||||
hash = (hash << murmurR2) | (hash >> (32 - murmurR2))
|
||||
hash = hash*murmurM + murmurN
|
||||
}
|
||||
|
||||
// Handle remaining bytes.
|
||||
tailIdx := numBlocks * 4
|
||||
k = 0
|
||||
|
||||
switch dataLen & 3 {
|
||||
case 3:
|
||||
k ^= uint32(data[tailIdx+2]) << 16
|
||||
fallthrough
|
||||
case 2:
|
||||
k ^= uint32(data[tailIdx+1]) << 8
|
||||
fallthrough
|
||||
case 1:
|
||||
k ^= uint32(data[tailIdx])
|
||||
k *= murmurC1
|
||||
k = (k << murmurR1) | (k >> (32 - murmurR1))
|
||||
k *= murmurC2
|
||||
hash ^= k
|
||||
}
|
||||
|
||||
// Finalization.
|
||||
hash ^= dataLen
|
||||
hash ^= hash >> 16
|
||||
hash *= 0x85ebca6b
|
||||
hash ^= hash >> 13
|
||||
hash *= 0xc2b2ae35
|
||||
hash ^= hash >> 16
|
||||
|
||||
return hash
|
||||
}
|
45
btcutil/bloom/murmurhash3_test.go
Normal file
45
btcutil/bloom/murmurhash3_test.go
Normal file
|
@ -0,0 +1,45 @@
|
|||
// Copyright (c) 2013, 2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bloom_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil/bloom"
|
||||
)
|
||||
|
||||
// TestMurmurHash3 ensure the MurmurHash3 function produces the correct hash
|
||||
// when given various seeds and data.
|
||||
func TestMurmurHash3(t *testing.T) {
|
||||
var tests = []struct {
|
||||
seed uint32
|
||||
data []byte
|
||||
out uint32
|
||||
}{
|
||||
{0x00000000, []byte{}, 0x00000000},
|
||||
{0xfba4c795, []byte{}, 0x6a396f08},
|
||||
{0xffffffff, []byte{}, 0x81f16f39},
|
||||
{0x00000000, []byte{0x00}, 0x514e28b7},
|
||||
{0xfba4c795, []byte{0x00}, 0xea3f0b17},
|
||||
{0x00000000, []byte{0xff}, 0xfd6cf10d},
|
||||
{0x00000000, []byte{0x00, 0x11}, 0x16c6b7ab},
|
||||
{0x00000000, []byte{0x00, 0x11, 0x22}, 0x8eb51c3d},
|
||||
{0x00000000, []byte{0x00, 0x11, 0x22, 0x33}, 0xb4471bf8},
|
||||
{0x00000000, []byte{0x00, 0x11, 0x22, 0x33, 0x44}, 0xe2301fa8},
|
||||
{0x00000000, []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, 0xfc2e4a15},
|
||||
{0x00000000, []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66}, 0xb074502c},
|
||||
{0x00000000, []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77}, 0x8034d2a0},
|
||||
{0x00000000, []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88}, 0xb4698def},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
result := bloom.MurmurHash3(test.seed, test.data)
|
||||
if result != test.out {
|
||||
t.Errorf("MurmurHash3 test #%d failed: got %v want %v\n",
|
||||
i, result, test.out)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
28
btcutil/bloom/test_coverage.txt
Normal file
28
btcutil/bloom/test_coverage.txt
Normal file
|
@ -0,0 +1,28 @@
|
|||
|
||||
github.com/conformal/btcutil/bloom/murmurhash3.go MurmurHash3 100.00% (31/31)
|
||||
github.com/conformal/btcutil/bloom/merkleblock.go NewMerkleBlock 100.00% (19/19)
|
||||
github.com/conformal/btcutil/bloom/merkleblock.go merkleBlock.traverseAndBuild 100.00% (10/10)
|
||||
github.com/conformal/btcutil/bloom/merkleblock.go merkleBlock.calcHash 100.00% (8/8)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.maybeAddOutpoint 100.00% (7/7)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.addOutPoint 100.00% (4/4)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.IsLoaded 100.00% (4/4)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.MsgFilterLoad 100.00% (4/4)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.matchesOutPoint 100.00% (4/4)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.MatchesOutPoint 100.00% (4/4)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.MatchTxAndUpdate 100.00% (4/4)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.Matches 100.00% (4/4)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.Add 100.00% (3/3)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.Reload 100.00% (3/3)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.Unload 100.00% (3/3)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.AddShaHash 100.00% (3/3)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.AddOutPoint 100.00% (3/3)
|
||||
github.com/conformal/btcutil/bloom/filter.go minUint32 100.00% (3/3)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.hash 100.00% (2/2)
|
||||
github.com/conformal/btcutil/bloom/merkleblock.go merkleBlock.calcTreeWidth 100.00% (1/1)
|
||||
github.com/conformal/btcutil/bloom/filter.go LoadFilter 100.00% (1/1)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.matchTxAndUpdate 91.30% (21/23)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.matches 85.71% (6/7)
|
||||
github.com/conformal/btcutil/bloom/filter.go NewFilter 81.82% (9/11)
|
||||
github.com/conformal/btcutil/bloom/filter.go Filter.add 80.00% (4/5)
|
||||
github.com/conformal/btcutil/bloom ---------------------------- 96.49% (165/171)
|
||||
|
144
btcutil/certgen.go
Normal file
144
btcutil/certgen.go
Normal file
|
@ -0,0 +1,144 @@
|
|||
// Copyright (c) 2013-2015 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package btcutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
_ "crypto/sha512" // Needed for RegisterHash in init
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewTLSCertPair returns a new PEM-encoded x.509 certificate pair
|
||||
// based on a 521-bit ECDSA private key. The machine's local interface
|
||||
// addresses and all variants of IPv4 and IPv6 localhost are included as
|
||||
// valid IP addresses.
|
||||
func NewTLSCertPair(organization string, validUntil time.Time, extraHosts []string) (cert, key []byte, err error) {
|
||||
now := time.Now()
|
||||
if validUntil.Before(now) {
|
||||
return nil, nil, errors.New("validUntil would create an already-expired certificate")
|
||||
}
|
||||
|
||||
priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// end of ASN.1 time
|
||||
endOfTime := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)
|
||||
if validUntil.After(endOfTime) {
|
||||
validUntil = endOfTime
|
||||
}
|
||||
|
||||
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to generate serial number: %s", err)
|
||||
}
|
||||
|
||||
host, err := os.Hostname()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ipAddresses := []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")}
|
||||
dnsNames := []string{host}
|
||||
if host != "localhost" {
|
||||
dnsNames = append(dnsNames, "localhost")
|
||||
}
|
||||
|
||||
addIP := func(ipAddr net.IP) {
|
||||
for _, ip := range ipAddresses {
|
||||
if ip.Equal(ipAddr) {
|
||||
return
|
||||
}
|
||||
}
|
||||
ipAddresses = append(ipAddresses, ipAddr)
|
||||
}
|
||||
addHost := func(host string) {
|
||||
for _, dnsName := range dnsNames {
|
||||
if host == dnsName {
|
||||
return
|
||||
}
|
||||
}
|
||||
dnsNames = append(dnsNames, host)
|
||||
}
|
||||
|
||||
addrs, err := interfaceAddrs()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for _, a := range addrs {
|
||||
ipAddr, _, err := net.ParseCIDR(a.String())
|
||||
if err == nil {
|
||||
addIP(ipAddr)
|
||||
}
|
||||
}
|
||||
|
||||
for _, hostStr := range extraHosts {
|
||||
host, _, err := net.SplitHostPort(hostStr)
|
||||
if err != nil {
|
||||
host = hostStr
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
addIP(ip)
|
||||
} else {
|
||||
addHost(host)
|
||||
}
|
||||
}
|
||||
|
||||
template := x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{organization},
|
||||
CommonName: host,
|
||||
},
|
||||
NotBefore: now.Add(-time.Hour * 24),
|
||||
NotAfter: validUntil,
|
||||
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature |
|
||||
x509.KeyUsageCertSign,
|
||||
IsCA: true, // so can sign self.
|
||||
BasicConstraintsValid: true,
|
||||
|
||||
DNSNames: dnsNames,
|
||||
IPAddresses: ipAddresses,
|
||||
}
|
||||
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, &template,
|
||||
&template, &priv.PublicKey, priv)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create certificate: %v", err)
|
||||
}
|
||||
|
||||
certBuf := &bytes.Buffer{}
|
||||
err = pem.Encode(certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to encode certificate: %v", err)
|
||||
}
|
||||
|
||||
keybytes, err := x509.MarshalECPrivateKey(priv)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to marshal private key: %v", err)
|
||||
}
|
||||
|
||||
keyBuf := &bytes.Buffer{}
|
||||
err = pem.Encode(keyBuf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keybytes})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to encode private key: %v", err)
|
||||
}
|
||||
|
||||
return certBuf.Bytes(), keyBuf.Bytes(), nil
|
||||
}
|
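A hedged usage sketch for NewTLSCertPair: generate a roughly ten-year self-signed pair for one extra host and write the PEM blobs to disk. The organization, host name, and output file names are illustrative only.

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/btcsuite/btcd/btcutil"
)

func main() {
	// Roughly ten years from now; NewTLSCertPair itself clamps anything
	// past the end of ASN.1 time (2049).
	validUntil := time.Now().Add(10 * 365 * 24 * time.Hour)

	cert, key, err := btcutil.NewTLSCertPair("example org", validUntil,
		[]string{"rpc.example.internal"})
	if err != nil {
		log.Fatal(err)
	}

	// 0600 keeps the private key readable by the owner only.
	if err := os.WriteFile("rpc.cert", cert, 0644); err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("rpc.key", key, 0600); err != nil {
		log.Fatal(err)
	}
}
```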
123
btcutil/certgen_test.go
Normal file
123
btcutil/certgen_test.go
Normal file
|
@ -0,0 +1,123 @@
|
|||
// Copyright (c) 2013-2015 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package btcutil_test
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
//"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
// TestNewTLSCertPair ensures the NewTLSCertPair function works as expected.
|
||||
func TestNewTLSCertPair(t *testing.T) {
|
||||
// Certs don't support sub-second precision, so truncate it now to
|
||||
// ensure the checks later don't fail due to nanosecond precision
|
||||
// differences.
|
||||
validUntil := time.Unix(time.Now().Add(10*365*24*time.Hour).Unix(), 0)
|
||||
org := "test autogenerated cert"
|
||||
extraHosts := []string{"testtlscert.bogus", "localhost", "127.0.0.1"}
|
||||
cert, key, err := btcutil.NewTLSCertPair(org, validUntil, extraHosts)
|
||||
if err != nil {
|
||||
t.Fatalf("failed with unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Ensure the PEM-encoded cert that is returned can be decoded.
|
||||
pemCert, _ := pem.Decode(cert)
|
||||
if pemCert == nil {
|
||||
t.Fatalf("pem.Decode was unable to decode the certificate")
|
||||
}
|
||||
|
||||
// Ensure the PEM-encoded key that is returned can be decoded.
|
||||
pemKey, _ := pem.Decode(key)
|
||||
if pemKey == nil {
|
||||
t.Fatalf("pem.Decode was unable to decode the key")
|
||||
}
|
||||
|
||||
// Ensure the DER-encoded key bytes can be successfully parsed.
|
||||
_, err = x509.ParseECPrivateKey(pemKey.Bytes)
|
||||
if err != nil {
|
||||
t.Fatalf("failed with unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Ensure the DER-encoded cert bytes can be successfully parsed into an X.509
|
||||
// certificate.
|
||||
x509Cert, err := x509.ParseCertificate(pemCert.Bytes)
|
||||
if err != nil {
|
||||
t.Fatalf("failed with unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Ensure the specified organization is correct.
|
||||
x509Orgs := x509Cert.Subject.Organization
|
||||
if len(x509Orgs) == 0 || x509Orgs[0] != org {
|
||||
x509Org := "<no organization>"
|
||||
if len(x509Orgs) > 0 {
|
||||
x509Org = x509Orgs[0]
|
||||
}
|
||||
t.Fatalf("generated cert organization field mismatch, got "+
|
||||
"'%v', want '%v'", x509Org, org)
|
||||
}
|
||||
|
||||
// Ensure the specified valid until value is correct.
|
||||
if !x509Cert.NotAfter.Equal(validUntil) {
|
||||
t.Fatalf("generated cert valid until field mismatch, got %v, "+
|
||||
"want %v", x509Cert.NotAfter, validUntil)
|
||||
}
|
||||
|
||||
// Ensure the specified extra hosts are present.
|
||||
for _, host := range extraHosts {
|
||||
if err := x509Cert.VerifyHostname(host); err != nil {
|
||||
t.Fatalf("failed to verify extra host '%s'", host)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that the Common Name is also the first SAN DNS name.
|
||||
cn := x509Cert.Subject.CommonName
|
||||
san0 := x509Cert.DNSNames[0]
|
||||
if cn != san0 {
|
||||
t.Errorf("common name %s does not match first SAN %s", cn, san0)
|
||||
}
|
||||
|
||||
// Ensure there are no duplicate hosts or IPs.
|
||||
hostCounts := make(map[string]int)
|
||||
for _, host := range x509Cert.DNSNames {
|
||||
hostCounts[host]++
|
||||
}
|
||||
ipCounts := make(map[string]int)
|
||||
for _, ip := range x509Cert.IPAddresses {
|
||||
ipCounts[string(ip)]++
|
||||
}
|
||||
for host, count := range hostCounts {
|
||||
if count != 1 {
|
||||
t.Errorf("host %s appears %d times in certificate", host, count)
|
||||
}
|
||||
}
|
||||
for ipStr, count := range ipCounts {
|
||||
if count != 1 {
|
||||
t.Errorf("ip %s appears %d times in certificate", net.IP(ipStr), count)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the cert can be used for the intended purposes.
|
||||
if !x509Cert.IsCA {
|
||||
t.Fatal("generated cert is not a certificate authority")
|
||||
}
|
||||
if x509Cert.KeyUsage&x509.KeyUsageKeyEncipherment == 0 {
|
||||
t.Fatal("generated cert can't be used for key encipherment")
|
||||
}
|
||||
if x509Cert.KeyUsage&x509.KeyUsageDigitalSignature == 0 {
|
||||
t.Fatal("generated cert can't be used for digital signatures")
|
||||
}
|
||||
if x509Cert.KeyUsage&x509.KeyUsageCertSign == 0 {
|
||||
t.Fatal("generated cert can't be used for signing other certs")
|
||||
}
|
||||
if !x509Cert.BasicConstraintsValid {
|
||||
t.Fatal("generated cert does not have valid basic constraints")
|
||||
}
|
||||
}
|
71
btcutil/coinset/README.md
Normal file
@ -0,0 +1,71 @@
coinset
|
||||
=======
|
||||
|
||||
[](https://travis-ci.org/btcsuite/btcutil)
|
||||
[](http://copyfree.org)
|
||||
[](http://godoc.org/github.com/btcsuite/btcd/btcutil/coinset)
|
||||
|
||||
Package coinset provides bitcoin-specific convenience functions for selecting
|
||||
from and managing sets of unspent transaction outpoints (UTXOs).
|
||||
|
||||
A comprehensive suite of tests is provided to ensure proper functionality. See
|
||||
`test_coverage.txt` for the gocov coverage report. Alternatively, if you are
|
||||
running a POSIX OS, you can run the `cov_report.sh` script for a real-time
|
||||
report.
|
||||
|
||||
## Installation and Updating
|
||||
|
||||
```bash
|
||||
$ go get -u github.com/btcsuite/btcd/btcutil/coinset
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Each unspent transaction outpoint is represented by the Coin interface. An
|
||||
example of a concrete type that implements Coin is coinset.SimpleCoin.
|
||||
|
||||
The typical use case for this library is for creating raw bitcoin transactions
|
||||
given a set of Coins that may be spent by the user, for example as below:
|
||||
|
||||
```Go
|
||||
var unspentCoins = []coinset.Coin{ ... }
|
||||
```
|
||||
|
||||
When the user needs to spend a certain amount, they will need to select a
|
||||
subset of these coins which contain at least that value. CoinSelector is
|
||||
an interface that represents types that implement coin selection algorithms,
subject to various criteria. There are a few provided implementations of CoinSelector:
|
||||
|
||||
- MinIndexCoinSelector
|
||||
|
||||
- MinNumberCoinSelector
|
||||
|
||||
- MaxValueAgeCoinSelector
|
||||
|
||||
- MinPriorityCoinSelector
|
||||
|
||||
For example, if the user wishes to maximize the probability that their
|
||||
transaction is mined quickly, they could use the MaxValueAgeCoinSelector to
|
||||
select high priority coins, then also attach a relatively high fee.
|
||||
|
||||
```Go
|
||||
selector := &coinset.MaxValueAgeCoinSelector{
|
||||
MaxInputs: 10,
|
||||
MinAmountChange: 10000,
|
||||
}
|
||||
selectedCoins, err := selector.CoinSelect(targetAmount + bigFee, unspentCoins)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msgTx := coinset.NewMsgTxWithInputCoins(selectedCoins)
|
||||
...
|
||||
|
||||
```
|
||||
|
||||
The user can then create the msgTx.TxOut's as required, then sign the
|
||||
transaction and transmit it to the network.
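
For illustration, appending the payment and change outputs could look like the
following sketch, where payToPkScript and changePkScript stand in for output
scripts built elsewhere (they are not part of this package):

```Go
msgTx.AddTxOut(wire.NewTxOut(int64(targetAmount), payToPkScript))

// Return any value above the target and fee to a change script.
totalIn := coinset.NewCoinSet(selectedCoins.Coins()).TotalValue()
if change := totalIn - targetAmount - bigFee; change > 0 {
	msgTx.AddTxOut(wire.NewTxOut(int64(change), changePkScript))
}
```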
|
||||
|
||||
## License
|
||||
|
||||
Package coinset is licensed under the [copyfree](http://copyfree.org) ISC
|
||||
License.
|
396
btcutil/coinset/coins.go
Normal file
@ -0,0 +1,396 @@
// Copyright (c) 2014-2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package coinset
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"errors"
|
||||
"sort"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
)
|
||||
|
||||
// Coin represents a spendable transaction outpoint
|
||||
type Coin interface {
|
||||
Hash() *chainhash.Hash
|
||||
Index() uint32
|
||||
Value() btcutil.Amount
|
||||
PkScript() []byte
|
||||
NumConfs() int64
|
||||
ValueAge() int64
|
||||
}
|
||||
|
||||
// Coins represents a set of Coins
|
||||
type Coins interface {
|
||||
Coins() []Coin
|
||||
}
|
||||
|
||||
// CoinSet is a utility struct for the modifications of a set of
|
||||
// Coins that implements the Coins interface. To create a CoinSet,
|
||||
// you must call NewCoinSet with nil for an empty set or a slice of
|
||||
// coins as the initial contents.
|
||||
//
|
||||
// It is important to note that all the Coins being added or removed
|
||||
// from a CoinSet must have a constant ValueAge() during the use of
|
||||
// the CoinSet, otherwise the cached values will be incorrect.
|
||||
type CoinSet struct {
|
||||
coinList *list.List
|
||||
totalValue btcutil.Amount
|
||||
totalValueAge int64
|
||||
}
|
||||
|
||||
// Ensure that CoinSet is a Coins
|
||||
var _ Coins = NewCoinSet(nil)
|
||||
|
||||
// NewCoinSet creates a CoinSet containing the coins provided.
|
||||
// To create an empty CoinSet, you may pass nil as the coins input parameter.
|
||||
func NewCoinSet(coins []Coin) *CoinSet {
|
||||
newCoinSet := &CoinSet{
|
||||
coinList: list.New(),
|
||||
totalValue: 0,
|
||||
totalValueAge: 0,
|
||||
}
|
||||
for _, coin := range coins {
|
||||
newCoinSet.PushCoin(coin)
|
||||
}
|
||||
return newCoinSet
|
||||
}
|
||||
|
||||
// Coins returns a new slice of the coins contained in the set.
|
||||
func (cs *CoinSet) Coins() []Coin {
|
||||
coins := make([]Coin, cs.coinList.Len())
|
||||
for i, e := 0, cs.coinList.Front(); e != nil; i, e = i+1, e.Next() {
|
||||
coins[i] = e.Value.(Coin)
|
||||
}
|
||||
return coins
|
||||
}
|
||||
|
||||
// TotalValue returns the total value of the coins in the set.
|
||||
func (cs *CoinSet) TotalValue() (value btcutil.Amount) {
|
||||
return cs.totalValue
|
||||
}
|
||||
|
||||
// TotalValueAge returns the total value * number of confirmations
|
||||
// of the coins in the set.
|
||||
func (cs *CoinSet) TotalValueAge() (valueAge int64) {
|
||||
return cs.totalValueAge
|
||||
}
|
||||
|
||||
// Num returns the number of coins in the set
|
||||
func (cs *CoinSet) Num() int {
|
||||
return cs.coinList.Len()
|
||||
}
|
||||
|
||||
// PushCoin adds a coin to the end of the list and updates
|
||||
// the cached value amounts.
|
||||
func (cs *CoinSet) PushCoin(c Coin) {
|
||||
cs.coinList.PushBack(c)
|
||||
cs.totalValue += c.Value()
|
||||
cs.totalValueAge += c.ValueAge()
|
||||
}
|
||||
|
||||
// PopCoin removes the last coin on the list and returns it.
|
||||
func (cs *CoinSet) PopCoin() Coin {
|
||||
back := cs.coinList.Back()
|
||||
if back == nil {
|
||||
return nil
|
||||
}
|
||||
return cs.removeElement(back)
|
||||
}
|
||||
|
||||
// ShiftCoin removes the first coin on the list and returns it.
|
||||
func (cs *CoinSet) ShiftCoin() Coin {
|
||||
front := cs.coinList.Front()
|
||||
if front == nil {
|
||||
return nil
|
||||
}
|
||||
return cs.removeElement(front)
|
||||
}
|
||||
|
||||
// removeElement updates the cached value amounts in the CoinSet,
|
||||
// removes the element from the list, then returns the Coin that
|
||||
// was removed to the caller.
|
||||
func (cs *CoinSet) removeElement(e *list.Element) Coin {
|
||||
c := e.Value.(Coin)
|
||||
cs.coinList.Remove(e)
|
||||
cs.totalValue -= c.Value()
|
||||
cs.totalValueAge -= c.ValueAge()
|
||||
return c
|
||||
}
|
||||
|
||||
// NewMsgTxWithInputCoins takes the coins in the CoinSet and makes them
|
||||
// the inputs to a new wire.MsgTx which is returned.
|
||||
func NewMsgTxWithInputCoins(txVersion int32, inputCoins Coins) *wire.MsgTx {
|
||||
msgTx := wire.NewMsgTx(txVersion)
|
||||
coins := inputCoins.Coins()
|
||||
msgTx.TxIn = make([]*wire.TxIn, len(coins))
|
||||
for i, coin := range coins {
|
||||
msgTx.TxIn[i] = &wire.TxIn{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Hash: *coin.Hash(),
|
||||
Index: coin.Index(),
|
||||
},
|
||||
SignatureScript: nil,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
}
|
||||
}
|
||||
return msgTx
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrCoinsNoSelectionAvailable is returned when a CoinSelector believes there is no
|
||||
// possible combination of coins which can meet the requirements provided to the selector.
|
||||
ErrCoinsNoSelectionAvailable = errors.New("no coin selection possible")
|
||||
)
|
||||
|
||||
// satisfiesTargetValue checks that the totalValue is either exactly the targetValue
|
||||
// or is greater than the targetValue by at least the minChange amount.
|
||||
func satisfiesTargetValue(targetValue, minChange, totalValue btcutil.Amount) bool {
|
||||
return (totalValue == targetValue || totalValue >= targetValue+minChange)
|
||||
}
|
||||
|
||||
// CoinSelector is an interface that wraps the CoinSelect method.
|
||||
//
|
||||
// CoinSelect will attempt to select a subset of the coins which has at
|
||||
// least the targetValue amount. CoinSelect is not guaranteed to return a
|
||||
// selection of coins even if the total value of coins given is greater
|
||||
// than the target value.
|
||||
//
|
||||
// The exact choice of coins in the subset will be implementation specific.
|
||||
//
|
||||
// It is important to note that the Coins being used as inputs need to have
|
||||
// a constant ValueAge() during the execution of CoinSelect.
|
||||
type CoinSelector interface {
|
||||
CoinSelect(targetValue btcutil.Amount, coins []Coin) (Coins, error)
|
||||
}
|
||||
|
||||
// MinIndexCoinSelector is a CoinSelector that attempts to construct a
|
||||
// selection of coins whose total value is at least targetValue and prefers
|
||||
// any number of lower indexes (as in the ordered array) over higher ones.
|
||||
type MinIndexCoinSelector struct {
|
||||
MaxInputs int
|
||||
MinChangeAmount btcutil.Amount
|
||||
}
|
||||
|
||||
// CoinSelect will attempt to select coins using the algorithm described
|
||||
// in the MinIndexCoinSelector struct.
|
||||
func (s MinIndexCoinSelector) CoinSelect(targetValue btcutil.Amount, coins []Coin) (Coins, error) {
|
||||
cs := NewCoinSet(nil)
|
||||
for n := 0; n < len(coins) && n < s.MaxInputs; n++ {
|
||||
cs.PushCoin(coins[n])
|
||||
if satisfiesTargetValue(targetValue, s.MinChangeAmount, cs.TotalValue()) {
|
||||
return cs, nil
|
||||
}
|
||||
}
|
||||
return nil, ErrCoinsNoSelectionAvailable
|
||||
}
|
||||
|
||||
// MinNumberCoinSelector is a CoinSelector that attempts to construct
|
||||
// a selection of coins whose total value is at least targetValue
|
||||
// that uses as few of the inputs as possible.
|
||||
type MinNumberCoinSelector struct {
|
||||
MaxInputs int
|
||||
MinChangeAmount btcutil.Amount
|
||||
}
|
||||
|
||||
// CoinSelect will attempt to select coins using the algorithm described
|
||||
// in the MinNumberCoinSelector struct.
|
||||
func (s MinNumberCoinSelector) CoinSelect(targetValue btcutil.Amount, coins []Coin) (Coins, error) {
|
||||
sortedCoins := make([]Coin, 0, len(coins))
|
||||
sortedCoins = append(sortedCoins, coins...)
|
||||
sort.Sort(sort.Reverse(byAmount(sortedCoins)))
|
||||
|
||||
return MinIndexCoinSelector(s).CoinSelect(targetValue, sortedCoins)
|
||||
}
|
||||
|
||||
// MaxValueAgeCoinSelector is a CoinSelector that attempts to construct
|
||||
// a selection of coins whose total value is at least targetValue
|
||||
// that has as much input value-age as possible.
|
||||
//
|
||||
// This would be useful in the case where you want to maximize
|
||||
// likelihood of the inclusion of your transaction in the next mined
|
||||
// block.
|
||||
type MaxValueAgeCoinSelector struct {
|
||||
MaxInputs int
|
||||
MinChangeAmount btcutil.Amount
|
||||
}
|
||||
|
||||
// CoinSelect will attempt to select coins using the algorithm described
|
||||
// in the MaxValueAgeCoinSelector struct.
|
||||
func (s MaxValueAgeCoinSelector) CoinSelect(targetValue btcutil.Amount, coins []Coin) (Coins, error) {
|
||||
sortedCoins := make([]Coin, 0, len(coins))
|
||||
sortedCoins = append(sortedCoins, coins...)
|
||||
sort.Sort(sort.Reverse(byValueAge(sortedCoins)))
|
||||
|
||||
return MinIndexCoinSelector(s).CoinSelect(targetValue, sortedCoins)
|
||||
}
|
||||
|
||||
// MinPriorityCoinSelector is a CoinSelector that attempts to construct
|
||||
// a selection of coins whose total value is at least targetValue and
|
||||
// whose average value-age per input is greater than MinAvgValueAgePerInput.
|
||||
// If there is change, it must exceed MinChangeAmount to be a valid selection.
|
||||
//
|
||||
// When possible, MinPriorityCoinSelector will attempt to reduce the average
|
||||
// input priority over the threshold, but no guarantees will be made as to
|
||||
// minimality of the selection. The selection below is almost certainly
|
||||
// suboptimal.
|
||||
//
|
||||
type MinPriorityCoinSelector struct {
|
||||
MaxInputs int
|
||||
MinChangeAmount btcutil.Amount
|
||||
MinAvgValueAgePerInput int64
|
||||
}
|
||||
|
||||
// CoinSelect will attempt to select coins using the algorithm described
|
||||
// in the MinPriorityCoinSelector struct.
|
||||
func (s MinPriorityCoinSelector) CoinSelect(targetValue btcutil.Amount, coins []Coin) (Coins, error) {
|
||||
possibleCoins := make([]Coin, 0, len(coins))
|
||||
possibleCoins = append(possibleCoins, coins...)
|
||||
|
||||
sort.Sort(byValueAge(possibleCoins))
|
||||
|
||||
// find the first coin with sufficient valueAge
|
||||
cutoffIndex := -1
|
||||
for i := 0; i < len(possibleCoins); i++ {
|
||||
if possibleCoins[i].ValueAge() >= s.MinAvgValueAgePerInput {
|
||||
cutoffIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if cutoffIndex < 0 {
|
||||
return nil, ErrCoinsNoSelectionAvailable
|
||||
}
|
||||
|
||||
// create sets of input coins that will obey minimum average valueAge
|
||||
for i := cutoffIndex; i < len(possibleCoins); i++ {
|
||||
possibleHighCoins := possibleCoins[cutoffIndex : i+1]
|
||||
|
||||
// choose a set of high-enough valueAge coins
|
||||
highSelect, err := (&MinNumberCoinSelector{
|
||||
MaxInputs: s.MaxInputs,
|
||||
MinChangeAmount: s.MinChangeAmount,
|
||||
}).CoinSelect(targetValue, possibleHighCoins)
|
||||
|
||||
if err != nil {
|
||||
// attempt to add available low priority to make a solution
|
||||
|
||||
for numLow := 1; numLow <= cutoffIndex && numLow+(i-cutoffIndex) <= s.MaxInputs; numLow++ {
|
||||
allHigh := NewCoinSet(possibleCoins[cutoffIndex : i+1])
|
||||
newTargetValue := targetValue - allHigh.TotalValue()
|
||||
newMaxInputs := allHigh.Num() + numLow
|
||||
if newMaxInputs > numLow {
|
||||
newMaxInputs = numLow
|
||||
}
|
||||
newMinAvgValueAge := ((s.MinAvgValueAgePerInput * int64(allHigh.Num()+numLow)) - allHigh.TotalValueAge()) / int64(numLow)
|
||||
|
||||
// find the minimum priority that can be added to set
|
||||
lowSelect, err := (&MinPriorityCoinSelector{
|
||||
MaxInputs: newMaxInputs,
|
||||
MinChangeAmount: s.MinChangeAmount,
|
||||
MinAvgValueAgePerInput: newMinAvgValueAge,
|
||||
}).CoinSelect(newTargetValue, possibleCoins[0:cutoffIndex])
|
||||
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, coin := range lowSelect.Coins() {
|
||||
allHigh.PushCoin(coin)
|
||||
}
|
||||
|
||||
return allHigh, nil
|
||||
}
|
||||
// oh well, couldn't fix, try to add more high priority to the set.
|
||||
} else {
|
||||
extendedCoins := NewCoinSet(highSelect.Coins())
|
||||
|
||||
// attempt to lower priority towards target with lowest ones first
|
||||
for n := 0; n < cutoffIndex; n++ {
|
||||
if extendedCoins.Num() >= s.MaxInputs {
|
||||
break
|
||||
}
|
||||
if possibleCoins[n].ValueAge() == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
extendedCoins.PushCoin(possibleCoins[n])
|
||||
if extendedCoins.TotalValueAge()/int64(extendedCoins.Num()) < s.MinAvgValueAgePerInput {
|
||||
extendedCoins.PopCoin()
|
||||
continue
|
||||
}
|
||||
}
|
||||
return extendedCoins, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, ErrCoinsNoSelectionAvailable
|
||||
}
|
||||
|
||||
type byValueAge []Coin
|
||||
|
||||
func (a byValueAge) Len() int { return len(a) }
|
||||
func (a byValueAge) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byValueAge) Less(i, j int) bool { return a[i].ValueAge() < a[j].ValueAge() }
|
||||
|
||||
type byAmount []Coin
|
||||
|
||||
func (a byAmount) Len() int { return len(a) }
|
||||
func (a byAmount) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byAmount) Less(i, j int) bool { return a[i].Value() < a[j].Value() }
|
||||
|
||||
// SimpleCoin defines a concrete instance of Coin that is backed by a
|
||||
// btcutil.Tx, a specific outpoint index, and the number of confirmations
|
||||
// that transaction has had.
|
||||
type SimpleCoin struct {
|
||||
Tx *btcutil.Tx
|
||||
TxIndex uint32
|
||||
TxNumConfs int64
|
||||
}
|
||||
|
||||
// Ensure that SimpleCoin is a Coin
|
||||
var _ Coin = &SimpleCoin{}
|
||||
|
||||
// Hash returns the hash value of the transaction on which the Coin is an output
|
||||
func (c *SimpleCoin) Hash() *chainhash.Hash {
|
||||
return c.Tx.Hash()
|
||||
}
|
||||
|
||||
// Index returns the index of the output on the transaction which the Coin represents
|
||||
func (c *SimpleCoin) Index() uint32 {
|
||||
return c.TxIndex
|
||||
}
|
||||
|
||||
// txOut returns the TxOut of the transaction the Coin represents
|
||||
func (c *SimpleCoin) txOut() *wire.TxOut {
|
||||
return c.Tx.MsgTx().TxOut[c.TxIndex]
|
||||
}
|
||||
|
||||
// Value returns the value of the Coin
|
||||
func (c *SimpleCoin) Value() btcutil.Amount {
|
||||
return btcutil.Amount(c.txOut().Value)
|
||||
}
|
||||
|
||||
// PkScript returns the outpoint script of the Coin.
|
||||
//
|
||||
// This can be used to determine what type of script the Coin uses
|
||||
// and extract standard addresses if possible using
|
||||
// txscript.ExtractPkScriptAddrs for example.
|
||||
func (c *SimpleCoin) PkScript() []byte {
|
||||
return c.txOut().PkScript
|
||||
}
|
||||
|
||||
// NumConfs returns the number of confirmations that the transaction the Coin references
|
||||
// has had.
|
||||
func (c *SimpleCoin) NumConfs() int64 {
|
||||
return c.TxNumConfs
|
||||
}
|
||||
|
||||
// ValueAge returns the product of the value and the number of confirmations. This is
|
||||
// used as an input to calculate the priority of the transaction.
|
||||
func (c *SimpleCoin) ValueAge() int64 {
|
||||
return c.TxNumConfs * int64(c.Value())
|
||||
}
|
260
btcutil/coinset/coins_test.go
Normal file
@ -0,0 +1,260 @@
// Copyright (c) 2014-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package coinset_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/btcutil/coinset"
|
||||
)
|
||||
|
||||
type TestCoin struct {
|
||||
TxHash *chainhash.Hash
|
||||
TxIndex uint32
|
||||
TxValue btcutil.Amount
|
||||
TxNumConfs int64
|
||||
}
|
||||
|
||||
func (c *TestCoin) Hash() *chainhash.Hash { return c.TxHash }
|
||||
func (c *TestCoin) Index() uint32 { return c.TxIndex }
|
||||
func (c *TestCoin) Value() btcutil.Amount { return c.TxValue }
|
||||
func (c *TestCoin) PkScript() []byte { return nil }
|
||||
func (c *TestCoin) NumConfs() int64 { return c.TxNumConfs }
|
||||
func (c *TestCoin) ValueAge() int64 { return int64(c.TxValue) * c.TxNumConfs }
|
||||
|
||||
func NewCoin(index int64, value btcutil.Amount, numConfs int64) coinset.Coin {
|
||||
h := sha256.New()
|
||||
_, _ = h.Write([]byte(fmt.Sprintf("%d", index)))
|
||||
hash, _ := chainhash.NewHash(h.Sum(nil))
|
||||
c := &TestCoin{
|
||||
TxHash: hash,
|
||||
TxIndex: 0,
|
||||
TxValue: value,
|
||||
TxNumConfs: numConfs,
|
||||
}
|
||||
return coinset.Coin(c)
|
||||
}
|
||||
|
||||
type coinSelectTest struct {
|
||||
selector coinset.CoinSelector
|
||||
inputCoins []coinset.Coin
|
||||
targetValue btcutil.Amount
|
||||
expectedCoins []coinset.Coin
|
||||
expectedError error
|
||||
}
|
||||
|
||||
func testCoinSelector(tests []coinSelectTest, t *testing.T) {
|
||||
for testIndex, test := range tests {
|
||||
cs, err := test.selector.CoinSelect(test.targetValue, test.inputCoins)
|
||||
if err != test.expectedError {
|
||||
t.Errorf("[%d] expected a different error: got=%v, expected=%v", testIndex, err, test.expectedError)
|
||||
continue
|
||||
}
|
||||
if test.expectedCoins != nil {
|
||||
if cs == nil {
|
||||
t.Errorf("[%d] expected non-nil coinset", testIndex)
|
||||
continue
|
||||
}
|
||||
coins := cs.Coins()
|
||||
if len(coins) != len(test.expectedCoins) {
|
||||
t.Errorf("[%d] expected different number of coins: got=%d, expected=%d", testIndex, len(coins), len(test.expectedCoins))
|
||||
continue
|
||||
}
|
||||
for n := 0; n < len(test.expectedCoins); n++ {
|
||||
if coins[n] != test.expectedCoins[n] {
|
||||
t.Errorf("[%d] expected different coins at coin index %d: got=%#v, expected=%#v", testIndex, n, coins[n], test.expectedCoins[n])
|
||||
continue
|
||||
}
|
||||
}
|
||||
coinSet := coinset.NewCoinSet(coins)
|
||||
if coinSet.TotalValue() < test.targetValue {
|
||||
t.Errorf("[%d] targetValue not satistifed", testIndex)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var coins = []coinset.Coin{
|
||||
NewCoin(1, 100000000, 1),
|
||||
NewCoin(2, 10000000, 20),
|
||||
NewCoin(3, 50000000, 0),
|
||||
NewCoin(4, 25000000, 6),
|
||||
}
|
||||
|
||||
func TestCoinSet(t *testing.T) {
|
||||
cs := coinset.NewCoinSet(nil)
|
||||
if cs.PopCoin() != nil {
|
||||
t.Error("Expected popCoin of empty to be nil")
|
||||
}
|
||||
if cs.ShiftCoin() != nil {
|
||||
t.Error("Expected shiftCoin of empty to be nil")
|
||||
}
|
||||
|
||||
cs.PushCoin(coins[0])
|
||||
cs.PushCoin(coins[1])
|
||||
cs.PushCoin(coins[2])
|
||||
if cs.PopCoin() != coins[2] {
|
||||
t.Error("Expected third coin")
|
||||
}
|
||||
if cs.ShiftCoin() != coins[0] {
|
||||
t.Error("Expected first coin")
|
||||
}
|
||||
|
||||
mtx := coinset.NewMsgTxWithInputCoins(wire.TxVersion, cs)
|
||||
if len(mtx.TxIn) != 1 {
|
||||
t.Errorf("Expected only 1 TxIn, got %d", len(mtx.TxIn))
|
||||
}
|
||||
op := mtx.TxIn[0].PreviousOutPoint
|
||||
if !op.Hash.IsEqual(coins[1].Hash()) || op.Index != coins[1].Index() {
|
||||
t.Errorf("Expected the second coin to be added as input to mtx")
|
||||
}
|
||||
}
|
||||
|
||||
var minIndexSelectors = []coinset.MinIndexCoinSelector{
|
||||
{MaxInputs: 10, MinChangeAmount: 10000},
|
||||
{MaxInputs: 2, MinChangeAmount: 10000},
|
||||
}
|
||||
|
||||
var minIndexTests = []coinSelectTest{
|
||||
{minIndexSelectors[0], coins, coins[0].Value() - minIndexSelectors[0].MinChangeAmount, []coinset.Coin{coins[0]}, nil},
|
||||
{minIndexSelectors[0], coins, coins[0].Value() - minIndexSelectors[0].MinChangeAmount + 1, []coinset.Coin{coins[0], coins[1]}, nil},
|
||||
{minIndexSelectors[0], coins, 100000000, []coinset.Coin{coins[0]}, nil},
|
||||
{minIndexSelectors[0], coins, 110000000, []coinset.Coin{coins[0], coins[1]}, nil},
|
||||
{minIndexSelectors[0], coins, 140000000, []coinset.Coin{coins[0], coins[1], coins[2]}, nil},
|
||||
{minIndexSelectors[0], coins, 200000000, nil, coinset.ErrCoinsNoSelectionAvailable},
|
||||
{minIndexSelectors[1], coins, 10000000, []coinset.Coin{coins[0]}, nil},
|
||||
{minIndexSelectors[1], coins, 110000000, []coinset.Coin{coins[0], coins[1]}, nil},
|
||||
{minIndexSelectors[1], coins, 140000000, nil, coinset.ErrCoinsNoSelectionAvailable},
|
||||
}
|
||||
|
||||
func TestMinIndexSelector(t *testing.T) {
|
||||
testCoinSelector(minIndexTests, t)
|
||||
}
|
||||
|
||||
var minNumberSelectors = []coinset.MinNumberCoinSelector{
|
||||
{MaxInputs: 10, MinChangeAmount: 10000},
|
||||
{MaxInputs: 2, MinChangeAmount: 10000},
|
||||
}
|
||||
|
||||
var minNumberTests = []coinSelectTest{
|
||||
{minNumberSelectors[0], coins, coins[0].Value() - minNumberSelectors[0].MinChangeAmount, []coinset.Coin{coins[0]}, nil},
|
||||
{minNumberSelectors[0], coins, coins[0].Value() - minNumberSelectors[0].MinChangeAmount + 1, []coinset.Coin{coins[0], coins[2]}, nil},
|
||||
{minNumberSelectors[0], coins, 100000000, []coinset.Coin{coins[0]}, nil},
|
||||
{minNumberSelectors[0], coins, 110000000, []coinset.Coin{coins[0], coins[2]}, nil},
|
||||
{minNumberSelectors[0], coins, 160000000, []coinset.Coin{coins[0], coins[2], coins[3]}, nil},
|
||||
{minNumberSelectors[0], coins, 184990000, []coinset.Coin{coins[0], coins[2], coins[3], coins[1]}, nil},
|
||||
{minNumberSelectors[0], coins, 184990001, nil, coinset.ErrCoinsNoSelectionAvailable},
|
||||
{minNumberSelectors[0], coins, 200000000, nil, coinset.ErrCoinsNoSelectionAvailable},
|
||||
{minNumberSelectors[1], coins, 10000000, []coinset.Coin{coins[0]}, nil},
|
||||
{minNumberSelectors[1], coins, 110000000, []coinset.Coin{coins[0], coins[2]}, nil},
|
||||
{minNumberSelectors[1], coins, 140000000, []coinset.Coin{coins[0], coins[2]}, nil},
|
||||
}
|
||||
|
||||
func TestMinNumberSelector(t *testing.T) {
|
||||
testCoinSelector(minNumberTests, t)
|
||||
}
|
||||
|
||||
var maxValueAgeSelectors = []coinset.MaxValueAgeCoinSelector{
|
||||
{MaxInputs: 10, MinChangeAmount: 10000},
|
||||
{MaxInputs: 2, MinChangeAmount: 10000},
|
||||
}
|
||||
|
||||
var maxValueAgeTests = []coinSelectTest{
|
||||
{maxValueAgeSelectors[0], coins, 100000, []coinset.Coin{coins[1]}, nil},
|
||||
{maxValueAgeSelectors[0], coins, 10000000, []coinset.Coin{coins[1]}, nil},
|
||||
{maxValueAgeSelectors[0], coins, 10000001, []coinset.Coin{coins[1], coins[3]}, nil},
|
||||
{maxValueAgeSelectors[0], coins, 35000000, []coinset.Coin{coins[1], coins[3]}, nil},
|
||||
{maxValueAgeSelectors[0], coins, 135000000, []coinset.Coin{coins[1], coins[3], coins[0]}, nil},
|
||||
{maxValueAgeSelectors[0], coins, 185000000, []coinset.Coin{coins[1], coins[3], coins[0], coins[2]}, nil},
|
||||
{maxValueAgeSelectors[0], coins, 200000000, nil, coinset.ErrCoinsNoSelectionAvailable},
|
||||
{maxValueAgeSelectors[1], coins, 40000000, nil, coinset.ErrCoinsNoSelectionAvailable},
|
||||
{maxValueAgeSelectors[1], coins, 35000000, []coinset.Coin{coins[1], coins[3]}, nil},
|
||||
{maxValueAgeSelectors[1], coins, 34990001, nil, coinset.ErrCoinsNoSelectionAvailable},
|
||||
}
|
||||
|
||||
func TestMaxValueAgeSelector(t *testing.T) {
|
||||
testCoinSelector(maxValueAgeTests, t)
|
||||
}
|
||||
|
||||
var minPrioritySelectors = []coinset.MinPriorityCoinSelector{
|
||||
{MaxInputs: 10, MinChangeAmount: 10000, MinAvgValueAgePerInput: 100000000},
|
||||
{MaxInputs: 02, MinChangeAmount: 10000, MinAvgValueAgePerInput: 200000000},
|
||||
{MaxInputs: 02, MinChangeAmount: 10000, MinAvgValueAgePerInput: 150000000},
|
||||
{MaxInputs: 03, MinChangeAmount: 10000, MinAvgValueAgePerInput: 150000000},
|
||||
{MaxInputs: 10, MinChangeAmount: 10000, MinAvgValueAgePerInput: 1000000000},
|
||||
{MaxInputs: 10, MinChangeAmount: 10000, MinAvgValueAgePerInput: 175000000},
|
||||
{MaxInputs: 02, MinChangeAmount: 10000, MinAvgValueAgePerInput: 125000000},
|
||||
}
|
||||
|
||||
var connectedCoins = []coinset.Coin{coins[0], coins[1], coins[3]}
|
||||
|
||||
var minPriorityTests = []coinSelectTest{
|
||||
{minPrioritySelectors[0], connectedCoins, 100000000, []coinset.Coin{coins[0]}, nil},
|
||||
{minPrioritySelectors[0], connectedCoins, 125000000, []coinset.Coin{coins[0], coins[3]}, nil},
|
||||
{minPrioritySelectors[0], connectedCoins, 135000000, []coinset.Coin{coins[0], coins[3], coins[1]}, nil},
|
||||
{minPrioritySelectors[0], connectedCoins, 140000000, nil, coinset.ErrCoinsNoSelectionAvailable},
|
||||
{minPrioritySelectors[1], connectedCoins, 100000000, nil, coinset.ErrCoinsNoSelectionAvailable},
|
||||
{minPrioritySelectors[1], connectedCoins, 10000000, []coinset.Coin{coins[1]}, nil},
|
||||
{minPrioritySelectors[1], connectedCoins, 100000000, nil, coinset.ErrCoinsNoSelectionAvailable},
|
||||
{minPrioritySelectors[2], connectedCoins, 11000000, []coinset.Coin{coins[3]}, nil},
|
||||
{minPrioritySelectors[2], connectedCoins, 25000001, []coinset.Coin{coins[3], coins[1]}, nil},
|
||||
{minPrioritySelectors[3], connectedCoins, 25000001, []coinset.Coin{coins[3], coins[1], coins[0]}, nil},
|
||||
{minPrioritySelectors[3], connectedCoins, 100000000, []coinset.Coin{coins[3], coins[1], coins[0]}, nil},
|
||||
{minPrioritySelectors[3], []coinset.Coin{coins[1], coins[2]}, 10000000, []coinset.Coin{coins[1]}, nil},
|
||||
{minPrioritySelectors[4], connectedCoins, 1, nil, coinset.ErrCoinsNoSelectionAvailable},
|
||||
{minPrioritySelectors[5], connectedCoins, 20000000, []coinset.Coin{coins[1], coins[3]}, nil},
|
||||
{minPrioritySelectors[6], connectedCoins, 25000000, []coinset.Coin{coins[3], coins[0]}, nil},
|
||||
}
|
||||
|
||||
func TestMinPrioritySelector(t *testing.T) {
|
||||
testCoinSelector(minPriorityTests, t)
|
||||
}
|
||||
|
||||
var (
|
||||
// should be two outpoints, with 1st one having 0.035BTC value.
|
||||
testSimpleCoinNumConfs = int64(1)
|
||||
testSimpleCoinTxHash = "9b5965c86de51d5dc824e179a05cf232db78c80ae86ca9d7cb2a655b5e19c1e2"
|
||||
testSimpleCoinTxHex = "0100000001a214a110f79e4abe073865ea5b3745c6e82c913bad44be70652804a5e4003b0a010000008c493046022100edd18a69664efa57264be207100c203e6cade1888cbb88a0ad748548256bb2f0022100f1027dc2e6c7f248d78af1dd90027b5b7d8ec563bb62aa85d4e74d6376f3868c0141048f3757b65ed301abd1b0e8942d1ab5b50594d3314cff0299f300c696376a0a9bf72e74710a8af7a5372d4af4bb519e2701a094ef48c8e48e3b65b28502452dceffffffff02e0673500000000001976a914686dd149a79b4a559d561fbc396d3e3c6628b98d88ace86ef102000000001976a914ac3f995655e81b875b38b64351d6f896ddbfc68588ac00000000"
|
||||
testSimpleCoinTxValue0 = btcutil.Amount(3500000)
|
||||
testSimpleCoinTxValueAge0 = int64(testSimpleCoinTxValue0) * testSimpleCoinNumConfs
|
||||
testSimpleCoinTxPkScript0Hex = "76a914686dd149a79b4a559d561fbc396d3e3c6628b98d88ac"
|
||||
testSimpleCoinTxPkScript0Bytes, _ = hex.DecodeString(testSimpleCoinTxPkScript0Hex)
|
||||
testSimpleCoinTxBytes, _ = hex.DecodeString(testSimpleCoinTxHex)
|
||||
testSimpleCoinTx, _ = btcutil.NewTxFromBytes(testSimpleCoinTxBytes)
|
||||
testSimpleCoin = &coinset.SimpleCoin{
|
||||
Tx: testSimpleCoinTx,
|
||||
TxIndex: 0,
|
||||
TxNumConfs: testSimpleCoinNumConfs,
|
||||
}
|
||||
)
|
||||
|
||||
func TestSimpleCoin(t *testing.T) {
|
||||
if testSimpleCoin.Hash().String() != testSimpleCoinTxHash {
|
||||
t.Error("Different value for tx hash than expected")
|
||||
}
|
||||
if testSimpleCoin.Index() != 0 {
|
||||
t.Error("Different value for index of outpoint than expected")
|
||||
}
|
||||
if testSimpleCoin.Value() != testSimpleCoinTxValue0 {
|
||||
t.Error("Different value of coin value than expected")
|
||||
}
|
||||
if !bytes.Equal(testSimpleCoin.PkScript(), testSimpleCoinTxPkScript0Bytes) {
|
||||
t.Error("Different value of coin pkScript than expected")
|
||||
}
|
||||
if testSimpleCoin.NumConfs() != 1 {
|
||||
t.Error("Differet value of num confs than expected")
|
||||
}
|
||||
if testSimpleCoin.ValueAge() != testSimpleCoinTxValueAge0 {
|
||||
t.Error("Different value of coin value * age than expected")
|
||||
}
|
||||
}
|
17
btcutil/coinset/cov_report.sh
Normal file
@ -0,0 +1,17 @@
#!/bin/sh
|
||||
|
||||
# This script uses gocov to generate a test coverage report.
|
||||
# The gocov tool may be obtained with the following command:
|
||||
# go get github.com/axw/gocov/gocov
|
||||
#
|
||||
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
|
||||
|
||||
# Check for gocov.
|
||||
type gocov >/dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
echo >&2 "This script requires the gocov tool."
|
||||
echo >&2 "You may obtain it with the following command:"
|
||||
echo >&2 "go get github.com/axw/gocov/gocov"
|
||||
exit 1
|
||||
fi
|
||||
gocov test | gocov report
|
31
btcutil/coinset/test_coverage.txt
Normal file
@ -0,0 +1,31 @@
|
||||
github.com/conformal/btcutil/coinset/coins.go MinPriorityCoinSelector.CoinSelect 100.00% (39/39)
|
||||
github.com/conformal/btcutil/coinset/coins.go NewMsgTxWithInputCoins 100.00% (6/6)
|
||||
github.com/conformal/btcutil/coinset/coins.go MinIndexCoinSelector.CoinSelect 100.00% (6/6)
|
||||
github.com/conformal/btcutil/coinset/coins.go CoinSet.removeElement 100.00% (5/5)
|
||||
github.com/conformal/btcutil/coinset/coins.go NewCoinSet 100.00% (4/4)
|
||||
github.com/conformal/btcutil/coinset/coins.go CoinSet.Coins 100.00% (4/4)
|
||||
github.com/conformal/btcutil/coinset/coins.go CoinSet.PopCoin 100.00% (4/4)
|
||||
github.com/conformal/btcutil/coinset/coins.go CoinSet.ShiftCoin 100.00% (4/4)
|
||||
github.com/conformal/btcutil/coinset/coins.go MinNumberCoinSelector.CoinSelect 100.00% (4/4)
|
||||
github.com/conformal/btcutil/coinset/coins.go MaxValueAgeCoinSelector.CoinSelect 100.00% (4/4)
|
||||
github.com/conformal/btcutil/coinset/coins.go CoinSet.PushCoin 100.00% (3/3)
|
||||
github.com/conformal/btcutil/coinset/coins.go CoinSet.TotalValueAge 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go SimpleCoin.NumConfs 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go SimpleCoin.ValueAge 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go CoinSet.TotalValue 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go byValueAge.Len 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go byValueAge.Swap 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go byValueAge.Less 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go byAmount.Len 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go byAmount.Swap 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go byAmount.Less 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go SimpleCoin.Hash 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go SimpleCoin.Index 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go SimpleCoin.txOut 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go SimpleCoin.Value 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go SimpleCoin.PkScript 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go CoinSet.Num 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset/coins.go satisfiesTargetValue 100.00% (1/1)
|
||||
github.com/conformal/btcutil/coinset ---------------------------------- 100.00% (100/100)
|
||||
|
16
btcutil/const.go
Normal file
@ -0,0 +1,16 @@
// Copyright (c) 2013-2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package btcutil
|
||||
|
||||
const (
|
||||
// SatoshiPerBitcent is the number of satoshi in one bitcoin cent.
|
||||
SatoshiPerBitcent = 1e6
|
||||
|
||||
// SatoshiPerBitcoin is the number of satoshi in one bitcoin (1 BTC).
|
||||
SatoshiPerBitcoin = 1e8
|
||||
|
||||
// MaxSatoshi is the maximum transaction amount allowed in satoshi.
|
||||
MaxSatoshi = 21e6 * SatoshiPerBitcoin
|
||||
)
|
17
btcutil/cov_report.sh
Normal file
@ -0,0 +1,17 @@
#!/bin/sh
|
||||
|
||||
# This script uses gocov to generate a test coverage report.
|
||||
# The gocov tool may be obtained with the following command:
|
||||
# go get github.com/axw/gocov/gocov
|
||||
#
|
||||
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
|
||||
|
||||
# Check for gocov.
|
||||
type gocov >/dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
echo >&2 "This script requires the gocov tool."
|
||||
echo >&2 "You may obtain it with the following command:"
|
||||
echo >&2 "go get github.com/axw/gocov/gocov"
|
||||
exit 1
|
||||
fi
|
||||
gocov test | gocov report
|
46
btcutil/doc.go
Normal file
@ -0,0 +1,46 @@
// Copyright (c) 2013-2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package btcutil provides bitcoin-specific convenience functions and types.
|
||||
|
||||
Block Overview
|
||||
|
||||
A Block defines a bitcoin block that provides easier and more efficient
|
||||
manipulation of raw wire protocol blocks. It also memoizes hashes for the
|
||||
block and its transactions on their first access so subsequent accesses don't
|
||||
have to repeat the relatively expensive hashing operations.
|
||||
|
||||
Tx Overview
|
||||
|
||||
A Tx defines a bitcoin transaction that provides more efficient manipulation of
|
||||
raw wire protocol transactions. It memoizes the hash for the transaction on its
|
||||
first access so subsequent accesses don't have to repeat the relatively
|
||||
expensive hashing operations.
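
For example, wrapping a previously deserialized transaction (a sketch; msgTx
is assumed to be an existing *wire.MsgTx):

	tx := btcutil.NewTx(msgTx)
	hash := tx.Hash() // hashed on first access
	_ = tx.Hash()     // later calls reuse the cached hash
	fmt.Println(hash)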
|
||||
|
||||
Address Overview
|
||||
|
||||
The Address interface provides an abstraction for a Bitcoin address. While the
|
||||
most common type is a pay-to-pubkey-hash, Bitcoin already supports others and
|
||||
may well support more in the future. This package currently provides
|
||||
implementations for the pay-to-pubkey, pay-to-pubkey-hash, and
|
||||
pay-to-script-hash address types.
|
||||
|
||||
To decode/encode an address:
|
||||
|
||||
// NOTE: The default network is only used for address types which do not
|
||||
// already contain that information. At this time, that is only
|
||||
// pay-to-pubkey addresses.
|
||||
addrString := "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962" +
|
||||
"e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d57" +
|
||||
"8a4c702b6bf11d5f"
|
||||
defaultNet := &chaincfg.MainNetParams
|
||||
addr, err := btcutil.DecodeAddress(addrString, defaultNet)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println(addr.EncodeAddress())
|
||||
*/
|
||||
package btcutil
|
76
btcutil/example_test.go
Normal file
@ -0,0 +1,76 @@
package btcutil_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
)
|
||||
|
||||
func ExampleAmount() {
|
||||
|
||||
a := btcutil.Amount(0)
|
||||
fmt.Println("Zero Satoshi:", a)
|
||||
|
||||
a = btcutil.Amount(1e8)
|
||||
fmt.Println("100,000,000 Satoshis:", a)
|
||||
|
||||
a = btcutil.Amount(1e5)
|
||||
fmt.Println("100,000 Satoshis:", a)
|
||||
// Output:
|
||||
// Zero Satoshi: 0 BTC
|
||||
// 100,000,000 Satoshis: 1 BTC
|
||||
// 100,000 Satoshis: 0.001 BTC
|
||||
}
|
||||
|
||||
func ExampleNewAmount() {
|
||||
amountOne, err := btcutil.NewAmount(1)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println(amountOne) //Output 1
|
||||
|
||||
amountFraction, err := btcutil.NewAmount(0.01234567)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println(amountFraction) //Output 2
|
||||
|
||||
amountZero, err := btcutil.NewAmount(0)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println(amountZero) //Output 3
|
||||
|
||||
amountNaN, err := btcutil.NewAmount(math.NaN())
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println(amountNaN) //Output 4
|
||||
|
||||
// Output: 1 BTC
|
||||
// 0.01234567 BTC
|
||||
// 0 BTC
|
||||
// invalid bitcoin amount
|
||||
}
|
||||
|
||||
func ExampleAmount_unitConversions() {
|
||||
amount := btcutil.Amount(44433322211100)
|
||||
|
||||
fmt.Println("Satoshi to kBTC:", amount.Format(btcutil.AmountKiloBTC))
|
||||
fmt.Println("Satoshi to BTC:", amount)
|
||||
fmt.Println("Satoshi to MilliBTC:", amount.Format(btcutil.AmountMilliBTC))
|
||||
fmt.Println("Satoshi to MicroBTC:", amount.Format(btcutil.AmountMicroBTC))
|
||||
fmt.Println("Satoshi to Satoshi:", amount.Format(btcutil.AmountSatoshi))
|
||||
|
||||
// Output:
|
||||
// Satoshi to kBTC: 444.333222111 kBTC
|
||||
// Satoshi to BTC: 444333.222111 BTC
|
||||
// Satoshi to MilliBTC: 444333222.111 mBTC
|
||||
// Satoshi to MicroBTC: 444333222111 μBTC
|
||||
// Satoshi to Satoshi: 44433322211100 Satoshi
|
||||
}
|
24
btcutil/gcs/README.md
Normal file
@ -0,0 +1,24 @@
gcs
|
||||
==========
|
||||
|
||||
[![Build Status](http://img.shields.io/travis/btcsuite/btcutil.svg)](https://travis-ci.org/btcsuite/btcutil)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/btcsuite/btcd/btcutil/gcs)
|
||||
|
||||
Package gcs provides an API for building and using a Golomb-coded set filter
|
||||
similar to that described [here](http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters).
|
||||
|
||||
A comprehensive suite of tests is provided to ensure proper functionality.
|
||||
|
||||
## Installation and Updating
|
||||
|
||||
```bash
|
||||
$ go get -u github.com/btcsuite/btcd/btcutil/gcs
|
||||
```
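
## Usage

A minimal sketch of building a filter and testing membership via the builder
sub-package (the Filter.Match signature here is assumed from the gcs API):

```Go
b := builder.WithRandomKey().
	AddEntry([]byte("alpha")).
	AddEntry([]byte("beta"))

filter, err := b.Build()
if err != nil {
	// handle error
}

key, _ := b.Key() // cannot fail once Build has succeeded

// Query for membership; false positives occur with probability
// roughly 2^-P (P defaults to 19).
match, err := filter.Match(key, []byte("alpha"))
if err != nil {
	// handle error
}
fmt.Println("matched:", match)
```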
|
||||
|
||||
## License
|
||||
|
||||
Package gcs is licensed under the [copyfree](http://copyfree.org) ISC
|
||||
License.
|
371
btcutil/gcs/builder/builder.go
Normal file
@ -0,0 +1,371 @@
// Copyright (c) 2017 The btcsuite developers
|
||||
// Copyright (c) 2017 The Lightning Network Developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package builder
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcd/btcutil/gcs"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultP is the default collision probability (2^-19)
|
||||
DefaultP = 19
|
||||
|
||||
// DefaultM is the default value used for the hash range.
|
||||
DefaultM uint64 = 784931
|
||||
)
|
||||
|
||||
// GCSBuilder is a utility class that makes building GCS filters convenient.
|
||||
type GCSBuilder struct {
|
||||
p uint8
|
||||
|
||||
m uint64
|
||||
|
||||
key [gcs.KeySize]byte
|
||||
|
||||
// data is a set of entries represented as strings. This is done to
|
||||
// deduplicate items as they are added.
|
||||
data map[string]struct{}
|
||||
err error
|
||||
}
|
||||
|
||||
// RandomKey is a utility function that returns a cryptographically random
|
||||
// [gcs.KeySize]byte usable as a key for a GCS filter.
|
||||
func RandomKey() ([gcs.KeySize]byte, error) {
|
||||
var key [gcs.KeySize]byte
|
||||
|
||||
// Read a byte slice from rand.Reader.
|
||||
randKey := make([]byte, gcs.KeySize)
|
||||
_, err := rand.Read(randKey)
|
||||
|
||||
// This shouldn't happen unless the user is on a system that doesn't
|
||||
// have a system CSPRNG. OK to panic in this case.
|
||||
if err != nil {
|
||||
return key, err
|
||||
}
|
||||
|
||||
// Copy the byte slice to a [gcs.KeySize]byte array and return it.
|
||||
copy(key[:], randKey)
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// DeriveKey is a utility function that derives a key from a chainhash.Hash by
|
||||
// truncating the bytes of the hash to the appropriate key size.
|
||||
func DeriveKey(keyHash *chainhash.Hash) [gcs.KeySize]byte {
|
||||
var key [gcs.KeySize]byte
|
||||
copy(key[:], keyHash.CloneBytes())
|
||||
return key
|
||||
}
|
||||
|
||||
// Key retrieves the key with which the builder will build a filter. This is
|
||||
// useful if the builder is created with a random initial key.
|
||||
func (b *GCSBuilder) Key() ([gcs.KeySize]byte, error) {
|
||||
// Do nothing if the builder's errored out.
|
||||
if b.err != nil {
|
||||
return [gcs.KeySize]byte{}, b.err
|
||||
}
|
||||
|
||||
return b.key, nil
|
||||
}
|
||||
|
||||
// SetKey sets the key with which the builder will build a filter to the passed
|
||||
// [gcs.KeySize]byte.
|
||||
func (b *GCSBuilder) SetKey(key [gcs.KeySize]byte) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
copy(b.key[:], key[:])
|
||||
return b
|
||||
}
|
||||
|
||||
// SetKeyFromHash sets the key with which the builder will build a filter to a
|
||||
// key derived from the passed chainhash.Hash using DeriveKey().
|
||||
func (b *GCSBuilder) SetKeyFromHash(keyHash *chainhash.Hash) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
return b.SetKey(DeriveKey(keyHash))
|
||||
}
|
||||
|
||||
// SetP sets the filter's probability after calling Builder().
|
||||
func (b *GCSBuilder) SetP(p uint8) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
// Basic sanity check.
|
||||
if p > 32 {
|
||||
b.err = gcs.ErrPTooBig
|
||||
return b
|
||||
}
|
||||
|
||||
b.p = p
|
||||
return b
|
||||
}
|
||||
|
||||
// SetM sets the filter's modulus value after calling Builder().
|
||||
func (b *GCSBuilder) SetM(m uint64) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
// Basic sanity check.
|
||||
if m > uint64(math.MaxUint32) {
|
||||
b.err = gcs.ErrPTooBig
|
||||
return b
|
||||
}
|
||||
|
||||
b.m = m
|
||||
return b
|
||||
}
|
||||
|
||||
// Preallocate sets the estimated filter size after calling Builder() to reduce
|
||||
// the probability of memory reallocations. If the builder has already had data
|
||||
// added to it, Preallocate has no effect.
|
||||
func (b *GCSBuilder) Preallocate(n uint32) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
if b.data == nil {
|
||||
b.data = make(map[string]struct{}, n)
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// AddEntry adds a []byte to the list of entries to be included in the GCS
|
||||
// filter when it's built.
|
||||
func (b *GCSBuilder) AddEntry(data []byte) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
b.data[string(data)] = struct{}{}
|
||||
return b
|
||||
}
|
||||
|
||||
// AddEntries adds all the []byte entries in a [][]byte to the list of entries
|
||||
// to be included in the GCS filter when it's built.
|
||||
func (b *GCSBuilder) AddEntries(data [][]byte) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
for _, entry := range data {
|
||||
b.AddEntry(entry)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// AddHash adds a chainhash.Hash to the list of entries to be included in the
|
||||
// GCS filter when it's built.
|
||||
func (b *GCSBuilder) AddHash(hash *chainhash.Hash) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
return b.AddEntry(hash.CloneBytes())
|
||||
}
|
||||
|
||||
// AddWitness adds each item of the passed witness stack to the filter.
|
||||
func (b *GCSBuilder) AddWitness(witness wire.TxWitness) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
return b.AddEntries(witness)
|
||||
}
|
||||
|
||||
// Build builds and returns a GCS filter with the given parameters and data.
|
||||
func (b *GCSBuilder) Build() (*gcs.Filter, error) {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return nil, b.err
|
||||
}
|
||||
|
||||
// We'll ensure that all the parameters we need to actually build the
|
||||
// filter properly are set.
|
||||
if b.p == 0 {
|
||||
return nil, fmt.Errorf("p value is not set, cannot build")
|
||||
}
|
||||
if b.m == 0 {
|
||||
return nil, fmt.Errorf("m value is not set, cannot build")
|
||||
}
|
||||
|
||||
dataSlice := make([][]byte, 0, len(b.data))
|
||||
for item := range b.data {
|
||||
dataSlice = append(dataSlice, []byte(item))
|
||||
}
|
||||
|
||||
return gcs.BuildGCSFilter(b.p, b.m, b.key, dataSlice)
|
||||
}
|
||||
|
||||
// WithKeyPNM creates a GCSBuilder with specified key and the passed
|
||||
// probability, modulus and estimated filter size.
|
||||
func WithKeyPNM(key [gcs.KeySize]byte, p uint8, n uint32, m uint64) *GCSBuilder {
|
||||
b := GCSBuilder{}
|
||||
return b.SetKey(key).SetP(p).SetM(m).Preallocate(n)
|
||||
}
|
||||
|
||||
// WithKeyPM creates a GCSBuilder with specified key and the passed
|
||||
// probability. Estimated filter size is set to zero, which means more
|
||||
// reallocations are done when building the filter.
|
||||
func WithKeyPM(key [gcs.KeySize]byte, p uint8, m uint64) *GCSBuilder {
|
||||
return WithKeyPNM(key, p, 0, m)
|
||||
}
|
||||
|
||||
// WithKey creates a GCSBuilder with specified key. Probability is set to 19
|
||||
// (2^-19 collision probability). Estimated filter size is set to zero, which
|
||||
// means more reallocations are done when building the filter.
|
||||
func WithKey(key [gcs.KeySize]byte) *GCSBuilder {
|
||||
return WithKeyPNM(key, DefaultP, 0, DefaultM)
|
||||
}
|
||||
|
||||
// WithKeyHashPNM creates a GCSBuilder with key derived from the specified
|
||||
// chainhash.Hash and the passed probability and estimated filter size.
|
||||
func WithKeyHashPNM(keyHash *chainhash.Hash, p uint8, n uint32,
|
||||
m uint64) *GCSBuilder {
|
||||
|
||||
return WithKeyPNM(DeriveKey(keyHash), p, n, m)
|
||||
}
|
||||
|
||||
// WithKeyHashPM creates a GCSBuilder with key derived from the specified
|
||||
// chainhash.Hash and the passed probability. Estimated filter size is set to
|
||||
// zero, which means more reallocations are done when building the filter.
|
||||
func WithKeyHashPM(keyHash *chainhash.Hash, p uint8, m uint64) *GCSBuilder {
|
||||
return WithKeyHashPNM(keyHash, p, 0, m)
|
||||
}
|
||||
|
||||
// WithKeyHash creates a GCSBuilder with key derived from the specified
|
||||
// chainhash.Hash. Probability is set to 19 (2^-19 collision probability).
|
||||
// Estimated filter size is set to zero, which means more reallocations are
|
||||
// done when building the filter.
|
||||
func WithKeyHash(keyHash *chainhash.Hash) *GCSBuilder {
|
||||
return WithKeyHashPNM(keyHash, DefaultP, 0, DefaultM)
|
||||
}
|
||||
|
||||
// WithRandomKeyPNM creates a GCSBuilder with a cryptographically random key and
|
||||
// the passed probability and estimated filter size.
|
||||
func WithRandomKeyPNM(p uint8, n uint32, m uint64) *GCSBuilder {
|
||||
key, err := RandomKey()
|
||||
if err != nil {
|
||||
b := GCSBuilder{err: err}
|
||||
return &b
|
||||
}
|
||||
return WithKeyPNM(key, p, n, m)
|
||||
}
|
||||
|
||||
// WithRandomKeyPM creates a GCSBuilder with a cryptographically random key and
|
||||
// the passed probability. Estimated filter size is set to zero, which means
|
||||
// more reallocations are done when building the filter.
|
||||
func WithRandomKeyPM(p uint8, m uint64) *GCSBuilder {
|
||||
return WithRandomKeyPNM(p, 0, m)
|
||||
}
|
||||
|
||||
// WithRandomKey creates a GCSBuilder with a cryptographically random key.
|
||||
// Probability is set to 19 (2^-19 collision probability). Estimated filter
|
||||
// size is set to zero, which means more reallocations are done when
|
||||
// building the filter.
|
||||
func WithRandomKey() *GCSBuilder {
|
||||
return WithRandomKeyPNM(DefaultP, 0, DefaultM)
|
||||
}
|
||||
|
||||
// BuildBasicFilter builds a basic GCS filter from a block. A basic GCS filter
|
||||
// will contain all the previous output scripts spent by inputs within a block,
// as well as the output scripts created within the block.
|
||||
func BuildBasicFilter(block *wire.MsgBlock, prevOutScripts [][]byte) (*gcs.Filter, error) {
|
||||
blockHash := block.BlockHash()
|
||||
b := WithKeyHash(&blockHash)
|
||||
|
||||
// If the filter had an issue with the specified key, then we force it
|
||||
// to bubble up here by calling the Key() function.
|
||||
_, err := b.Key()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// In order to build a basic filter, we'll range over the entire block,
|
||||
// adding each whole script itself.
|
||||
for _, tx := range block.Transactions {
|
||||
// For each output in a transaction, we'll add the whole
// output script to the filter.
|
||||
for _, txOut := range tx.TxOut {
|
||||
if len(txOut.PkScript) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// In order to allow the filters to later be committed
|
||||
// to within an OP_RETURN output, we ignore all
|
||||
// OP_RETURNs to avoid a circular dependency.
|
||||
if txOut.PkScript[0] == txscript.OP_RETURN {
|
||||
continue
|
||||
}
|
||||
|
||||
b.AddEntry(txOut.PkScript)
|
||||
}
|
||||
}
|
||||
|
||||
// In the second pass, we'll also add all the prevOutScripts
|
||||
// individually as elements.
|
||||
for _, prevScript := range prevOutScripts {
|
||||
if len(prevScript) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
b.AddEntry(prevScript)
|
||||
}
|
||||
|
||||
return b.Build()
|
||||
}
|
||||
|
||||
// GetFilterHash returns the double-SHA256 of the filter.
|
||||
func GetFilterHash(filter *gcs.Filter) (chainhash.Hash, error) {
|
||||
filterData, err := filter.NBytes()
|
||||
if err != nil {
|
||||
return chainhash.Hash{}, err
|
||||
}
|
||||
|
||||
return chainhash.DoubleHashH(filterData), nil
|
||||
}
|
||||
|
||||
// MakeHeaderForFilter makes a filter chain header for a filter, given the
|
||||
// filter and the previous filter chain header.
|
||||
func MakeHeaderForFilter(filter *gcs.Filter, prevHeader chainhash.Hash) (chainhash.Hash, error) {
|
||||
filterTip := make([]byte, 2*chainhash.HashSize)
|
||||
filterHash, err := GetFilterHash(filter)
|
||||
if err != nil {
|
||||
return chainhash.Hash{}, err
|
||||
}
|
||||
|
||||
// In the buffer we created above we'll compute hash || prevHash as an
|
||||
// intermediate value.
|
||||
copy(filterTip, filterHash[:])
|
||||
copy(filterTip[chainhash.HashSize:], prevHeader[:])
|
||||
|
||||
// The final filter hash is the double-sha256 of the hash computed
|
||||
// above.
|
||||
return chainhash.DoubleHashH(filterTip), nil
|
||||
}
|
281
btcutil/gcs/builder/builder_test.go
Normal file
281
btcutil/gcs/builder/builder_test.go
Normal file
|
@ -0,0 +1,281 @@
|
|||
// Copyright (c) 2017 The btcsuite developers
|
||||
// Copyright (c) 2017 The Lightning Network Developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package builder_test
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/btcutil/gcs"
|
||||
"github.com/btcsuite/btcd/btcutil/gcs/builder"
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
)
|
||||
|
||||
var (
|
||||
// List of values for building a filter
|
||||
contents = [][]byte{
|
||||
[]byte("Alex"),
|
||||
[]byte("Bob"),
|
||||
[]byte("Charlie"),
|
||||
[]byte("Dick"),
|
||||
[]byte("Ed"),
|
||||
[]byte("Frank"),
|
||||
[]byte("George"),
|
||||
[]byte("Harry"),
|
||||
[]byte("Ilya"),
|
||||
[]byte("John"),
|
||||
[]byte("Kevin"),
|
||||
[]byte("Larry"),
|
||||
[]byte("Michael"),
|
||||
[]byte("Nate"),
|
||||
[]byte("Owen"),
|
||||
[]byte("Paul"),
|
||||
[]byte("Quentin"),
|
||||
}
|
||||
|
||||
testKey = [16]byte{0x4c, 0xb1, 0xab, 0x12, 0x57, 0x62, 0x1e, 0x41,
|
||||
0x3b, 0x8b, 0x0e, 0x26, 0x64, 0x8d, 0x4a, 0x15}
|
||||
|
||||
testHash = "000000000000000000496d7ff9bd2c96154a8d64260e8b3b411e625712abb14c"
|
||||
|
||||
testAddr = "3Nxwenay9Z8Lc9JBiywExpnEFiLp6Afp8v"
|
||||
|
||||
witness = [][]byte{
|
||||
{0x4c, 0xb1, 0xab, 0x12, 0x57, 0x62, 0x1e, 0x41,
|
||||
0x3b, 0x8b, 0x0e, 0x26, 0x64, 0x8d, 0x4a, 0x15,
|
||||
0x3b, 0x8b, 0x0e, 0x26, 0x64, 0x8d, 0x4a, 0x15,
|
||||
0x3b, 0x8b, 0x0e, 0x26, 0x64, 0x8d, 0x4a, 0x15},
|
||||
|
||||
{0xdd, 0xa3, 0x5a, 0x14, 0x88, 0xfb, 0x97, 0xb6,
|
||||
0xeb, 0x3f, 0xe6, 0xe9, 0xef, 0x2a, 0x25, 0x81,
|
||||
0x4e, 0x39, 0x6f, 0xb5, 0xdc, 0x29, 0x5f, 0xe9,
|
||||
0x94, 0xb9, 0x67, 0x89, 0xb2, 0x1a, 0x03, 0x98,
|
||||
0x94, 0xb9, 0x67, 0x89, 0xb2, 0x1a, 0x03, 0x98,
|
||||
0x94, 0xb9, 0x67, 0x89, 0xb2, 0x1a, 0x03, 0x98},
|
||||
}
|
||||
)
|
||||
|
||||
// TestUseBlockHash tests using a block hash as a filter key.
|
||||
func TestUseBlockHash(t *testing.T) {
|
||||
// Block hash #448710, pretty high difficulty.
|
||||
hash, err := chainhash.NewHashFromStr(testHash)
|
||||
if err != nil {
|
||||
t.Fatalf("Hash from string failed: %s", err.Error())
|
||||
}
|
||||
|
||||
// wire.OutPoint
|
||||
outPoint := wire.OutPoint{
|
||||
Hash: *hash,
|
||||
Index: 4321,
|
||||
}
|
||||
|
||||
// btcutil.Address
|
||||
addr, err := btcutil.DecodeAddress(testAddr, &chaincfg.MainNetParams)
|
||||
if err != nil {
|
||||
t.Fatalf("Address decode failed: %s", err.Error())
|
||||
}
|
||||
addrBytes, err := txscript.NewScriptBuilder().
|
||||
AddOp(txscript.OP_HASH160).AddData(addr.ScriptAddress()).
|
||||
AddOp(txscript.OP_EQUAL).Script()
|
||||
if err != nil {
|
||||
t.Fatalf("Address script build failed: %s", err.Error())
|
||||
}
|
||||
|
||||
// Create a GCSBuilder with a key hash and check that the key is derived
|
||||
// correctly, then test it.
|
||||
b := builder.WithKeyHash(hash)
|
||||
key, err := b.Key()
|
||||
if err != nil {
|
||||
t.Fatalf("Builder instantiation with key hash failed: %s",
|
||||
err.Error())
|
||||
}
|
||||
if key != testKey {
|
||||
t.Fatalf("Key not derived correctly from key hash:\n%s\n%s",
|
||||
hex.EncodeToString(key[:]),
|
||||
hex.EncodeToString(testKey[:]))
|
||||
}
|
||||
BuilderTest(b, hash, builder.DefaultP, outPoint, addrBytes, witness, t)
|
||||
|
||||
// Create a GCSBuilder with a key hash and non-default P and test it.
|
||||
b = builder.WithKeyHashPM(hash, 30, 90)
|
||||
BuilderTest(b, hash, 30, outPoint, addrBytes, witness, t)
|
||||
|
||||
// Create a GCSBuilder with a random key, set the key from a hash
|
||||
// manually, check that the key is correct, and test it.
|
||||
b = builder.WithRandomKey()
|
||||
b.SetKeyFromHash(hash)
|
||||
key, err = b.Key()
|
||||
if err != nil {
|
||||
t.Fatalf("Builder instantiation with known key failed: %s",
|
||||
err.Error())
|
||||
}
|
||||
if key != testKey {
|
||||
t.Fatalf("Key not copied correctly from known key:\n%s\n%s",
|
||||
hex.EncodeToString(key[:]),
|
||||
hex.EncodeToString(testKey[:]))
|
||||
}
|
||||
BuilderTest(b, hash, builder.DefaultP, outPoint, addrBytes, witness, t)
|
||||
|
||||
// Create a GCSBuilder with a random key and test it.
|
||||
b = builder.WithRandomKey()
|
||||
key1, err := b.Key()
|
||||
if err != nil {
|
||||
t.Fatalf("Builder instantiation with random key failed: %s",
|
||||
err.Error())
|
||||
}
|
||||
t.Logf("Random Key 1: %s", hex.EncodeToString(key1[:]))
|
||||
BuilderTest(b, hash, builder.DefaultP, outPoint, addrBytes, witness, t)
|
||||
|
||||
// Create a GCSBuilder with a random key and non-default P and test it.
|
||||
b = builder.WithRandomKeyPM(30, 90)
|
||||
key2, err := b.Key()
|
||||
if err != nil {
|
||||
t.Fatalf("Builder instantiation with random key failed: %s",
|
||||
err.Error())
|
||||
}
|
||||
t.Logf("Random Key 2: %s", hex.EncodeToString(key2[:]))
|
||||
if key2 == key1 {
|
||||
t.Fatalf("Random keys are the same!")
|
||||
}
|
||||
BuilderTest(b, hash, 30, outPoint, addrBytes, witness, t)
|
||||
|
||||
// Create a GCSBuilder with a known key and test it.
|
||||
b = builder.WithKey(testKey)
|
||||
key, err = b.Key()
|
||||
if err != nil {
|
||||
t.Fatalf("Builder instantiation with known key failed: %s",
|
||||
err.Error())
|
||||
}
|
||||
if key != testKey {
|
||||
t.Fatalf("Key not copied correctly from known key:\n%s\n%s",
|
||||
hex.EncodeToString(key[:]),
|
||||
hex.EncodeToString(testKey[:]))
|
||||
}
|
||||
BuilderTest(b, hash, builder.DefaultP, outPoint, addrBytes, witness, t)
|
||||
|
||||
// Create a GCSBuilder with a known key and non-default P and test it.
|
||||
b = builder.WithKeyPM(testKey, 30, 90)
|
||||
key, err = b.Key()
|
||||
if err != nil {
|
||||
t.Fatalf("Builder instantiation with known key failed: %s",
|
||||
err.Error())
|
||||
}
|
||||
if key != testKey {
|
||||
t.Fatalf("Key not copied correctly from known key:\n%s\n%s",
|
||||
hex.EncodeToString(key[:]),
|
||||
hex.EncodeToString(testKey[:]))
|
||||
}
|
||||
BuilderTest(b, hash, 30, outPoint, addrBytes, witness, t)
|
||||
|
||||
// Create a GCSBuilder with a known key and too-high P and ensure error
|
||||
// works throughout all functions that use it.
|
||||
b = builder.WithRandomKeyPM(33, 99).SetKeyFromHash(hash).SetKey(testKey)
|
||||
b.SetP(30).AddEntry(hash.CloneBytes()).AddEntries(contents).
|
||||
AddHash(hash).AddEntry(addrBytes)
|
||||
_, err = b.Key()
|
||||
if err != gcs.ErrPTooBig {
|
||||
t.Fatalf("No error on P too big!")
|
||||
}
|
||||
_, err = b.Build()
|
||||
if err != gcs.ErrPTooBig {
|
||||
t.Fatalf("No error on P too big!")
|
||||
}
|
||||
}
|
||||
|
||||
func BuilderTest(b *builder.GCSBuilder, hash *chainhash.Hash, p uint8,
|
||||
outPoint wire.OutPoint, addrBytes []byte, witness wire.TxWitness,
|
||||
t *testing.T) {
|
||||
|
||||
key, err := b.Key()
|
||||
if err != nil {
|
||||
t.Fatalf("Builder instantiation with key hash failed: %s",
|
||||
err.Error())
|
||||
}
|
||||
|
||||
// Build a filter and test matches.
|
||||
b.AddEntries(contents)
|
||||
f, err := b.Build()
|
||||
if err != nil {
|
||||
t.Fatalf("Filter build failed: %s", err.Error())
|
||||
}
|
||||
if f.P() != p {
|
||||
t.Fatalf("Filter built with wrong probability")
|
||||
}
|
||||
match, err := f.Match(key, []byte("Nate"))
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match failed: %s", err)
|
||||
}
|
||||
if !match {
|
||||
t.Fatal("Filter didn't match when it should have!")
|
||||
}
|
||||
match, err = f.Match(key, []byte("weks"))
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match failed: %s", err)
|
||||
}
|
||||
if match {
|
||||
t.Logf("False positive match, should be 1 in 2**%d!",
|
||||
p)
|
||||
}
|
||||
|
||||
// Add a hash, build a filter, and test matches
|
||||
b.AddHash(hash)
|
||||
f, err = b.Build()
|
||||
if err != nil {
|
||||
t.Fatalf("Filter build failed: %s", err.Error())
|
||||
}
|
||||
match, err = f.Match(key, hash.CloneBytes())
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match failed: %s", err)
|
||||
}
|
||||
if !match {
|
||||
t.Fatal("Filter didn't match when it should have!")
|
||||
}
|
||||
|
||||
// Add a script, build a filter, and test matches
|
||||
b.AddEntry(addrBytes)
|
||||
f, err = b.Build()
|
||||
if err != nil {
|
||||
t.Fatalf("Filter build failed: %s", err.Error())
|
||||
}
|
||||
match, err = f.MatchAny(key, [][]byte{addrBytes})
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match any failed: %s", err)
|
||||
}
|
||||
if !match {
|
||||
t.Fatal("Filter didn't match when it should have!")
|
||||
}
|
||||
|
||||
// Add a routine witness stack, build a filter, and test that it
|
||||
// matches.
|
||||
b.AddWitness(witness)
|
||||
f, err = b.Build()
|
||||
if err != nil {
|
||||
t.Fatalf("Filter build failed: %s", err.Error())
|
||||
}
|
||||
match, err = f.MatchAny(key, witness)
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match any failed: %s", err)
|
||||
}
|
||||
if !match {
|
||||
t.Fatal("Filter didn't match when it should have!")
|
||||
}
|
||||
|
||||
// Check that adding duplicate items does not increase filter size.
|
||||
originalSize := f.N()
|
||||
b.AddEntry(addrBytes)
|
||||
b.AddWitness(witness)
|
||||
f, err = b.Build()
|
||||
if err != nil {
|
||||
t.Fatalf("Filter build failed: %s", err.Error())
|
||||
}
|
||||
if f.N() != originalSize {
|
||||
t.Fatal("Filter size increased with duplicate items")
|
||||
}
|
||||
}
|
24
btcutil/gcs/doc.go
Normal file
|
@@ -0,0 +1,24 @@
|
|||
// Copyright (c) 2016-2017 The btcsuite developers
|
||||
// Copyright (c) 2016-2017 The Lightning Network Developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package gcs provides an API for building and using a Golomb-coded set filter.
|
||||
|
||||
Golomb-Coded Set
|
||||
|
||||
A Golomb-coded set is a probabilistic data structure used similarly to a Bloom
|
||||
filter. A filter uses constant-size overhead plus on average n+2 bits per
|
||||
item added to the filter, where 2^-n is the desired false positive (collision)
|
||||
probability.
|
||||
|
||||
GCS use in Bitcoin
|
||||
|
||||
GCS filters are a proposed mechanism for storing and transmitting per-block
|
||||
filters in Bitcoin. The usage is intended to be the inverse of Bloom filters:
|
||||
a full node would send an SPV node the GCS filter for a block, which the SPV
|
||||
node would check against its list of relevant items. The suggested collision
|
||||
probability for Bitcoin use is 2^-20.
|
||||
*/
|
||||
package gcs
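A minimal usage sketch of the package (not part of this commit; the function name `buildAndQuery` is hypothetical), using the same P=19 and M=784931 parameters as the tests below:

```go
package example

import (
	"crypto/rand"
	"log"

	"github.com/btcsuite/btcd/btcutil/gcs"
)

// buildAndQuery builds a tiny filter and queries it with the key that was
// used to construct it.
func buildAndQuery() {
	// BIP 158-style parameters: a false-positive rate of roughly 1 in 2^19.
	const P = 19
	const M = 784931

	// A random SipHash key; real block filters derive the key from the
	// block hash instead.
	var key [gcs.KeySize]byte
	if _, err := rand.Read(key[:]); err != nil {
		log.Fatal(err)
	}

	filter, err := gcs.BuildGCSFilter(P, M, key, [][]byte{
		[]byte("Alex"), []byte("Bob"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Querying requires the same key used to build the filter.
	match, err := filter.Match(key, []byte("Alex"))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("match=%v N=%d P=%d", match, filter.N(), filter.P())
}
```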
|
541
btcutil/gcs/gcs.go
Normal file
|
@@ -0,0 +1,541 @@
|
|||
// Copyright (c) 2016-2017 The btcsuite developers
|
||||
// Copyright (c) 2016-2017 The Lightning Network Developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gcs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"github.com/aead/siphash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/kkdai/bstream"
|
||||
)
|
||||
|
||||
// Inspired by https://github.com/rasky/gcs
|
||||
|
||||
var (
|
||||
// ErrNTooBig signifies that the filter can't handle N items.
|
||||
ErrNTooBig = fmt.Errorf("N is too big to fit in uint32")
|
||||
|
||||
// ErrPTooBig signifies that the filter can't handle `1/2**P`
|
||||
// collision probability.
|
||||
ErrPTooBig = fmt.Errorf("P is too big to fit in uint32")
|
||||
)
|
||||
|
||||
const (
|
||||
// KeySize is the size of the byte array required for key material for
|
||||
// the SipHash keyed hash function.
|
||||
KeySize = 16
|
||||
|
||||
// varIntProtoVer is the protocol version to use for serializing N as a
|
||||
// VarInt.
|
||||
varIntProtoVer uint32 = 0
|
||||
)
|
||||
|
||||
// fastReduction calculates a mapping that's more or less equivalent to: x mod
|
||||
// N. However, instead of using a mod operation, which with a non-power-of-two modulus
|
||||
// will lead to slowness on many processors due to unnecessary division, we
|
||||
// instead use a "multiply-and-shift" trick which eliminates all divisions,
|
||||
// described in:
|
||||
// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
|
||||
//
|
||||
// * v * N >> log_2(N)
|
||||
//
|
||||
// In our case, using 64-bit integers, log_2 is 64. As most processors don't
|
||||
// support 128-bit arithmetic natively, we'll be super portable and unfold the
|
||||
// operation into several operations with 64-bit arithmetic. As inputs, we take the
|
||||
// number to reduce, and our modulus N divided into its high 32-bits and lower
|
||||
// 32-bits.
|
||||
func fastReduction(v, nHi, nLo uint64) uint64 {
|
||||
// First, we'll split the item we need to reduce into its higher and
|
||||
// lower bits.
|
||||
vhi := v >> 32
|
||||
vlo := uint64(uint32(v))
|
||||
|
||||
// Then, we distribute multiplication over each part.
|
||||
vnphi := vhi * nHi
|
||||
vnpmid := vhi * nLo
|
||||
npvmid := nHi * vlo
|
||||
vnplo := vlo * nLo
|
||||
|
||||
// We calculate the carry bit.
|
||||
carry := (uint64(uint32(vnpmid)) + uint64(uint32(npvmid)) +
|
||||
(vnplo >> 32)) >> 32
|
||||
|
||||
// Last, we add the high bits, the middle bits, and the carry.
|
||||
v = vnphi + (vnpmid >> 32) + (npvmid >> 32) + carry
|
||||
|
||||
return v
|
||||
}
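As a sanity check, the multiply-and-shift above is exactly the high 64 bits of the 128-bit product v*N, so it can be cross-checked against `math/bits.Mul64`. A hypothetical in-package test (not part of this commit) sketching that equivalence:

```go
package gcs

import (
	"math/bits"
	"testing"
)

// TestFastReductionMatchesMul64 cross-checks the portable multiply-and-shift
// above against Go's native 64x64 -> 128 bit multiply: both yield the high
// 64 bits of v*N, i.e. v mapped uniformly into [0, N).
func TestFastReductionMatchesMul64(t *testing.T) {
	v := uint64(0xdeadbeefcafebabe)
	n := uint64(784931) * 100000 // arbitrary modulus with both words set

	hi, _ := bits.Mul64(v, n)
	got := fastReduction(v, n>>32, uint64(uint32(n)))
	if got != hi {
		t.Fatalf("fastReduction mismatch: got %d, want %d", got, hi)
	}
}
```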
|
||||
|
||||
// Filter describes an immutable filter that can be built from a set of data
|
||||
// elements, serialized, deserialized, and queried in a thread-safe manner. The
|
||||
// serialized form is compressed as a Golomb Coded Set (GCS), but does not
|
||||
// include N or P to allow the user to encode the metadata separately if
|
||||
// necessary. The hash function used is SipHash, a keyed function; the key used
|
||||
// in building the filter is required in order to match filter values and is
|
||||
// not included in the serialized form.
|
||||
type Filter struct {
|
||||
n uint32
|
||||
p uint8
|
||||
modulusNP uint64
|
||||
|
||||
filterData []byte
|
||||
}
|
||||
|
||||
// BuildGCSFilter builds a new GCS filter with the collision probability of
|
||||
// `1/(2**P)`, key `key`, and including every `[]byte` in `data` as a member of
|
||||
// the set.
|
||||
func BuildGCSFilter(P uint8, M uint64, key [KeySize]byte, data [][]byte) (*Filter, error) { // nolint:gocritic
|
||||
// Some initial parameter checks: make sure we have data from which to
|
||||
// build the filter, and make sure our parameters will fit the hash
|
||||
// function we're using.
|
||||
if uint64(len(data)) >= (1 << 32) {
|
||||
return nil, ErrNTooBig
|
||||
}
|
||||
if P > 32 {
|
||||
return nil, ErrPTooBig
|
||||
}
|
||||
|
||||
// Create the filter object and insert metadata.
|
||||
f := Filter{
|
||||
n: uint32(len(data)),
|
||||
p: P,
|
||||
}
|
||||
|
||||
// First we'll compute the value of m, which is the modulus we use
|
||||
// within our finite field: the number of filter elements N multiplied by the
|
||||
// false-positive modulus M.
|
||||
f.modulusNP = uint64(f.n) * M
|
||||
|
||||
// Shortcut if the filter is empty.
|
||||
if f.n == 0 {
|
||||
return &f, nil
|
||||
}
|
||||
|
||||
// Build the filter.
|
||||
values := make([]uint64, 0, len(data))
|
||||
b := bstream.NewBStreamWriter(0)
|
||||
|
||||
// Insert the hash (fast-ranged over a space of N*P) of each data
|
||||
// element into a slice and sort the slice. This can be greatly
|
||||
// optimized with native 128-bit multiplication, but we're going to be
|
||||
// fully portable for now.
|
||||
//
|
||||
// First, we cache the high and low bits of modulusNP for the
|
||||
// multiplication of 2 64-bit integers into a 128-bit integer.
|
||||
nphi := f.modulusNP >> 32
|
||||
nplo := uint64(uint32(f.modulusNP))
|
||||
for _, d := range data {
|
||||
// For each datum, we assign the initial hash to a uint64.
|
||||
v := siphash.Sum64(d, &key)
|
||||
|
||||
v = fastReduction(v, nphi, nplo)
|
||||
values = append(values, v)
|
||||
}
|
||||
sort.Slice(values, func(i, j int) bool { return values[i] < values[j] })
|
||||
|
||||
// Write the sorted list of values into the filter bitstream,
|
||||
// compressing it using Golomb coding.
|
||||
var value, lastValue, remainder uint64
|
||||
for _, v := range values {
|
||||
// Calculate the difference between this value and the last,
|
||||
// modulo P.
|
||||
remainder = (v - lastValue) & ((uint64(1) << f.p) - 1)
|
||||
|
||||
// Calculate the difference between this value and the last,
|
||||
// divided by P.
|
||||
value = (v - lastValue - remainder) >> f.p
|
||||
lastValue = v
|
||||
|
||||
// Write the P multiple into the bitstream in unary; the
|
||||
// average should be around 1 (2 bits - 0b10).
|
||||
for value > 0 {
|
||||
b.WriteBit(true)
|
||||
value--
|
||||
}
|
||||
b.WriteBit(false)
|
||||
|
||||
// Write the remainder as a big-endian integer with enough bits
|
||||
// to represent the appropriate collision probability.
|
||||
b.WriteBits(remainder, int(f.p))
|
||||
}
|
||||
|
||||
// Copy the bitstream into the filter object and return the object.
|
||||
f.filterData = b.Bytes()
|
||||
|
||||
return &f, nil
|
||||
}
|
||||
|
||||
// FromBytes deserializes a GCS filter from a known N, P, and serialized filter
|
||||
// as returned by Bytes().
|
||||
func FromBytes(N uint32, P uint8, M uint64, d []byte) (*Filter, error) { // nolint:gocritic
|
||||
// Basic sanity check.
|
||||
if P > 32 {
|
||||
return nil, ErrPTooBig
|
||||
}
|
||||
|
||||
// Create the filter object and insert metadata.
|
||||
f := &Filter{
|
||||
n: N,
|
||||
p: P,
|
||||
}
|
||||
|
||||
// First we'll compute the value of m, which is the modulus we use
|
||||
// within our finite field: the number of filter elements N multiplied by the
|
||||
// false-positive modulus M.
|
||||
f.modulusNP = uint64(f.n) * M
|
||||
|
||||
// Copy the filter.
|
||||
f.filterData = make([]byte, len(d))
|
||||
copy(f.filterData, d)
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// FromNBytes deserializes a GCS filter from a known P, and serialized N and
|
||||
// filter as returned by NBytes().
|
||||
func FromNBytes(P uint8, M uint64, d []byte) (*Filter, error) { // nolint:gocritic
|
||||
buffer := bytes.NewBuffer(d)
|
||||
N, err := wire.ReadVarInt(buffer, varIntProtoVer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if N >= (1 << 32) {
|
||||
return nil, ErrNTooBig
|
||||
}
|
||||
return FromBytes(uint32(N), P, M, buffer.Bytes())
|
||||
}
|
||||
|
||||
// Bytes returns the serialized format of the GCS filter, which does not
|
||||
// include N or P (returned by separate methods) or the key used by SipHash.
|
||||
func (f *Filter) Bytes() ([]byte, error) {
|
||||
filterData := make([]byte, len(f.filterData))
|
||||
copy(filterData, f.filterData)
|
||||
return filterData, nil
|
||||
}
|
||||
|
||||
// NBytes returns the serialized format of the GCS filter with N, which does
|
||||
// not include P (returned by a separate method) or the key used by SipHash.
|
||||
func (f *Filter) NBytes() ([]byte, error) {
|
||||
var buffer bytes.Buffer
|
||||
buffer.Grow(wire.VarIntSerializeSize(uint64(f.n)) + len(f.filterData))
|
||||
|
||||
err := wire.WriteVarInt(&buffer, varIntProtoVer, uint64(f.n))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = buffer.Write(f.filterData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
// PBytes returns the serialized format of the GCS filter with P, which does
|
||||
// not include N (returned by a separate method) or the key used by SipHash.
|
||||
func (f *Filter) PBytes() ([]byte, error) {
|
||||
filterData := make([]byte, len(f.filterData)+1)
|
||||
filterData[0] = f.p
|
||||
copy(filterData[1:], f.filterData)
|
||||
return filterData, nil
|
||||
}
|
||||
|
||||
// NPBytes returns the serialized format of the GCS filter with N and P, which
|
||||
// does not include the key used by SipHash.
|
||||
func (f *Filter) NPBytes() ([]byte, error) {
|
||||
var buffer bytes.Buffer
|
||||
buffer.Grow(wire.VarIntSerializeSize(uint64(f.n)) + 1 + len(f.filterData))
|
||||
|
||||
err := wire.WriteVarInt(&buffer, varIntProtoVer, uint64(f.n))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = buffer.WriteByte(f.p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = buffer.Write(f.filterData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
// P returns the filter's collision probability as a negative power of 2 (that
|
||||
// is, a collision probability of `1/2**20` is represented as 20).
|
||||
func (f *Filter) P() uint8 {
|
||||
return f.p
|
||||
}
|
||||
|
||||
// N returns the size of the data set used to build the filter.
|
||||
func (f *Filter) N() uint32 {
|
||||
return f.n
|
||||
}
|
||||
|
||||
// Match checks whether a []byte value is likely (within collision probability)
|
||||
// to be a member of the set represented by the filter.
|
||||
func (f *Filter) Match(key [KeySize]byte, data []byte) (bool, error) {
|
||||
// Create a filter bitstream.
|
||||
filterData, err := f.Bytes()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
b := bstream.NewBStreamReader(filterData)
|
||||
|
||||
// We take the high and low bits of modulusNP for the multiplication
|
||||
// of 2 64-bit integers into a 128-bit integer.
|
||||
nphi := f.modulusNP >> 32
|
||||
nplo := uint64(uint32(f.modulusNP))
|
||||
|
||||
// Then we hash our search term with the same parameters as the filter.
|
||||
term := siphash.Sum64(data, &key)
|
||||
term = fastReduction(term, nphi, nplo)
|
||||
|
||||
// Go through the search filter and look for the desired value.
|
||||
var value uint64
|
||||
for i := uint32(0); i < f.N(); i++ {
|
||||
// Read the difference between previous and new value from
|
||||
// bitstream.
|
||||
delta, err := f.readFullUint64(b)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Add the delta to the previous value.
|
||||
value += delta
|
||||
switch {
|
||||
|
||||
// The current value matches our query term, success.
|
||||
case value == term:
|
||||
return true, nil
|
||||
|
||||
// The current value is greater than our query term, thus no
|
||||
// future decoded value can match because the values
|
||||
// monotonically increase.
|
||||
case value > term:
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// All values were decoded and none produced a successful match. This
|
||||
// indicates that the items in the filter were all smaller than our
|
||||
// target.
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// MatchAny checks whether any []byte value is likely (within collision
|
||||
// probability) to be a member of the set represented by the filter faster than
|
||||
// calling Match() for each value individually.
|
||||
func (f *Filter) MatchAny(key [KeySize]byte, data [][]byte) (bool, error) {
|
||||
// TODO(conner): add real heuristics to query optimization
|
||||
switch {
|
||||
|
||||
case len(data) >= int(f.N()/2):
|
||||
return f.HashMatchAny(key, data)
|
||||
|
||||
default:
|
||||
return f.ZipMatchAny(key, data)
|
||||
}
|
||||
}
|
||||
|
||||
// ZipMatchAny checks whether any []byte value is likely (within
|
||||
// collision probability) to be a member of the set represented by the filter
|
||||
// faster than calling Match() for each value individually.
|
||||
//
|
||||
// NOTE: This method should outperform HashMatchAny when the number of query
|
||||
// entries is smaller than the number of filter entries.
|
||||
func (f *Filter) ZipMatchAny(key [KeySize]byte, data [][]byte) (bool, error) {
|
||||
// Basic sanity check.
|
||||
if len(data) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Create a filter bitstream.
|
||||
filterData, err := f.Bytes()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
b := bstream.NewBStreamReader(filterData)
|
||||
|
||||
// Create an uncompressed filter of the search values.
|
||||
values := make([]uint64, 0, len(data))
|
||||
|
||||
// First, we cache the high and low bits of modulusNP for the
|
||||
// multiplication of 2 64-bit integers into a 128-bit integer.
|
||||
nphi := f.modulusNP >> 32
|
||||
nplo := uint64(uint32(f.modulusNP))
|
||||
for _, d := range data {
|
||||
// For each datum, we assign the initial hash to a uint64.
|
||||
v := siphash.Sum64(d, &key)
|
||||
|
||||
// We'll then reduce the value down to the range of our
|
||||
// modulus.
|
||||
v = fastReduction(v, nphi, nplo)
|
||||
values = append(values, v)
|
||||
}
|
||||
sort.Slice(values, func(i, j int) bool { return values[i] < values[j] })
|
||||
|
||||
querySize := len(values)
|
||||
|
||||
// Zip down the filters, comparing values until we either run out of
|
||||
// values to compare in one of the filters or we reach a matching
|
||||
// value.
|
||||
var (
|
||||
value uint64
|
||||
queryIndex int
|
||||
)
|
||||
out:
|
||||
for i := uint32(0); i < f.N(); i++ {
|
||||
// Advance filter we're searching or return false if we're at
|
||||
// the end because nothing matched.
|
||||
delta, err := f.readFullUint64(b)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
value += delta
|
||||
|
||||
for {
|
||||
switch {
|
||||
|
||||
// All query items have been exhausted and we haven't
|
||||
// had a match, therefore there are no matches.
|
||||
case queryIndex == querySize:
|
||||
return false, nil
|
||||
|
||||
// The current item in the query matches the decoded
|
||||
// value, success.
|
||||
case values[queryIndex] == value:
|
||||
return true, nil
|
||||
|
||||
// The current item in the query is greater than the
|
||||
// current decoded value, continue to decode the next
|
||||
// delta and try again.
|
||||
case values[queryIndex] > value:
|
||||
continue out
|
||||
}
|
||||
|
||||
queryIndex++
|
||||
}
|
||||
}
|
||||
|
||||
// All items in the filter were decoded and none produced a successful
|
||||
// match.
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// HashMatchAny checks whether any []byte value is likely (within
|
||||
// collision probability) to be a member of the set represented by the filter
|
||||
// faster than calling Match() for each value individually.
|
||||
//
|
||||
// NOTE: This method should outperform MatchAny if the number of query entries
|
||||
// approaches the number of filter entries, len(data) >= f.N().
|
||||
func (f *Filter) HashMatchAny(key [KeySize]byte, data [][]byte) (bool, error) {
|
||||
// Basic sanity check.
|
||||
if len(data) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Create a filter bitstream.
|
||||
filterData, err := f.Bytes()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
b := bstream.NewBStreamReader(filterData)
|
||||
|
||||
var (
|
||||
values = make(map[uint32]struct{}, f.N())
|
||||
lastValue uint64
|
||||
)
|
||||
|
||||
// First, decompress the filter and construct an index of the keys
|
||||
// contained within the filter. Index construction terminates after all
|
||||
// values have been read from the bitstream.
|
||||
for {
|
||||
// Read the next diff value from the filter, add it to the
|
||||
// last value, and set the new value in the index.
|
||||
value, err := f.readFullUint64(b)
|
||||
if err == nil {
|
||||
lastValue += value
|
||||
values[uint32(lastValue)] = struct{}{}
|
||||
continue
|
||||
} else if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
// We cache the high and low bits of modulusNP for the multiplication of
|
||||
// 2 64-bit integers into a 128-bit integer.
|
||||
nphi := f.modulusNP >> 32
|
||||
nplo := uint64(uint32(f.modulusNP))
|
||||
|
||||
// Finally, run through the provided data items, querying the index to
|
||||
// determine if the filter contains any elements of interest.
|
||||
for _, d := range data {
|
||||
// For each datum, we assign the initial hash to
|
||||
// a uint64.
|
||||
v := siphash.Sum64(d, &key)
|
||||
|
||||
// We'll then reduce the value down to the range
|
||||
// of our modulus.
|
||||
v = fastReduction(v, nphi, nplo)
|
||||
|
||||
if _, ok := values[uint32(v)]; !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// readFullUint64 reads a value represented by the sum of a unary multiple of
|
||||
// the filter's P modulus (`2**P`) and a big-endian P-bit remainder.
|
||||
func (f *Filter) readFullUint64(b *bstream.BStream) (uint64, error) {
|
||||
var quotient uint64
|
||||
|
||||
// Count the 1s until we reach a 0.
|
||||
c, err := b.ReadBit()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for c {
|
||||
quotient++
|
||||
c, err = b.ReadBit()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// Read P bits.
|
||||
remainder, err := b.ReadBits(int(f.p))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Add the multiple and the remainder.
|
||||
v := (quotient << f.p) + remainder
|
||||
return v, nil
|
||||
}
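To make the coding concrete: each delta written by BuildGCSFilter and read back here splits into a unary quotient plus a P-bit remainder. A small illustrative sketch of that arithmetic (the helper name `golombRiceSplit` is hypothetical):

```go
package example

import "fmt"

// golombRiceSplit shows the arithmetic behind the bitstream format: each
// delta is written as a unary quotient followed by a P-bit remainder, so it
// occupies quotient+1+P bits (on average about P+2 bits per element).
func golombRiceSplit(delta uint64, p uint8) {
	quotient := delta >> p
	remainder := delta & ((uint64(1) << p) - 1)
	fmt.Printf("delta=%d -> quotient=%d remainder=%d (%d bits)\n",
		delta, quotient, remainder, quotient+1+uint64(p))
}
```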
|
368
btcutil/gcs/gcs_test.go
Normal file
|
@@ -0,0 +1,368 @@
|
|||
// Copyright (c) 2016-2017 The btcsuite developers
|
||||
// Copyright (c) 2016-2017 The Lightning Network Developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gcs_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil/gcs"
|
||||
)
|
||||
|
||||
var (
|
||||
// No need to allocate an err variable in every test
|
||||
err error
|
||||
|
||||
// Collision probability for the tests (1/2**19)
|
||||
P = uint8(19)
|
||||
|
||||
// Modulus value for the tests.
|
||||
M uint64 = 784931
|
||||
|
||||
// Filters are shared between the tests below, so they are kept at package
// level.
|
||||
filter, filter2, filter3 *gcs.Filter
|
||||
|
||||
// We need to use the same key for building and querying the filters
|
||||
key [gcs.KeySize]byte
|
||||
|
||||
// List of values for building a filter
|
||||
contents = [][]byte{
|
||||
[]byte("Alex"),
|
||||
[]byte("Bob"),
|
||||
[]byte("Charlie"),
|
||||
[]byte("Dick"),
|
||||
[]byte("Ed"),
|
||||
[]byte("Frank"),
|
||||
[]byte("George"),
|
||||
[]byte("Harry"),
|
||||
[]byte("Ilya"),
|
||||
[]byte("John"),
|
||||
[]byte("Kevin"),
|
||||
[]byte("Larry"),
|
||||
[]byte("Michael"),
|
||||
[]byte("Nate"),
|
||||
[]byte("Owen"),
|
||||
[]byte("Paul"),
|
||||
[]byte("Quentin"),
|
||||
}
|
||||
|
||||
// List of values for querying a filter using MatchAny()
|
||||
contents2 = [][]byte{
|
||||
[]byte("Alice"),
|
||||
[]byte("Betty"),
|
||||
[]byte("Charmaine"),
|
||||
[]byte("Donna"),
|
||||
[]byte("Edith"),
|
||||
[]byte("Faina"),
|
||||
[]byte("Georgia"),
|
||||
[]byte("Hannah"),
|
||||
[]byte("Ilsbeth"),
|
||||
[]byte("Jennifer"),
|
||||
[]byte("Kayla"),
|
||||
[]byte("Lena"),
|
||||
[]byte("Michelle"),
|
||||
[]byte("Natalie"),
|
||||
[]byte("Ophelia"),
|
||||
[]byte("Peggy"),
|
||||
[]byte("Queenie"),
|
||||
}
|
||||
)
|
||||
|
||||
// TestGCSFilterBuild builds a test filter with a randomized key. For Bitcoin
|
||||
// use, deterministic filter generation is desired. Therefore, a key that's
|
||||
// derived deterministically would be required.
|
||||
func TestGCSFilterBuild(t *testing.T) {
|
||||
for i := 0; i < gcs.KeySize; i += 4 {
|
||||
binary.BigEndian.PutUint32(key[i:], rand.Uint32())
|
||||
}
|
||||
filter, err = gcs.BuildGCSFilter(P, M, key, contents)
|
||||
if err != nil {
|
||||
t.Fatalf("Filter build failed: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// TestGCSMatchZeroHash ensures that Match and MatchAny properly match an item
|
||||
// if its hash after the reduction is zero. This is accomplished by brute
|
||||
// forcing a specific target whose hash is zero given a certain (P, M, key,
|
||||
// len(elements)) combination. In this case, P and M are the default, key was
|
||||
// chosen randomly, and len(elements) is 13. The target 4-byte value of 16060032
|
||||
// is the first such 32-bit value, thus we use the numbers 0-11 as the other
|
||||
// elements in the filter since we know they won't collide. We test both the
|
||||
// positive and negative cases, when the zero hash item is in the filter and
|
||||
// when it is excluded. In the negative case, the 32-bit value of 12 is added to
|
||||
// the filter instead of the target.
|
||||
func TestGCSMatchZeroHash(t *testing.T) {
|
||||
t.Run("include zero", func(t *testing.T) {
|
||||
testGCSMatchZeroHash(t, true)
|
||||
})
|
||||
t.Run("exclude zero", func(t *testing.T) {
|
||||
testGCSMatchZeroHash(t, false)
|
||||
})
|
||||
}
|
||||
|
||||
func testGCSMatchZeroHash(t *testing.T, includeZeroHash bool) {
|
||||
key := [gcs.KeySize]byte{
|
||||
0x25, 0x28, 0x0d, 0x25, 0x26, 0xe1, 0xd3, 0xc7,
|
||||
0xa5, 0x71, 0x85, 0x34, 0x92, 0xa5, 0x7e, 0x68,
|
||||
}
|
||||
|
||||
// Construct the target data to match, whose hash is zero after applying
|
||||
// the reduction with the parameters in the test.
|
||||
target := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(target, 16060032)
|
||||
|
||||
// Construct the set of 13 items including the target, using the 32-bit
|
||||
// values of 0 through 11 as the first 12 items. We know none of these
|
||||
// hash to zero since the brute force ended well beyond them.
|
||||
elements := make([][]byte, 0, 13)
|
||||
for i := 0; i < 12; i++ {
|
||||
data := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(data, uint32(i))
|
||||
elements = append(elements, data)
|
||||
}
|
||||
|
||||
// If the filter should include the zero hash element, add the target
|
||||
// which we know hashes to zero. Otherwise add 32-bit value of 12 which
|
||||
// we know does not hash to zero.
|
||||
if includeZeroHash {
|
||||
elements = append(elements, target)
|
||||
} else {
|
||||
data := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(data, 12)
|
||||
elements = append(elements, data)
|
||||
}
|
||||
|
||||
filter, err := gcs.BuildGCSFilter(P, M, key, elements)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to build filter: %v", err)
|
||||
}
|
||||
|
||||
match, err := filter.Match(key, target)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to match: %v", err)
|
||||
}
|
||||
|
||||
// We should only get a match iff the target was included.
|
||||
if match != includeZeroHash {
|
||||
t.Fatalf("expected match from Match: %t, got %t",
|
||||
includeZeroHash, match)
|
||||
}
|
||||
|
||||
match, err = filter.MatchAny(key, [][]byte{target})
|
||||
if err != nil {
|
||||
t.Fatalf("unable to match any: %v", err)
|
||||
}
|
||||
|
||||
// We should only get a match iff the target was included.
|
||||
if match != includeZeroHash {
|
||||
t.Fatalf("expected match from MatchAny: %t, got %t",
|
||||
includeZeroHash, match)
|
||||
}
|
||||
}
|
||||
|
||||
// TestGCSFilterCopy deserializes and serializes a filter to create a copy.
|
||||
func TestGCSFilterCopy(t *testing.T) {
|
||||
serialized2, err := filter.Bytes()
|
||||
if err != nil {
|
||||
t.Fatalf("Filter Bytes() failed: %v", err)
|
||||
}
|
||||
filter2, err = gcs.FromBytes(filter.N(), P, M, serialized2)
|
||||
if err != nil {
|
||||
t.Fatalf("Filter copy failed: %s", err.Error())
|
||||
}
|
||||
serialized3, err := filter.NBytes()
|
||||
if err != nil {
|
||||
t.Fatalf("Filter NBytes() failed: %v", err)
|
||||
}
|
||||
filter3, err = gcs.FromNBytes(filter.P(), M, serialized3)
|
||||
if err != nil {
|
||||
t.Fatalf("Filter copy failed: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// TestGCSFilterMetadata checks that the filter metadata is built and copied
|
||||
// correctly.
|
||||
func TestGCSFilterMetadata(t *testing.T) {
|
||||
if filter.P() != P {
|
||||
t.Fatal("P not correctly stored in filter metadata")
|
||||
}
|
||||
if filter.N() != uint32(len(contents)) {
|
||||
t.Fatal("N not correctly stored in filter metadata")
|
||||
}
|
||||
if filter.P() != filter2.P() {
|
||||
t.Fatal("P doesn't match between copied filters")
|
||||
}
|
||||
if filter.P() != filter3.P() {
|
||||
t.Fatal("P doesn't match between copied filters")
|
||||
}
|
||||
if filter.N() != filter2.N() {
|
||||
t.Fatal("N doesn't match between copied filters")
|
||||
}
|
||||
if filter.N() != filter3.N() {
|
||||
t.Fatal("N doesn't match between copied filters")
|
||||
}
|
||||
serialized, err := filter.Bytes()
|
||||
if err != nil {
|
||||
t.Fatalf("Filter Bytes() failed: %v", err)
|
||||
}
|
||||
serialized2, err := filter2.Bytes()
|
||||
if err != nil {
|
||||
t.Fatalf("Filter Bytes() failed: %v", err)
|
||||
}
|
||||
if !bytes.Equal(serialized, serialized2) {
|
||||
t.Fatal("Bytes don't match between copied filters")
|
||||
}
|
||||
serialized3, err := filter3.Bytes()
|
||||
if err != nil {
|
||||
t.Fatalf("Filter Bytes() failed: %v", err)
|
||||
}
|
||||
if !bytes.Equal(serialized, serialized3) {
|
||||
t.Fatal("Bytes don't match between copied filters")
|
||||
}
|
||||
serialized4, err := filter3.Bytes()
|
||||
if err != nil {
|
||||
t.Fatalf("Filter Bytes() failed: %v", err)
|
||||
}
|
||||
if !bytes.Equal(serialized, serialized4) {
|
||||
t.Fatal("Bytes don't match between copied filters")
|
||||
}
|
||||
}
|
||||
|
||||
// TestGCSFilterMatch checks that both the built and copied filters match
|
||||
// correctly, logging any false positives without failing on them.
|
||||
func TestGCSFilterMatch(t *testing.T) {
|
||||
match, err := filter.Match(key, []byte("Nate"))
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match failed: %s", err.Error())
|
||||
}
|
||||
if !match {
|
||||
t.Fatal("Filter didn't match when it should have!")
|
||||
}
|
||||
match, err = filter2.Match(key, []byte("Nate"))
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match failed: %s", err.Error())
|
||||
}
|
||||
if !match {
|
||||
t.Fatal("Filter didn't match when it should have!")
|
||||
}
|
||||
match, err = filter.Match(key, []byte("Quentin"))
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match failed: %s", err.Error())
|
||||
}
|
||||
if !match {
|
||||
t.Fatal("Filter didn't match when it should have!")
|
||||
}
|
||||
match, err = filter2.Match(key, []byte("Quentin"))
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match failed: %s", err.Error())
|
||||
}
|
||||
if !match {
|
||||
t.Fatal("Filter didn't match when it should have!")
|
||||
}
|
||||
match, err = filter.Match(key, []byte("Nates"))
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match failed: %s", err.Error())
|
||||
}
|
||||
if match {
|
||||
t.Logf("False positive match, should be 1 in 2**%d!", P)
|
||||
}
|
||||
match, err = filter2.Match(key, []byte("Nates"))
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match failed: %s", err.Error())
|
||||
}
|
||||
if match {
|
||||
t.Logf("False positive match, should be 1 in 2**%d!", P)
|
||||
}
|
||||
match, err = filter.Match(key, []byte("Quentins"))
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match failed: %s", err.Error())
|
||||
}
|
||||
if match {
|
||||
t.Logf("False positive match, should be 1 in 2**%d!", P)
|
||||
}
|
||||
match, err = filter2.Match(key, []byte("Quentins"))
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match failed: %s", err.Error())
|
||||
}
|
||||
if match {
|
||||
t.Logf("False positive match, should be 1 in 2**%d!", P)
|
||||
}
|
||||
}
|
||||
|
||||
// AnyMatcher is the function signature of our matching algorithms.
|
||||
type AnyMatcher func(key [gcs.KeySize]byte, data [][]byte) (bool, error)
|
||||
|
||||
// TestGCSFilterMatchAnySuite checks that all of our matching algorithms
|
||||
// properly match a list correctly when using built or copied filters, logging
|
||||
// any false positives without failing on them.
|
||||
func TestGCSFilterMatchAnySuite(t *testing.T) {
|
||||
funcs := []struct {
|
||||
name string
|
||||
matchAny func(*gcs.Filter) AnyMatcher
|
||||
}{
|
||||
{
|
||||
"default",
|
||||
func(f *gcs.Filter) AnyMatcher {
|
||||
return f.MatchAny
|
||||
},
|
||||
},
|
||||
{
|
||||
"hash",
|
||||
func(f *gcs.Filter) AnyMatcher {
|
||||
return f.HashMatchAny
|
||||
},
|
||||
},
|
||||
{
|
||||
"zip",
|
||||
func(f *gcs.Filter) AnyMatcher {
|
||||
return f.ZipMatchAny
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range funcs {
|
||||
test := test
|
||||
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
contentsCopy := make([][]byte, len(contents2))
|
||||
copy(contentsCopy, contents2)
|
||||
|
||||
match, err := test.matchAny(filter)(key, contentsCopy)
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match any failed: %s", err.Error())
|
||||
}
|
||||
if match {
|
||||
t.Logf("False positive match, should be 1 in 2**%d!", P)
|
||||
}
|
||||
match, err = test.matchAny(filter2)(key, contentsCopy)
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match any failed: %s", err.Error())
|
||||
}
|
||||
if match {
|
||||
t.Logf("False positive match, should be 1 in 2**%d!", P)
|
||||
}
|
||||
contentsCopy = append(contentsCopy, []byte("Nate"))
|
||||
match, err = test.matchAny(filter)(key, contentsCopy)
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match any failed: %s", err.Error())
|
||||
}
|
||||
if !match {
|
||||
t.Fatal("Filter didn't match any when it should have!")
|
||||
}
|
||||
match, err = test.matchAny(filter2)(key, contentsCopy)
|
||||
if err != nil {
|
||||
t.Fatalf("Filter match any failed: %s", err.Error())
|
||||
}
|
||||
if !match {
|
||||
t.Fatal("Filter didn't match any when it should have!")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
237
btcutil/gcs/gcsbench_test.go
Normal file
|
@@ -0,0 +1,237 @@
|
|||
// Copyright (c) 2016-2017 The btcsuite developers
|
||||
// Copyright (c) 2016-2017 The Lightning Network Developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gcs_test
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil/gcs"
|
||||
)
|
||||
|
||||
func genRandFilterElements(numElements uint) ([][]byte, error) {
|
||||
testContents := make([][]byte, numElements)
|
||||
for i := range testContents {
|
||||
randElem := make([]byte, 32)
|
||||
if _, err := rand.Read(randElem); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
testContents[i] = randElem
|
||||
}
|
||||
|
||||
return testContents, nil
|
||||
}
|
||||
|
||||
var (
|
||||
generatedFilter *gcs.Filter
|
||||
)
|
||||
|
||||
// BenchmarkGCSFilterBuild50000 benchmarks building a filter from 50,000 random elements.
|
||||
func BenchmarkGCSFilterBuild50000(b *testing.B) {
|
||||
var testKey [gcs.KeySize]byte
|
||||
for i := 0; i < gcs.KeySize; i += 4 {
|
||||
binary.BigEndian.PutUint32(testKey[i:], rand.Uint32())
|
||||
}
|
||||
|
||||
randFilterElems, genErr := genRandFilterElements(50000)
|
||||
if genErr != nil {
|
||||
b.Fatalf("unable to generate random item: %v", genErr)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
var localFilter *gcs.Filter
|
||||
for i := 0; i < b.N; i++ {
|
||||
localFilter, err = gcs.BuildGCSFilter(
|
||||
P, M, key, randFilterElems,
|
||||
)
|
||||
if err != nil {
|
||||
b.Fatalf("unable to generate filter: %v", err)
|
||||
}
|
||||
}
|
||||
generatedFilter = localFilter
|
||||
}
|
||||
|
||||
// BenchmarkGCSFilterBuild100000 benchmarks building a filter from 100,000 random elements.
|
||||
func BenchmarkGCSFilterBuild100000(b *testing.B) {
|
||||
var testKey [gcs.KeySize]byte
|
||||
for i := 0; i < gcs.KeySize; i += 4 {
|
||||
binary.BigEndian.PutUint32(testKey[i:], rand.Uint32())
|
||||
}
|
||||
|
||||
randFilterElems, genErr := genRandFilterElements(100000)
|
||||
if genErr != nil {
|
||||
b.Fatalf("unable to generate random item: %v", genErr)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
var localFilter *gcs.Filter
|
||||
for i := 0; i < b.N; i++ {
|
||||
localFilter, err = gcs.BuildGCSFilter(
|
||||
P, M, key, randFilterElems,
|
||||
)
|
||||
if err != nil {
|
||||
b.Fatalf("unable to generate filter: %v", err)
|
||||
}
|
||||
}
|
||||
generatedFilter = localFilter
|
||||
}
|
||||
|
||||
var (
|
||||
match bool
|
||||
)
|
||||
|
||||
// BenchmarkGCSFilterMatch benchmarks querying a filter for a single value.
|
||||
func BenchmarkGCSFilterMatch(b *testing.B) {
|
||||
filter, err := gcs.BuildGCSFilter(P, M, key, contents)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to build filter")
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
var localMatch bool
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err = filter.Match(key, []byte("Nate"))
|
||||
if err != nil {
|
||||
b.Fatalf("unable to match filter: %v", err)
|
||||
}
|
||||
|
||||
localMatch, err = filter.Match(key, []byte("Nates"))
|
||||
if err != nil {
|
||||
b.Fatalf("unable to match filter: %v", err)
|
||||
}
|
||||
}
|
||||
match = localMatch
|
||||
}
|
||||
|
||||
var (
|
||||
randElems100, _ = genRandFilterElements(100)
|
||||
randElems1000, _ = genRandFilterElements(1000)
|
||||
randElems10000, _ = genRandFilterElements(10000)
|
||||
randElems100000, _ = genRandFilterElements(100000)
|
||||
randElems1000000, _ = genRandFilterElements(1000000)
|
||||
randElems10000000, _ = genRandFilterElements(10000000)
|
||||
|
||||
filterElems1000, _ = genRandFilterElements(1000)
|
||||
filter1000, _ = gcs.BuildGCSFilter(P, M, key, filterElems1000)
|
||||
filterElems5000, _ = genRandFilterElements(5000)
|
||||
filter5000, _ = gcs.BuildGCSFilter(P, M, key, filterElems5000)
|
||||
filterElems10000, _ = genRandFilterElements(10000)
|
||||
filter10000, _ = gcs.BuildGCSFilter(P, M, key, filterElems10000)
|
||||
)
|
||||
|
||||
// matchAnyBenchmarks contains combinations of random filters and queries used
|
||||
// to measure performance of various MatchAny implementations.
|
||||
var matchAnyBenchmarks = []struct {
|
||||
name string
|
||||
query [][]byte
|
||||
filter *gcs.Filter
|
||||
}{
|
||||
{"q100-f1K", randElems100, filter1000},
|
||||
{"q1K-f1K", randElems1000, filter1000},
|
||||
{"q10K-f1K", randElems10000, filter1000},
|
||||
{"q100K-f1K", randElems100000, filter1000},
|
||||
{"q1M-f1K", randElems1000000, filter1000},
|
||||
{"q10M-f1K", randElems10000000, filter1000},
|
||||
|
||||
{"q100-f5K", randElems100, filter5000},
|
||||
{"q1K-f5K", randElems1000, filter5000},
|
||||
{"q10K-f5K", randElems10000, filter5000},
|
||||
{"q100K-f5K", randElems100000, filter5000},
|
||||
{"q1M-f5K", randElems1000000, filter5000},
|
||||
{"q10M-f5K", randElems10000000, filter5000},
|
||||
|
||||
{"q100-f10K", randElems100, filter10000},
|
||||
{"q1K-f10K", randElems1000, filter10000},
|
||||
{"q10K-f10K", randElems10000, filter10000},
|
||||
{"q100K-f10K", randElems100000, filter10000},
|
||||
{"q1M-f10K", randElems1000000, filter10000},
|
||||
{"q10M-f10K", randElems10000000, filter10000},
|
||||
}
|
||||
|
||||
// BenchmarkGCSFilterZipMatchAny benchmarks the sort-and-zip MatchAny impl.
|
||||
func BenchmarkGCSFilterZipMatchAny(b *testing.B) {
|
||||
for _, test := range matchAnyBenchmarks {
|
||||
test := test
|
||||
|
||||
b.Run(test.name, func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
|
||||
var (
|
||||
localMatch bool
|
||||
err error
|
||||
)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
localMatch, err = test.filter.ZipMatchAny(
|
||||
key, test.query,
|
||||
)
|
||||
if err != nil {
|
||||
b.Fatalf("unable to match filter: %v", err)
|
||||
}
|
||||
}
|
||||
match = localMatch
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkGCSFilterHashMatchAny benchmarks the hash-join MatchAny impl.
|
||||
func BenchmarkGCSFilterHashMatchAny(b *testing.B) {
|
||||
for _, test := range matchAnyBenchmarks {
|
||||
test := test
|
||||
|
||||
b.Run(test.name, func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
|
||||
var (
|
||||
localMatch bool
|
||||
err error
|
||||
)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
localMatch, err = test.filter.HashMatchAny(
|
||||
key, test.query,
|
||||
)
|
||||
if err != nil {
|
||||
b.Fatalf("unable to match filter: %v", err)
|
||||
}
|
||||
}
|
||||
match = localMatch
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkGCSFilterMatchAny benchmarks the hybrid MatchAny impl.
|
||||
func BenchmarkGCSFilterMatchAny(b *testing.B) {
|
||||
for _, test := range matchAnyBenchmarks {
|
||||
test := test
|
||||
|
||||
b.Run(test.name, func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
|
||||
var (
|
||||
localMatch bool
|
||||
err error
|
||||
)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
localMatch, err = test.filter.MatchAny(
|
||||
key, test.query,
|
||||
)
|
||||
if err != nil {
|
||||
b.Fatalf("unable to match filter: %v", err)
|
||||
}
|
||||
}
|
||||
match = localMatch
|
||||
})
|
||||
}
|
||||
}
|
13
btcutil/go.mod
Normal file
|
@@ -0,0 +1,13 @@
|
|||
module github.com/btcsuite/btcd/btcutil
|
||||
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/aead/siphash v1.0.1
|
||||
github.com/btcsuite/btcd v0.20.1-beta
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
|
||||
)
|
||||
|
||||
replace github.com/btcsuite/btcd => ../
|
43
btcutil/go.sum
Normal file
|
@@ -0,0 +1,43 @@
|
|||
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd h1:qdGvebPBDuYDPGi1WCPjy1tGyMpmDK8IEapSsszn7HE=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723 h1:ZA/jbKoGcVAnER6pCHPEkGdZOV7U1oLUedErBHCUMs0=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
23
btcutil/hash160.go
Normal file
|
@@ -0,0 +1,23 @@
|
|||
// Copyright (c) 2013-2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package btcutil
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"hash"
|
||||
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
)
|
||||
|
||||
// Calculate the hash of hasher over buf.
|
||||
func calcHash(buf []byte, hasher hash.Hash) []byte {
|
||||
_, _ = hasher.Write(buf)
|
||||
return hasher.Sum(nil)
|
||||
}
|
||||
|
||||
// Hash160 calculates the hash ripemd160(sha256(b)).
|
||||
func Hash160(buf []byte) []byte {
|
||||
return calcHash(calcHash(buf, sha256.New()), ripemd160.New())
|
||||
}
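A small illustrative sketch (the function name `exampleHash160` is hypothetical): Hash160 always yields a 20-byte digest, which is what P2PKH and P2SH addresses encode.

```go
package example

import (
	"encoding/hex"
	"fmt"

	"github.com/btcsuite/btcd/btcutil"
)

// exampleHash160 prints the 20-byte RIPEMD160(SHA256(...)) digest of some
// input bytes.
func exampleHash160() {
	digest := btcutil.Hash160([]byte("hello"))
	fmt.Println(len(digest), hex.EncodeToString(digest))
}
```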
|
59
btcutil/hdkeychain/README.md
Normal file
|
@@ -0,0 +1,59 @@
|
|||
hdkeychain
|
||||
==========
|
||||
|
||||
[](https://travis-ci.org/btcsuite/btcutil)
|
||||
[](http://copyfree.org)
|
||||
[](http://godoc.org/github.com/btcsuite/btcd/btcutil/hdkeychain)
|
||||
|
||||
Package hdkeychain provides an API for bitcoin hierarchical deterministic
|
||||
extended keys (BIP0032).
|
||||
|
||||
A comprehensive suite of tests is provided to ensure proper functionality. See
|
||||
`test_coverage.txt` for the gocov coverage report. Alternatively, if you are
|
||||
running a POSIX OS, you can run the `cov_report.sh` script for a real-time
|
||||
report.
|
||||
|
||||
## Feature Overview
|
||||
|
||||
- Full BIP0032 implementation
|
||||
- Single type for private and public extended keys
|
||||
- Convenient cryptographically secure seed generation
|
||||
- Simple creation of master nodes
|
||||
- Support for multi-layer derivation
|
||||
- Easy serialization and deserialization for both private and public extended
|
||||
keys
|
||||
- Support for custom networks by registering them with chaincfg
|
||||
- Obtaining the underlying EC pubkeys, EC privkeys, and associated bitcoin
|
||||
addresses ties in seamlessly with existing btcec and btcutil types which
|
||||
provide powerful tools for working with them to do things like sign
|
||||
transactions and generate payment scripts
|
||||
- Uses the btcec package which is highly optimized for secp256k1
|
||||
- Code examples including:
|
||||
- Generating a cryptographically secure random seed and deriving a
|
||||
master node from it
|
||||
- Default HD wallet layout as described by BIP0032
|
||||
- Audits use case as described by BIP0032
|
||||
- Comprehensive test coverage including the BIP0032 test vectors
|
||||
- Benchmarks
|
||||
|
||||
## Installation and Updating
|
||||
|
||||
```bash
|
||||
$ go get -u github.com/btcsuite/btcd/btcutil/hdkeychain
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
* [NewMaster Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/hdkeychain#example-NewMaster)
|
||||
Demonstrates how to generate a cryptographically random seed then use it to
|
||||
create a new master node (extended key).
|
||||
* [Default Wallet Layout Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/hdkeychain#example-package--DefaultWalletLayout)
|
||||
Demonstrates the default hierarchical deterministic wallet layout as described
|
||||
in BIP0032.
|
||||
* [Audits Use Case Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/hdkeychain#example-package--Audits)
|
||||
Demonstrates the audits use case in BIP0032.
|
||||
|
||||
## License
|
||||
|
||||
Package hdkeychain is licensed under the [copyfree](http://copyfree.org) ISC
|
||||
License.
|
84
btcutil/hdkeychain/bench_test.go
Normal file
@@ -0,0 +1,84 @@
// Copyright (c) 2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hdkeychain_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil/hdkeychain"
|
||||
)
|
||||
|
||||
// bip0032MasterPriv1 is the master private extended key from the first set of
|
||||
// test vectors in BIP0032.
|
||||
const bip0032MasterPriv1 = "xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbP" +
|
||||
"y6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi"
|
||||
|
||||
// BenchmarkDeriveHardened benchmarks how long it takes to derive a hardened
|
||||
// child from a master private extended key.
|
||||
func BenchmarkDeriveHardened(b *testing.B) {
|
||||
b.StopTimer()
|
||||
masterKey, err := hdkeychain.NewKeyFromString(bip0032MasterPriv1)
|
||||
if err != nil {
|
||||
b.Errorf("Failed to decode master seed: %v", err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = masterKey.Derive(hdkeychain.HardenedKeyStart)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkDeriveNormal benchmarks how long it takes to derive a normal
|
||||
// (non-hardened) child from a master private extended key.
|
||||
func BenchmarkDeriveNormal(b *testing.B) {
|
||||
b.StopTimer()
|
||||
masterKey, err := hdkeychain.NewKeyFromString(bip0032MasterPriv1)
|
||||
if err != nil {
|
||||
b.Errorf("Failed to decode master seed: %v", err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = masterKey.Derive(0)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkPrivToPub benchmarks how long it takes to convert a private extended
|
||||
// key to a public extended key.
|
||||
func BenchmarkPrivToPub(b *testing.B) {
|
||||
b.StopTimer()
|
||||
masterKey, err := hdkeychain.NewKeyFromString(bip0032MasterPriv1)
|
||||
if err != nil {
|
||||
b.Errorf("Failed to decode master seed: %v", err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = masterKey.Neuter()
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkDeserialize benchmarks how long it takes to deserialize a private
|
||||
// extended key.
|
||||
func BenchmarkDeserialize(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = hdkeychain.NewKeyFromString(bip0032MasterPriv1)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkSerialize benchmarks how long it takes to serialize a private
|
||||
// extended key.
|
||||
func BenchmarkSerialize(b *testing.B) {
|
||||
b.StopTimer()
|
||||
masterKey, err := hdkeychain.NewKeyFromString(bip0032MasterPriv1)
|
||||
if err != nil {
|
||||
b.Errorf("Failed to decode master seed: %v", err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = masterKey.String()
|
||||
}
|
||||
}
|
17
btcutil/hdkeychain/cov_report.sh
Normal file
@@ -0,0 +1,17 @@
#!/bin/sh
|
||||
|
||||
# This script uses gocov to generate a test coverage report.
|
||||
# The gocov tool may be obtained with the following command:
|
||||
# go get github.com/axw/gocov/gocov
|
||||
#
|
||||
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
|
||||
|
||||
# Check for gocov.
|
||||
type gocov >/dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
echo >&2 "This script requires the gocov tool."
|
||||
echo >&2 "You may obtain it with the following command:"
|
||||
echo >&2 "go get github.com/axw/gocov/gocov"
|
||||
exit 1
|
||||
fi
|
||||
gocov test | gocov report
|
84
btcutil/hdkeychain/doc.go
Normal file
@@ -0,0 +1,84 @@
// Copyright (c) 2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package hdkeychain provides an API for bitcoin hierarchical deterministic
|
||||
extended keys (BIP0032).
|
||||
|
||||
Overview
|
||||
|
||||
The ability to implement hierarchical deterministic wallets depends on the
|
||||
ability to create and derive hierarchical deterministic extended keys.
|
||||
|
||||
At a high level, this package provides support for those hierarchical
|
||||
deterministic extended keys by providing an ExtendedKey type and supporting
|
||||
functions. Each extended key can either be a private or public extended key
|
||||
which itself is capable of deriving a child extended key.
|
||||
|
||||
Determining the Extended Key Type
|
||||
|
||||
Whether an extended key is a private or public extended key can be determined
|
||||
with the IsPrivate function.
|
||||
|
||||
Transaction Signing Keys and Payment Addresses
|
||||
|
||||
In order to create and sign transactions, or provide others with addresses to
|
||||
send funds to, the underlying key and address material must be accessible. This
|
||||
package provides the ECPubKey, ECPrivKey, and Address functions for this
|
||||
purpose.
|
||||
|
||||
The Master Node
|
||||
|
||||
As previously mentioned, the extended keys are hierarchical meaning they are
|
||||
used to form a tree. The root of that tree is called the master node and this
|
||||
package provides the NewMaster function to create it from a cryptographically
|
||||
random seed. The GenerateSeed function is provided as a convenient way to
|
||||
create a random seed for use with the NewMaster function.
|
||||
|
||||
Deriving Children
|
||||
|
||||
Once you have created a tree root (or have deserialized an extended key as
|
||||
discussed later), the child extended keys can be derived by using the Derive
|
||||
function. The Derive function supports deriving both normal (non-hardened) and
|
||||
hardened child extended keys. In order to derive a hardened extended key, use
|
||||
the HardenedKeyStart constant + the hardened key number as the index to the
|
||||
Derive function. This provides the ability to cascade the keys into a tree and
|
||||
hence generate the hierarchical deterministic key chains.
|
||||
|
||||
Normal vs Hardened Derived Extended Keys
|
||||
|
||||
A private extended key can be used to derive both hardened and non-hardened
|
||||
(normal) child private and public extended keys. A public extended key can only
|
||||
be used to derive non-hardened child public extended keys. As enumerated in
|
||||
BIP0032 "knowledge of the extended public key plus any non-hardened private key
|
||||
descending from it is equivalent to knowing the extended private key (and thus
|
||||
every private and public key descending from it). This means that extended
|
||||
public keys must be treated more carefully than regular public keys. It is also
|
||||
the reason for the existence of hardened keys, and why they are used for the
|
||||
account level in the tree. This way, a leak of an account-specific (or below)
|
||||
private key never risks compromising the master or other accounts."
|
||||
|
||||
Neutering a Private Extended Key
|
||||
|
||||
A private extended key can be converted to a new instance of the corresponding
|
||||
public extended key with the Neuter function. The original extended key is not
|
||||
modified. A public extended key is still capable of deriving non-hardened child
|
||||
public extended keys.
|
||||
|
||||
Serializing and Deserializing Extended Keys
|
||||
|
||||
Extended keys are serialized and deserialized with the String and
|
||||
NewKeyFromString functions. The serialized key is a Base58-encoded string which
|
||||
looks like the following:
|
||||
public key: xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw
|
||||
private key: xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7
|
||||
|
||||
Network
|
||||
|
||||
Extended keys are much like normal Bitcoin addresses in that they have version
|
||||
bytes which tie them to a specific network. The SetNet and IsForNet functions
|
||||
are provided to set and determine which network an extended key is associated
|
||||
with.
|
||||
*/
|
||||
package hdkeychain
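
To tie the sections above together, here is a minimal sketch (not part of the commit) that chains GenerateSeed, NewMaster, a hardened Derive, and Neuter exactly as the package documentation describes.

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcutil/hdkeychain"
	"github.com/btcsuite/btcd/chaincfg"
)

func main() {
	// Create a master node from a fresh, cryptographically secure seed.
	seed, err := hdkeychain.GenerateSeed(hdkeychain.RecommendedSeedLen)
	if err != nil {
		fmt.Println(err)
		return
	}
	master, err := hdkeychain.NewMaster(seed, &chaincfg.MainNetParams)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Derive the hardened account key m/0H as described above, then
	// neuter it so only public child keys can be derived from it.
	acct0, err := master.Derive(hdkeychain.HardenedKeyStart + 0)
	if err != nil {
		fmt.Println(err)
		return
	}
	acct0Pub, err := acct0.Neuter()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("account 0 xpub:", acct0Pub)
}
```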
|
182
btcutil/hdkeychain/example_test.go
Normal file
@@ -0,0 +1,182 @@
// Copyright (c) 2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hdkeychain_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/btcutil/hdkeychain"
|
||||
)
|
||||
|
||||
// This example demonstrates how to generate a cryptographically random seed
|
||||
// then use it to create a new master node (extended key).
|
||||
func ExampleNewMaster() {
|
||||
// Generate a random seed at the recommended length.
|
||||
seed, err := hdkeychain.GenerateSeed(hdkeychain.RecommendedSeedLen)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Generate a new master node using the seed.
|
||||
key, err := hdkeychain.NewMaster(seed, &chaincfg.MainNetParams)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Show that the generated master node extended key is private.
|
||||
fmt.Println("Private Extended Key?:", key.IsPrivate())
|
||||
|
||||
// Output:
|
||||
// Private Extended Key?: true
|
||||
}
|
||||
|
||||
// This example demonstrates the default hierarchical deterministic wallet
|
||||
// layout as described in BIP0032.
|
||||
func Example_defaultWalletLayout() {
|
||||
// The default wallet layout described in BIP0032 is:
|
||||
//
|
||||
// Each account is composed of two keypair chains: an internal and an
|
||||
// external one. The external keychain is used to generate new public
|
||||
// addresses, while the internal keychain is used for all other
|
||||
// operations (change addresses, generation addresses, ..., anything
|
||||
// that doesn't need to be communicated).
|
||||
//
|
||||
// * m/iH/0/k
|
||||
// corresponds to the k'th keypair of the external chain of account
|
||||
// number i of the HDW derived from master m.
|
||||
// * m/iH/1/k
|
||||
// corresponds to the k'th keypair of the internal chain of account
|
||||
// number i of the HDW derived from master m.
|
||||
|
||||
// Ordinarily this would either be read from some encrypted source
|
||||
// and be decrypted or generated as the NewMaster example shows, but
|
||||
// for the purposes of this example, the private extended key for the
|
||||
// master node is being hard coded here.
|
||||
master := "xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jP" +
|
||||
"PqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi"
|
||||
|
||||
// Start by getting an extended key instance for the master node.
|
||||
// This gives the path:
|
||||
// m
|
||||
masterKey, err := hdkeychain.NewKeyFromString(master)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Derive the extended key for account 0. This gives the path:
|
||||
// m/0H
|
||||
acct0, err := masterKey.Derive(hdkeychain.HardenedKeyStart + 0)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Derive the extended key for the account 0 external chain. This
|
||||
// gives the path:
|
||||
// m/0H/0
|
||||
acct0Ext, err := acct0.Derive(0)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Derive the extended key for the account 0 internal chain. This gives
|
||||
// the path:
|
||||
// m/0H/1
|
||||
acct0Int, err := acct0.Derive(1)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// At this point, acct0Ext and acct0Int are ready to derive the keys for
|
||||
// the external and internal wallet chains.
|
||||
|
||||
// Derive the 10th extended key for the account 0 external chain. This
|
||||
// gives the path:
|
||||
// m/0H/0/10
|
||||
acct0Ext10, err := acct0Ext.Derive(10)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Derive the 1st extended key for the account 0 internal chain. This
|
||||
// gives the path:
|
||||
// m/0H/1/0
|
||||
acct0Int0, err := acct0Int.Derive(0)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Get and show the address associated with the extended keys for the
|
||||
// main bitcoin network.
|
||||
acct0ExtAddr, err := acct0Ext10.Address(&chaincfg.MainNetParams)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
acct0IntAddr, err := acct0Int0.Address(&chaincfg.MainNetParams)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println("Account 0 External Address 10:", acct0ExtAddr)
|
||||
fmt.Println("Account 0 Internal Address 0:", acct0IntAddr)
|
||||
|
||||
// Output:
|
||||
// Account 0 External Address 10: 1HVccubUT8iKTapMJ5AnNA4sLRN27xzQ4F
|
||||
// Account 0 Internal Address 0: 1J5rebbkQaunJTUoNVREDbeB49DqMNFFXk
|
||||
}
|
||||
|
||||
// This example demonstrates the audits use case in BIP0032.
|
||||
func Example_audits() {
|
||||
// The audits use case described in BIP0032 is:
|
||||
//
|
||||
// In case an auditor needs full access to the list of incoming and
|
||||
// outgoing payments, one can share all account public extended keys.
|
||||
// This will allow the auditor to see all transactions from and to the
|
||||
// wallet, in all accounts, but not a single secret key.
|
||||
//
|
||||
// * N(m/*)
|
||||
// corresponds to the neutered master extended key (also called
|
||||
// the master public extended key)
|
||||
|
||||
// Ordinarily this would either be read from some encrypted source
|
||||
// and be decrypted or generated as the NewMaster example shows, but
|
||||
// for the purposes of this example, the private extended key for the
|
||||
// master node is being hard coded here.
|
||||
master := "xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jP" +
|
||||
"PqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi"
|
||||
|
||||
// Start by getting an extended key instance for the master node.
|
||||
// This gives the path:
|
||||
// m
|
||||
masterKey, err := hdkeychain.NewKeyFromString(master)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Neuter the master key to generate a master public extended key. This
|
||||
// gives the path:
|
||||
// N(m/*)
|
||||
masterPubKey, err := masterKey.Neuter()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Share the master public extended key with the auditor.
|
||||
fmt.Println("Audit key N(m/*):", masterPubKey)
|
||||
|
||||
// Output:
|
||||
// Audit key N(m/*): xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8
|
||||
}
|
706
btcutil/hdkeychain/extendedkey.go
Normal file
@@ -0,0 +1,706 @@
// Copyright (c) 2014-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hdkeychain
|
||||
|
||||
// References:
|
||||
// [BIP32]: BIP0032 - Hierarchical Deterministic Wallets
|
||||
// https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha512"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec"
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/btcutil/base58"
|
||||
)
|
||||
|
||||
const (
|
||||
// RecommendedSeedLen is the recommended length in bytes for a seed
|
||||
// to a master node.
|
||||
RecommendedSeedLen = 32 // 256 bits
|
||||
|
||||
// HardenedKeyStart is the index at which a hardened key starts. Each
|
||||
// extended key has 2^31 normal child keys and 2^31 hardened child keys.
|
||||
// Thus the range for normal child keys is [0, 2^31 - 1] and the range
|
||||
// for hardened child keys is [2^31, 2^32 - 1].
|
||||
HardenedKeyStart = 0x80000000 // 2^31
|
||||
|
||||
// MinSeedBytes is the minimum number of bytes allowed for a seed to
|
||||
// a master node.
|
||||
MinSeedBytes = 16 // 128 bits
|
||||
|
||||
// MaxSeedBytes is the maximum number of bytes allowed for a seed to
|
||||
// a master node.
|
||||
MaxSeedBytes = 64 // 512 bits
|
||||
|
||||
// serializedKeyLen is the length of a serialized public or private
|
||||
// extended key. It consists of 4 bytes version, 1 byte depth, 4 bytes
|
||||
// fingerprint, 4 bytes child number, 32 bytes chain code, and 33 bytes
|
||||
// public/private key data.
|
||||
serializedKeyLen = 4 + 1 + 4 + 4 + 32 + 33 // 78 bytes
|
||||
|
||||
// maxUint8 is the max positive integer which can be serialized in a uint8
|
||||
maxUint8 = 1<<8 - 1
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDeriveHardFromPublic describes an error in which the caller
|
||||
// attempted to derive a hardened extended key from a public key.
|
||||
ErrDeriveHardFromPublic = errors.New("cannot derive a hardened key " +
|
||||
"from a public key")
|
||||
|
||||
// ErrDeriveBeyondMaxDepth describes an error in which the caller
|
||||
// has attempted to derive more than 255 keys from a root key.
|
||||
ErrDeriveBeyondMaxDepth = errors.New("cannot derive a key with more than " +
|
||||
"255 indices in its path")
|
||||
|
||||
// ErrNotPrivExtKey describes an error in which the caller attempted
|
||||
// to extract a private key from a public extended key.
|
||||
ErrNotPrivExtKey = errors.New("unable to create private keys from a " +
|
||||
"public extended key")
|
||||
|
||||
// ErrInvalidChild describes an error in which the child at a specific
|
||||
// index is invalid due to the derived key falling outside of the valid
|
||||
// range for secp256k1 private keys. This error indicates the caller
|
||||
// should simply ignore the invalid child extended key at this index and
|
||||
// increment to the next index.
|
||||
ErrInvalidChild = errors.New("the extended key at this index is invalid")
|
||||
|
||||
// ErrUnusableSeed describes an error in which the provided seed is not
|
||||
// usable due to the derived key falling outside of the valid range for
|
||||
// secp256k1 private keys. This error indicates the caller must choose
|
||||
// another seed.
|
||||
ErrUnusableSeed = errors.New("unusable seed")
|
||||
|
||||
// ErrInvalidSeedLen describes an error in which the provided seed or
|
||||
// seed length is not in the allowed range.
|
||||
ErrInvalidSeedLen = fmt.Errorf("seed length must be between %d and %d "+
|
||||
"bits", MinSeedBytes*8, MaxSeedBytes*8)
|
||||
|
||||
// ErrBadChecksum describes an error in which the checksum encoded with
|
||||
// a serialized extended key does not match the calculated value.
|
||||
ErrBadChecksum = errors.New("bad extended key checksum")
|
||||
|
||||
// ErrInvalidKeyLen describes an error in which the provided serialized
|
||||
// key is not the expected length.
|
||||
ErrInvalidKeyLen = errors.New("the provided serialized extended key " +
|
||||
"length is invalid")
|
||||
)
|
||||
|
||||
// masterKey is the master key used along with a random seed used to generate
|
||||
// the master node in the hierarchical tree.
|
||||
var masterKey = []byte("Bitcoin seed")
|
||||
|
||||
// ExtendedKey houses all the information needed to support a hierarchical
|
||||
// deterministic extended key. See the package overview documentation for
|
||||
// more details on how to use extended keys.
|
||||
type ExtendedKey struct {
|
||||
key []byte // This will be the pubkey for extended pub keys
|
||||
pubKey []byte // This will only be set for extended priv keys
|
||||
chainCode []byte
|
||||
depth uint8
|
||||
parentFP []byte
|
||||
childNum uint32
|
||||
version []byte
|
||||
isPrivate bool
|
||||
}
|
||||
|
||||
// NewExtendedKey returns a new instance of an extended key with the given
|
||||
// fields. No error checking is performed here as it's only intended to be a
|
||||
// convenience method used to create a populated struct. This function should
|
||||
// only be used by applications that need to create custom ExtendedKeys. All
|
||||
// other applications should just use NewMaster, Derive, or Neuter.
|
||||
func NewExtendedKey(version, key, chainCode, parentFP []byte, depth uint8,
|
||||
childNum uint32, isPrivate bool) *ExtendedKey {
|
||||
|
||||
// NOTE: The pubKey field is intentionally left nil so it is only
|
||||
// computed and memoized as required.
|
||||
return &ExtendedKey{
|
||||
key: key,
|
||||
chainCode: chainCode,
|
||||
depth: depth,
|
||||
parentFP: parentFP,
|
||||
childNum: childNum,
|
||||
version: version,
|
||||
isPrivate: isPrivate,
|
||||
}
|
||||
}
|
||||
|
||||
// pubKeyBytes returns bytes for the serialized compressed public key associated
|
||||
// with this extended key in an efficient manner including memoization as
|
||||
// necessary.
|
||||
//
|
||||
// When the extended key is already a public key, the key is simply returned as
|
||||
// is since it's already in the correct form. However, when the extended key is
|
||||
// a private key, the public key will be calculated and memoized so future
|
||||
// accesses can simply return the cached result.
|
||||
func (k *ExtendedKey) pubKeyBytes() []byte {
|
||||
// Just return the key if it's already an extended public key.
|
||||
if !k.isPrivate {
|
||||
return k.key
|
||||
}
|
||||
|
||||
// This is a private extended key, so calculate and memoize the public
|
||||
// key if needed.
|
||||
if len(k.pubKey) == 0 {
|
||||
pkx, pky := btcec.S256().ScalarBaseMult(k.key)
|
||||
pubKey := btcec.PublicKey{Curve: btcec.S256(), X: pkx, Y: pky}
|
||||
k.pubKey = pubKey.SerializeCompressed()
|
||||
}
|
||||
|
||||
return k.pubKey
|
||||
}
|
||||
|
||||
// IsPrivate returns whether or not the extended key is a private extended key.
|
||||
//
|
||||
// A private extended key can be used to derive both hardened and non-hardened
|
||||
// child private and public extended keys. A public extended key can only be
|
||||
// used to derive non-hardened child public extended keys.
|
||||
func (k *ExtendedKey) IsPrivate() bool {
|
||||
return k.isPrivate
|
||||
}
|
||||
|
||||
// Depth returns the current derivation level with respect to the root.
|
||||
//
|
||||
// The root key has depth zero, and the field has a maximum of 255 due to
|
||||
// how depth is serialized.
|
||||
func (k *ExtendedKey) Depth() uint8 {
|
||||
return k.depth
|
||||
}
|
||||
|
||||
// Version returns the extended key's hardened derivation version. This can be
|
||||
// used to identify the extended key's type.
|
||||
func (k *ExtendedKey) Version() []byte {
|
||||
return k.version
|
||||
}
|
||||
|
||||
// ParentFingerprint returns a fingerprint of the parent extended key from which
|
||||
// this one was derived.
|
||||
func (k *ExtendedKey) ParentFingerprint() uint32 {
|
||||
return binary.BigEndian.Uint32(k.parentFP)
|
||||
}
|
||||
|
||||
// ChainCode returns the chain code part of this extended key.
|
||||
//
|
||||
// It is identical for both public and private extended keys.
|
||||
func (k *ExtendedKey) ChainCode() []byte {
|
||||
return append([]byte{}, k.chainCode...)
|
||||
}
|
||||
|
||||
// Derive returns a derived child extended key at the given index.
|
||||
//
|
||||
// IMPORTANT: if you were previously using the Child method, this method is incompatible.
|
||||
// The Child method had a BIP-32 standard compatibility issue. You have to check whether
|
||||
// any hardened derivations in your derivation path are affected by this issue, via the
|
||||
// IsAffectedByIssue172 method and migrate the wallet if so. This method does conform
|
||||
// to the standard. If you need the old behavior, use DeriveNonStandard.
|
||||
//
|
||||
// When this extended key is a private extended key (as determined by the IsPrivate
|
||||
// function), a private extended key will be derived. Otherwise, the derived
|
||||
// extended key will also be a public extended key.
|
||||
//
|
||||
// When the index is greater than or equal to the HardenedKeyStart constant, the
|
||||
// derived extended key will be a hardened extended key. It is only possible to
|
||||
// derive a hardened extended key from a private extended key. Consequently,
|
||||
// this function will return ErrDeriveHardFromPublic if a hardened child
|
||||
// extended key is requested from a public extended key.
|
||||
//
|
||||
// A hardened extended key is useful since, as previously mentioned, it requires
|
||||
// a parent private extended key to derive. In other words, normal child
|
||||
// extended public keys can be derived from a parent public extended key (no
|
||||
// knowledge of the parent private key) whereas hardened extended keys may not
|
||||
// be.
|
||||
//
|
||||
// NOTE: There is an extremely small chance (< 1 in 2^127) the specific child
|
||||
// index does not derive to a usable child. The ErrInvalidChild error will be
|
||||
// returned if this should occur, and the caller is expected to ignore the
|
||||
// invalid child and simply increment to the next index.
|
||||
func (k *ExtendedKey) Derive(i uint32) (*ExtendedKey, error) {
|
||||
// Prevent derivation of children beyond the max allowed depth.
|
||||
if k.depth == maxUint8 {
|
||||
return nil, ErrDeriveBeyondMaxDepth
|
||||
}
|
||||
|
||||
// There are four scenarios that could happen here:
|
||||
// 1) Private extended key -> Hardened child private extended key
|
||||
// 2) Private extended key -> Non-hardened child private extended key
|
||||
// 3) Public extended key -> Non-hardened child public extended key
|
||||
// 4) Public extended key -> Hardened child public extended key (INVALID!)
|
||||
|
||||
// Case #4 is invalid, so error out early.
|
||||
// A hardened child extended key may not be created from a public
|
||||
// extended key.
|
||||
isChildHardened := i >= HardenedKeyStart
|
||||
if !k.isPrivate && isChildHardened {
|
||||
return nil, ErrDeriveHardFromPublic
|
||||
}
|
||||
|
||||
// The data used to derive the child key depends on whether or not the
|
||||
// child is hardened per [BIP32].
|
||||
//
|
||||
// For hardened children:
|
||||
// 0x00 || ser256(parentKey) || ser32(i)
|
||||
//
|
||||
// For normal children:
|
||||
// serP(parentPubKey) || ser32(i)
|
||||
keyLen := 33
|
||||
data := make([]byte, keyLen+4)
|
||||
if isChildHardened {
|
||||
// Case #1.
|
||||
// When the child is a hardened child, the key is known to be a
|
||||
// private key due to the above early return. Pad it with a
|
||||
// leading zero as required by [BIP32] for deriving the child.
|
||||
// Additionally, right align it if it's shorter than 32 bytes.
|
||||
offset := 33 - len(k.key)
|
||||
copy(data[offset:], k.key)
|
||||
} else {
|
||||
// Case #2 or #3.
|
||||
// This is either a public or private extended key, but in
|
||||
// either case, the data which is used to derive the child key
|
||||
// starts with the secp256k1 compressed public key bytes.
|
||||
copy(data, k.pubKeyBytes())
|
||||
}
|
||||
binary.BigEndian.PutUint32(data[keyLen:], i)
|
||||
|
||||
// Take the HMAC-SHA512 of the current key's chain code and the derived
|
||||
// data:
|
||||
// I = HMAC-SHA512(Key = chainCode, Data = data)
|
||||
hmac512 := hmac.New(sha512.New, k.chainCode)
|
||||
_, _ = hmac512.Write(data)
|
||||
ilr := hmac512.Sum(nil)
|
||||
|
||||
// Split "I" into two 32-byte sequences Il and Ir where:
|
||||
// Il = intermediate key used to derive the child
|
||||
// Ir = child chain code
|
||||
il := ilr[:len(ilr)/2]
|
||||
childChainCode := ilr[len(ilr)/2:]
|
||||
|
||||
// Both derived public or private keys rely on treating the left 32-byte
|
||||
// sequence calculated above (Il) as a 256-bit integer that must be
|
||||
// within the valid range for a secp256k1 private key. There is a small
|
||||
// chance (< 1 in 2^127) this condition will not hold, and in that case,
|
||||
// a child extended key can't be created for this index and the caller
|
||||
// should simply increment to the next index.
|
||||
ilNum := new(big.Int).SetBytes(il)
|
||||
if ilNum.Cmp(btcec.S256().N) >= 0 || ilNum.Sign() == 0 {
|
||||
return nil, ErrInvalidChild
|
||||
}
|
||||
|
||||
// The algorithm used to derive the child key depends on whether or not
|
||||
// a private or public child is being derived.
|
||||
//
|
||||
// For private children:
|
||||
// childKey = parse256(Il) + parentKey
|
||||
//
|
||||
// For public children:
|
||||
// childKey = serP(point(parse256(Il)) + parentKey)
|
||||
var isPrivate bool
|
||||
var childKey []byte
|
||||
if k.isPrivate {
|
||||
// Case #1 or #2.
|
||||
// Add the parent private key to the intermediate private key to
|
||||
// derive the final child key.
|
||||
//
|
||||
// childKey = parse256(Il) + parentKey
|
||||
keyNum := new(big.Int).SetBytes(k.key)
|
||||
ilNum.Add(ilNum, keyNum)
|
||||
ilNum.Mod(ilNum, btcec.S256().N)
|
||||
childKey = ilNum.Bytes()
|
||||
isPrivate = true
|
||||
} else {
|
||||
// Case #3.
|
||||
// Calculate the corresponding intermediate public key for
|
||||
// intermediate private key.
|
||||
ilx, ily := btcec.S256().ScalarBaseMult(il)
|
||||
if ilx.Sign() == 0 || ily.Sign() == 0 {
|
||||
return nil, ErrInvalidChild
|
||||
}
|
||||
|
||||
// Convert the serialized compressed parent public key into X
|
||||
// and Y coordinates so it can be added to the intermediate
|
||||
// public key.
|
||||
pubKey, err := btcec.ParsePubKey(k.key, btcec.S256())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Add the intermediate public key to the parent public key to
|
||||
// derive the final child key.
|
||||
//
|
||||
// childKey = serP(point(parse256(Il)) + parentKey)
|
||||
childX, childY := btcec.S256().Add(ilx, ily, pubKey.X, pubKey.Y)
|
||||
pk := btcec.PublicKey{Curve: btcec.S256(), X: childX, Y: childY}
|
||||
childKey = pk.SerializeCompressed()
|
||||
}
|
||||
|
||||
// The fingerprint of the parent for the derived child is the first 4
|
||||
// bytes of the RIPEMD160(SHA256(parentPubKey)).
|
||||
parentFP := btcutil.Hash160(k.pubKeyBytes())[:4]
|
||||
return NewExtendedKey(k.version, childKey, childChainCode, parentFP,
|
||||
k.depth+1, i, isPrivate), nil
|
||||
}
|
||||
|
||||
// IsAffectedByIssue172 returns whether this key was affected by the BIP-32 issue in the Child
|
||||
// method (since renamed to DeriveNonStandard).
|
||||
func (k *ExtendedKey) IsAffectedByIssue172() bool {
|
||||
return len(k.key) < 32
|
||||
}
|
||||
|
||||
// Deprecated: DeriveNonStandard is a non-standard derivation that is affected by issue #172.
|
||||
// 1-of-256 hardened derivations will be wrong. See note in the Derive method
|
||||
// and IsAffectedByIssue172.
|
||||
func (k *ExtendedKey) DeriveNonStandard(i uint32) (*ExtendedKey, error) {
|
||||
if k.depth == maxUint8 {
|
||||
return nil, ErrDeriveBeyondMaxDepth
|
||||
}
|
||||
|
||||
isChildHardened := i >= HardenedKeyStart
|
||||
if !k.isPrivate && isChildHardened {
|
||||
return nil, ErrDeriveHardFromPublic
|
||||
}
|
||||
|
||||
keyLen := 33
|
||||
data := make([]byte, keyLen+4)
|
||||
if isChildHardened {
|
||||
copy(data[1:], k.key)
|
||||
} else {
|
||||
copy(data, k.pubKeyBytes())
|
||||
}
|
||||
binary.BigEndian.PutUint32(data[keyLen:], i)
|
||||
|
||||
hmac512 := hmac.New(sha512.New, k.chainCode)
|
||||
_, _ = hmac512.Write(data)
|
||||
ilr := hmac512.Sum(nil)
|
||||
|
||||
il := ilr[:len(ilr)/2]
|
||||
childChainCode := ilr[len(ilr)/2:]
|
||||
|
||||
ilNum := new(big.Int).SetBytes(il)
|
||||
if ilNum.Cmp(btcec.S256().N) >= 0 || ilNum.Sign() == 0 {
|
||||
return nil, ErrInvalidChild
|
||||
}
|
||||
|
||||
var isPrivate bool
|
||||
var childKey []byte
|
||||
if k.isPrivate {
|
||||
keyNum := new(big.Int).SetBytes(k.key)
|
||||
ilNum.Add(ilNum, keyNum)
|
||||
ilNum.Mod(ilNum, btcec.S256().N)
|
||||
childKey = ilNum.Bytes()
|
||||
isPrivate = true
|
||||
} else {
|
||||
ilx, ily := btcec.S256().ScalarBaseMult(il)
|
||||
if ilx.Sign() == 0 || ily.Sign() == 0 {
|
||||
return nil, ErrInvalidChild
|
||||
}
|
||||
|
||||
pubKey, err := btcec.ParsePubKey(k.key, btcec.S256())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
childX, childY := btcec.S256().Add(ilx, ily, pubKey.X, pubKey.Y)
|
||||
pk := btcec.PublicKey{Curve: btcec.S256(), X: childX, Y: childY}
|
||||
childKey = pk.SerializeCompressed()
|
||||
}
|
||||
|
||||
parentFP := btcutil.Hash160(k.pubKeyBytes())[:4]
|
||||
return NewExtendedKey(k.version, childKey, childChainCode, parentFP,
|
||||
k.depth+1, i, isPrivate), nil
|
||||
}
|
||||
|
||||
// ChildIndex returns the index at which the child extended key was derived.
|
||||
//
|
||||
// Extended keys with ChildNum value between 0 and 2^31-1 are normal child
|
||||
// keys, and those with a value between 2^31 and 2^32-1 are hardened keys.
|
||||
func (k *ExtendedKey) ChildIndex() uint32 {
|
||||
return k.childNum
|
||||
}
|
||||
|
||||
// Neuter returns a new extended public key from this extended private key. The
|
||||
// same extended key will be returned unaltered if it is already an extended
|
||||
// public key.
|
||||
//
|
||||
// As the name implies, an extended public key does not have access to the
|
||||
// private key, so it is not capable of signing transactions or deriving
|
||||
// child extended private keys. However, it is capable of deriving further
|
||||
// child extended public keys.
|
||||
func (k *ExtendedKey) Neuter() (*ExtendedKey, error) {
|
||||
// Already an extended public key.
|
||||
if !k.isPrivate {
|
||||
return k, nil
|
||||
}
|
||||
|
||||
// Get the associated public extended key version bytes.
|
||||
version, err := chaincfg.HDPrivateKeyToPublicKeyID(k.version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert it to an extended public key. The key for the new extended
|
||||
// key will simply be the pubkey of the current extended private key.
|
||||
//
|
||||
// This is the function N((k,c)) -> (K, c) from [BIP32].
|
||||
return NewExtendedKey(version, k.pubKeyBytes(), k.chainCode, k.parentFP,
|
||||
k.depth, k.childNum, false), nil
|
||||
}
|
||||
|
||||
// CloneWithVersion returns a new extended key cloned from this extended key,
|
||||
// but using the provided HD version bytes. The version must be a private HD
|
||||
// key ID for an extended private key, and a public HD key ID for an extended
|
||||
// public key.
|
||||
//
|
||||
// This method creates a new copy and therefore does not mutate the original
|
||||
// extended key instance.
|
||||
//
|
||||
// Unlike Neuter(), this does NOT convert an extended private key to an
|
||||
// extended public key. It is particularly useful for converting between
|
||||
// standard BIP0032 extended keys (serializable to xprv/xpub) and keys based
|
||||
// on the SLIP132 standard (serializable to yprv/ypub, zprv/zpub, etc.).
|
||||
//
|
||||
// References:
|
||||
// [SLIP132]: SLIP-0132 - Registered HD version bytes for BIP-0032
|
||||
// https://github.com/satoshilabs/slips/blob/master/slip-0132.md
|
||||
func (k *ExtendedKey) CloneWithVersion(version []byte) (*ExtendedKey, error) {
|
||||
if len(version) != 4 {
|
||||
// TODO: The semantically correct error to return here is
|
||||
// ErrInvalidHDKeyID (introduced in btcsuite/btcd#1617). Update the
|
||||
// error type once available in a stable btcd / chaincfg release.
|
||||
return nil, chaincfg.ErrUnknownHDKeyID
|
||||
}
|
||||
|
||||
// Initialize a new extended key instance with the same fields as the
|
||||
// current extended private/public key and the provided HD version bytes.
|
||||
return NewExtendedKey(version, k.key, k.chainCode, k.parentFP,
|
||||
k.depth, k.childNum, k.isPrivate), nil
|
||||
}
|
||||
|
||||
// ECPubKey converts the extended key to a btcec public key and returns it.
|
||||
func (k *ExtendedKey) ECPubKey() (*btcec.PublicKey, error) {
|
||||
return btcec.ParsePubKey(k.pubKeyBytes(), btcec.S256())
|
||||
}
|
||||
|
||||
// ECPrivKey converts the extended key to a btcec private key and returns it.
|
||||
// As you might imagine this is only possible if the extended key is a private
|
||||
// extended key (as determined by the IsPrivate function). The ErrNotPrivExtKey
|
||||
// error will be returned if this function is called on a public extended key.
|
||||
func (k *ExtendedKey) ECPrivKey() (*btcec.PrivateKey, error) {
|
||||
if !k.isPrivate {
|
||||
return nil, ErrNotPrivExtKey
|
||||
}
|
||||
|
||||
privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), k.key)
|
||||
return privKey, nil
|
||||
}
|
||||
|
||||
// Address converts the extended key to a standard bitcoin pay-to-pubkey-hash
|
||||
// address for the passed network.
|
||||
func (k *ExtendedKey) Address(net *chaincfg.Params) (*btcutil.AddressPubKeyHash, error) {
|
||||
pkHash := btcutil.Hash160(k.pubKeyBytes())
|
||||
return btcutil.NewAddressPubKeyHash(pkHash, net)
|
||||
}
|
||||
|
||||
// paddedAppend appends the src byte slice to dst, returning the new slice.
|
||||
// If the length of the source is smaller than the passed size, leading zero
|
||||
// bytes are appended to the dst slice before appending src.
|
||||
func paddedAppend(size uint, dst, src []byte) []byte {
|
||||
for i := 0; i < int(size)-len(src); i++ {
|
||||
dst = append(dst, 0)
|
||||
}
|
||||
return append(dst, src...)
|
||||
}
|
||||
|
||||
// String returns the extended key as a human-readable base58-encoded string.
|
||||
func (k *ExtendedKey) String() string {
|
||||
if len(k.key) == 0 {
|
||||
return "zeroed extended key"
|
||||
}
|
||||
|
||||
var childNumBytes [4]byte
|
||||
binary.BigEndian.PutUint32(childNumBytes[:], k.childNum)
|
||||
|
||||
// The serialized format is:
|
||||
// version (4) || depth (1) || parent fingerprint (4) ||
|
||||
// child num (4) || chain code (32) || key data (33) || checksum (4)
|
||||
serializedBytes := make([]byte, 0, serializedKeyLen+4)
|
||||
serializedBytes = append(serializedBytes, k.version...)
|
||||
serializedBytes = append(serializedBytes, k.depth)
|
||||
serializedBytes = append(serializedBytes, k.parentFP...)
|
||||
serializedBytes = append(serializedBytes, childNumBytes[:]...)
|
||||
serializedBytes = append(serializedBytes, k.chainCode...)
|
||||
if k.isPrivate {
|
||||
serializedBytes = append(serializedBytes, 0x00)
|
||||
serializedBytes = paddedAppend(32, serializedBytes, k.key)
|
||||
} else {
|
||||
serializedBytes = append(serializedBytes, k.pubKeyBytes()...)
|
||||
}
|
||||
|
||||
checkSum := chainhash.DoubleHashB(serializedBytes)[:4]
|
||||
serializedBytes = append(serializedBytes, checkSum...)
|
||||
return base58.Encode(serializedBytes)
|
||||
}
|
||||
|
||||
// IsForNet returns whether or not the extended key is associated with the
|
||||
// passed bitcoin network.
|
||||
func (k *ExtendedKey) IsForNet(net *chaincfg.Params) bool {
|
||||
return bytes.Equal(k.version, net.HDPrivateKeyID[:]) ||
|
||||
bytes.Equal(k.version, net.HDPublicKeyID[:])
|
||||
}
|
||||
|
||||
// SetNet associates the extended key, and any child keys yet to be derived from
|
||||
// it, with the passed network.
|
||||
func (k *ExtendedKey) SetNet(net *chaincfg.Params) {
|
||||
if k.isPrivate {
|
||||
k.version = net.HDPrivateKeyID[:]
|
||||
} else {
|
||||
k.version = net.HDPublicKeyID[:]
|
||||
}
|
||||
}
|
||||
|
||||
// zero sets all bytes in the passed slice to zero. This is used to
|
||||
// explicitly clear private key material from memory.
|
||||
func zero(b []byte) {
|
||||
lenb := len(b)
|
||||
for i := 0; i < lenb; i++ {
|
||||
b[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Zero manually clears all fields and bytes in the extended key. This can be
|
||||
// used to explicitly clear key material from memory for enhanced security
|
||||
// against memory scraping. This function only clears this particular key and
|
||||
// not any children that have already been derived.
|
||||
func (k *ExtendedKey) Zero() {
|
||||
zero(k.key)
|
||||
zero(k.pubKey)
|
||||
zero(k.chainCode)
|
||||
zero(k.parentFP)
|
||||
k.version = nil
|
||||
k.key = nil
|
||||
k.depth = 0
|
||||
k.childNum = 0
|
||||
k.isPrivate = false
|
||||
}
|
||||
|
||||
// NewMaster creates a new master node for use in creating a hierarchical
|
||||
// deterministic key chain. The seed must be between 128 and 512 bits and
|
||||
// should be generated by a cryptographically secure random generation source.
|
||||
//
|
||||
// NOTE: There is an extremely small chance (< 1 in 2^127) the provided seed
|
||||
// will derive to an unusable secret key. The ErrUnusableSeed error will be
|
||||
// returned if this should occur, so the caller must check for it and generate a
|
||||
// new seed accordingly.
|
||||
func NewMaster(seed []byte, net *chaincfg.Params) (*ExtendedKey, error) {
|
||||
// Per [BIP32], the seed must be in range [MinSeedBytes, MaxSeedBytes].
|
||||
if len(seed) < MinSeedBytes || len(seed) > MaxSeedBytes {
|
||||
return nil, ErrInvalidSeedLen
|
||||
}
|
||||
|
||||
// First take the HMAC-SHA512 of the master key and the seed data:
|
||||
// I = HMAC-SHA512(Key = "Bitcoin seed", Data = S)
|
||||
hmac512 := hmac.New(sha512.New, masterKey)
|
||||
_, _ = hmac512.Write(seed)
|
||||
lr := hmac512.Sum(nil)
|
||||
|
||||
// Split "I" into two 32-byte sequences Il and Ir where:
|
||||
// Il = master secret key
|
||||
// Ir = master chain code
|
||||
secretKey := lr[:len(lr)/2]
|
||||
chainCode := lr[len(lr)/2:]
|
||||
|
||||
// Ensure the key is usable.
|
||||
secretKeyNum := new(big.Int).SetBytes(secretKey)
|
||||
if secretKeyNum.Cmp(btcec.S256().N) >= 0 || secretKeyNum.Sign() == 0 {
|
||||
return nil, ErrUnusableSeed
|
||||
}
|
||||
|
||||
parentFP := []byte{0x00, 0x00, 0x00, 0x00}
|
||||
return NewExtendedKey(net.HDPrivateKeyID[:], secretKey, chainCode,
|
||||
parentFP, 0, 0, true), nil
|
||||
}
|
||||
|
||||
// NewKeyFromString returns a new extended key instance from a base58-encoded
|
||||
// extended key.
|
||||
func NewKeyFromString(key string) (*ExtendedKey, error) {
|
||||
// The base58-decoded extended key must consist of a serialized payload
|
||||
// plus an additional 4 bytes for the checksum.
|
||||
decoded := base58.Decode(key)
|
||||
if len(decoded) != serializedKeyLen+4 {
|
||||
return nil, ErrInvalidKeyLen
|
||||
}
|
||||
|
||||
// The serialized format is:
|
||||
// version (4) || depth (1) || parent fingerprint (4) ||
|
||||
// child num (4) || chain code (32) || key data (33) || checksum (4)
|
||||
|
||||
// Split the payload and checksum up and ensure the checksum matches.
|
||||
payload := decoded[:len(decoded)-4]
|
||||
checkSum := decoded[len(decoded)-4:]
|
||||
expectedCheckSum := chainhash.DoubleHashB(payload)[:4]
|
||||
if !bytes.Equal(checkSum, expectedCheckSum) {
|
||||
return nil, ErrBadChecksum
|
||||
}
|
||||
|
||||
// Deserialize each of the payload fields.
|
||||
version := payload[:4]
|
||||
depth := payload[4:5][0]
|
||||
parentFP := payload[5:9]
|
||||
childNum := binary.BigEndian.Uint32(payload[9:13])
|
||||
chainCode := payload[13:45]
|
||||
keyData := payload[45:78]
|
||||
|
||||
// The key data is a private key if it starts with 0x00. Serialized
|
||||
// compressed pubkeys either start with 0x02 or 0x03.
|
||||
isPrivate := keyData[0] == 0x00
|
||||
if isPrivate {
|
||||
// Ensure the private key is valid. It must be within the range
|
||||
// of the order of the secp256k1 curve and not be 0.
|
||||
keyData = keyData[1:]
|
||||
keyNum := new(big.Int).SetBytes(keyData)
|
||||
if keyNum.Cmp(btcec.S256().N) >= 0 || keyNum.Sign() == 0 {
|
||||
return nil, ErrUnusableSeed
|
||||
}
|
||||
} else {
|
||||
// Ensure the public key parses correctly and is actually on the
|
||||
// secp256k1 curve.
|
||||
_, err := btcec.ParsePubKey(keyData, btcec.S256())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return NewExtendedKey(version, keyData, chainCode, parentFP, depth,
|
||||
childNum, isPrivate), nil
|
||||
}
|
||||
|
||||
// GenerateSeed returns a cryptographically secure random seed that can be used
|
||||
// as the input for the NewMaster function to generate a new master node.
|
||||
//
|
||||
// The length is in bytes and it must be between 16 and 64 (128 to 512 bits).
|
||||
// The recommended length is 32 (256 bits) as defined by the RecommendedSeedLen
|
||||
// constant.
|
||||
func GenerateSeed(length uint8) ([]byte, error) {
|
||||
// Per [BIP32], the seed must be in range [MinSeedBytes, MaxSeedBytes].
|
||||
if length < MinSeedBytes || length > MaxSeedBytes {
|
||||
return nil, ErrInvalidSeedLen
|
||||
}
|
||||
|
||||
buf := make([]byte, length)
|
||||
_, err := rand.Read(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
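
The Derive documentation above asks callers to skip an index that yields ErrInvalidChild and move on to the next one. A helper along those lines might look like the sketch below (not part of the commit; the function name is invented for illustration).

```go
package main

import (
	"errors"

	"github.com/btcsuite/btcd/btcutil/hdkeychain"
)

// deriveUsableChild derives the first usable child at or after index start,
// skipping the (astronomically rare) indices for which Derive reports
// ErrInvalidChild, as recommended by the Derive documentation. Callers are
// expected to pass a start index far enough below the uint32 maximum that
// wrap-around is not a concern.
func deriveUsableChild(parent *hdkeychain.ExtendedKey,
	start uint32) (*hdkeychain.ExtendedKey, uint32, error) {

	for i := start; ; i++ {
		child, err := parent.Derive(i)
		switch {
		case errors.Is(err, hdkeychain.ErrInvalidChild):
			// Unusable index; try the next one.
			continue
		case err != nil:
			return nil, 0, err
		}
		return child, i, nil
	}
}
```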
|
1209
btcutil/hdkeychain/extendedkey_test.go
Normal file
File diff suppressed because it is too large
20
btcutil/hdkeychain/test_coverage.txt
Normal file
@@ -0,0 +1,20 @@
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.String 100.00% (18/18)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.Zero 100.00% (9/9)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.pubKeyBytes 100.00% (7/7)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.Neuter 100.00% (6/6)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.ECPrivKey 100.00% (4/4)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go zero 100.00% (3/3)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.SetNet 100.00% (3/3)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.Address 100.00% (2/2)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go newExtendedKey 100.00% (1/1)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.IsPrivate 100.00% (1/1)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.ParentFingerprint 100.00% (1/1)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.ECPubKey 100.00% (1/1)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.IsForNet 100.00% (1/1)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go NewKeyFromString 95.83% (23/24)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go ExtendedKey.Child 91.67% (33/36)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go NewMaster 91.67% (11/12)
|
||||
github.com/conformal/btcutil/hdkeychain/extendedkey.go GenerateSeed 85.71% (6/7)
|
||||
github.com/conformal/btcutil/hdkeychain ----------------------------- 95.59% (130/136)
|
||||
|
147
btcutil/internal_test.go
Normal file
@@ -0,0 +1,147 @@
// Copyright (c) 2013-2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
This test file is part of the btcutil package rather than the
|
||||
btcutil_test package so it can bridge access to the internals to properly test
|
||||
cases which are either not possible or can't reliably be tested via the public
|
||||
interface. The functions are only exported while the tests are being run.
|
||||
*/
|
||||
|
||||
package btcutil
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/btcec"
|
||||
"github.com/btcsuite/btcd/btcutil/base58"
|
||||
"github.com/btcsuite/btcd/btcutil/bech32"
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
)
|
||||
|
||||
// SetBlockBytes sets the internal serialized block byte buffer to the passed
|
||||
// buffer. It is used to inject errors and is only available to the test
|
||||
// package.
|
||||
func (b *Block) SetBlockBytes(buf []byte) {
|
||||
b.serializedBlock = buf
|
||||
}
|
||||
|
||||
// TstAppDataDir makes the internal appDataDir function available to the test
|
||||
// package.
|
||||
func TstAppDataDir(goos, appName string, roaming bool) string {
|
||||
return appDataDir(goos, appName, roaming)
|
||||
}
|
||||
|
||||
// TstAddressPubKeyHash makes an AddressPubKeyHash, setting the
|
||||
// unexported fields with the parameters hash and netID.
|
||||
func TstAddressPubKeyHash(hash [ripemd160.Size]byte,
|
||||
netID byte) *AddressPubKeyHash {
|
||||
|
||||
return &AddressPubKeyHash{
|
||||
hash: hash,
|
||||
netID: netID,
|
||||
}
|
||||
}
|
||||
|
||||
// TstAddressScriptHash makes an AddressScriptHash, setting the
|
||||
// unexported fields with the parameters hash and netID.
|
||||
func TstAddressScriptHash(hash [ripemd160.Size]byte,
|
||||
netID byte) *AddressScriptHash {
|
||||
|
||||
return &AddressScriptHash{
|
||||
hash: hash,
|
||||
netID: netID,
|
||||
}
|
||||
}
|
||||
|
||||
// TstAddressWitnessPubKeyHash creates an AddressWitnessPubKeyHash, initiating
|
||||
// the fields as given.
|
||||
func TstAddressWitnessPubKeyHash(version byte, program [20]byte,
|
||||
hrp string) *AddressWitnessPubKeyHash {
|
||||
|
||||
return &AddressWitnessPubKeyHash{
|
||||
AddressSegWit{
|
||||
hrp: hrp,
|
||||
witnessVersion: version,
|
||||
witnessProgram: program[:],
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TstAddressWitnessScriptHash creates an AddressWitnessScriptHash, initiating
|
||||
// the fields as given.
|
||||
func TstAddressWitnessScriptHash(version byte, program [32]byte,
|
||||
hrp string) *AddressWitnessScriptHash {
|
||||
|
||||
return &AddressWitnessScriptHash{
|
||||
AddressSegWit{
|
||||
hrp: hrp,
|
||||
witnessVersion: version,
|
||||
witnessProgram: program[:],
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TstAddressTaproot creates an AddressTaproot, initiating the fields as given.
|
||||
func TstAddressTaproot(version byte, program [32]byte,
|
||||
hrp string) *AddressTaproot {
|
||||
|
||||
return &AddressTaproot{
|
||||
AddressSegWit{
|
||||
hrp: hrp,
|
||||
witnessVersion: version,
|
||||
witnessProgram: program[:],
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TstAddressPubKey makes an AddressPubKey, setting the unexported fields with
|
||||
// the parameters.
|
||||
func TstAddressPubKey(serializedPubKey []byte, pubKeyFormat PubKeyFormat,
|
||||
netID byte) *AddressPubKey {
|
||||
|
||||
pubKey, _ := btcec.ParsePubKey(serializedPubKey, btcec.S256())
|
||||
return &AddressPubKey{
|
||||
pubKeyFormat: pubKeyFormat,
|
||||
pubKey: pubKey,
|
||||
pubKeyHashID: netID,
|
||||
}
|
||||
}
|
||||
|
||||
// TstAddressSAddr returns the expected script address bytes for
|
||||
// P2PKH and P2SH bitcoin addresses.
|
||||
func TstAddressSAddr(addr string) []byte {
|
||||
decoded := base58.Decode(addr)
|
||||
return decoded[1 : 1+ripemd160.Size]
|
||||
}
|
||||
|
||||
// TstAddressSegwitSAddr returns the expected witness program bytes for
|
||||
// bech32 encoded P2WPKH and P2WSH bitcoin addresses.
|
||||
func TstAddressSegwitSAddr(addr string) []byte {
|
||||
_, data, err := bech32.Decode(addr)
|
||||
if err != nil {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
// First byte is version, rest is base 32 encoded data.
|
||||
data, err = bech32.ConvertBits(data[1:], 5, 8, false)
|
||||
if err != nil {
|
||||
return []byte{}
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// TstAddressTaprootSAddr returns the expected witness program bytes for a
|
||||
// bech32m encoded P2TR bitcoin address.
|
||||
func TstAddressTaprootSAddr(addr string) []byte {
|
||||
_, data, err := bech32.Decode(addr)
|
||||
if err != nil {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
// First byte is version, rest is base 32 encoded data.
|
||||
data, err = bech32.ConvertBits(data[1:], 5, 8, false)
|
||||
if err != nil {
|
||||
return []byte{}
|
||||
}
|
||||
return data
|
||||
}
|
18
btcutil/net.go
Normal file
@@ -0,0 +1,18 @@
// Copyright (c) 2013-2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
package btcutil
|
||||
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
// interfaceAddrs returns a list of the system's network interface addresses.
|
||||
// It is wrapped here so that we can substitute it for other functions when
|
||||
// building for systems that do not allow access to net.InterfaceAddrs().
|
||||
func interfaceAddrs() ([]net.Addr, error) {
|
||||
return net.InterfaceAddrs()
|
||||
}
|
19
btcutil/net_noop.go
Normal file
@@ -0,0 +1,19 @@
// Copyright (c) 2013-2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
package btcutil
|
||||
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
// interfaceAddrs returns a list of the system's network interface addresses.
|
||||
// It is wrapped here so that we can substitute it for a no-op function that
|
||||
// returns an empty slice of net.Addr when building for systems that do not
|
||||
// allow access to net.InterfaceAddrs().
|
||||
func interfaceAddrs() ([]net.Addr, error) {
|
||||
return []net.Addr{}, nil
|
||||
}
|
77
btcutil/psbt/bip32.go
Normal file
@@ -0,0 +1,77 @@
package psbt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// Bip32Derivation encapsulates the data for the input and output
|
||||
// Bip32Derivation key-value fields.
|
||||
//
|
||||
// TODO(roasbeef): use hdkeychain here instead?
|
||||
type Bip32Derivation struct {
|
||||
// PubKey is the raw pubkey serialized in compressed format.
|
||||
PubKey []byte
|
||||
|
||||
// MasterKeyFingerprint is the fingerprint of the master pubkey.
|
||||
MasterKeyFingerprint uint32
|
||||
|
||||
// Bip32Path is the BIP 32 path with child index as a distinct integer.
|
||||
Bip32Path []uint32
|
||||
}
|
||||
|
||||
// checkValid ensures that the PubKey in the Bip32Derivation struct is valid.
|
||||
func (pb *Bip32Derivation) checkValid() bool {
|
||||
return validatePubkey(pb.PubKey)
|
||||
}
|
||||
|
||||
// Bip32Sorter implements sort.Interface for the Bip32Derivation struct.
|
||||
type Bip32Sorter []*Bip32Derivation
|
||||
|
||||
func (s Bip32Sorter) Len() int { return len(s) }
|
||||
|
||||
func (s Bip32Sorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
func (s Bip32Sorter) Less(i, j int) bool {
|
||||
return bytes.Compare(s[i].PubKey, s[j].PubKey) < 0
|
||||
}
|
||||
|
||||
// readBip32Derivation deserializes a byte slice containing chunks of 4 byte
|
||||
// little endian encodings of uint32 values, the first of which is the
|
||||
// master key fingerprint and the remainder of which are the derivation path.
|
||||
func readBip32Derivation(path []byte) (uint32, []uint32, error) {
|
||||
|
||||
if len(path)%4 != 0 || len(path)/4-1 < 1 {
|
||||
return 0, nil, ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
masterKeyInt := binary.LittleEndian.Uint32(path[:4])
|
||||
|
||||
var paths []uint32
|
||||
for i := 4; i < len(path); i += 4 {
|
||||
paths = append(paths, binary.LittleEndian.Uint32(path[i:i+4]))
|
||||
}
|
||||
|
||||
return masterKeyInt, paths, nil
|
||||
}
|
||||
|
||||
// SerializeBIP32Derivation takes a master key fingerprint as defined in BIP32,
|
||||
// along with a path specified as a list of uint32 values, and returns a
|
||||
// bytestring specifying the derivation in the format required by BIP174:
|
||||
// master key fingerprint (4) || child index (4) || child index (4) || ....
|
||||
func SerializeBIP32Derivation(masterKeyFingerprint uint32,
|
||||
bip32Path []uint32) []byte {
|
||||
|
||||
var masterKeyBytes [4]byte
|
||||
binary.LittleEndian.PutUint32(masterKeyBytes[:], masterKeyFingerprint)
|
||||
|
||||
derivationPath := make([]byte, 0, 4+4*len(bip32Path))
|
||||
derivationPath = append(derivationPath, masterKeyBytes[:]...)
|
||||
for _, path := range bip32Path {
|
||||
var pathbytes [4]byte
|
||||
binary.LittleEndian.PutUint32(pathbytes[:], path)
|
||||
derivationPath = append(derivationPath, pathbytes[:]...)
|
||||
}
|
||||
|
||||
return derivationPath
|
||||
}
|
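For readers following along, here is a minimal usage sketch (not part of this commit) of the exported helper above; the fingerprint and path values are made-up placeholders:

```go
// Illustrative sketch only: serialize a BIP 174 derivation entry using
// SerializeBIP32Derivation. The fingerprint and path are hypothetical values.
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcutil/psbt"
)

func main() {
	fingerprint := uint32(0x01020304)           // hypothetical master key fingerprint
	path := []uint32{0x8000002C, 0x80000000, 0} // m/44'/0'/0 as raw child indexes

	// Layout: master key fingerprint (4) || child index (4) || ...
	raw := psbt.SerializeBIP32Derivation(fingerprint, path)
	fmt.Printf("serialized derivation: %x (%d bytes)\n", raw, len(raw))
}
```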
63
btcutil/psbt/creator.go
Normal file
@ -0,0 +1,63 @@
// Copyright (c) 2018 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package psbt

import (
	"github.com/btcsuite/btcd/wire"
)

// MinTxVersion is the lowest transaction version that we'll permit.
const MinTxVersion = 1

// New, on provision of an input and output 'skeleton' for the transaction,
// returns a new partially populated PSBT packet. The populated packet will
// include the unsigned transaction, and the set of known inputs and outputs
// contained within the unsigned transaction. The values of nLockTime,
// nSequence (per input) and the transaction version (which must be 1 or 2)
// must be specified here. Note that the default nSequence value is
// wire.MaxTxInSequenceNum. Referencing the PSBT BIP, this function serves the
// role of the Creator.
func New(inputs []*wire.OutPoint,
	outputs []*wire.TxOut, version int32, nLockTime uint32,
	nSequences []uint32) (*Packet, error) {

	// Create the new struct; the input and output lists will be empty, the
	// unsignedTx object must be constructed and serialized, and that
	// serialization should be entered as the only entry for the
	// globalKVPairs list.
	//
	// Ensure that the version of the transaction is greater than our
	// minimum allowed transaction version. There must be one sequence
	// number per input.
	if version < MinTxVersion || len(nSequences) != len(inputs) {
		return nil, ErrInvalidPsbtFormat
	}

	unsignedTx := wire.NewMsgTx(version)
	unsignedTx.LockTime = nLockTime
	for i, in := range inputs {
		unsignedTx.AddTxIn(&wire.TxIn{
			PreviousOutPoint: *in,
			Sequence:         nSequences[i],
		})
	}
	for _, out := range outputs {
		unsignedTx.AddTxOut(out)
	}

	// The input and output lists are empty, but there is a list of those
	// two lists, and each one must be of length matching the unsigned
	// transaction; the unknown list can be nil.
	pInputs := make([]PInput, len(unsignedTx.TxIn))
	pOutputs := make([]POutput, len(unsignedTx.TxOut))

	// This new Psbt is "raw" and contains no key-value fields, so sanity
	// checking with c.Cpsbt.SanityCheck() is not required.
	return &Packet{
		UnsignedTx: unsignedTx,
		Inputs:     pInputs,
		Outputs:    pOutputs,
		Unknowns:   nil,
	}, nil
}
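A minimal usage sketch of the Creator role implemented above (not part of this commit); the previous-output hash, amount and pkScript bytes are placeholders, not real values:

```go
// Illustrative sketch only: build an empty two-in/one-out PSBT with psbt.New.
package main

import (
	"log"

	"github.com/btcsuite/btcd/btcutil/psbt"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	var prevHash chainhash.Hash // zero hash, used purely as a placeholder

	inputs := []*wire.OutPoint{
		wire.NewOutPoint(&prevHash, 0),
		wire.NewOutPoint(&prevHash, 1),
	}
	outputs := []*wire.TxOut{
		// Amount and pkScript are placeholder bytes, not a valid script.
		wire.NewTxOut(100000, []byte{0x00, 0x14}),
	}
	nSequences := []uint32{
		wire.MaxTxInSequenceNum,
		wire.MaxTxInSequenceNum,
	}

	// Version 2, nLockTime 0, one sequence number per input.
	packet, err := psbt.New(inputs, outputs, 2, 0, nSequences)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created PSBT with %d inputs and %d outputs",
		len(packet.Inputs), len(packet.Outputs))
}
```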
81
btcutil/psbt/extractor.go
Normal file
@ -0,0 +1,81 @@
// Copyright (c) 2018 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package psbt

// The Extractor requires provision of a single PSBT
// in which all necessary signatures are encoded, and
// uses it to construct a fully valid network serialized
// transaction.

import (
	"bytes"

	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
)

// Extract takes a finalized psbt.Packet and outputs a finalized transaction
// instance. Note that if the PSBT is incomplete, then the error
// ErrIncompletePSBT will be returned. As the extracted transaction has been
// fully finalized, it will be ready for network broadcast once returned.
func Extract(p *Packet) (*wire.MsgTx, error) {
	// If the packet isn't complete, then we'll return an error as it
	// doesn't have all the required witness data.
	if !p.IsComplete() {
		return nil, ErrIncompletePSBT
	}

	// First, we'll make a copy of the underlying unsigned transaction (the
	// initial template) so we don't mutate it during our activities below.
	finalTx := p.UnsignedTx.Copy()

	// For each input, we'll now populate any relevant witness and
	// sigScript data.
	for i, tin := range finalTx.TxIn {
		// We'll grab the corresponding internal packet input which
		// matches this materialized transaction input and emplace that
		// final sigScript (if present).
		pInput := p.Inputs[i]
		if pInput.FinalScriptSig != nil {
			tin.SignatureScript = pInput.FinalScriptSig
		}

		// Similarly, if there's a final witness, then we'll also need
		// to extract that as well, parsing the lower-level transaction
		// encoding.
		if pInput.FinalScriptWitness != nil {
			// In order to set the witness, need to re-deserialize
			// the field as encoded within the PSBT packet. For
			// each input, the witness is encoded as a stack with
			// one or more items.
			witnessReader := bytes.NewReader(
				pInput.FinalScriptWitness,
			)

			// First we extract the number of witness elements
			// encoded in the above witnessReader.
			witCount, err := wire.ReadVarInt(witnessReader, 0)
			if err != nil {
				return nil, err
			}

			// Now that we know how many elements we'll need, we'll
			// construct a packing slice, then read out each element
			// (with a varint prefix) from the witnessReader.
			tin.Witness = make(wire.TxWitness, witCount)
			for j := uint64(0); j < witCount; j++ {
				wit, err := wire.ReadVarBytes(
					witnessReader, 0, txscript.MaxScriptSize, "witness",
				)
				if err != nil {
					return nil, err
				}
				tin.Witness[j] = wit
			}
		}
	}

	return finalTx, nil
}
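A minimal sketch of the Extractor step (not part of this commit), assuming a packet whose inputs are already finalized; the helper name is hypothetical:

```go
// Illustrative sketch only: extract and network-serialize the final
// transaction from a packet that is assumed to be fully finalized.
package psbtexample

import (
	"bytes"

	"github.com/btcsuite/btcd/btcutil/psbt"
)

func extractAndSerialize(packet *psbt.Packet) ([]byte, error) {
	// Extract returns ErrIncompletePSBT if any input is not yet final.
	finalTx, err := psbt.Extract(packet)
	if err != nil {
		return nil, err
	}

	// Serialize into the network encoding, ready for broadcast.
	var buf bytes.Buffer
	if err := finalTx.Serialize(&buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```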
462
btcutil/psbt/finalizer.go
Normal file
@ -0,0 +1,462 @@
// Copyright (c) 2018 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package psbt

// The Finalizer requires provision of a single PSBT input
// in which all necessary signatures are encoded, and
// uses it to construct valid final sigScript and scriptWitness
// fields.
// NOTE that p2sh (legacy) and p2wsh currently support only
// multisig and no other custom script.

import (
	"github.com/btcsuite/btcd/txscript"
)

// isFinalized considers this input finalized if at least one of the
// FinalScriptSig or FinalScriptWitness fields is filled (which only occurs in
// a successful call to Finalize*).
func isFinalized(p *Packet, inIndex int) bool {
	input := p.Inputs[inIndex]
	return input.FinalScriptSig != nil || input.FinalScriptWitness != nil
}

// isFinalizableWitnessInput returns true if the target input is a witness UTXO
// that can be finalized.
func isFinalizableWitnessInput(pInput *PInput) bool {
	pkScript := pInput.WitnessUtxo.PkScript

	switch {
	// If this is a native witness output, then we require the witness
	// script, but not a redeem script.
	case txscript.IsWitnessProgram(pkScript):
		if txscript.IsPayToWitnessScriptHash(pkScript) {
			if pInput.WitnessScript == nil ||
				pInput.RedeemScript != nil {
				return false
			}
		} else {
			// A P2WKH output on the other hand needs neither a
			// witnessScript nor a redeemScript.
			if pInput.WitnessScript != nil ||
				pInput.RedeemScript != nil {
				return false
			}
		}

	// For nested P2SH inputs, we verify that a redeem script is known.
	case txscript.IsPayToScriptHash(pkScript):
		if pInput.RedeemScript == nil {
			return false
		}

		// If this is a nested P2WSH input, then it must also have a
		// witness script, while we don't need one for nested P2WKH.
		if txscript.IsPayToWitnessScriptHash(pInput.RedeemScript) {
			if pInput.WitnessScript == nil {
				return false
			}
		} else if txscript.IsPayToWitnessPubKeyHash(pInput.RedeemScript) {
			if pInput.WitnessScript != nil {
				return false
			}
		} else {
			// unrecognized type
			return false
		}

	// If this isn't a nested P2SH output or a native witness output, then
	// we can't finalize this input as we don't understand it.
	default:
		return false
	}

	return true
}

// isFinalizableLegacyInput returns true if the passed input is a legacy input
// (non-witness) that can be finalized.
func isFinalizableLegacyInput(p *Packet, pInput *PInput, inIndex int) bool {
	// If the input has a witness, then it's invalid.
	if pInput.WitnessScript != nil {
		return false
	}

	// Otherwise, we'll verify that we only have a RedeemScript if the prev
	// output script is P2SH.
	outIndex := p.UnsignedTx.TxIn[inIndex].PreviousOutPoint.Index
	if txscript.IsPayToScriptHash(pInput.NonWitnessUtxo.TxOut[outIndex].PkScript) {
		if pInput.RedeemScript == nil {
			return false
		}
	} else {
		if pInput.RedeemScript != nil {
			return false
		}
	}

	return true
}

// isFinalizable checks whether the structure of the entry for the input of the
// psbt.Packet at index inIndex contains sufficient information to finalize
// this input.
func isFinalizable(p *Packet, inIndex int) bool {
	pInput := p.Inputs[inIndex]

	// The input cannot be finalized without any signatures.
	if pInput.PartialSigs == nil {
		return false
	}

	// For an input to be finalized, we'll need one of two possible
	// top-level UTXOs present. Each UTXO type has a distinct set of
	// requirements to be considered finalized.
	switch {

	// A witness input must be either native P2WSH or nested P2SH with all
	// relevant sigScript or witness data populated.
	case pInput.WitnessUtxo != nil:
		if !isFinalizableWitnessInput(&pInput) {
			return false
		}

	case pInput.NonWitnessUtxo != nil:
		if !isFinalizableLegacyInput(p, &pInput, inIndex) {
			return false
		}

	// If no known UTXO type is present at all, then we'll return false as
	// we need one of them.
	default:
		return false
	}

	return true
}

// MaybeFinalize attempts to finalize the input at index inIndex in the PSBT p,
// returning true with no error if it succeeds, OR if the input has already
// been finalized.
func MaybeFinalize(p *Packet, inIndex int) (bool, error) {
	if isFinalized(p, inIndex) {
		return true, nil
	}

	if !isFinalizable(p, inIndex) {
		return false, ErrNotFinalizable
	}

	if err := Finalize(p, inIndex); err != nil {
		return false, err
	}

	return true, nil
}

// MaybeFinalizeAll attempts to finalize all inputs of the psbt.Packet that are
// not already finalized, and returns an error if it fails to do so.
func MaybeFinalizeAll(p *Packet) error {

	for i := range p.UnsignedTx.TxIn {
		success, err := MaybeFinalize(p, i)
		if err != nil || !success {
			return err
		}
	}

	return nil
}

// Finalize assumes that the provided psbt.Packet struct has all partial
// signatures and redeem scripts/witness scripts already prepared for the
// specified input, and so removes all temporary data and replaces them with
// completed sigScript and witness fields, which are stored in key-types 07 and
// 08. The witness/non-witness utxo fields in the inputs (key-types 00 and 01)
// are left intact as they may be needed for validation (?). If there is any
// invalid or incomplete data, an error is returned.
func Finalize(p *Packet, inIndex int) error {
	pInput := p.Inputs[inIndex]

	// Depending on the UTXO type, we either attempt to finalize it as a
	// witness or legacy UTXO.
	switch {
	case pInput.WitnessUtxo != nil:
		if err := finalizeWitnessInput(p, inIndex); err != nil {
			return err
		}

	case pInput.NonWitnessUtxo != nil:
		if err := finalizeNonWitnessInput(p, inIndex); err != nil {
			return err
		}

	default:
		return ErrInvalidPsbtFormat
	}

	// Before returning we sanity check the PSBT to ensure we don't extract
	// an invalid transaction or produce an invalid intermediate state.
	if err := p.SanityCheck(); err != nil {
		return err
	}

	return nil
}

// checkFinalScriptSigWitness checks whether a given input in the psbt.Packet
// struct already has the fields 07 (FinalInScriptSig) or 08 (FinalInWitness).
// If so, it returns true. It does not modify the Psbt.
func checkFinalScriptSigWitness(p *Packet, inIndex int) bool {
	pInput := p.Inputs[inIndex]

	if pInput.FinalScriptSig != nil {
		return true
	}

	if pInput.FinalScriptWitness != nil {
		return true
	}

	return false
}

// finalizeNonWitnessInput attempts to create a PsbtInFinalScriptSig field for
// the input at index inIndex, and removes all other fields except for the UTXO
// field, for an input of type non-witness, or returns an error.
func finalizeNonWitnessInput(p *Packet, inIndex int) error {
	// If this input has already been finalized, then we'll return an error
	// as we can't proceed.
	if checkFinalScriptSigWitness(p, inIndex) {
		return ErrInputAlreadyFinalized
	}

	// Our goal here is to construct a sigScript given the pubkey,
	// signature (keytype 02), of which there might be multiple, and the
	// redeem script field (keytype 04) if present (note, it is not present
	// for p2pkh type inputs).
	var sigScript []byte

	pInput := p.Inputs[inIndex]
	containsRedeemScript := pInput.RedeemScript != nil

	var (
		pubKeys [][]byte
		sigs    [][]byte
	)
	for _, ps := range pInput.PartialSigs {
		pubKeys = append(pubKeys, ps.PubKey)

		sigOK := checkSigHashFlags(ps.Signature, &pInput)
		if !sigOK {
			return ErrInvalidSigHashFlags
		}

		sigs = append(sigs, ps.Signature)
	}

	// We have failed to identify at least 1 (sig, pub) pair in the PSBT,
	// which indicates it was not ready to be finalized. As a result, we
	// can't proceed.
	if len(sigs) < 1 || len(pubKeys) < 1 {
		return ErrNotFinalizable
	}

	// If this input doesn't need a redeem script (P2PKH), then we'll
	// construct a simple sigScript that's just the signature then the
	// pubkey (OP_CHECKSIG).
	var err error
	if !containsRedeemScript {
		// At this point, we should only have a single signature and
		// pubkey.
		if len(sigs) != 1 || len(pubKeys) != 1 {
			return ErrNotFinalizable
		}

		// In this case, our sigScript is just: <sig> <pubkey>.
		builder := txscript.NewScriptBuilder()
		builder.AddData(sigs[0]).AddData(pubKeys[0])
		sigScript, err = builder.Script()
		if err != nil {
			return err
		}
	} else {
		// This is assumed to be p2sh multisig. Given the redeemScript
		// and pubKeys we can decide in what order signatures must be
		// appended.
		orderedSigs, err := extractKeyOrderFromScript(
			pInput.RedeemScript, pubKeys, sigs,
		)
		if err != nil {
			return err
		}

		// At this point, we assume that this is a multi-sig input, so
		// we construct our sigScript which looks something like this
		// (mind the extra element for the extra multi-sig pop):
		//  * <nil> <sigs...> <redeemScript>
		//
		// TODO(waxwing): the below is specific to the multisig case.
		builder := txscript.NewScriptBuilder()
		builder.AddOp(txscript.OP_FALSE)
		for _, os := range orderedSigs {
			builder.AddData(os)
		}
		builder.AddData(pInput.RedeemScript)
		sigScript, err = builder.Script()
		if err != nil {
			return err
		}
	}

	// At this point, a sigScript has been constructed. Remove all fields
	// other than non-witness utxo (00) and finalscriptsig (07).
	newInput := NewPsbtInput(pInput.NonWitnessUtxo, nil)
	newInput.FinalScriptSig = sigScript

	// Overwrite the entry in the input list at the correct index. Note
	// that this removes all the other entries in the list for this input
	// index.
	p.Inputs[inIndex] = *newInput

	return nil
}

// finalizeWitnessInput attempts to create a PsbtInFinalScriptSig field and a
// PsbtInFinalScriptWitness field for the input at index inIndex, and removes
// all other fields except for the utxo field, for an input of type witness,
// or returns an error.
func finalizeWitnessInput(p *Packet, inIndex int) error {
	// If this input has already been finalized, then we'll return an error
	// as we can't proceed.
	if checkFinalScriptSigWitness(p, inIndex) {
		return ErrInputAlreadyFinalized
	}

	// Depending on the actual output type, we'll either populate a
	// serializedWitness or a witness as well as a sigScript.
	var (
		sigScript         []byte
		serializedWitness []byte
	)

	pInput := p.Inputs[inIndex]

	// First we'll validate and collect the pubkey+sig pairs from the set
	// of partial signatures.
	var (
		pubKeys [][]byte
		sigs    [][]byte
	)
	for _, ps := range pInput.PartialSigs {
		pubKeys = append(pubKeys, ps.PubKey)

		sigOK := checkSigHashFlags(ps.Signature, &pInput)
		if !sigOK {
			return ErrInvalidSigHashFlags
		}

		sigs = append(sigs, ps.Signature)
	}

	// If at this point, we don't have any pubkey+sig pairs, then we bail
	// as we can't proceed.
	if len(sigs) == 0 || len(pubKeys) == 0 {
		return ErrNotFinalizable
	}

	containsRedeemScript := pInput.RedeemScript != nil
	cointainsWitnessScript := pInput.WitnessScript != nil

	// If there's no redeem script, then we assume that this is a native
	// segwit input.
	var err error
	if !containsRedeemScript {
		// If we have only a single pubkey+sig pair, and no witness
		// script, then we assume this is a P2WKH input.
		if len(pubKeys) == 1 && len(sigs) == 1 &&
			!cointainsWitnessScript {

			serializedWitness, err = writePKHWitness(
				sigs[0], pubKeys[0],
			)
			if err != nil {
				return err
			}
		} else {
			// Otherwise, we must have a witnessScript field, so
			// we'll generate a valid multi-sig witness.
			//
			// NOTE: We tacitly assume multisig.
			//
			// TODO(roasbeef): need to add custom finalize for
			// non-multisig P2WSH outputs (HTLCs, delay outputs,
			// etc).
			if !cointainsWitnessScript {
				return ErrNotFinalizable
			}

			serializedWitness, err = getMultisigScriptWitness(
				pInput.WitnessScript, pubKeys, sigs,
			)
			if err != nil {
				return err
			}
		}
	} else {
		// Otherwise, we assume that this is a p2wsh multi-sig output,
		// which is nested in a p2sh, or a p2wkh nested in a p2sh.
		//
		// In this case, we'll take the redeem script (the witness
		// program in this case), and push it on the stack within the
		// sigScript.
		builder := txscript.NewScriptBuilder()
		builder.AddData(pInput.RedeemScript)
		sigScript, err = builder.Script()
		if err != nil {
			return err
		}

		// If we don't have a witness script, then we assume this is a
		// nested p2wkh output.
		if !cointainsWitnessScript {
			// Assumed p2sh-p2wkh: here the witness is just (sig,
			// pub) as for the p2pkh case.
			if len(sigs) != 1 || len(pubKeys) != 1 {
				return ErrNotFinalizable
			}

			serializedWitness, err = writePKHWitness(sigs[0], pubKeys[0])
			if err != nil {
				return err
			}

		} else {
			// Otherwise, we assume that this is a p2wsh multi-sig,
			// so we generate the proper witness.
			serializedWitness, err = getMultisigScriptWitness(
				pInput.WitnessScript, pubKeys, sigs,
			)
			if err != nil {
				return err
			}
		}
	}

	// At this point, a witness has been constructed, and a sigScript (if
	// nested; else it's []). Remove all fields other than witness utxo
	// (01) and finalscriptsig (07), finalscriptwitness (08).
	newInput := NewPsbtInput(nil, pInput.WitnessUtxo)
	if len(sigScript) > 0 {
		newInput.FinalScriptSig = sigScript
	}

	newInput.FinalScriptWitness = serializedWitness

	// Finally, we overwrite the entry in the input list at the correct
	// index.
	p.Inputs[inIndex] = *newInput
	return nil
}
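A minimal sketch tying the Finalizer helpers above together (not part of this commit); the helper name is hypothetical:

```go
// Illustrative sketch only: finalize every input that has enough signature
// data and report whether the packet is now complete.
package psbtexample

import (
	"github.com/btcsuite/btcd/btcutil/psbt"
)

func finalizeIfPossible(packet *psbt.Packet) (bool, error) {
	// MaybeFinalizeAll skips inputs that are already final and returns an
	// error (e.g. ErrNotFinalizable) if any remaining input cannot be
	// finalized yet.
	if err := psbt.MaybeFinalizeAll(packet); err != nil {
		return false, err
	}

	// IsComplete reports whether every input now carries a final
	// scriptSig or witness, i.e. whether Extract would succeed.
	return packet.IsComplete(), nil
}
```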
17
btcutil/psbt/go.mod
Normal file
@ -0,0 +1,17 @@
module github.com/btcsuite/btcd/btcutil/psbt

go 1.17

require (
	github.com/btcsuite/btcd v0.20.1-beta
	github.com/btcsuite/btcd/btcutil v0.0.0-20190425235716-9e5f4b9a998d
	github.com/davecgh/go-spew v1.1.1
)

require (
	github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
	github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect
	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect
)

replace github.com/btcsuite/btcd/btcutil => ../
39
btcutil/psbt/go.sum
Normal file
@ -0,0 +1,39 @@
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
361
btcutil/psbt/partial_input.go
Normal file
@ -0,0 +1,361 @@
package psbt

import (
	"bytes"
	"encoding/binary"
	"io"
	"sort"

	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
)

// PInput is a struct encapsulating all the data that can be attached to any
// specific input of the PSBT.
type PInput struct {
	NonWitnessUtxo     *wire.MsgTx
	WitnessUtxo        *wire.TxOut
	PartialSigs        []*PartialSig
	SighashType        txscript.SigHashType
	RedeemScript       []byte
	WitnessScript      []byte
	Bip32Derivation    []*Bip32Derivation
	FinalScriptSig     []byte
	FinalScriptWitness []byte
	Unknowns           []*Unknown
}

// NewPsbtInput creates an instance of PsbtInput given either a nonWitnessUtxo
// or a witnessUtxo.
//
// NOTE: Only one of the two arguments should be specified, with the other
// being `nil`; otherwise the created PsbtInput object will fail IsSane()
// checks and will not be usable.
func NewPsbtInput(nonWitnessUtxo *wire.MsgTx,
	witnessUtxo *wire.TxOut) *PInput {

	return &PInput{
		NonWitnessUtxo:     nonWitnessUtxo,
		WitnessUtxo:        witnessUtxo,
		PartialSigs:        []*PartialSig{},
		SighashType:        0,
		RedeemScript:       nil,
		WitnessScript:      nil,
		Bip32Derivation:    []*Bip32Derivation{},
		FinalScriptSig:     nil,
		FinalScriptWitness: nil,
		Unknowns:           nil,
	}
}

// IsSane returns true only if there are no conflicting values in the Psbt
// PInput. For segwit v0 no checks are currently implemented.
func (pi *PInput) IsSane() bool {

	// TODO(guggero): Implement sanity checks for segwit v1. For segwit v0
	// it is unsafe to only rely on the witness UTXO so we don't check that
	// only one is set anymore.
	// See https://github.com/bitcoin/bitcoin/pull/19215.

	return true
}

// deserialize attempts to deserialize a new PInput from the passed io.Reader.
func (pi *PInput) deserialize(r io.Reader) error {
	for {
		keyint, keydata, err := getKey(r)
		if err != nil {
			return err
		}
		if keyint == -1 {
			// Reached separator byte
			break
		}
		value, err := wire.ReadVarBytes(
			r, 0, MaxPsbtValueLength, "PSBT value",
		)
		if err != nil {
			return err
		}

		switch InputType(keyint) {

		case NonWitnessUtxoType:
			if pi.NonWitnessUtxo != nil {
				return ErrDuplicateKey
			}
			if keydata != nil {
				return ErrInvalidKeydata
			}
			tx := wire.NewMsgTx(2)

			err := tx.Deserialize(bytes.NewReader(value))
			if err != nil {
				return err
			}
			pi.NonWitnessUtxo = tx

		case WitnessUtxoType:
			if pi.WitnessUtxo != nil {
				return ErrDuplicateKey
			}
			if keydata != nil {
				return ErrInvalidKeydata
			}
			txout, err := readTxOut(value)
			if err != nil {
				return err
			}
			pi.WitnessUtxo = txout

		case PartialSigType:
			newPartialSig := PartialSig{
				PubKey:    keydata,
				Signature: value,
			}

			if !newPartialSig.checkValid() {
				return ErrInvalidPsbtFormat
			}

			// Duplicate keys are not allowed
			for _, x := range pi.PartialSigs {
				if bytes.Equal(x.PubKey, newPartialSig.PubKey) {
					return ErrDuplicateKey
				}
			}

			pi.PartialSigs = append(pi.PartialSigs, &newPartialSig)

		case SighashType:
			if pi.SighashType != 0 {
				return ErrDuplicateKey
			}
			if keydata != nil {
				return ErrInvalidKeydata
			}

			// Bounds check on value here since the sighash type must be a
			// 32-bit unsigned integer.
			if len(value) != 4 {
				return ErrInvalidKeydata
			}

			shtype := txscript.SigHashType(
				binary.LittleEndian.Uint32(value),
			)
			pi.SighashType = shtype

		case RedeemScriptInputType:
			if pi.RedeemScript != nil {
				return ErrDuplicateKey
			}
			if keydata != nil {
				return ErrInvalidKeydata
			}
			pi.RedeemScript = value

		case WitnessScriptInputType:
			if pi.WitnessScript != nil {
				return ErrDuplicateKey
			}
			if keydata != nil {
				return ErrInvalidKeydata
			}
			pi.WitnessScript = value

		case Bip32DerivationInputType:
			if !validatePubkey(keydata) {
				return ErrInvalidPsbtFormat
			}
			master, derivationPath, err := readBip32Derivation(value)
			if err != nil {
				return err
			}

			// Duplicate keys are not allowed
			for _, x := range pi.Bip32Derivation {
				if bytes.Equal(x.PubKey, keydata) {
					return ErrDuplicateKey
				}
			}

			pi.Bip32Derivation = append(
				pi.Bip32Derivation,
				&Bip32Derivation{
					PubKey:               keydata,
					MasterKeyFingerprint: master,
					Bip32Path:            derivationPath,
				},
			)

		case FinalScriptSigType:
			if pi.FinalScriptSig != nil {
				return ErrDuplicateKey
			}
			if keydata != nil {
				return ErrInvalidKeydata
			}

			pi.FinalScriptSig = value

		case FinalScriptWitnessType:
			if pi.FinalScriptWitness != nil {
				return ErrDuplicateKey
			}
			if keydata != nil {
				return ErrInvalidKeydata
			}

			pi.FinalScriptWitness = value

		default:
			// A fall through case for any proprietary types.
			keyintanddata := []byte{byte(keyint)}
			keyintanddata = append(keyintanddata, keydata...)
			newUnknown := &Unknown{
				Key:   keyintanddata,
				Value: value,
			}

			// Duplicate key+keydata are not allowed
			for _, x := range pi.Unknowns {
				if bytes.Equal(x.Key, newUnknown.Key) &&
					bytes.Equal(x.Value, newUnknown.Value) {
					return ErrDuplicateKey
				}
			}

			pi.Unknowns = append(pi.Unknowns, newUnknown)
		}
	}

	return nil
}

// serialize attempts to serialize the target PInput into the passed io.Writer.
func (pi *PInput) serialize(w io.Writer) error {

	if !pi.IsSane() {
		return ErrInvalidPsbtFormat
	}

	if pi.NonWitnessUtxo != nil {
		var buf bytes.Buffer
		err := pi.NonWitnessUtxo.Serialize(&buf)
		if err != nil {
			return err
		}

		err = serializeKVPairWithType(
			w, uint8(NonWitnessUtxoType), nil, buf.Bytes(),
		)
		if err != nil {
			return err
		}
	}
	if pi.WitnessUtxo != nil {
		var buf bytes.Buffer
		err := wire.WriteTxOut(&buf, 0, 0, pi.WitnessUtxo)
		if err != nil {
			return err
		}

		err = serializeKVPairWithType(
			w, uint8(WitnessUtxoType), nil, buf.Bytes(),
		)
		if err != nil {
			return err
		}
	}

	if pi.FinalScriptSig == nil && pi.FinalScriptWitness == nil {
		sort.Sort(PartialSigSorter(pi.PartialSigs))
		for _, ps := range pi.PartialSigs {
			err := serializeKVPairWithType(
				w, uint8(PartialSigType), ps.PubKey,
				ps.Signature,
			)
			if err != nil {
				return err
			}
		}

		if pi.SighashType != 0 {
			var shtBytes [4]byte
			binary.LittleEndian.PutUint32(
				shtBytes[:], uint32(pi.SighashType),
			)

			err := serializeKVPairWithType(
				w, uint8(SighashType), nil, shtBytes[:],
			)
			if err != nil {
				return err
			}
		}

		if pi.RedeemScript != nil {
			err := serializeKVPairWithType(
				w, uint8(RedeemScriptInputType), nil,
				pi.RedeemScript,
			)
			if err != nil {
				return err
			}
		}

		if pi.WitnessScript != nil {
			err := serializeKVPairWithType(
				w, uint8(WitnessScriptInputType), nil,
				pi.WitnessScript,
			)
			if err != nil {
				return err
			}
		}

		sort.Sort(Bip32Sorter(pi.Bip32Derivation))
		for _, kd := range pi.Bip32Derivation {
			err := serializeKVPairWithType(
				w,
				uint8(Bip32DerivationInputType), kd.PubKey,
				SerializeBIP32Derivation(
					kd.MasterKeyFingerprint, kd.Bip32Path,
				),
			)
			if err != nil {
				return err
			}
		}
	}

	if pi.FinalScriptSig != nil {
		err := serializeKVPairWithType(
			w, uint8(FinalScriptSigType), nil, pi.FinalScriptSig,
		)
		if err != nil {
			return err
		}
	}

	if pi.FinalScriptWitness != nil {
		err := serializeKVPairWithType(
			w, uint8(FinalScriptWitnessType), nil, pi.FinalScriptWitness,
		)
		if err != nil {
			return err
		}
	}

	// Unknown is a special case; we don't have a key type, only a key and
	// a value field
	for _, kv := range pi.Unknowns {
		err := serializeKVpair(w, kv.Key, kv.Value)
		if err != nil {
			return err
		}
	}

	return nil
}
139
btcutil/psbt/partial_output.go
Normal file
@ -0,0 +1,139 @@
package psbt

import (
	"bytes"
	"io"
	"sort"

	"github.com/btcsuite/btcd/wire"
)

// POutput is a struct encapsulating all the data that can be attached
// to any specific output of the PSBT.
type POutput struct {
	RedeemScript    []byte
	WitnessScript   []byte
	Bip32Derivation []*Bip32Derivation
}

// NewPsbtOutput creates an instance of PsbtOutput; the three parameters
// redeemScript, witnessScript and Bip32Derivation are all allowed to be
// `nil`.
func NewPsbtOutput(redeemScript []byte, witnessScript []byte,
	bip32Derivation []*Bip32Derivation) *POutput {
	return &POutput{
		RedeemScript:    redeemScript,
		WitnessScript:   witnessScript,
		Bip32Derivation: bip32Derivation,
	}
}

// deserialize attempts to recode a new POutput from the passed io.Reader.
func (po *POutput) deserialize(r io.Reader) error {
	for {
		keyint, keydata, err := getKey(r)
		if err != nil {
			return err
		}
		if keyint == -1 {
			// Reached separator byte
			break
		}

		value, err := wire.ReadVarBytes(
			r, 0, MaxPsbtValueLength, "PSBT value",
		)
		if err != nil {
			return err
		}

		switch OutputType(keyint) {

		case RedeemScriptOutputType:
			if po.RedeemScript != nil {
				return ErrDuplicateKey
			}
			if keydata != nil {
				return ErrInvalidKeydata
			}
			po.RedeemScript = value

		case WitnessScriptOutputType:
			if po.WitnessScript != nil {
				return ErrDuplicateKey
			}
			if keydata != nil {
				return ErrInvalidKeydata
			}
			po.WitnessScript = value

		case Bip32DerivationOutputType:
			if !validatePubkey(keydata) {
				return ErrInvalidKeydata
			}
			master, derivationPath, err := readBip32Derivation(value)
			if err != nil {
				return err
			}

			// Duplicate keys are not allowed
			for _, x := range po.Bip32Derivation {
				if bytes.Equal(x.PubKey, keydata) {
					return ErrDuplicateKey
				}
			}

			po.Bip32Derivation = append(po.Bip32Derivation,
				&Bip32Derivation{
					PubKey:               keydata,
					MasterKeyFingerprint: master,
					Bip32Path:            derivationPath,
				},
			)

		default:
			// Unknown type is allowed for inputs but not outputs.
			return ErrInvalidPsbtFormat
		}
	}

	return nil
}

// serialize attempts to write out the target POutput into the passed
// io.Writer.
func (po *POutput) serialize(w io.Writer) error {
	if po.RedeemScript != nil {
		err := serializeKVPairWithType(
			w, uint8(RedeemScriptOutputType), nil, po.RedeemScript,
		)
		if err != nil {
			return err
		}
	}
	if po.WitnessScript != nil {
		err := serializeKVPairWithType(
			w, uint8(WitnessScriptOutputType), nil, po.WitnessScript,
		)
		if err != nil {
			return err
		}
	}

	sort.Sort(Bip32Sorter(po.Bip32Derivation))
	for _, kd := range po.Bip32Derivation {
		err := serializeKVPairWithType(w,
			uint8(Bip32DerivationOutputType),
			kd.PubKey,
			SerializeBIP32Derivation(
				kd.MasterKeyFingerprint,
				kd.Bip32Path,
			),
		)
		if err != nil {
			return err
		}
	}

	return nil
}
52
btcutil/psbt/partialsig.go
Normal file
@ -0,0 +1,52 @@
package psbt

import (
	"bytes"

	"github.com/btcsuite/btcd/btcec"
)

// PartialSig encapsulates a (BTC public key, ECDSA signature)
// pair, note that the fields are stored as byte slices, not
// btcec.PublicKey or btcec.Signature (because manipulations will
// be with the former not the latter, here); compliance with consensus
// serialization is enforced with .checkValid()
type PartialSig struct {
	PubKey    []byte
	Signature []byte
}

// PartialSigSorter implements sort.Interface for PartialSig.
type PartialSigSorter []*PartialSig

func (s PartialSigSorter) Len() int { return len(s) }

func (s PartialSigSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

func (s PartialSigSorter) Less(i, j int) bool {
	return bytes.Compare(s[i].PubKey, s[j].PubKey) < 0
}

// validatePubkey checks if pubKey is *any* valid pubKey serialization in a
// Bitcoin context (compressed/uncompressed OK).
func validatePubkey(pubKey []byte) bool {
	_, err := btcec.ParsePubKey(pubKey, btcec.S256())
	return err == nil
}

// validateSignature checks that the passed byte slice is a valid DER-encoded
// ECDSA signature, including the sighash flag. It does *not* of course
// validate the signature against any message or public key.
func validateSignature(sig []byte) bool {
	_, err := btcec.ParseDERSignature(sig, btcec.S256())
	return err == nil
}

// checkValid checks that both the pubkey and sig are valid. See the methods
// (PartialSig, validatePubkey, validateSignature) for more details.
//
// TODO(waxwing): update for Schnorr will be needed here if/when that
// activates.
func (ps *PartialSig) checkValid() bool {
	return validatePubkey(ps.PubKey) && validateSignature(ps.Signature)
}
407
btcutil/psbt/psbt.go
Normal file
|
@ -0,0 +1,407 @@
|
|||
// Copyright (c) 2018 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package psbt is an implementation of Partially Signed Bitcoin
|
||||
// Transactions (PSBT). The format is defined in BIP 174:
|
||||
// https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
|
||||
package psbt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
|
||||
"io"
|
||||
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
)
|
||||
|
||||
// psbtMagicLength is the length of the magic bytes used to signal the start of
|
||||
// a serialized PSBT packet.
|
||||
const psbtMagicLength = 5
|
||||
|
||||
var (
|
||||
// psbtMagic is the separator
|
||||
psbtMagic = [psbtMagicLength]byte{0x70,
|
||||
0x73, 0x62, 0x74, 0xff, // = "psbt" + 0xff sep
|
||||
}
|
||||
)
|
||||
|
||||
// MaxPsbtValueLength is the size of the largest transaction serialization
|
||||
// that could be passed in a NonWitnessUtxo field. This is definitely
|
||||
//less than 4M.
|
||||
const MaxPsbtValueLength = 4000000
|
||||
|
||||
// MaxPsbtKeyLength is the length of the largest key that we'll successfully
|
||||
// deserialize from the wire. Anything more will return ErrInvalidKeydata.
|
||||
const MaxPsbtKeyLength = 10000
|
||||
|
||||
var (
|
||||
|
||||
// ErrInvalidPsbtFormat is a generic error for any situation in which a
|
||||
// provided Psbt serialization does not conform to the rules of BIP174.
|
||||
ErrInvalidPsbtFormat = errors.New("Invalid PSBT serialization format")
|
||||
|
||||
// ErrDuplicateKey indicates that a passed Psbt serialization is invalid
|
||||
// due to having the same key repeated in the same key-value pair.
|
||||
ErrDuplicateKey = errors.New("Invalid Psbt due to duplicate key")
|
||||
|
||||
// ErrInvalidKeydata indicates that a key-value pair in the PSBT
|
||||
// serialization contains data in the key which is not valid.
|
||||
ErrInvalidKeydata = errors.New("Invalid key data")
|
||||
|
||||
// ErrInvalidMagicBytes indicates that a passed Psbt serialization is invalid
|
||||
// due to having incorrect magic bytes.
|
||||
ErrInvalidMagicBytes = errors.New("Invalid Psbt due to incorrect magic bytes")
|
||||
|
||||
// ErrInvalidRawTxSigned indicates that the raw serialized transaction in the
|
||||
// global section of the passed Psbt serialization is invalid because it
|
||||
// contains scriptSigs/witnesses (i.e. is fully or partially signed), which
|
||||
// is not allowed by BIP174.
|
||||
ErrInvalidRawTxSigned = errors.New("Invalid Psbt, raw transaction must " +
|
||||
"be unsigned.")
|
||||
|
||||
// ErrInvalidPrevOutNonWitnessTransaction indicates that the transaction
|
||||
// hash (i.e. SHA256^2) of the fully serialized previous transaction
|
||||
// provided in the NonWitnessUtxo key-value field doesn't match the prevout
|
||||
// hash in the UnsignedTx field in the PSBT itself.
|
||||
ErrInvalidPrevOutNonWitnessTransaction = errors.New("Prevout hash does " +
|
||||
"not match the provided non-witness utxo serialization")
|
||||
|
||||
// ErrInvalidSignatureForInput indicates that the signature the user is
|
||||
// trying to append to the PSBT is invalid, either because it does
|
||||
// not correspond to the previous transaction hash, or redeem script,
|
||||
// or witness script.
|
||||
// NOTE this does not include ECDSA signature checking.
|
||||
ErrInvalidSignatureForInput = errors.New("Signature does not correspond " +
|
||||
"to this input")
|
||||
|
||||
// ErrInputAlreadyFinalized indicates that the PSBT passed to a Finalizer
|
||||
// already contains the finalized scriptSig or witness.
|
||||
ErrInputAlreadyFinalized = errors.New("Cannot finalize PSBT, finalized " +
|
||||
"scriptSig or scriptWitnes already exists")
|
||||
|
||||
// ErrIncompletePSBT indicates that the Extractor object
|
||||
// was unable to successfully extract the passed Psbt struct because
|
||||
// it is not complete
|
||||
ErrIncompletePSBT = errors.New("PSBT cannot be extracted as it is " +
|
||||
"incomplete")
|
||||
|
||||
// ErrNotFinalizable indicates that the PSBT struct does not have
|
||||
// sufficient data (e.g. signatures) for finalization
|
||||
ErrNotFinalizable = errors.New("PSBT is not finalizable")
|
||||
|
||||
// ErrInvalidSigHashFlags indicates that a signature added to the PSBT
|
||||
// uses Sighash flags that are not in accordance with the requirement
|
||||
// according to the entry in PsbtInSighashType, or otherwise not the
|
||||
// default value (SIGHASH_ALL)
|
||||
ErrInvalidSigHashFlags = errors.New("Invalid Sighash Flags")
|
||||
|
||||
// ErrUnsupportedScriptType indicates that the redeem script or
|
||||
// scriptwitness given is not supported by this codebase, or is otherwise
|
||||
// not valid.
|
||||
ErrUnsupportedScriptType = errors.New("Unsupported script type")
|
||||
)
|
||||
|
||||
// Unknown is a struct encapsulating a key-value pair for which the key type is
|
||||
// unknown by this package; these fields are allowed in both the 'Global' and
|
||||
// the 'Input' section of a PSBT.
|
||||
type Unknown struct {
|
||||
Key []byte
|
||||
Value []byte
|
||||
}
|
||||
|
||||
// Packet is the actual psbt repreesntation. It is a is a set of 1 + N + M
|
||||
// key-value pair lists, 1 global, defining the unsigned transaction structure
|
||||
// with N inputs and M outputs. These key-value pairs can contain scripts,
|
||||
// signatures, key derivations and other transaction-defining data.
|
||||
type Packet struct {
|
||||
// UnsignedTx is the decoded unsigned transaction for this PSBT.
|
||||
UnsignedTx *wire.MsgTx // Deserialization of unsigned tx
|
||||
|
||||
// Inputs contains all the information needed to properly sign this
|
||||
// target input within the above transaction.
|
||||
Inputs []PInput
|
||||
|
||||
// Outputs contains all information required to spend any outputs
|
||||
// produced by this PSBT.
|
||||
Outputs []POutput
|
||||
|
||||
// Unknowns are the set of custom types (global only) within this PSBT.
|
||||
Unknowns []Unknown
|
||||
}
|
||||
|
||||
// validateUnsignedTx returns true if the transaction is unsigned. Note that
|
||||
// more basic sanity requirements, such as the presence of inputs and outputs,
|
||||
// is implicitly checked in the call to MsgTx.Deserialize().
|
||||
func validateUnsignedTX(tx *wire.MsgTx) bool {
|
||||
for _, tin := range tx.TxIn {
|
||||
if len(tin.SignatureScript) != 0 || len(tin.Witness) != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// NewFromUnsignedTx creates a new Psbt struct, without any signatures (i.e.
|
||||
// only the global section is non-empty) using the passed unsigned transaction.
|
||||
func NewFromUnsignedTx(tx *wire.MsgTx) (*Packet, error) {
|
||||
|
||||
if !validateUnsignedTX(tx) {
|
||||
return nil, ErrInvalidRawTxSigned
|
||||
}
|
||||
|
||||
inSlice := make([]PInput, len(tx.TxIn))
|
||||
outSlice := make([]POutput, len(tx.TxOut))
|
||||
unknownSlice := make([]Unknown, 0)
|
||||
|
||||
retPsbt := Packet{
|
||||
UnsignedTx: tx,
|
||||
Inputs: inSlice,
|
||||
Outputs: outSlice,
|
||||
Unknowns: unknownSlice,
|
||||
}
|
||||
|
||||
return &retPsbt, nil
|
||||
}
|
||||
|
||||
// NewFromRawBytes returns a new instance of a Packet struct created by reading
|
||||
// from a byte slice. If the format is invalid, an error is returned. If the
|
||||
// argument b64 is true, the passed byte slice is decoded from base64 encoding
|
||||
// before processing.
|
||||
//
|
||||
// NOTE: To create a Packet from one's own data, rather than reading in a
|
||||
// serialization from a counterparty, one should use a psbt.New.
|
||||
func NewFromRawBytes(r io.Reader, b64 bool) (*Packet, error) {
|
||||
|
||||
// If the PSBT is encoded in bas64, then we'll create a new wrapper
|
||||
// reader that'll allow us to incrementally decode the contents of the
|
||||
// io.Reader.
|
||||
if b64 {
|
||||
based64EncodedReader := r
|
||||
r = base64.NewDecoder(base64.StdEncoding, based64EncodedReader)
|
||||
}
|
||||
|
||||
// The Packet struct does not store the fixed magic bytes, but they
|
||||
// must be present or the serialization must be explicitly rejected.
|
||||
var magic [5]byte
|
||||
if _, err := io.ReadFull(r, magic[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if magic != psbtMagic {
|
||||
return nil, ErrInvalidMagicBytes
|
||||
}
|
||||
|
||||
// Next we parse the GLOBAL section. There is currently only 1 known
|
||||
// key type, UnsignedTx. We insist this exists first; unknowns are
|
||||
// allowed, but only after.
|
||||
keyint, keydata, err := getKey(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if GlobalType(keyint) != UnsignedTxType || keydata != nil {
|
||||
return nil, ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
// Now that we've verified the global type is present, we'll decode it
|
||||
// into a proper unsigned transaction, and validate it.
|
||||
value, err := wire.ReadVarBytes(
|
||||
r, 0, MaxPsbtValueLength, "PSBT value",
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msgTx := wire.NewMsgTx(2)
|
||||
err = msgTx.Deserialize(bytes.NewReader(value))
|
||||
if err != nil {
|
||||
// If there are no inputs in this yet incomplete transaction,
|
||||
// the wire package still incorrectly assumes it's encoded in
|
||||
// the witness format. We can fix this by just trying the non-
|
||||
// witness encoding too. If that also fails, it's probably an
|
||||
// invalid transaction.
|
||||
msgTx = wire.NewMsgTx(2)
|
||||
err2 := msgTx.DeserializeNoWitness(bytes.NewReader(value))
|
||||
|
||||
// If the second attempt also failed, something else is wrong
|
||||
// and it probably makes more sense to return the original
|
||||
// error instead of the error from the workaround.
|
||||
if err2 != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if !validateUnsignedTX(msgTx) {
|
||||
return nil, ErrInvalidRawTxSigned
|
||||
}
|
||||
|
||||
// Next we parse any unknowns that may be present, making sure that we
|
||||
// break at the separator.
|
||||
var unknownSlice []Unknown
|
||||
for {
|
||||
keyint, keydata, err := getKey(r)
|
||||
if err != nil {
|
||||
return nil, ErrInvalidPsbtFormat
|
||||
}
|
||||
if keyint == -1 {
|
||||
break
|
||||
}
|
||||
|
||||
value, err := wire.ReadVarBytes(
|
||||
r, 0, MaxPsbtValueLength, "PSBT value",
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
keyintanddata := []byte{byte(keyint)}
|
||||
keyintanddata = append(keyintanddata, keydata...)
|
||||
|
||||
newUnknown := Unknown{
|
||||
Key: keyintanddata,
|
||||
Value: value,
|
||||
}
|
||||
unknownSlice = append(unknownSlice, newUnknown)
|
||||
}
|
||||
|
||||
// Next we parse the INPUT section.
|
||||
inSlice := make([]PInput, len(msgTx.TxIn))
|
||||
for i := range msgTx.TxIn {
|
||||
input := PInput{}
|
||||
err = input.deserialize(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
inSlice[i] = input
|
||||
}
|
||||
|
||||
// Next we parse the OUTPUT section.
|
||||
outSlice := make([]POutput, len(msgTx.TxOut))
|
||||
for i := range msgTx.TxOut {
|
||||
output := POutput{}
|
||||
err = output.deserialize(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
outSlice[i] = output
|
||||
}
|
||||
|
||||
// Populate the new Packet object
|
||||
newPsbt := Packet{
|
||||
UnsignedTx: msgTx,
|
||||
Inputs: inSlice,
|
||||
Outputs: outSlice,
|
||||
Unknowns: unknownSlice,
|
||||
}
|
||||
|
||||
// Extended sanity checking is applied here to make sure the
|
||||
// externally-passed Packet follows all the rules.
|
||||
if err = newPsbt.SanityCheck(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &newPsbt, nil
|
||||
}
|
||||
|
||||
// Serialize creates a binary serialization of the referenced Packet struct
|
||||
// with lexicographical ordering (by key) of the subsections.
|
||||
func (p *Packet) Serialize(w io.Writer) error {
|
||||
|
||||
// First we write out the precise set of magic bytes that identify a
|
||||
// valid PSBT transaction.
|
||||
if _, err := w.Write(psbtMagic[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Next we prep to write out the unsigned transaction by first
|
||||
// serializing it into an intermediate buffer.
|
||||
serializedTx := bytes.NewBuffer(
|
||||
make([]byte, 0, p.UnsignedTx.SerializeSize()),
|
||||
)
|
||||
if err := p.UnsignedTx.Serialize(serializedTx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Now that we have the serialized transaction, we'll write it out to
|
||||
// the proper global type.
|
||||
err := serializeKVPairWithType(
|
||||
w, uint8(UnsignedTxType), nil, serializedTx.Bytes(),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// With that our global section is done, so we'll write out the
|
||||
// separator.
|
||||
separator := []byte{0x00}
|
||||
if _, err := w.Write(separator); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, pInput := range p.Inputs {
|
||||
err := pInput.serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write(separator); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, pOutput := range p.Outputs {
|
||||
err := pOutput.serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write(separator); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// B64Encode returns the base64 encoding of the serialization of
|
||||
// the current PSBT, or an error if the encoding fails.
|
||||
func (p *Packet) B64Encode() (string, error) {
|
||||
var b bytes.Buffer
|
||||
if err := p.Serialize(&b); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return base64.StdEncoding.EncodeToString(b.Bytes()), nil
|
||||
}
|
||||
|
||||
// IsComplete returns true only if all of the inputs are
|
||||
// finalized; this is particularly important in that it decides
|
||||
// whether the final extraction to a network serialized signed
|
||||
// transaction will be possible.
|
||||
func (p *Packet) IsComplete() bool {
|
||||
for i := 0; i < len(p.UnsignedTx.TxIn); i++ {
|
||||
if !isFinalized(p, i) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
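
// exampleExtractIfComplete is a hypothetical sketch of how IsComplete is
// typically used to gate final extraction; it assumes the package's Extract
// helper, which produces the fully signed network-serializable transaction.
func exampleExtractIfComplete(p *Packet) (*wire.MsgTx, bool) {
	if !p.IsComplete() {
		// At least one input still lacks final script data.
		return nil, false
	}

	finalTx, err := Extract(p)
	if err != nil {
		return nil, false
	}

	return finalTx, true
}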
|
||||
|
||||
// SanityCheck checks conditions on a PSBT to ensure that it obeys the
|
||||
// rules of BIP174, and returns true if so, false if not.
|
||||
func (p *Packet) SanityCheck() error {
|
||||
|
||||
if !validateUnsignedTX(p.UnsignedTx) {
|
||||
return ErrInvalidRawTxSigned
|
||||
}
|
||||
|
||||
for _, tin := range p.Inputs {
|
||||
if !tin.IsSane() {
|
||||
return ErrInvalidPsbtFormat
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
1448
btcutil/psbt/psbt_test.go
Normal file
File diff suppressed because it is too large
155
btcutil/psbt/signer.go
Normal file
|
@ -0,0 +1,155 @@
|
|||
// Copyright (c) 2018 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package psbt
|
||||
|
||||
// signer encapsulates the role 'Signer' as specified in BIP174; it controls
|
||||
// the insertion of signatures; the Sign() function will attempt to insert
|
||||
// signatures using Updater.addPartialSignature, after first ensuring the Psbt
|
||||
// is in the correct state.
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
)
|
||||
|
||||
// SignOutcome is an enum-like value that expresses the outcome of a call to the
|
||||
// Sign method.
|
||||
type SignOutcome int
|
||||
|
||||
const (
|
||||
// SignSuccesful indicates that the partial signature was successfully
|
||||
// attached.
|
||||
SignSuccesful = 0
|
||||
|
||||
// SignFinalized indicates that this input is already finalized, so the provided
|
||||
// signature was *not* attached.
|
||||
SignFinalized = 1
|
||||
|
||||
// SignInvalid indicates that the provided signature data was not valid. In this case
|
||||
// an error will also be returned.
|
||||
SignInvalid = -1
|
||||
)
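
// exampleHandleSignOutcome is a hypothetical sketch of how a caller might
// branch on the SignOutcome returned by Updater.Sign (defined below); sig and
// pub are assumed to be produced by an external signer.
func exampleHandleSignOutcome(u *Updater, inIndex int, sig, pub []byte) error {
	outcome, err := u.Sign(inIndex, sig, pub, nil, nil)
	switch {
	case err != nil:
		// SignInvalid is always accompanied by a non-nil error.
		return err

	case outcome == SignFinalized:
		// The input was already finalized; the signature was ignored.
		return nil

	default:
		// SignSuccesful: the partial signature is now attached.
		return nil
	}
}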
|
||||
|
||||
// Sign allows the caller to sign a PSBT at a particular input; they
|
||||
// must provide a signature and a pubkey, both as byte slices; they can also
|
||||
// optionally provide both witnessScript and/or redeemScript, otherwise these
|
||||
// arguments must be set as nil (and in that case, they must already be present
|
||||
// in the PSBT if required for signing to succeed).
|
||||
//
|
||||
// This serves as a wrapper around Updater.addPartialSignature; it ensures that
|
||||
// the redeemScript and witnessScript are updated as needed (note that the
|
||||
// Updater is allowed to add redeemScripts and witnessScripts independently,
|
||||
// before signing), and ensures that the right form of utxo field
|
||||
// (NonWitnessUtxo or WitnessUtxo) is included in the input so that signature
|
||||
// insertion (and then finalization) can take place.
|
||||
func (u *Updater) Sign(inIndex int, sig []byte, pubKey []byte,
|
||||
redeemScript []byte, witnessScript []byte) (SignOutcome, error) {
|
||||
|
||||
if isFinalized(u.Upsbt, inIndex) {
|
||||
return SignFinalized, nil
|
||||
}
|
||||
|
||||
// Add the witnessScript to the PSBT in preparation. If it already
|
||||
// exists, it will be overwritten.
|
||||
if witnessScript != nil {
|
||||
err := u.AddInWitnessScript(witnessScript, inIndex)
|
||||
if err != nil {
|
||||
return SignInvalid, err
|
||||
}
|
||||
}
|
||||
|
||||
// Add the redeemScript to the PSBT in preparation. If it already
|
||||
// exists, it will be overwritten.
|
||||
if redeemScript != nil {
|
||||
err := u.AddInRedeemScript(redeemScript, inIndex)
|
||||
if err != nil {
|
||||
return SignInvalid, err
|
||||
}
|
||||
}
|
||||
|
||||
// At this point, the PSBT must have the requisite witnessScript or
|
||||
// redeemScript fields for signing to succeed.
|
||||
//
|
||||
// Case 1: if witnessScript is present, it must be of type witness;
|
||||
// if not, signature insertion will of course fail.
|
||||
switch {
|
||||
case u.Upsbt.Inputs[inIndex].WitnessScript != nil:
|
||||
if u.Upsbt.Inputs[inIndex].WitnessUtxo == nil {
|
||||
err := nonWitnessToWitness(u.Upsbt, inIndex)
|
||||
if err != nil {
|
||||
return SignInvalid, err
|
||||
}
|
||||
}
|
||||
|
||||
err := u.addPartialSignature(inIndex, sig, pubKey)
|
||||
if err != nil {
|
||||
return SignInvalid, err
|
||||
}
|
||||
|
||||
// Case 2: no witness script, only redeem script; can be legacy p2sh or
|
||||
// p2sh-wrapped p2wkh.
|
||||
case u.Upsbt.Inputs[inIndex].RedeemScript != nil:
|
||||
// We only need to decide if the input is witness, and we don't
|
||||
// rely on the witnessutxo/nonwitnessutxo in the PSBT, instead
|
||||
// we check the redeemScript content.
|
||||
if txscript.IsWitnessProgram(redeemScript) {
|
||||
if u.Upsbt.Inputs[inIndex].WitnessUtxo == nil {
|
||||
err := nonWitnessToWitness(u.Upsbt, inIndex)
|
||||
if err != nil {
|
||||
return SignInvalid, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If it is not a valid witness program, we here assume that
|
||||
// the provided WitnessUtxo/NonWitnessUtxo field was correct.
|
||||
err := u.addPartialSignature(inIndex, sig, pubKey)
|
||||
if err != nil {
|
||||
return SignInvalid, err
|
||||
}
|
||||
|
||||
// Case 3: Neither script was provided. This only works for native
// p2wkh, or non-segwit non-p2sh inputs. To check if it's segwit, check
// the scriptPubKey of the output.
|
||||
default:
|
||||
if u.Upsbt.Inputs[inIndex].WitnessUtxo == nil {
|
||||
outIndex := u.Upsbt.UnsignedTx.TxIn[inIndex].PreviousOutPoint.Index
|
||||
script := u.Upsbt.Inputs[inIndex].NonWitnessUtxo.TxOut[outIndex].PkScript
|
||||
|
||||
if txscript.IsWitnessProgram(script) {
|
||||
err := nonWitnessToWitness(u.Upsbt, inIndex)
|
||||
if err != nil {
|
||||
return SignInvalid, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err := u.addPartialSignature(inIndex, sig, pubKey)
|
||||
if err != nil {
|
||||
return SignInvalid, err
|
||||
}
|
||||
}
|
||||
|
||||
return SignSuccesful, nil
|
||||
}
|
||||
|
||||
// nonWitnessToWitness extracts the TxOut from the existing NonWitnessUtxo
|
||||
// field in the given PSBT input and sets it as type witness by replacing the
|
||||
// NonWitnessUtxo field with a WitnessUtxo field. See
|
||||
// https://github.com/bitcoin/bitcoin/pull/14197.
|
||||
func nonWitnessToWitness(p *Packet, inIndex int) error {
|
||||
outIndex := p.UnsignedTx.TxIn[inIndex].PreviousOutPoint.Index
|
||||
txout := p.Inputs[inIndex].NonWitnessUtxo.TxOut[outIndex]
|
||||
|
||||
// TODO(guggero): For segwit v1, we'll want to remove the NonWitnessUtxo
|
||||
// from the packet. For segwit v0 it is unsafe to only rely on the
|
||||
// witness UTXO. See https://github.com/bitcoin/bitcoin/pull/19215.
|
||||
// p.Inputs[inIndex].NonWitnessUtxo = nil
|
||||
|
||||
u := Updater{
|
||||
Upsbt: p,
|
||||
}
|
||||
|
||||
return u.AddInWitnessUtxo(txout, inIndex)
|
||||
}
|
102
btcutil/psbt/sort.go
Normal file
|
@ -0,0 +1,102 @@
|
|||
package psbt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
)
|
||||
|
||||
// InPlaceSort modifies the passed packet's wire TX inputs and outputs to be
|
||||
// sorted based on BIP 69. The sorting happens in a way that the packet's
|
||||
// partial inputs and outputs are also modified to match the sorted TxIn and
|
||||
// TxOuts of the wire transaction.
|
||||
//
|
||||
// WARNING: This function must NOT be called with packages that already contain
|
||||
// (partial) witness data since it will mutate the transaction if it's not
|
||||
// already sorted. This can cause issues if you mutate a tx in a block, for
|
||||
// example, which would invalidate the block. It could also cause cached hashes,
|
||||
// such as in a btcutil.Tx to become invalidated.
|
||||
//
|
||||
// The function should only be used if the caller is creating the transaction or
|
||||
// is otherwise 100% positive mutating will not cause adverse effects due to
|
||||
// other dependencies.
|
||||
func InPlaceSort(packet *Packet) error {
|
||||
// To make sure we don't run into any nil pointers or array index
|
||||
// violations during sorting, do a very basic sanity check first.
|
||||
err := VerifyInputOutputLen(packet, false, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sort.Sort(&sortableInputs{p: packet})
|
||||
sort.Sort(&sortableOutputs{p: packet})
|
||||
|
||||
return nil
|
||||
}
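
// exampleSortBeforeSigning is a hypothetical sketch of the intended call
// order: BIP 69 sorting happens right after the packet is constructed and
// before any signatures are attached, per the warning above.
func exampleSortBeforeSigning(packet *Packet) error {
	// InPlaceSort repeats this check internally; doing it here simply
	// surfaces malformed packets before any mutation is attempted.
	if err := VerifyInputOutputLen(packet, true, true); err != nil {
		return err
	}

	return InPlaceSort(packet)
}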
|
||||
|
||||
// sortableInputs is a simple wrapper around a packet that implements the
|
||||
// sort.Interface for sorting the wire and partial inputs of a packet.
|
||||
type sortableInputs struct {
|
||||
p *Packet
|
||||
}
|
||||
|
||||
// sortableOutputs is a simple wrapper around a packet that implements the
|
||||
// sort.Interface for sorting the wire and partial outputs of a packet.
|
||||
type sortableOutputs struct {
|
||||
p *Packet
|
||||
}
|
||||
|
||||
// For sortableInputs and sortableOutputs, three functions are needed to make
|
||||
// them sortable with sort.Sort() -- Len, Less, and Swap.
|
||||
// Len and Swap are trivial. Less is BIP 69 specific.
|
||||
func (s *sortableInputs) Len() int { return len(s.p.UnsignedTx.TxIn) }
|
||||
func (s *sortableOutputs) Len() int { return len(s.p.UnsignedTx.TxOut) }
|
||||
|
||||
// Swap swaps two inputs.
|
||||
func (s *sortableInputs) Swap(i, j int) {
|
||||
tx := s.p.UnsignedTx
|
||||
tx.TxIn[i], tx.TxIn[j] = tx.TxIn[j], tx.TxIn[i]
|
||||
s.p.Inputs[i], s.p.Inputs[j] = s.p.Inputs[j], s.p.Inputs[i]
|
||||
}
|
||||
|
||||
// Swap swaps two outputs.
|
||||
func (s *sortableOutputs) Swap(i, j int) {
|
||||
tx := s.p.UnsignedTx
|
||||
tx.TxOut[i], tx.TxOut[j] = tx.TxOut[j], tx.TxOut[i]
|
||||
s.p.Outputs[i], s.p.Outputs[j] = s.p.Outputs[j], s.p.Outputs[i]
|
||||
}
|
||||
|
||||
// Less is the input comparison function. First sort based on input hash
|
||||
// (reversed / rpc-style), then index.
|
||||
func (s *sortableInputs) Less(i, j int) bool {
|
||||
ins := s.p.UnsignedTx.TxIn
|
||||
|
||||
// Input hashes are the same, so compare the index.
|
||||
ihash := ins[i].PreviousOutPoint.Hash
|
||||
jhash := ins[j].PreviousOutPoint.Hash
|
||||
if ihash == jhash {
|
||||
return ins[i].PreviousOutPoint.Index <
|
||||
ins[j].PreviousOutPoint.Index
|
||||
}
|
||||
|
||||
// At this point, the hashes are not equal, so reverse them to
|
||||
// big-endian and return the result of the comparison.
|
||||
const hashSize = chainhash.HashSize
|
||||
for b := 0; b < hashSize/2; b++ {
|
||||
ihash[b], ihash[hashSize-1-b] = ihash[hashSize-1-b], ihash[b]
|
||||
jhash[b], jhash[hashSize-1-b] = jhash[hashSize-1-b], jhash[b]
|
||||
}
|
||||
return bytes.Compare(ihash[:], jhash[:]) == -1
|
||||
}
|
||||
|
||||
// Less is the output comparison function. First sort based on amount (smallest
|
||||
// first), then PkScript.
|
||||
func (s *sortableOutputs) Less(i, j int) bool {
|
||||
outs := s.p.UnsignedTx.TxOut
|
||||
|
||||
if outs[i].Value == outs[j].Value {
|
||||
return bytes.Compare(outs[i].PkScript, outs[j].PkScript) < 0
|
||||
}
|
||||
return outs[i].Value < outs[j].Value
|
||||
}
|
167
btcutil/psbt/sort_test.go
Normal file
|
@ -0,0 +1,167 @@
|
|||
package psbt
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
)
|
||||
|
||||
func TestInPlaceSort(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
packet *Packet
|
||||
expectedTxIn []*wire.TxIn
|
||||
expectedTxOut []*wire.TxOut
|
||||
expectedPIn []PInput
|
||||
expectedPOut []POutput
|
||||
expectErr bool
|
||||
}{{
|
||||
name: "packet nil",
|
||||
packet: nil,
|
||||
expectErr: true,
|
||||
}, {
|
||||
name: "no inputs or outputs",
|
||||
packet: &Packet{UnsignedTx: &wire.MsgTx{}},
|
||||
expectErr: false,
|
||||
}, {
|
||||
name: "inputs only",
|
||||
packet: &Packet{
|
||||
UnsignedTx: &wire.MsgTx{
|
||||
TxIn: []*wire.TxIn{{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{99, 88},
|
||||
Index: 7,
|
||||
},
|
||||
}, {
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{77, 88},
|
||||
Index: 12,
|
||||
},
|
||||
}, {
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{77, 88},
|
||||
Index: 7,
|
||||
},
|
||||
}},
|
||||
},
|
||||
// Abuse the SighashType as an index to make sure the
|
||||
// partial inputs are also sorted together with the wire
|
||||
// inputs.
|
||||
Inputs: []PInput{{
|
||||
SighashType: 0,
|
||||
}, {
|
||||
SighashType: 1,
|
||||
}, {
|
||||
SighashType: 2,
|
||||
}},
|
||||
},
|
||||
expectedTxIn: []*wire.TxIn{{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{77, 88},
|
||||
Index: 7,
|
||||
},
|
||||
}, {
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{77, 88},
|
||||
Index: 12,
|
||||
},
|
||||
}, {
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Hash: chainhash.Hash{99, 88},
|
||||
Index: 7,
|
||||
},
|
||||
}},
|
||||
expectedPIn: []PInput{{
|
||||
SighashType: 2,
|
||||
}, {
|
||||
SighashType: 1,
|
||||
}, {
|
||||
SighashType: 0,
|
||||
}},
|
||||
expectErr: false,
|
||||
}, {
|
||||
name: "outputs only",
|
||||
packet: &Packet{
|
||||
UnsignedTx: &wire.MsgTx{
|
||||
TxOut: []*wire.TxOut{{
|
||||
PkScript: []byte{99, 88},
|
||||
Value: 7,
|
||||
}, {
|
||||
PkScript: []byte{77, 88},
|
||||
Value: 12,
|
||||
}, {
|
||||
PkScript: []byte{77, 88},
|
||||
Value: 7,
|
||||
}},
|
||||
},
|
||||
// Abuse the RedeemScript as an index to make sure the
// partial outputs are also sorted together with the wire
// outputs.
|
||||
Outputs: []POutput{{
|
||||
RedeemScript: []byte{0},
|
||||
}, {
|
||||
RedeemScript: []byte{1},
|
||||
}, {
|
||||
RedeemScript: []byte{2},
|
||||
}},
|
||||
},
|
||||
expectedTxOut: []*wire.TxOut{{
|
||||
PkScript: []byte{77, 88},
|
||||
Value: 7,
|
||||
}, {
|
||||
PkScript: []byte{99, 88},
|
||||
Value: 7,
|
||||
}, {
|
||||
PkScript: []byte{77, 88},
|
||||
Value: 12,
|
||||
}},
|
||||
expectedPOut: []POutput{{
|
||||
RedeemScript: []byte{2},
|
||||
}, {
|
||||
RedeemScript: []byte{0},
|
||||
}, {
|
||||
RedeemScript: []byte{1},
|
||||
}},
|
||||
expectErr: false,
|
||||
}}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
p := tc.packet
|
||||
err := InPlaceSort(p)
|
||||
if (tc.expectErr && err == nil) ||
|
||||
(!tc.expectErr && err != nil) {
|
||||
|
||||
t.Fatalf("got error '%v' but wanted it to be "+
|
||||
"nil: %v", err, tc.expectErr)
|
||||
}
|
||||
|
||||
// Don't continue on this special test case.
|
||||
if p == nil {
|
||||
return
|
||||
}
|
||||
|
||||
tx := p.UnsignedTx
|
||||
if !reflect.DeepEqual(tx.TxIn, tc.expectedTxIn) {
|
||||
t.Fatalf("unexpected txin, got %#v wanted %#v",
|
||||
tx.TxIn, tc.expectedTxIn)
|
||||
}
|
||||
if !reflect.DeepEqual(tx.TxOut, tc.expectedTxOut) {
|
||||
t.Fatalf("unexpected txout, got %#v wanted %#v",
|
||||
tx.TxOut, tc.expectedTxOut)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(p.Inputs, tc.expectedPIn) {
|
||||
t.Fatalf("unexpected pin, got %#v wanted %#v",
|
||||
p.Inputs, tc.expectedPIn)
|
||||
}
|
||||
if !reflect.DeepEqual(p.Outputs, tc.expectedPOut) {
|
||||
t.Fatalf("unexpected pout, got %#v wanted %#v",
|
||||
p.Outputs, tc.expectedPOut)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
149
btcutil/psbt/types.go
Normal file
|
@ -0,0 +1,149 @@
|
|||
package psbt
|
||||
|
||||
// GlobalType is the set of types that are used at the global scope level
|
||||
// within the PSBT.
|
||||
type GlobalType uint8
|
||||
|
||||
const (
|
||||
// UnsignedTxType is the global scope key that houses the unsigned
|
||||
// transaction of the PSBT. The value is a transaction in network
|
||||
// serialization. The scriptSigs and witnesses for each input must be
|
||||
// empty. The transaction must be in the old serialization format
|
||||
// (without witnesses). A PSBT must have a transaction, otherwise it is
|
||||
// invalid.
|
||||
UnsignedTxType GlobalType = 0
|
||||
|
||||
// XpubType houses a global xpub for the entire PSBT packet.
|
||||
//
|
||||
// The key ({0x01}|{xpub}) is the 78 byte serialized extended public key
|
||||
// as defined by BIP 32. Extended public keys are those that can be
|
||||
// used to derive public keys used in the inputs and outputs of this
|
||||
// transaction. It should be the public key at the highest hardened
|
||||
// derivation index so that the unhardened child keys used in the
// transaction can be derived.
|
||||
//
|
||||
// The value is the master key fingerprint as defined by BIP 32
|
||||
// concatenated with the derivation path of the public key. The
|
||||
// derivation path is represented as 32-bit little endian unsigned
|
||||
// integer indexes concatenated with each other. The number of 32 bit
|
||||
// unsigned integer indexes must match the depth provided in the
|
||||
// extended public key.
|
||||
XpubType GlobalType = 1
|
||||
|
||||
// VersionType houses the global version number of this PSBT. There is no
// key data (the key contains only the type byte); if this field is
// omitted, the version is assumed to be zero.
|
||||
VersionType GlobalType = 0xFB
|
||||
|
||||
// ProprietaryGlobalType is used to house any proprietary global-scope
|
||||
// keys within the PSBT.
|
||||
//
|
||||
// The key ({0xFC}|<prefix>|{subtype}|{key data}) is a variable length
|
||||
// identifier prefix, followed by a subtype, followed by the key data
|
||||
// itself.
|
||||
//
|
||||
// The value is any data as defined by the proprietary type user.
|
||||
ProprietaryGlobalType = 0xFC
|
||||
)
|
||||
|
||||
// InputType is the set of types that are defined for each input included
|
||||
// within the PSBT.
|
||||
type InputType uint32
|
||||
|
||||
const (
|
||||
// NonWitnessUtxoType has no key ({0x00}) and houses the transaction in
|
||||
// network serialization format the current input spends from. This
|
||||
// should only be present for inputs which spend non-segwit outputs.
|
||||
// However, if it is unknown whether an input spends a segwit output,
|
||||
// this type should be used. The entire input transaction is needed in
|
||||
// order to be able to verify the values of the input (pre-segwit they
|
||||
// aren't in the signature digest).
|
||||
NonWitnessUtxoType InputType = 0
|
||||
|
||||
// WitnessUtxoType has no key ({0x01}), and houses the entire
|
||||
// transaction output in network serialization which the current input
|
||||
// spends from. This should only be present for inputs which spend
|
||||
// segwit outputs, including P2SH embedded ones (value || script).
|
||||
WitnessUtxoType InputType = 1
|
||||
|
||||
// PartialSigType is used to include a partial signature with key
|
||||
// ({0x02}|{public key}).
|
||||
//
|
||||
// The value is the signature as would be pushed to the stack from a
|
||||
// scriptSig or witness.
|
||||
PartialSigType InputType = 2
|
||||
|
||||
// SighashType is an empty key ({0x03}).
|
||||
//
|
||||
// The value contains the 32-bit unsigned integer specifying the
|
||||
// sighash type to be used for this input. Signatures for this input
|
||||
// must use the sighash type, finalizers must fail to finalize inputs
|
||||
// which have signatures that do not match the specified sighash type.
|
||||
// Signers who cannot produce signatures with the sighash type must not
|
||||
// provide a signature.
|
||||
SighashType InputType = 3
|
||||
|
||||
// RedeemScriptInputType is an empty key ({0x04}).
|
||||
//
|
||||
// The value is the redeem script of the input if present.
|
||||
RedeemScriptInputType InputType = 4
|
||||
|
||||
// WitnessScriptInputType is an empty key ({0x05}).
|
||||
//
|
||||
// The value is the witness script of this input, if it has one.
|
||||
WitnessScriptInputType InputType = 5
|
||||
|
||||
// Bip32DerivationInputType is a type that carries the pubkey along
|
||||
// with the key ({0x06}|{public key}).
|
||||
//
|
||||
// The value is master key fingerprint as defined by BIP 32
|
||||
// concatenated with the derivation path of the public key. The
|
||||
// derivation path is represented as 32 bit unsigned integer indexes
|
||||
// concatenated with each other. Public keys are those that will be
|
||||
// needed to sign this input.
|
||||
Bip32DerivationInputType InputType = 6
|
||||
|
||||
// FinalScriptSigType is an empty key ({0x07}).
|
||||
//
|
||||
// The value contains a fully constructed scriptSig with signatures and
|
||||
// any other scripts necessary for the input to pass validation.
|
||||
FinalScriptSigType InputType = 7
|
||||
|
||||
// FinalScriptWitnessType is an empty key ({0x08}). The value is a
|
||||
// fully constructed scriptWitness with signatures and any other
|
||||
// scripts necessary for the input to pass validation.
|
||||
FinalScriptWitnessType InputType = 8
|
||||
|
||||
// ProprietaryInputType is a custom type for use by devs.
|
||||
//
|
||||
// The key ({0xFC}|<prefix>|{subtype}|{key data}) is a variable length
|
||||
// identifier prefix, followed by a subtype, followed by the key data
|
||||
// itself.
|
||||
//
|
||||
// The value is any value data as defined by the proprietary type user.
|
||||
ProprietaryInputType InputType = 0xFC
|
||||
)
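
// exampleIsFinalInputKey is a hypothetical sketch of how a parser might
// branch on the InputType read from a key's first byte: the two Final* types
// mark an input that already carries its complete scriptSig or witness.
func exampleIsFinalInputKey(keyType InputType) bool {
	switch keyType {
	case FinalScriptSigType, FinalScriptWitnessType:
		return true
	default:
		return false
	}
}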
|
||||
|
||||
// OutputType is the set of types defined per output within the PSBT.
|
||||
type OutputType uint32
|
||||
|
||||
const (
|
||||
// RedeemScriptOutputType is an empty key ({0x00}).
|
||||
//
|
||||
// The value is the redeemScript for this output if it has one.
|
||||
RedeemScriptOutputType OutputType = 0
|
||||
|
||||
// WitnessScriptOutputType is an empty key ({0x01}).
|
||||
//
|
||||
// The value is the witness script of this output, if it has one.
|
||||
WitnessScriptOutputType OutputType = 1
|
||||
|
||||
// Bip32DerivationOutputType is used to communicate derivation information
|
||||
// needed to spend this output. The key is ({0x02}|{public key}).
|
||||
//
|
||||
// The value is master key fingerprint concatenated with the derivation
|
||||
// path of the public key. The derivation path is represented as 32-bit
|
||||
// little endian unsigned integer indexes concatenated with each other.
|
||||
// Public keys are those needed to spend this output.
|
||||
Bip32DerivationOutputType OutputType = 2
|
||||
)
|
377
btcutil/psbt/updater.go
Normal file
|
@ -0,0 +1,377 @@
|
|||
// Copyright (c) 2018 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package psbt
|
||||
|
||||
// The Updater requires provision of a single PSBT and is able to add data to
|
||||
// both input and output sections. It can be called repeatedly to add more
|
||||
// data. It also allows addition of signatures via the addPartialSignature
|
||||
// function; this is called internally to the package in the Sign() function of
|
||||
// Updater, located in signer.go
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
)
|
||||
|
||||
// Updater encapsulates the role 'Updater' as specified in BIP174; it accepts
|
||||
// Psbt structs and has methods to add fields to the inputs and outputs.
|
||||
type Updater struct {
|
||||
Upsbt *Packet
|
||||
}
|
||||
|
||||
// NewUpdater returns a new instance of Updater, if the passed Psbt struct is
|
||||
// in a valid form, else an error.
|
||||
func NewUpdater(p *Packet) (*Updater, error) {
|
||||
if err := p.SanityCheck(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Updater{Upsbt: p}, nil
|
||||
|
||||
}
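
// exampleNewUpdater is a hypothetical sketch of the usual construction path:
// build an empty packet from an unsigned transaction, then wrap it in an
// Updater to start attaching UTXO and script data.
func exampleNewUpdater(unsignedTx *wire.MsgTx) (*Updater, error) {
	packet, err := NewFromUnsignedTx(unsignedTx)
	if err != nil {
		return nil, err
	}

	return NewUpdater(packet)
}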
|
||||
|
||||
// AddInNonWitnessUtxo adds the utxo information for an input which is
|
||||
// non-witness. This requires provision of a full transaction (which is the
|
||||
// source of the corresponding prevOut), and the input index. If addition of
|
||||
// this key-value pair to the Psbt fails, an error is returned.
|
||||
func (p *Updater) AddInNonWitnessUtxo(tx *wire.MsgTx, inIndex int) error {
|
||||
if inIndex > len(p.Upsbt.Inputs)-1 {
|
||||
return ErrInvalidPrevOutNonWitnessTransaction
|
||||
}
|
||||
|
||||
p.Upsbt.Inputs[inIndex].NonWitnessUtxo = tx
|
||||
|
||||
if err := p.Upsbt.SanityCheck(); err != nil {
|
||||
return ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddInWitnessUtxo adds the utxo information for an input which is witness.
|
||||
// This requires provision of a full transaction *output* (which is the source
|
||||
// of the corresponding prevOut); not the full transaction because BIP143 means
|
||||
// the output information is sufficient, and the input index. If addition of
|
||||
// this key-value pair to the Psbt fails, an error is returned.
|
||||
func (p *Updater) AddInWitnessUtxo(txout *wire.TxOut, inIndex int) error {
|
||||
if inIndex > len(p.Upsbt.Inputs)-1 {
|
||||
return ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
p.Upsbt.Inputs[inIndex].WitnessUtxo = txout
|
||||
|
||||
if err := p.Upsbt.SanityCheck(); err != nil {
|
||||
return ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// addPartialSignature allows the Updater role to insert fields of type partial
|
||||
// signature into a Psbt, consisting of both the pubkey (as keydata) and the
|
||||
// ECDSA signature (as value). Note that the Signer role is encapsulated in
|
||||
// this function; signatures are only allowed to be added that follow the
|
||||
// sanity-check on signing rules explained in the BIP under `Signer`; if the
|
||||
// rules are not satisfied, an ErrInvalidSignatureForInput is returned.
|
||||
//
|
||||
// NOTE: This function does *not* validate the ECDSA signature itself.
|
||||
func (p *Updater) addPartialSignature(inIndex int, sig []byte,
|
||||
pubkey []byte) error {
|
||||
|
||||
partialSig := PartialSig{
|
||||
PubKey: pubkey, Signature: sig,
|
||||
}
|
||||
|
||||
// First validate the passed (sig, pub).
|
||||
if !partialSig.checkValid() {
|
||||
return ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
pInput := p.Upsbt.Inputs[inIndex]
|
||||
|
||||
// First check; don't add duplicates.
|
||||
for _, x := range pInput.PartialSigs {
|
||||
if bytes.Equal(x.PubKey, partialSig.PubKey) {
|
||||
return ErrDuplicateKey
|
||||
}
|
||||
}
|
||||
|
||||
// Attaching signature without utxo field is not allowed.
|
||||
if pInput.WitnessUtxo == nil && pInput.NonWitnessUtxo == nil {
|
||||
return ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
// Next, we perform a series of additional sanity checks.
|
||||
if pInput.NonWitnessUtxo != nil {
|
||||
if len(p.Upsbt.UnsignedTx.TxIn) < inIndex+1 {
|
||||
return ErrInvalidPrevOutNonWitnessTransaction
|
||||
}
|
||||
|
||||
if pInput.NonWitnessUtxo.TxHash() !=
|
||||
p.Upsbt.UnsignedTx.TxIn[inIndex].PreviousOutPoint.Hash {
|
||||
return ErrInvalidSignatureForInput
|
||||
}
|
||||
|
||||
// To validate that the redeem script matches, we must pull out
|
||||
// the scriptPubKey of the corresponding output and compare
|
||||
// that with the P2SH scriptPubKey that is generated by
|
||||
// redeemScript.
|
||||
if pInput.RedeemScript != nil {
|
||||
outIndex := p.Upsbt.UnsignedTx.TxIn[inIndex].PreviousOutPoint.Index
|
||||
scriptPubKey := pInput.NonWitnessUtxo.TxOut[outIndex].PkScript
|
||||
scriptHash := btcutil.Hash160(pInput.RedeemScript)
|
||||
|
||||
scriptHashScript, err := txscript.NewScriptBuilder().
|
||||
AddOp(txscript.OP_HASH160).
|
||||
AddData(scriptHash).
|
||||
AddOp(txscript.OP_EQUAL).
|
||||
Script()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(scriptHashScript, scriptPubKey) {
|
||||
return ErrInvalidSignatureForInput
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// It could be that we set both the non-witness and witness UTXO fields
|
||||
// in case it's from a wallet that patched the CVE-2020-14199
|
||||
// vulnerability. We detect whether the input being spent is actually a
|
||||
// witness input and then copy it over to the witness UTXO field in the
|
||||
// signer. Run the witness checks as well, even if we might already have
|
||||
// checked the script hash. But that should be a negligible performance
|
||||
// penalty.
|
||||
if pInput.WitnessUtxo != nil {
|
||||
scriptPubKey := pInput.WitnessUtxo.PkScript
|
||||
|
||||
var script []byte
|
||||
if pInput.RedeemScript != nil {
|
||||
scriptHash := btcutil.Hash160(pInput.RedeemScript)
|
||||
scriptHashScript, err := txscript.NewScriptBuilder().
|
||||
AddOp(txscript.OP_HASH160).
|
||||
AddData(scriptHash).
|
||||
AddOp(txscript.OP_EQUAL).
|
||||
Script()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(scriptHashScript, scriptPubKey) {
|
||||
return ErrInvalidSignatureForInput
|
||||
}
|
||||
|
||||
script = pInput.RedeemScript
|
||||
} else {
|
||||
script = scriptPubKey
|
||||
}
|
||||
|
||||
// If a witnessScript field is present, this is a P2WSH,
|
||||
// whether nested or not (that is handled by the assignment to
|
||||
// `script` above); in that case, sanity check that `script` is
|
||||
// the p2wsh of witnessScript. Contrariwise, if no
|
||||
// witnessScript field is present, this will be signed as
|
||||
// p2wkh.
|
||||
if pInput.WitnessScript != nil {
|
||||
witnessScriptHash := sha256.Sum256(pInput.WitnessScript)
|
||||
witnessScriptHashScript, err := txscript.NewScriptBuilder().
|
||||
AddOp(txscript.OP_0).
|
||||
AddData(witnessScriptHash[:]).
|
||||
Script()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(script, witnessScriptHashScript[:]) {
|
||||
return ErrInvalidSignatureForInput
|
||||
}
|
||||
} else {
|
||||
// Otherwise, this is a p2wkh input.
|
||||
pubkeyHash := btcutil.Hash160(pubkey)
|
||||
pubkeyHashScript, err := txscript.NewScriptBuilder().
|
||||
AddOp(txscript.OP_0).
|
||||
AddData(pubkeyHash).
|
||||
Script()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate that we're able to properly reconstruct the
|
||||
// witness program.
|
||||
if !bytes.Equal(pubkeyHashScript, script) {
|
||||
return ErrInvalidSignatureForInput
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
p.Upsbt.Inputs[inIndex].PartialSigs = append(
|
||||
p.Upsbt.Inputs[inIndex].PartialSigs, &partialSig,
|
||||
)
|
||||
|
||||
if err := p.Upsbt.SanityCheck(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Addition of a non-duplicate-key partial signature cannot violate
|
||||
// sanity-check rules.
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddInSighashType adds the sighash type information for an input. The
|
||||
// sighash type is passed as a 32 bit unsigned integer, along with the index
|
||||
// for the input. An error is returned if addition of this key-value pair to
|
||||
// the Psbt fails.
|
||||
func (p *Updater) AddInSighashType(sighashType txscript.SigHashType,
|
||||
inIndex int) error {
|
||||
|
||||
p.Upsbt.Inputs[inIndex].SighashType = sighashType
|
||||
|
||||
if err := p.Upsbt.SanityCheck(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
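
// exampleDescribeWitnessInput is a hypothetical sketch of the minimum
// per-input data a segwit spend needs before signing: the previous output
// being spent plus, optionally, an explicit sighash type.
func exampleDescribeWitnessInput(u *Updater, inIndex int,
	prevOut *wire.TxOut) error {

	if err := u.AddInWitnessUtxo(prevOut, inIndex); err != nil {
		return err
	}

	// SIGHASH_ALL is the default, but recording it explicitly lets other
	// signers verify the flag byte of any signatures they add.
	return u.AddInSighashType(txscript.SigHashAll, inIndex)
}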
|
||||
|
||||
// AddInRedeemScript adds the redeem script information for an input. The
|
||||
// redeem script is passed serialized, as a byte slice, along with the index of
|
||||
// the input. An error is returned if addition of this key-value pair to the
|
||||
// Psbt fails.
|
||||
func (p *Updater) AddInRedeemScript(redeemScript []byte,
|
||||
inIndex int) error {
|
||||
|
||||
p.Upsbt.Inputs[inIndex].RedeemScript = redeemScript
|
||||
|
||||
if err := p.Upsbt.SanityCheck(); err != nil {
|
||||
return ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddInWitnessScript adds the witness script information for an input. The
|
||||
// witness script is passed serialized, as a byte slice, along with the index
|
||||
// of the input. An error is returned if addition of this key-value pair to the
|
||||
// Psbt fails.
|
||||
func (p *Updater) AddInWitnessScript(witnessScript []byte,
|
||||
inIndex int) error {
|
||||
|
||||
p.Upsbt.Inputs[inIndex].WitnessScript = witnessScript
|
||||
|
||||
if err := p.Upsbt.SanityCheck(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddInBip32Derivation takes a master key fingerprint as defined in BIP32, a
|
||||
// BIP32 path as a slice of uint32 values, and a serialized pubkey as a byte
|
||||
// slice, along with the integer index of the input, and inserts this data into
|
||||
// that input.
|
||||
//
|
||||
// NOTE: This can be called multiple times for the same input. An error is
|
||||
// returned if addition of this key-value pair to the Psbt fails.
|
||||
func (p *Updater) AddInBip32Derivation(masterKeyFingerprint uint32,
|
||||
bip32Path []uint32, pubKeyData []byte, inIndex int) error {
|
||||
|
||||
bip32Derivation := Bip32Derivation{
|
||||
PubKey: pubKeyData,
|
||||
MasterKeyFingerprint: masterKeyFingerprint,
|
||||
Bip32Path: bip32Path,
|
||||
}
|
||||
|
||||
if !bip32Derivation.checkValid() {
|
||||
return ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
// Don't allow duplicate keys
|
||||
for _, x := range p.Upsbt.Inputs[inIndex].Bip32Derivation {
|
||||
if bytes.Equal(x.PubKey, bip32Derivation.PubKey) {
|
||||
return ErrDuplicateKey
|
||||
}
|
||||
}
|
||||
|
||||
p.Upsbt.Inputs[inIndex].Bip32Derivation = append(
|
||||
p.Upsbt.Inputs[inIndex].Bip32Derivation, &bip32Derivation,
|
||||
)
|
||||
|
||||
if err := p.Upsbt.SanityCheck(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
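
// exampleAddInputDerivation is a hypothetical sketch of recording a BIP 32
// derivation path for an input's pubkey; the fingerprint and path values are
// placeholders, not real wallet data.
func exampleAddInputDerivation(u *Updater, inIndex int, pubKey []byte) error {
	const hardened = uint32(0x80000000)
	path := []uint32{
		hardened + 84, // purpose'
		hardened + 0,  // coin type'
		hardened + 0,  // account'
		0,             // external branch
		0,             // address index
	}

	return u.AddInBip32Derivation(0x12345678, path, pubKey, inIndex)
}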
|
||||
|
||||
// AddOutBip32Derivation takes a master key fingerprint as defined in BIP32, a
|
||||
// BIP32 path as a slice of uint32 values, and a serialized pubkey as a byte
|
||||
// slice, along with the integer index of the output, and inserts this data
|
||||
// into that output.
|
||||
//
|
||||
// NOTE: This can be called multiple times for the same output. An error
|
||||
// is returned if addition of this key-value pair to the Psbt fails.
|
||||
func (p *Updater) AddOutBip32Derivation(masterKeyFingerprint uint32,
|
||||
bip32Path []uint32, pubKeyData []byte, outIndex int) error {
|
||||
|
||||
bip32Derivation := Bip32Derivation{
|
||||
PubKey: pubKeyData,
|
||||
MasterKeyFingerprint: masterKeyFingerprint,
|
||||
Bip32Path: bip32Path,
|
||||
}
|
||||
|
||||
if !bip32Derivation.checkValid() {
|
||||
return ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
// Don't allow duplicate keys
|
||||
for _, x := range p.Upsbt.Outputs[outIndex].Bip32Derivation {
|
||||
if bytes.Equal(x.PubKey, bip32Derivation.PubKey) {
|
||||
return ErrDuplicateKey
|
||||
}
|
||||
}
|
||||
|
||||
p.Upsbt.Outputs[outIndex].Bip32Derivation = append(
|
||||
p.Upsbt.Outputs[outIndex].Bip32Derivation, &bip32Derivation,
|
||||
)
|
||||
|
||||
if err := p.Upsbt.SanityCheck(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddOutRedeemScript takes a redeem script as a byte slice and appends it to
|
||||
// the output at index outIndex.
|
||||
func (p *Updater) AddOutRedeemScript(redeemScript []byte,
|
||||
outIndex int) error {
|
||||
|
||||
p.Upsbt.Outputs[outIndex].RedeemScript = redeemScript
|
||||
|
||||
if err := p.Upsbt.SanityCheck(); err != nil {
|
||||
return ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddOutWitnessScript takes a witness script as a byte slice and appends it to
|
||||
// the output at index outIndex.
|
||||
func (p *Updater) AddOutWitnessScript(witnessScript []byte,
|
||||
outIndex int) error {
|
||||
|
||||
p.Upsbt.Outputs[outIndex].WitnessScript = witnessScript
|
||||
|
||||
if err := p.Upsbt.SanityCheck(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
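
// exampleDescribeChangeOutput is a hypothetical sketch of annotating a P2WSH
// change output so other signers can verify it belongs to the wallet: the
// witness script plus the BIP 32 derivation of the key involved. All
// arguments are assumed to be supplied by the caller's wallet logic.
func exampleDescribeChangeOutput(u *Updater, outIndex int, witnessScript,
	pubKey []byte, fingerprint uint32, path []uint32) error {

	if err := u.AddOutWitnessScript(witnessScript, outIndex); err != nil {
		return err
	}

	return u.AddOutBip32Derivation(fingerprint, path, pubKey, outIndex)
}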
|
422
btcutil/psbt/utils.go
Normal file
|
@ -0,0 +1,422 @@
|
|||
// Copyright (c) 2018 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package psbt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
)
|
||||
|
||||
// WriteTxWitness is a utility function due to non-exported witness
|
||||
// serialization (writeTxWitness encodes the bitcoin protocol encoding for a
|
||||
// transaction input's witness into w).
|
||||
func WriteTxWitness(w io.Writer, wit [][]byte) error {
|
||||
if err := wire.WriteVarInt(w, 0, uint64(len(wit))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, item := range wit {
|
||||
err := wire.WriteVarBytes(w, 0, item)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writePKHWitness writes a witness for a p2wkh spending input
|
||||
func writePKHWitness(sig []byte, pub []byte) ([]byte, error) {
|
||||
var (
|
||||
buf bytes.Buffer
|
||||
witnessItems = [][]byte{sig, pub}
|
||||
)
|
||||
|
||||
if err := WriteTxWitness(&buf, witnessItems); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// checkIsMultiSigScript is a utility function to check whether a given
|
||||
// redeemscript fits the standard multisig template used in all P2SH based
|
||||
// multisig, given a set of pubkeys for redemption.
|
||||
func checkIsMultiSigScript(pubKeys [][]byte, sigs [][]byte,
|
||||
script []byte) bool {
|
||||
|
||||
// First insist that the script type is multisig.
|
||||
if txscript.GetScriptClass(script) != txscript.MultiSigTy {
|
||||
return false
|
||||
}
|
||||
|
||||
// Inspect the script to ensure that the number of sigs and pubkeys is
|
||||
// correct
|
||||
_, numSigs, err := txscript.CalcMultiSigStats(script)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// If the number of required sigs doesn't match the number of provided
// pubkeys and sigs, then we can't proceed as we're not yet final.
|
||||
if numSigs != len(pubKeys) || numSigs != len(sigs) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// extractKeyOrderFromScript is a utility function to extract an ordered list
|
||||
// of signatures, given a serialized script (redeemscript or witness script), a
|
||||
// list of pubkeys and the signatures corresponding to those pubkeys. This
|
||||
// function is used to ensure that the signatures will be embedded in the final
|
||||
// scriptSig or scriptWitness in the correct order.
|
||||
func extractKeyOrderFromScript(script []byte, expectedPubkeys [][]byte,
|
||||
sigs [][]byte) ([][]byte, error) {
|
||||
|
||||
// If this isn't a proper finalized multi-sig script, then we can't
|
||||
// proceed.
|
||||
if !checkIsMultiSigScript(expectedPubkeys, sigs, script) {
|
||||
return nil, ErrUnsupportedScriptType
|
||||
}
|
||||
|
||||
// Arrange the pubkeys and sigs into a slice of format:
|
||||
// * [[pub,sig], [pub,sig],..]
|
||||
type sigWithPub struct {
|
||||
pubKey []byte
|
||||
sig []byte
|
||||
}
|
||||
var pubsSigs []sigWithPub
|
||||
for i, pub := range expectedPubkeys {
|
||||
pubsSigs = append(pubsSigs, sigWithPub{
|
||||
pubKey: pub,
|
||||
sig: sigs[i],
|
||||
})
|
||||
}
|
||||
|
||||
// Now that we have the set of (pubkey, sig) pairs, we'll construct a
|
||||
// position map that we can use to swap the order in the slice above to
|
||||
// match how things are laid out in the script.
|
||||
type positionEntry struct {
|
||||
index int
|
||||
value sigWithPub
|
||||
}
|
||||
var positionMap []positionEntry
|
||||
|
||||
// For each pubkey in our pubsSigs slice, we'll now construct a proper
|
||||
// positionMap entry, based on _where_ in the script the pubkey first
|
||||
// appears.
|
||||
for _, p := range pubsSigs {
|
||||
pos := bytes.Index(script, p.pubKey)
|
||||
if pos < 0 {
|
||||
return nil, errors.New("script does not contain pubkeys")
|
||||
}
|
||||
|
||||
positionMap = append(positionMap, positionEntry{
|
||||
index: pos,
|
||||
value: p,
|
||||
})
|
||||
}
|
||||
|
||||
// Now that we have the position map fully populated, we'll use the
|
||||
// index data to properly sort the entries in the map based on where
|
||||
// they appear in the script.
|
||||
sort.Slice(positionMap, func(i, j int) bool {
|
||||
return positionMap[i].index < positionMap[j].index
|
||||
})
|
||||
|
||||
// Finally, we can simply iterate through the position map in order to
|
||||
// extract the proper signature ordering.
|
||||
sortedSigs := make([][]byte, 0, len(positionMap))
|
||||
for _, x := range positionMap {
|
||||
sortedSigs = append(sortedSigs, x.value.sig)
|
||||
}
|
||||
|
||||
return sortedSigs, nil
|
||||
}
|
||||
|
||||
// getMultisigScriptWitness creates a full psbt serialized Witness field for
|
||||
// the transaction, given the public keys and signatures to be appended. This
|
||||
// function will only accept witnessScripts of the type M of N multisig. This
|
||||
// is used for both p2wsh and nested p2wsh multisig cases.
|
||||
func getMultisigScriptWitness(witnessScript []byte, pubKeys [][]byte,
|
||||
sigs [][]byte) ([]byte, error) {
|
||||
|
||||
// First using the script as a guide, we'll properly order the sigs
|
||||
// according to how their corresponding pubkeys appear in the
|
||||
// witnessScript.
|
||||
orderedSigs, err := extractKeyOrderFromScript(
|
||||
witnessScript, pubKeys, sigs,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Now that we know the proper order, we'll append each of the
|
||||
// signatures into a new witness stack, then top it off with the
|
||||
// witness script at the end, prepending the nil element that
// OP_CHECKMULTISIG's extra pop consumes.
|
||||
witnessElements := make(wire.TxWitness, 0, len(sigs)+2)
|
||||
witnessElements = append(witnessElements, nil)
|
||||
for _, os := range orderedSigs {
|
||||
witnessElements = append(witnessElements, os)
|
||||
}
|
||||
witnessElements = append(witnessElements, witnessScript)
|
||||
|
||||
// Now that we have the full witness stack, we'll serialize it in the
|
||||
// expected format, and return the final bytes.
|
||||
var buf bytes.Buffer
|
||||
if err = WriteTxWitness(&buf, witnessElements); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// checkSigHashFlags compares the sighash flag byte on a signature with the
|
||||
// value expected according to any PsbtInSighashType field in this section of
|
||||
// the PSBT, and returns true if they match, false otherwise.
|
||||
// If no SighashType field exists, it is assumed to be SIGHASH_ALL.
|
||||
//
|
||||
// TODO(waxwing): sighash type not restricted to one byte in future?
|
||||
func checkSigHashFlags(sig []byte, input *PInput) bool {
|
||||
expectedSighashType := txscript.SigHashAll
|
||||
if input.SighashType != 0 {
|
||||
expectedSighashType = input.SighashType
|
||||
}
|
||||
|
||||
return expectedSighashType == txscript.SigHashType(sig[len(sig)-1])
|
||||
}
|
||||
|
||||
// serializeKVpair writes out a kv pair using a varbyte prefix for each.
|
||||
func serializeKVpair(w io.Writer, key []byte, value []byte) error {
|
||||
if err := wire.WriteVarBytes(w, 0, key); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return wire.WriteVarBytes(w, 0, value)
|
||||
}
|
||||
|
||||
// serializeKVPairWithType writes out to the passed writer a key-value pair
// whose key is the passed type byte followed by the optional key data.
|
||||
func serializeKVPairWithType(w io.Writer, kt uint8, keydata []byte,
|
||||
value []byte) error {
|
||||
|
||||
// If the key has no data, then we write a blank slice.
|
||||
if keydata == nil {
|
||||
keydata = []byte{}
|
||||
}
|
||||
|
||||
// The final key to be written is: {type} || {keyData}
|
||||
serializedKey := append([]byte{kt}, keydata...)
|
||||
return serializeKVpair(w, serializedKey, value)
|
||||
}
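
// exampleSerializePartialSigPair is a hypothetical sketch of how the generic
// key-value helpers above map onto a concrete BIP174 record: a partial
// signature is written as the key {PartialSigType || pubkey} with the raw
// signature as the value.
func exampleSerializePartialSigPair(w io.Writer, pubKey, sig []byte) error {
	return serializeKVPairWithType(w, uint8(PartialSigType), pubKey, sig)
}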
|
||||
|
||||
// getKey retrieves a single key - both the key type and the keydata (if
|
||||
// present) from the stream and returns the key type as an integer, or -1 if
|
||||
// the key was of zero length. This integer is used to indicate the presence
|
||||
// of a separator byte which indicates the end of a given key-value pair list,
|
||||
// and the keydata as a byte slice or nil if none is present.
|
||||
func getKey(r io.Reader) (int, []byte, error) {
|
||||
|
||||
// For the key, we read the varint separately, instead of using the
|
||||
// available ReadVarBytes, because we have a specific treatment of 0x00
|
||||
// here:
|
||||
count, err := wire.ReadVarInt(r, 0)
|
||||
if err != nil {
|
||||
return -1, nil, ErrInvalidPsbtFormat
|
||||
}
|
||||
if count == 0 {
|
||||
// A separator indicates end of key-value pair list.
|
||||
return -1, nil, nil
|
||||
}
|
||||
|
||||
// Check that we don't attempt to decode a dangerously large key.
|
||||
if count > MaxPsbtKeyLength {
|
||||
return -1, nil, ErrInvalidKeydata
|
||||
}
|
||||
|
||||
// Next, we read out the designated number of bytes, which may include
|
||||
// a type, key, and optional data.
|
||||
keyTypeAndData := make([]byte, count)
|
||||
if _, err := io.ReadFull(r, keyTypeAndData[:]); err != nil {
|
||||
return -1, nil, err
|
||||
}
|
||||
|
||||
keyType := int(keyTypeAndData[0])
|
||||
|
||||
// Note that the second return value will usually be empty, since most
|
||||
// keys contain no more than the key type byte.
|
||||
if len(keyTypeAndData) == 1 {
|
||||
return keyType, nil, nil
|
||||
}
|
||||
|
||||
// Otherwise, we return the key, along with any data that it may
|
||||
// contain.
|
||||
return keyType, keyTypeAndData[1:], nil
|
||||
|
||||
}
|
||||
|
||||
// readTxOut is a limited version of wire.ReadTxOut, because the latter is not
|
||||
// exported.
|
||||
func readTxOut(txout []byte) (*wire.TxOut, error) {
|
||||
if len(txout) < 10 {
|
||||
return nil, ErrInvalidPsbtFormat
|
||||
}
|
||||
|
||||
valueSer := binary.LittleEndian.Uint64(txout[:8])
|
||||
scriptPubKey := txout[9:]
|
||||
|
||||
return wire.NewTxOut(int64(valueSer), scriptPubKey), nil
|
||||
}
|
||||
|
||||
// SumUtxoInputValues tries to extract the sum of all inputs specified in the
|
||||
// UTXO fields of the PSBT. An error is returned if an input is specified that
|
||||
// does not contain any UTXO information.
|
||||
func SumUtxoInputValues(packet *Packet) (int64, error) {
|
||||
// We take the TX ins of the unsigned TX as the truth for how many
|
||||
// inputs there should be, as the fields in the extra data part of the
|
||||
// PSBT can be empty.
|
||||
if len(packet.UnsignedTx.TxIn) != len(packet.Inputs) {
|
||||
return 0, fmt.Errorf("TX input length doesn't match PSBT " +
|
||||
"input length")
|
||||
}
|
||||
|
||||
inputSum := int64(0)
|
||||
for idx, in := range packet.Inputs {
|
||||
switch {
|
||||
case in.WitnessUtxo != nil:
|
||||
// Witness UTXOs only need to reference the TxOut.
|
||||
inputSum += in.WitnessUtxo.Value
|
||||
|
||||
case in.NonWitnessUtxo != nil:
|
||||
// Non-witness UTXOs reference to the whole transaction
|
||||
// the UTXO resides in.
|
||||
utxOuts := in.NonWitnessUtxo.TxOut
|
||||
txIn := packet.UnsignedTx.TxIn[idx]
|
||||
|
||||
// Check that utxOuts actually has enough space to
|
||||
// contain the previous outpoint's index.
|
||||
opIdx := txIn.PreviousOutPoint.Index
|
||||
if opIdx >= uint32(len(utxOuts)) {
|
||||
return 0, fmt.Errorf("input %d has malformed "+
|
||||
"TxOut field", idx)
|
||||
}
|
||||
|
||||
inputSum += utxOuts[txIn.PreviousOutPoint.Index].Value
|
||||
|
||||
default:
|
||||
return 0, fmt.Errorf("input %d has no UTXO information",
|
||||
idx)
|
||||
}
|
||||
}
|
||||
return inputSum, nil
|
||||
}
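
// exampleTxFee is a hypothetical sketch of the usual reason to call
// SumUtxoInputValues: the fee is the input sum minus the sum of the outputs
// already present in the unsigned transaction.
func exampleTxFee(packet *Packet) (int64, error) {
	inputSum, err := SumUtxoInputValues(packet)
	if err != nil {
		return 0, err
	}

	var outputSum int64
	for _, txOut := range packet.UnsignedTx.TxOut {
		outputSum += txOut.Value
	}

	return inputSum - outputSum, nil
}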
|
||||
|
||||
// TxOutsEqual returns true if two transaction outputs are equal.
|
||||
func TxOutsEqual(out1, out2 *wire.TxOut) bool {
|
||||
if out1 == nil || out2 == nil {
|
||||
return out1 == out2
|
||||
}
|
||||
return out1.Value == out2.Value &&
|
||||
bytes.Equal(out1.PkScript, out2.PkScript)
|
||||
}
|
||||
|
||||
// VerifyOutputsEqual verifies that the two slices of transaction outputs are
|
||||
// deep equal to each other. We do the length check and manual loop to provide
|
||||
// better error messages to the user than just returning "not equal".
|
||||
func VerifyOutputsEqual(outs1, outs2 []*wire.TxOut) error {
|
||||
if len(outs1) != len(outs2) {
|
||||
return fmt.Errorf("number of outputs are different")
|
||||
}
|
||||
for idx, out := range outs1 {
|
||||
// There is a byte slice in the output so we can't use the
|
||||
// equality operator.
|
||||
if !TxOutsEqual(out, outs2[idx]) {
|
||||
return fmt.Errorf("output %d is different", idx)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyInputPrevOutpointsEqual verifies that the previous outpoints of the
|
||||
// two slices of transaction inputs are deep equal to each other. We do the
|
||||
// length check and manual loop to provide better error messages to the user
|
||||
// than just returning "not equal".
|
||||
func VerifyInputPrevOutpointsEqual(ins1, ins2 []*wire.TxIn) error {
|
||||
if len(ins1) != len(ins2) {
|
||||
return fmt.Errorf("number of inputs are different")
|
||||
}
|
||||
for idx, in := range ins1 {
|
||||
if in.PreviousOutPoint != ins2[idx].PreviousOutPoint {
|
||||
return fmt.Errorf("previous outpoint of input %d is "+
|
||||
"different", idx)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyInputOutputLen makes sure a packet is non-nil, contains a non-nil wire
|
||||
// transaction and that the wire input/output lengths match the partial input/
|
||||
// output lengths. A caller also can specify if they expect any inputs and/or
|
||||
// outputs to be contained in the packet.
|
||||
func VerifyInputOutputLen(packet *Packet, needInputs, needOutputs bool) error {
|
||||
if packet == nil || packet.UnsignedTx == nil {
|
||||
return fmt.Errorf("PSBT packet cannot be nil")
|
||||
}
|
||||
|
||||
if len(packet.UnsignedTx.TxIn) != len(packet.Inputs) {
|
||||
return fmt.Errorf("invalid PSBT, wire inputs don't match " +
|
||||
"partial inputs")
|
||||
}
|
||||
if len(packet.UnsignedTx.TxOut) != len(packet.Outputs) {
|
||||
return fmt.Errorf("invalid PSBT, wire outputs don't match " +
|
||||
"partial outputs")
|
||||
}
|
||||
|
||||
if needInputs && len(packet.UnsignedTx.TxIn) == 0 {
|
||||
return fmt.Errorf("PSBT packet must contain at least one " +
|
||||
"input")
|
||||
}
|
||||
if needOutputs && len(packet.UnsignedTx.TxOut) == 0 {
|
||||
return fmt.Errorf("PSBT packet must contain at least one " +
|
||||
"output")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFromSignedTx is a utility function to create a packet from an
|
||||
// already-signed transaction. Returned are: an unsigned transaction packet
// (with all signature data stripped), a list of scriptSigs, one per input,
// and a list of witnesses, one per input.
|
||||
func NewFromSignedTx(tx *wire.MsgTx) (*Packet, [][]byte,
|
||||
[]wire.TxWitness, error) {
|
||||
|
||||
scriptSigs := make([][]byte, 0, len(tx.TxIn))
|
||||
witnesses := make([]wire.TxWitness, 0, len(tx.TxIn))
|
||||
tx2 := tx.Copy()
|
||||
|
||||
// Blank out signature info in inputs
|
||||
for i, tin := range tx2.TxIn {
|
||||
tin.SignatureScript = nil
|
||||
scriptSigs = append(scriptSigs, tx.TxIn[i].SignatureScript)
|
||||
tin.Witness = nil
|
||||
witnesses = append(witnesses, tx.TxIn[i].Witness)
|
||||
}
|
||||
|
||||
// Outputs always contain: (value, scriptPubkey) so don't need
|
||||
// amending. Now tx2 is tx with all signing data stripped out
|
||||
unsignedPsbt, err := NewFromUnsignedTx(tx2)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return unsignedPsbt, scriptSigs, witnesses, nil
|
||||
}
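
// exampleRebuildFromSignedTx is a hypothetical sketch of using
// NewFromSignedTx to re-derive a PSBT from a transaction signed outside the
// PSBT workflow: the returned scriptSigs and witnesses are re-attached as
// finalized input data (FinalScriptSig and FinalScriptWitness).
func exampleRebuildFromSignedTx(signedTx *wire.MsgTx) (*Packet, error) {
	packet, scriptSigs, witnesses, err := NewFromSignedTx(signedTx)
	if err != nil {
		return nil, err
	}

	for i := range packet.Inputs {
		packet.Inputs[i].FinalScriptSig = scriptSigs[i]

		if len(witnesses[i]) > 0 {
			// The witness is stored in its serialized form.
			var buf bytes.Buffer
			if err := WriteTxWitness(&buf, witnesses[i]); err != nil {
				return nil, err
			}
			packet.Inputs[i].FinalScriptWitness = buf.Bytes()
		}
	}

	return packet, nil
}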
|
370
btcutil/psbt/utils_test.go
Normal file
|
@ -0,0 +1,370 @@
|
|||
package psbt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
)
|
||||
|
||||
func TestSumUtxoInputValues(t *testing.T) {
|
||||
// Expect sum to fail for packet with non-matching txIn and PInputs.
|
||||
tx := wire.NewMsgTx(2)
|
||||
badPacket, err := NewFromUnsignedTx(tx)
|
||||
if err != nil {
|
||||
t.Fatalf("could not create packet from TX: %v", err)
|
||||
}
|
||||
badPacket.Inputs = append(badPacket.Inputs, PInput{})
|
||||
|
||||
_, err = SumUtxoInputValues(badPacket)
|
||||
if err == nil {
|
||||
t.Fatalf("expected sum of bad packet to fail")
|
||||
}
|
||||
|
||||
// Expect sum to fail if any inputs don't have UTXO information added.
|
||||
op := []*wire.OutPoint{{}, {}}
|
||||
noUtxoInfoPacket, err := New(op, nil, 2, 0, []uint32{0, 0})
|
||||
if err != nil {
|
||||
t.Fatalf("could not create new packet: %v", err)
|
||||
}
|
||||
|
||||
_, err = SumUtxoInputValues(noUtxoInfoPacket)
|
||||
if err == nil {
|
||||
t.Fatalf("expected sum of missing UTXO info to fail")
|
||||
}
|
||||
|
||||
// Create a packet that is OK and contains both witness and non-witness
|
||||
// UTXO information.
|
||||
okPacket, err := New(op, nil, 2, 0, []uint32{0, 0})
|
||||
if err != nil {
|
||||
t.Fatalf("could not create new packet: %v", err)
|
||||
}
|
||||
	okPacket.Inputs[0].WitnessUtxo = &wire.TxOut{Value: 1234}
	okPacket.Inputs[1].NonWitnessUtxo = &wire.MsgTx{
		TxOut: []*wire.TxOut{{Value: 6543}},
	}

	sum, err := SumUtxoInputValues(okPacket)
	if err != nil {
		t.Fatalf("could not sum input: %v", err)
	}
	if sum != (1234 + 6543) {
		t.Fatalf("unexpected sum, got %d wanted %d", sum, 1234+6543)
	}

	// Create a malformed packet where NonWitnessUtxo.TxOut does not
	// contain the index specified by the PreviousOutPoint in the
	// packet's Unsigned.TxIn field.
	badOp := []*wire.OutPoint{{}, {Index: 500}}
	malformedPacket, err := New(badOp, nil, 2, 0, []uint32{0, 0})
	if err != nil {
		t.Fatalf("could not create malformed packet: %v", err)
	}
	malformedPacket.Inputs[0].WitnessUtxo = &wire.TxOut{Value: 1234}
	malformedPacket.Inputs[1].NonWitnessUtxo = &wire.MsgTx{
		TxOut: []*wire.TxOut{{Value: 6543}},
	}

	_, err = SumUtxoInputValues(malformedPacket)
	if err == nil {
		t.Fatalf("expected sum of malformed packet to fail")
	}
}

func TestTxOutsEqual(t *testing.T) {
	testCases := []struct {
		name        string
		out1        *wire.TxOut
		out2        *wire.TxOut
		expectEqual bool
	}{{
		name:        "both nil",
		out1:        nil,
		out2:        nil,
		expectEqual: true,
	}, {
		name:        "one nil",
		out1:        nil,
		out2:        &wire.TxOut{},
		expectEqual: false,
	}, {
		name:        "both empty",
		out1:        &wire.TxOut{},
		out2:        &wire.TxOut{},
		expectEqual: true,
	}, {
		name: "one pk script set",
		out1: &wire.TxOut{},
		out2: &wire.TxOut{
			PkScript: []byte("foo"),
		},
		expectEqual: false,
	}, {
		name: "both fully set",
		out1: &wire.TxOut{
			Value:    1234,
			PkScript: []byte("bar"),
		},
		out2: &wire.TxOut{
			Value:    1234,
			PkScript: []byte("bar"),
		},
		expectEqual: true,
	}}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			result := TxOutsEqual(tc.out1, tc.out2)
			if result != tc.expectEqual {
				t.Fatalf("unexpected result, got %v wanted %v",
					result, tc.expectEqual)
			}
		})
	}
}

func TestVerifyOutputsEqual(t *testing.T) {
	testCases := []struct {
		name      string
		outs1     []*wire.TxOut
		outs2     []*wire.TxOut
		expectErr bool
	}{{
		name:      "both nil",
		outs1:     nil,
		outs2:     nil,
		expectErr: false,
	}, {
		name:      "one nil",
		outs1:     nil,
		outs2:     []*wire.TxOut{{}},
		expectErr: true,
	}, {
		name:      "both empty",
		outs1:     []*wire.TxOut{{}},
		outs2:     []*wire.TxOut{{}},
		expectErr: false,
	}, {
		name:  "one pk script set",
		outs1: []*wire.TxOut{{}},
		outs2: []*wire.TxOut{{
			PkScript: []byte("foo"),
		}},
		expectErr: true,
	}, {
		name: "both fully set",
		outs1: []*wire.TxOut{{
			Value:    1234,
			PkScript: []byte("bar"),
		}, {}},
		outs2: []*wire.TxOut{{
			Value:    1234,
			PkScript: []byte("bar"),
		}, {}},
		expectErr: false,
	}}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			err := VerifyOutputsEqual(tc.outs1, tc.outs2)
			if (tc.expectErr && err == nil) ||
				(!tc.expectErr && err != nil) {

				t.Fatalf("got error '%v' but wanted it to be "+
					"nil: %v", err, tc.expectErr)
			}
		})
	}
}

func TestVerifyInputPrevOutpointsEqual(t *testing.T) {
	testCases := []struct {
		name      string
		ins1      []*wire.TxIn
		ins2      []*wire.TxIn
		expectErr bool
	}{{
		name:      "both nil",
		ins1:      nil,
		ins2:      nil,
		expectErr: false,
	}, {
		name:      "one nil",
		ins1:      nil,
		ins2:      []*wire.TxIn{{}},
		expectErr: true,
	}, {
		name:      "both empty",
		ins1:      []*wire.TxIn{{}},
		ins2:      []*wire.TxIn{{}},
		expectErr: false,
	}, {
		name: "one previous output set",
		ins1: []*wire.TxIn{{}},
		ins2: []*wire.TxIn{{
			PreviousOutPoint: wire.OutPoint{
				Hash:  chainhash.Hash{11, 22, 33},
				Index: 7,
			},
		}},
		expectErr: true,
	}, {
		name: "both fully set",
		ins1: []*wire.TxIn{{
			PreviousOutPoint: wire.OutPoint{
				Hash:  chainhash.Hash{11, 22, 33},
				Index: 7,
			},
		}, {}},
		ins2: []*wire.TxIn{{
			PreviousOutPoint: wire.OutPoint{
				Hash:  chainhash.Hash{11, 22, 33},
				Index: 7,
			},
		}, {}},
		expectErr: false,
	}}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			err := VerifyInputPrevOutpointsEqual(tc.ins1, tc.ins2)
			if (tc.expectErr && err == nil) ||
				(!tc.expectErr && err != nil) {

				t.Fatalf("got error '%v' but wanted it to be "+
					"nil: %v", err, tc.expectErr)
			}
		})
	}
}

func TestVerifyInputOutputLen(t *testing.T) {
	testCases := []struct {
		name        string
		packet      *Packet
		needInputs  bool
		needOutputs bool
		expectErr   bool
	}{{
		name:      "packet nil",
		packet:    nil,
		expectErr: true,
	}, {
		name:      "wire tx nil",
		packet:    &Packet{},
		expectErr: true,
	}, {
		name: "both empty don't need outputs",
		packet: &Packet{
			UnsignedTx: &wire.MsgTx{},
		},
		expectErr: false,
	}, {
		name: "both empty but need outputs",
		packet: &Packet{
			UnsignedTx: &wire.MsgTx{},
		},
		needOutputs: true,
		expectErr:   true,
	}, {
		name: "both empty but need inputs",
		packet: &Packet{
			UnsignedTx: &wire.MsgTx{},
		},
		needInputs: true,
		expectErr:  true,
	}, {
		name: "input len mismatch",
		packet: &Packet{
			UnsignedTx: &wire.MsgTx{
				TxIn: []*wire.TxIn{{}},
			},
		},
		needInputs: true,
		expectErr:  true,
	}, {
		name: "output len mismatch",
		packet: &Packet{
			UnsignedTx: &wire.MsgTx{
				TxOut: []*wire.TxOut{{}},
			},
		},
		needOutputs: true,
		expectErr:   true,
	}, {
		name: "all fully set",
		packet: &Packet{
			UnsignedTx: &wire.MsgTx{
				TxIn:  []*wire.TxIn{{}},
				TxOut: []*wire.TxOut{{}},
			},
			Inputs:  []PInput{{}},
			Outputs: []POutput{{}},
		},
		needInputs:  true,
		needOutputs: true,
		expectErr:   false,
	}}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			err := VerifyInputOutputLen(
				tc.packet, tc.needInputs, tc.needOutputs,
			)
			if (tc.expectErr && err == nil) ||
				(!tc.expectErr && err != nil) {

				t.Fatalf("got error '%v' but wanted it to be "+
					"nil: %v", err, tc.expectErr)
			}
		})
	}
}

func TestNewFromSignedTx(t *testing.T) {
	orig := &wire.MsgTx{
		TxIn: []*wire.TxIn{{
			PreviousOutPoint: wire.OutPoint{},
			SignatureScript:  []byte("script"),
			Witness:          [][]byte{[]byte("witness")},
			Sequence:         1234,
		}},
		TxOut: []*wire.TxOut{{
			PkScript: []byte{77, 88},
			Value:    99,
		}},
	}

	packet, scripts, witnesses, err := NewFromSignedTx(orig)
	if err != nil {
		t.Fatalf("could not create packet from signed TX: %v", err)
	}

	tx := packet.UnsignedTx
	expectedTxIn := []*wire.TxIn{{
		PreviousOutPoint: wire.OutPoint{},
		Sequence:         1234,
	}}
	if !reflect.DeepEqual(tx.TxIn, expectedTxIn) {
		t.Fatalf("unexpected txin, got %#v wanted %#v",
			tx.TxIn, expectedTxIn)
	}
	if !reflect.DeepEqual(tx.TxOut, orig.TxOut) {
		t.Fatalf("unexpected txout, got %#v wanted %#v",
			tx.TxOut, orig.TxOut)
	}
	if len(scripts) != 1 || !bytes.Equal(scripts[0], []byte("script")) {
		t.Fatalf("script not extracted correctly")
	}
	if len(witnesses) != 1 ||
		!bytes.Equal(witnesses[0][0], []byte("witness")) {

		t.Fatalf("witness not extracted correctly")
	}
}
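The tests above exercise the psbt packet helpers in-package. As a rough sketch of calling the same helpers from application code (the `github.com/btcsuite/btcd/btcutil/psbt` import path and the dummy outpoints and values are illustrative assumptions, not part of this diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/btcsuite/btcd/btcutil/psbt"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// Build a two-input packet the same way the test does, then let the
	// helper add up the values of the referenced UTXOs.
	outPoints := []*wire.OutPoint{
		{Index: 0},
		{Hash: chainhash.Hash{1}, Index: 0},
	}
	packet, err := psbt.New(outPoints, nil, 2, 0, []uint32{0, 0})
	if err != nil {
		log.Fatal(err)
	}

	// One input carries a witness UTXO, the other a full previous
	// transaction; SumUtxoInputValues handles both cases.
	packet.Inputs[0].WitnessUtxo = &wire.TxOut{Value: 1234}
	packet.Inputs[1].NonWitnessUtxo = &wire.MsgTx{
		TxOut: []*wire.TxOut{{Value: 6543}},
	}

	sum, err := psbt.SumUtxoInputValues(packet)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("total input value:", sum) // 7777
}
```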
124
btcutil/tx.go
Normal file
@@ -0,0 +1,124 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcutil

import (
	"bytes"
	"io"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)

// TxIndexUnknown is the value returned for a transaction index that is unknown.
// This is typically because the transaction has not been inserted into a block
// yet.
const TxIndexUnknown = -1

// Tx defines a bitcoin transaction that provides easier and more efficient
// manipulation of raw transactions. It also memoizes the hash for the
// transaction on its first access so subsequent accesses don't have to repeat
// the relatively expensive hashing operations.
type Tx struct {
	msgTx         *wire.MsgTx     // Underlying MsgTx
	txHash        *chainhash.Hash // Cached transaction hash
	txHashWitness *chainhash.Hash // Cached transaction witness hash
	txHasWitness  *bool           // If the transaction has witness data
	txIndex       int             // Position within a block or TxIndexUnknown
}

// MsgTx returns the underlying wire.MsgTx for the transaction.
func (t *Tx) MsgTx() *wire.MsgTx {
	// Return the cached transaction.
	return t.msgTx
}

// Hash returns the hash of the transaction. This is equivalent to
// calling TxHash on the underlying wire.MsgTx, however it caches the
// result so subsequent calls are more efficient.
func (t *Tx) Hash() *chainhash.Hash {
	// Return the cached hash if it has already been generated.
	if t.txHash != nil {
		return t.txHash
	}

	// Cache the hash and return it.
	hash := t.msgTx.TxHash()
	t.txHash = &hash
	return &hash
}

// WitnessHash returns the witness hash (wtxid) of the transaction. This is
// equivalent to calling WitnessHash on the underlying wire.MsgTx, however it
// caches the result so subsequent calls are more efficient.
func (t *Tx) WitnessHash() *chainhash.Hash {
	// Return the cached hash if it has already been generated.
	if t.txHashWitness != nil {
		return t.txHashWitness
	}

	// Cache the hash and return it.
	hash := t.msgTx.WitnessHash()
	t.txHashWitness = &hash
	return &hash
}

// HasWitness returns false if none of the inputs within the transaction
// contain witness data, true otherwise. This is equivalent to calling
// HasWitness on the underlying wire.MsgTx, however it caches the result so
// subsequent calls are more efficient.
func (t *Tx) HasWitness() bool {
	if t.txHasWitness != nil {
		return *t.txHasWitness
	}

	hasWitness := t.msgTx.HasWitness()
	t.txHasWitness = &hasWitness
	return hasWitness
}

// Index returns the saved index of the transaction within a block. This value
// will be TxIndexUnknown if it hasn't already explicitly been set.
func (t *Tx) Index() int {
	return t.txIndex
}

// SetIndex sets the index of the transaction within a block.
func (t *Tx) SetIndex(index int) {
	t.txIndex = index
}

// NewTx returns a new instance of a bitcoin transaction given an underlying
// wire.MsgTx. See Tx.
func NewTx(msgTx *wire.MsgTx) *Tx {
	return &Tx{
		msgTx:   msgTx,
		txIndex: TxIndexUnknown,
	}
}

// NewTxFromBytes returns a new instance of a bitcoin transaction given the
// serialized bytes. See Tx.
func NewTxFromBytes(serializedTx []byte) (*Tx, error) {
	br := bytes.NewReader(serializedTx)
	return NewTxFromReader(br)
}

// NewTxFromReader returns a new instance of a bitcoin transaction given a
// Reader to deserialize the transaction. See Tx.
func NewTxFromReader(r io.Reader) (*Tx, error) {
	// Deserialize the bytes into a MsgTx.
	var msgTx wire.MsgTx
	err := msgTx.Deserialize(r)
	if err != nil {
		return nil, err
	}

	t := Tx{
		msgTx:   &msgTx,
		txIndex: TxIndexUnknown,
	}
	return &t, nil
}
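A minimal usage sketch for the Tx wrapper above, showing the hash memoization and the block index defaulting to TxIndexUnknown (the throwaway transaction is made up purely for illustration):

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// Build a throwaway transaction; any wire.MsgTx works here.
	msgTx := wire.NewMsgTx(wire.TxVersion)
	msgTx.AddTxOut(wire.NewTxOut(1000, []byte{0x51})) // OP_TRUE script

	tx := btcutil.NewTx(msgTx)

	// The first call computes and caches the hash; later calls return
	// the cached value without re-hashing.
	fmt.Println("txid:", tx.Hash())
	fmt.Println("txid again (cached):", tx.Hash())

	// The block index is TxIndexUnknown (-1) until explicitly set.
	fmt.Println("index before SetIndex:", tx.Index())
	tx.SetIndex(3)
	fmt.Println("index after SetIndex:", tx.Index())
}
```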
136
btcutil/tx_test.go
Normal file
@@ -0,0 +1,136 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcutil_test

import (
	"bytes"
	"io"
	"reflect"
	"testing"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/davecgh/go-spew/spew"
)

// TestTx tests the API for Tx.
func TestTx(t *testing.T) {
	testTx := Block100000.Transactions[0]
	tx := btcutil.NewTx(testTx)

	// Ensure we get the same data back out.
	if msgTx := tx.MsgTx(); !reflect.DeepEqual(msgTx, testTx) {
		t.Errorf("MsgTx: mismatched MsgTx - got %v, want %v",
			spew.Sdump(msgTx), spew.Sdump(testTx))
	}

	// Ensure transaction index set and get work properly.
	wantIndex := 0
	tx.SetIndex(0)
	if gotIndex := tx.Index(); gotIndex != wantIndex {
		t.Errorf("Index: mismatched index - got %v, want %v",
			gotIndex, wantIndex)
	}

	// Hash for block 100,000 transaction 0.
	wantHashStr := "8c14f0db3df150123e6f3dbbf30f8b955a8249b62ac1d1ff16284aefa3d06d87"
	wantHash, err := chainhash.NewHashFromStr(wantHashStr)
	if err != nil {
		t.Errorf("NewHashFromStr: %v", err)
	}

	// Request the hash multiple times to test generation and caching.
	for i := 0; i < 2; i++ {
		hash := tx.Hash()
		if !hash.IsEqual(wantHash) {
			t.Errorf("Hash #%d mismatched hash - got %v, want %v", i,
				hash, wantHash)
		}
	}
}

// TestNewTxFromBytes tests creation of a Tx from serialized bytes.
func TestNewTxFromBytes(t *testing.T) {
	// Serialize the test transaction.
	testTx := Block100000.Transactions[0]
	var testTxBuf bytes.Buffer
	err := testTx.Serialize(&testTxBuf)
	if err != nil {
		t.Errorf("Serialize: %v", err)
	}
	testTxBytes := testTxBuf.Bytes()

	// Create a new transaction from the serialized bytes.
	tx, err := btcutil.NewTxFromBytes(testTxBytes)
	if err != nil {
		t.Errorf("NewTxFromBytes: %v", err)
		return
	}

	// Ensure the generated MsgTx is correct.
	if msgTx := tx.MsgTx(); !reflect.DeepEqual(msgTx, testTx) {
		t.Errorf("MsgTx: mismatched MsgTx - got %v, want %v",
			spew.Sdump(msgTx), spew.Sdump(testTx))
	}
}

// TestTxErrors tests the error paths for the Tx API.
func TestTxErrors(t *testing.T) {
	// Serialize the test transaction.
	testTx := Block100000.Transactions[0]
	var testTxBuf bytes.Buffer
	err := testTx.Serialize(&testTxBuf)
	if err != nil {
		t.Errorf("Serialize: %v", err)
	}
	testTxBytes := testTxBuf.Bytes()

	// Truncate the transaction byte buffer to force errors.
	shortBytes := testTxBytes[:4]
	_, err = btcutil.NewTxFromBytes(shortBytes)
	if err != io.EOF {
		t.Errorf("NewTxFromBytes: did not get expected error - "+
			"got %v, want %v", err, io.EOF)
	}
}

// TestTxHasWitness tests the HasWitness() method.
func TestTxHasWitness(t *testing.T) {
	msgTx := Block100000.Transactions[0] // contains witness data
	tx := btcutil.NewTx(msgTx)

	tx.WitnessHash() // Populate the witness hash cache
	tx.HasWitness()  // Should not fail (see btcsuite/btcd#1543)

	if !tx.HasWitness() {
		t.Errorf("HasWitness: got false, want true")
	}

	for _, msgTxWithoutWitness := range Block100000.Transactions[1:] {
		txWithoutWitness := btcutil.NewTx(msgTxWithoutWitness)
		if txWithoutWitness.HasWitness() {
			t.Errorf("HasWitness: got true, want false")
		}
	}
}

// TestTxWitnessHash tests the WitnessHash() method.
func TestTxWitnessHash(t *testing.T) {
	msgTx := Block100000.Transactions[0] // contains witness data
	tx := btcutil.NewTx(msgTx)

	if tx.WitnessHash().IsEqual(tx.Hash()) {
		t.Errorf("WitnessHash: witness hash and tx id must NOT be the same - "+
			"got %v, want %v", tx.WitnessHash(), tx.Hash())
	}

	for _, msgTxWithoutWitness := range Block100000.Transactions[1:] {
		txWithoutWitness := btcutil.NewTx(msgTxWithoutWitness)
		if !txWithoutWitness.WitnessHash().IsEqual(txWithoutWitness.Hash()) {
			t.Errorf("WitnessHash: witness hash and tx id must be the same - "+
				"got %v, want %v", txWithoutWitness.WitnessHash(), txWithoutWitness.Hash())
		}
	}
}
31
btcutil/txsort/README.md
Normal file
@@ -0,0 +1,31 @@
txsort
======

[](https://travis-ci.org/btcsuite/btcutil)
[](http://copyfree.org)
[](http://godoc.org/github.com/btcsuite/btcd/btcutil/txsort)

Package txsort provides transaction sorting according to [BIP 69](https://github.com/bitcoin/bips/blob/master/bip-0069.mediawiki).

BIP 69 defines a standard lexicographical sort order of transaction inputs and
outputs. This is useful to standardize transactions for faster multi-party
agreement as well as preventing information leaks in a single-party use case.

The BIP goes into more detail, but for a quick and simplistic overview, the
order for inputs is defined as first sorting on the previous output hash and
then on the index as a tie breaker. The order for outputs is defined as first
sorting on the amount and then on the raw public key script bytes as a tie
breaker.

A comprehensive suite of tests is provided to ensure proper functionality.

## Installation and Updating

```bash
$ go get -u github.com/btcsuite/btcd/btcutil/txsort
```

## License

Package txsort is licensed under the [copyfree](http://copyfree.org) ISC
License.
20
btcutil/txsort/doc.go
Normal file
@@ -0,0 +1,20 @@
// Copyright (c) 2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package txsort provides transaction sorting according to BIP 69.

Overview

BIP 69 defines a standard lexicographical sort order of transaction inputs and
outputs. This is useful to standardize transactions for faster multi-party
agreement as well as preventing information leaks in a single-party use case.

The BIP goes into more detail, but for a quick and simplistic overview, the
order for inputs is defined as first sorting on the previous output hash and
then on the index as a tie breaker. The order for outputs is defined as first
sorting on the amount and then on the raw public key script bytes as a tie
breaker.
*/
package txsort
1
btcutil/txsort/testdata/bip69-1.hex
vendored
Normal file
File diff suppressed because one or more lines are too long

1
btcutil/txsort/testdata/bip69-2.hex
vendored
Normal file
@@ -0,0 +1 @@
010000000255605dc6f5c3dc148b6da58442b0b2cd422be385eab2ebea4119ee9c268d28350000000049483045022100aa46504baa86df8a33b1192b1b9367b4d729dc41e389f2c04f3e5c7f0559aae702205e82253a54bf5c4f65b7428551554b2045167d6d206dfe6a2e198127d3f7df1501ffffffff55605dc6f5c3dc148b6da58442b0b2cd422be385eab2ebea4119ee9c268d2835010000004847304402202329484c35fa9d6bb32a55a70c0982f606ce0e3634b69006138683bcd12cbb6602200c28feb1e2555c3210f1dddb299738b4ff8bbe9667b68cb8764b5ac17b7adf0001ffffffff0200e1f505000000004341046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00180d8f000000004341044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad769f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875a390f67c1f6c94cfc617c0ea45afac00000000

1
btcutil/txsort/testdata/bip69-3.hex
vendored
Normal file
@@ -0,0 +1 @@
0100000001d992e5a888a86d4c7a6a69167a4728ee69497509740fc5f456a24528c340219a000000008b483045022100f0519bdc9282ff476da1323b8ef7ffe33f495c1a8d52cc522b437022d83f6a230220159b61d197fbae01b4a66622a23bc3f1def65d5fa24efd5c26fa872f3a246b8e014104839f9023296a1fabb133140128ca2709f6818c7d099491690bd8ac0fd55279def6a2ceb6ab7b5e4a71889b6e739f09509565eec789e86886f6f936fa42097adeffffffff02000fe208010000001976a914948c765a6914d43f2a7ac177da2c2f6b52de3d7c88ac00e32321000000001976a9140c34f4e29ab5a615d5ea28d4817f12b137d62ed588ac00000000

1
btcutil/txsort/testdata/bip69-4.hex
vendored
Normal file
@@ -0,0 +1 @@
01000000059daf0abe7a92618546a9dbcfd65869b6178c66ec21ccfda878c1175979cfd9ef000000004a493046022100c2f7f25be5de6ce88ac3c1a519514379e91f39b31ddff279a3db0b1a229b708b022100b29efbdbd9837cc6a6c7318aa4900ed7e4d65662c34d1622a2035a3a5534a99a01ffffffffd516330ebdf075948da56db13d22632a4fb941122df2884397dda45d451acefb0000000048473044022051243debe6d4f2b433bee0cee78c5c4073ead0e3bde54296dbed6176e128659c022044417bfe16f44eb7b6eb0cdf077b9ce972a332e15395c09ca5e4f602958d266101ffffffffe1f5aa33961227b3c344e57179417ce01b7ccd421117fe2336289b70489883f900000000484730440220593252bb992ce3c85baf28d6e3aa32065816271d2c822398fe7ee28a856bc943022066d429dd5025d3c86fd8fd8a58e183a844bd94aa312cefe00388f57c85b0ca3201ffffffffe207e83718129505e6a7484831442f668164ae659fddb82e9e5421a081fb90d50000000049483045022067cf27eb733e5bcae412a586b25a74417c237161a084167c2a0b439abfebdcb2022100efcc6baa6824b4c5205aa967e0b76d31abf89e738d4b6b014e788c9a8cccaf0c01ffffffffe23b8d9d80a9e9d977fab3c94dbe37befee63822443c3ec5ae5a713ede66c3940000000049483045022020f2eb35036666b1debe0d1d2e77a36d5d9c4e96c1dba23f5100f193dbf524790221008ce79bc1321fb4357c6daee818038d41544749127751726e46b2b320c8b565a201ffffffff0200ba1dd2050000001976a914366a27645806e817a6cd40bc869bdad92fe5509188ac40420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac00000000

1
btcutil/txsort/testdata/bip69-5.hex
vendored
Normal file
@@ -0,0 +1 @@
01000000011f636d0003f673b3aeea4971daef16b8eed784cf6e8019a5ae7da4985fbb06e5000000008a47304402205103941e2b11e746dfa817888d422f6e7f4d16dbbfb8ffa61d15ffb924a84b8802202fe861b0f23f17139d15a3374bfc6c7196d371f3d1a324e31cc0aadbba87e53c0141049e7e1b251a7e26cae9ee7553b278ef58ef3c28b4b20134d51b747d9b18b0a19b94b66cef320e2549dec0ea3d725cb4c742f368928b1fb74b4603e24a1e262c80ffffffff0240420f00000000001976a914bcfa0e27218a7c97257b351b03a9eac95c25a23988ac40420f00000000001976a9140c6a68f20bafc678164d171ee4f077adfa9b091688ac00000000

95
btcutil/txsort/txsort.go
Normal file
@@ -0,0 +1,95 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

// Provides functions for sorting tx inputs and outputs according to BIP 69
// (https://github.com/bitcoin/bips/blob/master/bip-0069.mediawiki)

package txsort

import (
	"bytes"
	"sort"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)

// InPlaceSort modifies the passed transaction inputs and outputs to be sorted
// based on BIP 69.
//
// WARNING: This function must NOT be called with published transactions since
// it will mutate the transaction if it's not already sorted. This can cause
// issues if you mutate a tx in a block, for example, which would invalidate the
// block. It could also cause cached hashes, such as in a btcutil.Tx, to become
// invalidated.
//
// The function should only be used if the caller is creating the transaction or
// is otherwise 100% positive mutating will not cause adverse effects due to
// other dependencies.
func InPlaceSort(tx *wire.MsgTx) {
	sort.Sort(sortableInputSlice(tx.TxIn))
	sort.Sort(sortableOutputSlice(tx.TxOut))
}

// Sort returns a new transaction with the inputs and outputs sorted based on
// BIP 69. The passed transaction is not modified and the new transaction
// might have a different hash if any sorting was done.
func Sort(tx *wire.MsgTx) *wire.MsgTx {
	txCopy := tx.Copy()
	sort.Sort(sortableInputSlice(txCopy.TxIn))
	sort.Sort(sortableOutputSlice(txCopy.TxOut))
	return txCopy
}

// IsSorted checks whether tx has inputs and outputs sorted according to BIP
// 69.
func IsSorted(tx *wire.MsgTx) bool {
	if !sort.IsSorted(sortableInputSlice(tx.TxIn)) {
		return false
	}
	if !sort.IsSorted(sortableOutputSlice(tx.TxOut)) {
		return false
	}
	return true
}

type sortableInputSlice []*wire.TxIn
type sortableOutputSlice []*wire.TxOut

// For sortableInputSlice and sortableOutputSlice, three methods are needed
// to make them sortable with sort.Sort() -- Len, Less, and Swap.
// Len and Swap are trivial. Less is BIP 69 specific.
func (s sortableInputSlice) Len() int       { return len(s) }
func (s sortableOutputSlice) Len() int      { return len(s) }
func (s sortableOutputSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s sortableInputSlice) Swap(i, j int)  { s[i], s[j] = s[j], s[i] }

// Input comparison function.
// First sort based on input hash (reversed / rpc-style), then index.
func (s sortableInputSlice) Less(i, j int) bool {
	// If the previous output hashes are equal, compare the output indexes.
	ihash := s[i].PreviousOutPoint.Hash
	jhash := s[j].PreviousOutPoint.Hash
	if ihash == jhash {
		return s[i].PreviousOutPoint.Index < s[j].PreviousOutPoint.Index
	}

	// At this point, the hashes are not equal, so reverse them to
	// big-endian and return the result of the comparison.
	const hashSize = chainhash.HashSize
	for b := 0; b < hashSize/2; b++ {
		ihash[b], ihash[hashSize-1-b] = ihash[hashSize-1-b], ihash[b]
		jhash[b], jhash[hashSize-1-b] = jhash[hashSize-1-b], jhash[b]
	}
	return bytes.Compare(ihash[:], jhash[:]) == -1
}

// Output comparison function.
// First sort based on amount (smallest first), then PkScript.
func (s sortableOutputSlice) Less(i, j int) bool {
	if s[i].Value == s[j].Value {
		return bytes.Compare(s[i].PkScript, s[j].PkScript) < 0
	}
	return s[i].Value < s[j].Value
}
124
btcutil/txsort/txsort_test.go
Normal file
@@ -0,0 +1,124 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package txsort_test

import (
	"bytes"
	"encoding/hex"
	"io/ioutil"
	"path/filepath"
	"testing"

	"github.com/btcsuite/btcd/btcutil/txsort"
	"github.com/btcsuite/btcd/wire"
)

// TestSort ensures the transaction sorting works according to the BIP.
func TestSort(t *testing.T) {
	tests := []struct {
		name         string
		hexFile      string
		isSorted     bool
		unsortedHash string
		sortedHash   string
	}{
		{
			name:         "first test case from BIP 69 - sorts inputs only, based on hash",
			hexFile:      "bip69-1.hex",
			isSorted:     false,
			unsortedHash: "0a6a357e2f7796444e02638749d9611c008b253fb55f5dc88b739b230ed0c4c3",
			sortedHash:   "839503cb611a3e3734bd521c608f881be2293ff77b7384057ab994c794fce623",
		},
		{
			name:         "second test case from BIP 69 - already sorted",
			hexFile:      "bip69-2.hex",
			isSorted:     true,
			unsortedHash: "28204cad1d7fc1d199e8ef4fa22f182de6258a3eaafe1bbe56ebdcacd3069a5f",
			sortedHash:   "28204cad1d7fc1d199e8ef4fa22f182de6258a3eaafe1bbe56ebdcacd3069a5f",
		},
		{
			name:         "block 100001 tx[1] - sorts outputs only, based on amount",
			hexFile:      "bip69-3.hex",
			isSorted:     false,
			unsortedHash: "fbde5d03b027d2b9ba4cf5d4fecab9a99864df2637b25ea4cbcb1796ff6550ca",
			sortedHash:   "0a8c246c55f6b82f094d211f4f57167bf2ea4898741d218b09bdb2536fd8d13f",
		},
		{
			name:         "block 100001 tx[2] - sorts both inputs and outputs",
			hexFile:      "bip69-4.hex",
			isSorted:     false,
			unsortedHash: "8131ffb0a2c945ecaf9b9063e59558784f9c3a74741ce6ae2a18d0571dac15bb",
			sortedHash:   "a3196553b928b0b6154b002fa9a1ce875adabc486fedaaaf4c17430fd4486329",
		},
		{
			name:         "block 100998 tx[6] - sorts outputs only, based on output script",
			hexFile:      "bip69-5.hex",
			isSorted:     false,
			unsortedHash: "ff85e8fc92e71bbc217e3ea9a3bacb86b435e52b6df0b089d67302c293a2b81d",
			sortedHash:   "9a6c24746de024f77cac9b2138694f11101d1c66289261224ca52a25155a7c94",
		},
	}

	for _, test := range tests {
		// Load and deserialize the test transaction.
		filePath := filepath.Join("testdata", test.hexFile)
		txHexBytes, err := ioutil.ReadFile(filePath)
		if err != nil {
			t.Errorf("ReadFile (%s): failed to read test file: %v",
				test.name, err)
			continue
		}
		txBytes, err := hex.DecodeString(string(txHexBytes))
		if err != nil {
			t.Errorf("DecodeString (%s): failed to decode tx: %v",
				test.name, err)
			continue
		}
		var tx wire.MsgTx
		err = tx.Deserialize(bytes.NewReader(txBytes))
		if err != nil {
			t.Errorf("Deserialize (%s): unexpected error %v",
				test.name, err)
			continue
		}

		// Ensure the sort order of the original transaction matches the
		// expected value.
		if got := txsort.IsSorted(&tx); got != test.isSorted {
			t.Errorf("IsSorted (%s): sort does not match "+
				"expected - got %v, want %v", test.name, got,
				test.isSorted)
			continue
		}

		// Sort the transaction and ensure the resulting hash is the
		// expected value.
		sortedTx := txsort.Sort(&tx)
		if got := sortedTx.TxHash().String(); got != test.sortedHash {
			t.Errorf("Sort (%s): sorted hash does not match "+
				"expected - got %v, want %v", test.name, got,
				test.sortedHash)
			continue
		}

		// Ensure the original transaction is not modified.
		if got := tx.TxHash().String(); got != test.unsortedHash {
			t.Errorf("Sort (%s): unsorted hash does not match "+
				"expected - got %v, want %v", test.name, got,
				test.unsortedHash)
			continue
		}

		// Now sort the transaction using the mutable version and ensure
		// the resulting hash is the expected value.
		txsort.InPlaceSort(&tx)
		if got := tx.TxHash().String(); got != test.sortedHash {
			t.Errorf("SortMutate (%s): sorted hash does not match "+
				"expected - got %v, want %v", test.name, got,
				test.sortedHash)
			continue
		}
	}
}
169
btcutil/wif.go
Normal file
@@ -0,0 +1,169 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcutil

import (
	"bytes"
	"errors"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/btcutil/base58"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
)

// ErrMalformedPrivateKey describes an error where a WIF-encoded private
// key cannot be decoded due to being improperly formatted. This may occur
// if the byte length is incorrect or an unexpected magic number was
// encountered.
var ErrMalformedPrivateKey = errors.New("malformed private key")

// compressMagic is the magic byte used to identify a WIF encoding for
// an address created from a compressed serialized public key.
const compressMagic byte = 0x01

// WIF contains the individual components described by the Wallet Import Format
// (WIF). A WIF string is typically used to represent a private key and its
// associated address in a way that may be easily copied and imported into or
// exported from wallet software. WIF strings may be decoded into this
// structure by calling DecodeWIF or created with a user-provided private key
// by calling NewWIF.
type WIF struct {
	// PrivKey is the private key being imported or exported.
	PrivKey *btcec.PrivateKey

	// CompressPubKey specifies whether the address controlled by the
	// imported or exported private key was created by hashing a
	// compressed (33-byte) serialized public key, rather than an
	// uncompressed (65-byte) one.
	CompressPubKey bool

	// netID is the bitcoin network identifier byte used when
	// WIF encoding the private key.
	netID byte
}

// NewWIF creates a new WIF structure to export an address and its private key
// as a string encoded in the Wallet Import Format. The compress argument
// specifies whether the address intended to be imported or exported was created
// by serializing the public key compressed rather than uncompressed.
func NewWIF(privKey *btcec.PrivateKey, net *chaincfg.Params, compress bool) (*WIF, error) {
	if net == nil {
		return nil, errors.New("no network")
	}
	return &WIF{privKey, compress, net.PrivateKeyID}, nil
}

// IsForNet returns whether or not the decoded WIF structure is associated
// with the passed bitcoin network.
func (w *WIF) IsForNet(net *chaincfg.Params) bool {
	return w.netID == net.PrivateKeyID
}

// DecodeWIF creates a new WIF structure by decoding the string encoding of
// the import format.
//
// The WIF string must be a base58-encoded string of the following byte
// sequence:
//
//  * 1 byte to identify the network, must be 0x80 for mainnet or 0xef for
//    either testnet3 or the regression test network
//  * 32 bytes of a binary-encoded, big-endian, zero-padded private key
//  * Optional 1 byte (equal to 0x01) if the address being imported or exported
//    was created by taking the RIPEMD160 after SHA256 hash of a serialized
//    compressed (33-byte) public key
//  * 4 bytes of checksum, must equal the first four bytes of the double SHA256
//    of every byte before the checksum in this sequence
//
// If the base58-decoded byte sequence does not match this, DecodeWIF will
// return a non-nil error. ErrMalformedPrivateKey is returned when the WIF
// is of an impossible length or the compressed pubkey magic number does not
// equal the expected value of 0x01. ErrChecksumMismatch is returned if the
// expected WIF checksum does not match the calculated checksum.
func DecodeWIF(wif string) (*WIF, error) {
	decoded := base58.Decode(wif)
	decodedLen := len(decoded)
	var compress bool

	// Length of base58 decoded WIF must be 32 bytes + an optional 1 byte
	// (0x01) if compressed, plus 1 byte for netID + 4 bytes of checksum.
	switch decodedLen {
	case 1 + btcec.PrivKeyBytesLen + 1 + 4:
		if decoded[33] != compressMagic {
			return nil, ErrMalformedPrivateKey
		}
		compress = true
	case 1 + btcec.PrivKeyBytesLen + 4:
		compress = false
	default:
		return nil, ErrMalformedPrivateKey
	}

	// Checksum is first four bytes of double SHA256 of the identifier byte
	// and privKey. Verify this matches the final 4 bytes of the decoded
	// private key.
	var tosum []byte
	if compress {
		tosum = decoded[:1+btcec.PrivKeyBytesLen+1]
	} else {
		tosum = decoded[:1+btcec.PrivKeyBytesLen]
	}
	cksum := chainhash.DoubleHashB(tosum)[:4]
	if !bytes.Equal(cksum, decoded[decodedLen-4:]) {
		return nil, ErrChecksumMismatch
	}

	netID := decoded[0]
	privKeyBytes := decoded[1 : 1+btcec.PrivKeyBytesLen]
	privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes)
	return &WIF{privKey, compress, netID}, nil
}

// String creates the Wallet Import Format string encoding of a WIF structure.
// See DecodeWIF for a detailed breakdown of the format and requirements of
// a valid WIF string.
func (w *WIF) String() string {
	// Precalculate size. Maximum number of bytes before base58 encoding
	// is one byte for the network, 32 bytes of private key, possibly one
	// extra byte if the pubkey is to be compressed, and finally four
	// bytes of checksum.
	encodeLen := 1 + btcec.PrivKeyBytesLen + 4
	if w.CompressPubKey {
		encodeLen++
	}

	a := make([]byte, 0, encodeLen)
	a = append(a, w.netID)
	// Pad and append bytes manually, instead of using Serialize, to
	// avoid another call to make.
	a = paddedAppend(btcec.PrivKeyBytesLen, a, w.PrivKey.D.Bytes())
	if w.CompressPubKey {
		a = append(a, compressMagic)
	}
	cksum := chainhash.DoubleHashB(a)[:4]
	a = append(a, cksum...)
	return base58.Encode(a)
}

// SerializePubKey serializes the associated public key of the imported or
// exported private key in either a compressed or uncompressed format. The
// serialization format chosen depends on the value of w.CompressPubKey.
func (w *WIF) SerializePubKey() []byte {
	pk := (*btcec.PublicKey)(&w.PrivKey.PublicKey)
	if w.CompressPubKey {
		return pk.SerializeCompressed()
	}
	return pk.SerializeUncompressed()
}

// paddedAppend appends the src byte slice to dst, returning the new slice.
// If the length of the source is smaller than the passed size, leading zero
// bytes are appended to the dst slice before appending src.
func paddedAppend(size uint, dst, src []byte) []byte {
	for i := 0; i < int(size)-len(src); i++ {
		dst = append(dst, 0)
	}
	return append(dst, src...)
}
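A minimal sketch of round-tripping a key through the WIF encoding above, using the same btcec API style as the file itself (the key is freshly generated, so the printed string differs on every run):

```go
package main

import (
	"fmt"
	"log"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/chaincfg"
)

func main() {
	// Generate a throwaway private key and encode it as a mainnet,
	// compressed-pubkey WIF string.
	privKey, err := btcec.NewPrivateKey(btcec.S256())
	if err != nil {
		log.Fatal(err)
	}
	wif, err := btcutil.NewWIF(privKey, &chaincfg.MainNetParams, true)
	if err != nil {
		log.Fatal(err)
	}
	encoded := wif.String()
	fmt.Println("WIF:", encoded)

	// Decoding recovers the key, the compression flag and the network.
	decoded, err := btcutil.DecodeWIF(encoded)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mainnet key:", decoded.IsForNet(&chaincfg.MainNetParams)) // true
	fmt.Println("compressed:", decoded.CompressPubKey)                     // true
}
```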
Some files were not shown because too many files have changed in this diff.