commit 3cb9f602e8 (parent e39d2eb63d)
mattn, 2024-03-25 22:44:25 +09:00, committed by GitHub
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
77 changed files with 116 additions and 116 deletions

View File

@@ -782,7 +782,7 @@ Changes in 0.8.0-beta (Sun May 25 2014)
 recent reference client changes
 (https://github.com/conformal/btcd/issues/100)
 - Raise the maximum signature script size to support standard 15-of-15
-multi-signature pay-to-sript-hash transactions with compressed pubkeys
+multi-signature pay-to-script-hash transactions with compressed pubkeys
 to remain compatible with the reference client
 (https://github.com/conformal/btcd/issues/128)
 - Reduce max bytes allowed for a standard nulldata transaction to 40 for

View File

@@ -184,7 +184,7 @@ func TestAddrManagerV1ToV2(t *testing.T) {
 addrMgr.loadPeers()
 addrs := addrMgr.getAddresses()
 if len(addrs) != len(expectedAddrs) {
-t.Fatalf("expected to find %d adddresses, found %d",
+t.Fatalf("expected to find %d addresses, found %d",
 len(expectedAddrs), len(addrs))
 }
 for _, addr := range addrs {

View File

@@ -782,7 +782,7 @@ func TestLocateInventory(t *testing.T) {
 &test.hashStop)
 }
 if !reflect.DeepEqual(headers, test.headers) {
-t.Errorf("%s: unxpected headers -- got %v, want %v",
+t.Errorf("%s: unexpected headers -- got %v, want %v",
 test.name, headers, test.headers)
 continue
 }
@@ -795,7 +795,7 @@ func TestLocateInventory(t *testing.T) {
 hashes := chain.LocateBlocks(test.locator, &test.hashStop,
 maxAllowed)
 if !reflect.DeepEqual(hashes, test.hashes) {
-t.Errorf("%s: unxpected hashes -- got %v, want %v",
+t.Errorf("%s: unexpected hashes -- got %v, want %v",
 test.name, hashes, test.hashes)
 continue
 }
@@ -888,7 +888,7 @@ func TestHeightToHashRange(t *testing.T) {
 }
 if !reflect.DeepEqual(hashes, test.hashes) {
-t.Errorf("%s: unxpected hashes -- got %v, want %v",
+t.Errorf("%s: unexpected hashes -- got %v, want %v",
 test.name, hashes, test.hashes)
 }
 }
@@ -960,7 +960,7 @@ func TestIntervalBlockHashes(t *testing.T) {
 }
 if !reflect.DeepEqual(hashes, test.hashes) {
-t.Errorf("%s: unxpected hashes -- got %v, want %v",
+t.Errorf("%s: unexpected hashes -- got %v, want %v",
 test.name, hashes, test.hashes)
 }
 }

View File

@@ -247,7 +247,7 @@ type SpentTxOut struct {
 // Amount is the amount of the output.
 Amount int64
-// PkScipt is the public key script for the output.
+// PkScript is the public key script for the output.
 PkScript []byte
 // Height is the height of the block containing the creating tx.

View File

@@ -403,7 +403,7 @@ func TestSpendJournalErrors(t *testing.T) {
 }
 // TestUtxoSerialization ensures serializing and deserializing unspent
-// trasaction output entries works as expected.
+// transaction output entries works as expected.
 func TestUtxoSerialization(t *testing.T) {
 t.Parallel()

View File

@@ -343,7 +343,7 @@ func (b *BlockChain) TstSetCoinbaseMaturity(maturity uint16) {
 b.chainParams.CoinbaseMaturity = maturity
 }
-// newFakeChain returns a chain that is usable for syntetic tests. It is
+// newFakeChain returns a chain that is usable for synthetic tests. It is
 // important to note that this chain has no database associated with it, so
 // it is not usable with all functions and the tests must take care when making
 // use of it.

View File

@@ -32,7 +32,7 @@ func TestBigToCompact(t *testing.T) {
 }
 // TestCompactToBig ensures CompactToBig converts numbers using the compact
-// representation to the expected big intergers.
+// representation to the expected big integers.
 func TestCompactToBig(t *testing.T) {
 tests := []struct {
 in uint32

View File

@@ -70,7 +70,7 @@ const (
 // ErrUnexpectedDifficulty indicates specified bits do not align with
 // the expected value either because it doesn't match the calculated
-// valued based on difficulty regarted rules or it is out of the valid
+// valued based on difficulty regarded rules or it is out of the valid
 // range.
 ErrUnexpectedDifficulty

View File

@@ -960,7 +960,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
 // ---------------------------------------------------------------------
 // The comments below identify the structure of the chain being built.
 //
-// The values in parenthesis repesent which outputs are being spent.
+// The values in parenthesis represent which outputs are being spent.
 //
 // For example, b1(0) indicates the first collected spendable output
 // which, due to the code above to create the correct number of blocks,
@@ -1218,7 +1218,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
 accepted()
 // ---------------------------------------------------------------------
-// Multisig[Verify]/ChecksigVerifiy signature operation count tests.
+// Multisig[Verify]/ChecksigVerify signature operation count tests.
 // ---------------------------------------------------------------------
 // Create block with max signature operations as OP_CHECKMULTISIG.

View File

@@ -64,7 +64,7 @@ const (
 addrKeyTypeWitnessScriptHash = 3
 // addrKeyTypeTaprootPubKey is the address type in an address key that
-// represnts a pay-to-taproot address. We use this to denote addresses
+// represents a pay-to-taproot address. We use this to denote addresses
 // related to the segwit v1 that are encoded in the bech32m format.
 addrKeyTypeTaprootPubKey = 4
@@ -158,7 +158,7 @@ func serializeAddrIndexEntry(blockID uint32, txLoc wire.TxLoc) []byte {
 // deserializeAddrIndexEntry decodes the passed serialized byte slice into the
 // provided region struct according to the format described in detail above and
-// uses the passed block hash fetching function in order to conver the block ID
+// uses the passed block hash fetching function in order to convert the block ID
 // to the associated block hash.
 func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion,
 fetchBlockHash fetchBlockHashFunc) error {
@@ -734,7 +734,7 @@ func (idx *AddrIndex) indexBlock(data writeIndexData, block *btcutil.Block,
 idx.indexPkScript(data, pkScript, txIdx)
 // With an input indexed, we'll advance the
-// stxo coutner.
+// stxo counter.
 stxoIndex++
 }
 }

View File

@@ -146,7 +146,7 @@ func BuildMerkleTreeStore(transactions []*btcutil.Tx, witness bool) []*chainhash
 merkles[offset] = &newHash
 // The normal case sets the parent node to the double sha256
-// of the concatentation of the left and right children.
+// of the concatenation of the left and right children.
 default:
 newHash := HashMerkleBranches(merkles[i], merkles[i+1])
 merkles[offset] = &newHash
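The comment corrected above states the rule used to derive a parent merkle node. As a quick illustration of that rule only — a minimal, dependency-free sketch, not btcd's actual HashMerkleBranches, which operates on chainhash.Hash values in their internal byte order — the parent is the double sha256 of the two child hashes concatenated:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hashMerkleBranches returns the double sha256 of the concatenation of the
// left and right child hashes, mirroring the comment in the hunk above.
func hashMerkleBranches(left, right []byte) []byte {
	buf := make([]byte, 0, len(left)+len(right))
	buf = append(buf, left...)
	buf = append(buf, right...)
	first := sha256.Sum256(buf)
	second := sha256.Sum256(first[:])
	return second[:]
}

func main() {
	// Hypothetical child hashes purely for demonstration.
	l := sha256.Sum256([]byte("left child"))
	r := sha256.Sum256([]byte("right child"))
	fmt.Println(hex.EncodeToString(hashMerkleBranches(l[:], r[:])))
}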

View File

@@ -9,7 +9,7 @@ import (
 "testing"
 )
-// TestDeserializeUtxoEntryV0 ensures deserializing unspent trasaction output
+// TestDeserializeUtxoEntryV0 ensures deserializing unspent transaction output
 // entries from the legacy version 0 format works as expected.
 func TestDeserializeUtxoEntryV0(t *testing.T) {
 tests := []struct {

View File

@@ -445,7 +445,7 @@ func TestUtxoCacheFlush(t *testing.T) {
 t.Fatalf("Unexpected nil entry found for %v", outpoint)
 }
 if !entry.isModified() {
-t.Fatal("Entry should be marked mofified")
+t.Fatal("Entry should be marked modified")
 }
 if !entry.isFresh() {
 t.Fatal("Entry should be marked fresh")

View File

@@ -163,13 +163,13 @@ type UtxoViewpoint struct {
 }
 // BestHash returns the hash of the best block in the chain the view currently
-// respresents.
+// represents.
 func (view *UtxoViewpoint) BestHash() *chainhash.Hash {
 return &view.bestHash
 }
 // SetBestHash sets the hash of the best block in the chain the view currently
-// respresents.
+// represents.
 func (view *UtxoViewpoint) SetBestHash(hash *chainhash.Hash) {
 view.bestHash = *hash
 }

View File

@@ -879,7 +879,7 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode
 //
 // This function MUST be called with the chain state lock held (for reads).
 func (b *BlockChain) checkBIP0030(node *blockNode, block *btcutil.Block, view *UtxoViewpoint) error {
-// Fetch utxos for all of the transaction ouputs in this block.
+// Fetch utxos for all of the transaction outputs in this block.
 // Typically, there will not be any utxos for any of the outputs.
 fetch := make([]wire.OutPoint, 0, len(block.Transactions()))
 for _, tx := range block.Transactions() {

View File

@@ -10,7 +10,7 @@ Bitcoin (secp256k1 only for now). It is designed so that it may be used with the
 standard crypto/ecdsa packages provided with go. A comprehensive suite of test
 is provided to ensure proper functionality. Package btcec was originally based
 on work from ThePiachu which is licensed under the same terms as Go, but it has
-signficantly diverged since then. The btcsuite developers original is licensed
+significantly diverged since then. The btcsuite developers original is licensed
 under the liberal ISC license.
 Although this package was primarily written for btcd, it has intentionally been

View File

@@ -212,7 +212,7 @@ func parseSig(sigStr []byte, der bool) (*Signature, error) {
 }
 // ParseSignature parses a signature in BER format for the curve type `curve'
-// into a Signature type, perfoming some basic sanity checks. If parsing
+// into a Signature type, performing some basic sanity checks. If parsing
 // according to the more strict DER format is needed, use ParseDERSignature.
 func ParseSignature(sigStr []byte) (*Signature, error) {
 return parseSig(sigStr, false)

View File

@@ -952,7 +952,7 @@ func TestFieldSquareRoot(t *testing.T) {
 input := setHex(test.in).Normalize()
 want := setHex(test.want).Normalize()
-// Calculate the square root and enusre the validity flag matches the
+// Calculate the square root and ensure the validity flag matches the
 // expected value.
 var result FieldVal
 isValid := result.SquareRootVal(input)

View File

@@ -513,7 +513,7 @@ func (s *Session) PublicNonce() [PubNonceSize]byte {
 }
 // NumRegisteredNonces returns the total number of nonces that have been
-// regsitered so far.
+// registered so far.
 func (s *Session) NumRegisteredNonces() int {
 return len(s.pubNonces)
 }

View File

@@ -258,7 +258,7 @@ func TestMuSigMultiParty(t *testing.T) {
 }
 // TestMuSigEarlyNonce tests that for protocols where nonces need to be
-// exchagned before all signers are known, the context API works as expected.
+// exchanged before all signers are known, the context API works as expected.
 func TestMuSigEarlyNonce(t *testing.T) {
 t.Parallel()

View File

@@ -144,7 +144,7 @@ func defaultNonceGenOpts() *nonceGenOpts {
 // WithCustomRand allows a caller to use a custom random number generator in
 // place for crypto/rand. This should only really be used to generate
-// determinstic tests.
+// deterministic tests.
 func WithCustomRand(r io.Reader) NonceGenOption {
 return func(o *nonceGenOpts) {
 o.randReader = r

View File

@@ -298,7 +298,7 @@ type sigCombineTestVectors struct {
 ValidCases []sigCombineValidCase `json:"valid_test_cases"`
 }
-func pSigsFromIndicies(t *testing.T, sigs []string, indices []int) []*PartialSignature {
+func pSigsFromIndices(t *testing.T, sigs []string, indices []int) []*PartialSignature {
 pSigs := make([]*PartialSignature, len(indices))
 for i, idx := range indices {
 var pSig PartialSignature
@@ -341,7 +341,7 @@ func TestMusig2SignCombine(t *testing.T) {
 t, testCase.NonceIndices, testCases.PubNonces,
 )
-partialSigs := pSigsFromIndicies(
+partialSigs := pSigsFromIndices(
 t, testCases.Psigs, testCase.PSigIndices,
 )

View File

@@ -20,7 +20,7 @@ const (
 // persistent peer.
 NRemove NodeSubCmd = "remove"
-// NDisconnect indicates the specified peer should be disonnected.
+// NDisconnect indicates the specified peer should be disconnected.
 NDisconnect NodeSubCmd = "disconnect"
 )

View File

@@ -13,7 +13,7 @@ import (
 )
 // TestBtcdExtCustomResults ensures any results that have custom marshalling
-// work as inteded.
+// work as intended.
 // and unmarshal code of results are as expected.
 func TestBtcdExtCustomResults(t *testing.T) {
 t.Parallel()

View File

@@ -17,7 +17,7 @@ import (
 )
 // TestChainSvrCustomResults ensures any results that have custom marshalling
-// work as inteded.
+// work as intended.
 // and unmarshal code of results are as expected.
 func TestChainSvrCustomResults(t *testing.T) {
 t.Parallel()

View File

@@ -13,7 +13,7 @@ import (
 )
 // TestChainSvrWsResults ensures any results that have custom marshalling
-// work as inteded.
+// work as intended.
 func TestChainSvrWsResults(t *testing.T) {
 t.Parallel()

View File

@@ -11,7 +11,7 @@ import (
 "github.com/btcsuite/btcd/btcjson"
 )
-// TestCmdMethod tests the CmdMethod function to ensure it retunrs the expected
+// TestCmdMethod tests the CmdMethod function to ensure it returns the expected
 // methods and errors.
 func TestCmdMethod(t *testing.T) {
 t.Parallel()

View File

@@ -232,7 +232,7 @@ func baseType(arg reflect.Type) (reflect.Type, int) {
 // assignField is the main workhorse for the NewCmd function which handles
 // assigning the provided source value to the destination field. It supports
 // direct type assignments, indirection, conversion of numeric types, and
-// unmarshaling of strings into arrays, slices, structs, and maps via
+// unmarshalling of strings into arrays, slices, structs, and maps via
 // json.Unmarshal.
 func assignField(paramNum int, fieldName string, dest reflect.Value, src reflect.Value) error {
 // Just error now when the types have no chance of being compatible.

View File

@@ -30,7 +30,7 @@ const (
 // embedded type which is not not supported.
 ErrEmbeddedType
-// ErrUnexportedField indiciates the provided command struct contains an
+// ErrUnexportedField indicates the provided command struct contains an
 // unexported field which is not supported.
 ErrUnexportedField

View File

@@ -18,7 +18,7 @@ const charset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
 var gen = []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}
 // toBytes converts each character in the string 'chars' to the value of the
-// index of the correspoding character in 'charset'.
+// index of the corresponding character in 'charset'.
 func toBytes(chars string) ([]byte, error) {
 decoded := make([]byte, 0, len(chars))
 for i := 0; i < len(chars); i++ {
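The comment fixed above describes what toBytes does: each character of a bech32 data part is replaced by its index in the 32-character charset. A standalone sketch of that mapping, using strings.IndexByte for clarity rather than the package's internal lookup, could look like this:

package main

import (
	"fmt"
	"strings"
)

const charset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"

// toBytes maps each character in chars to its index in charset, returning an
// error for any character that is not part of the charset.
func toBytes(chars string) ([]byte, error) {
	decoded := make([]byte, 0, len(chars))
	for i := 0; i < len(chars); i++ {
		idx := strings.IndexByte(charset, chars[i])
		if idx < 0 {
			return nil, fmt.Errorf("invalid character not part of charset: %v", chars[i])
		}
		decoded = append(decoded, byte(idx))
	}
	return decoded, nil
}

func main() {
	// "qpzry" maps to indices 0 through 4.
	data, err := toBytes("qpzry")
	fmt.Println(data, err)
}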

View File

@@ -297,9 +297,9 @@ func TestMixedCaseEncode(t *testing.T) {
 }
 }
-// TestCanDecodeUnlimtedBech32 tests whether decoding a large bech32 string works
+// TestCanDecodeUnlimitedBech32 tests whether decoding a large bech32 string works
 // when using the DecodeNoLimit version
-func TestCanDecodeUnlimtedBech32(t *testing.T) {
+func TestCanDecodeUnlimitedBech32(t *testing.T) {
 input := "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq5kx0yd"
 // Sanity check that an input of this length errors on regular Decode()

View File

@@ -26,7 +26,7 @@ func ExampleNewFilter() {
 filter := bloom.NewFilter(10, tweak, 0.0001, wire.BloomUpdateNone)
 // Create a transaction hash and add it to the filter. This particular
-// trasaction is the first transaction in block 310,000 of the main
+// transaction is the first transaction in block 310,000 of the main
 // bitcoin block chain.
 txHashStr := "fd611c56ca0d378cdcd16244b45c2ba9588da3adac367c4ef43e808b280b8a45"
 txHash, err := chainhash.NewHashFromStr(txHashStr)

View File

@@ -252,7 +252,7 @@ func TestSimpleCoin(t *testing.T) {
 t.Error("Different value of coin pkScript than expected")
 }
 if testSimpleCoin.NumConfs() != 1 {
-t.Error("Differet value of num confs than expected")
+t.Error("Different value of num confs than expected")
 }
 if testSimpleCoin.ValueAge() != testSimpleCoinTxValueAge0 {
 t.Error("Different value of coin value * age than expected")

View File

@@ -60,7 +60,7 @@ func RandomKey() ([gcs.KeySize]byte, error) {
 }
 // DeriveKey is a utility function that derives a key from a chainhash.Hash by
-// truncating the bytes of the hash to the appopriate key size.
+// truncating the bytes of the hash to the appropriate key size.
 func DeriveKey(keyHash *chainhash.Hash) [gcs.KeySize]byte {
 var key [gcs.KeySize]byte
 copy(key[:], keyHash.CloneBytes())
@@ -207,7 +207,7 @@ func (b *GCSBuilder) Build() (*gcs.Filter, error) {
 return nil, b.err
 }
-// We'll ensure that all the parmaters we need to actually build the
+// We'll ensure that all the paramaters we need to actually build the
 // filter properly are set.
 if b.p == 0 {
 return nil, fmt.Errorf("p value is not set, cannot build")

View File

@@ -404,7 +404,7 @@ func finalizeWitnessInput(p *Packet, inIndex int) error {
 }
 containsRedeemScript := pInput.RedeemScript != nil
-cointainsWitnessScript := pInput.WitnessScript != nil
+containsWitnessScript := pInput.WitnessScript != nil
 // If there's no redeem script, then we assume that this is native
 // segwit input.
@@ -413,7 +413,7 @@ func finalizeWitnessInput(p *Packet, inIndex int) error {
 // If we have only a sigley pubkey+sig pair, and no witness
 // script, then we assume this is a P2WKH input.
 if len(pubKeys) == 1 && len(sigs) == 1 &&
-!cointainsWitnessScript {
+!containsWitnessScript {
 serializedWitness, err = writePKHWitness(
 sigs[0], pubKeys[0],
@@ -430,7 +430,7 @@ func finalizeWitnessInput(p *Packet, inIndex int) error {
 // TODO(roasbeef): need to add custom finalize for
 // non-multisig P2WSH outputs (HTLCs, delay outputs,
 // etc).
-if !cointainsWitnessScript {
+if !containsWitnessScript {
 return ErrNotFinalizable
 }
@@ -457,7 +457,7 @@ func finalizeWitnessInput(p *Packet, inIndex int) error {
 // If don't have a witness script, then we assume this is a
 // nested p2wkh output.
-if !cointainsWitnessScript {
+if !containsWitnessScript {
 // Assumed p2sh-p2wkh Here the witness is just (sig,
 // pub) as for p2pkh case
 if len(sigs) != 1 || len(pubKeys) != 1 {

View File

@@ -275,7 +275,7 @@ func parseAndSetDebugLevels(debugLevel string) error {
 // Validate subsystem.
 if _, exists := subsystemLoggers[subsysID]; !exists {
 str := "The specified subsystem [%v] is invalid -- " +
-"supported subsytems %v"
+"supported subsystems %v"
 return fmt.Errorf(str, subsysID, supportedSubsystems())
 }

View File

@@ -525,9 +525,9 @@ func (cm *ConnManager) Start() {
 // Start all the listeners so long as the caller requested them and
 // provided a callback to be invoked when connections are accepted.
 if cm.cfg.OnAccept != nil {
-for _, listner := range cm.cfg.Listeners {
+for _, listener := range cm.cfg.Listeners {
 cm.wg.Add(1)
-go cm.listenHandler(listner)
+go cm.listenHandler(listener)
 }
 }

View File

@@ -23,7 +23,7 @@ const (
 )
 // OnSeed is the signature of the callback function which is invoked when DNS
-// seeding is succesfull.
+// seeding is successful.
 type OnSeed func(addrs []*wire.NetAddressV2)
 // LookupFunc is the signature of the DNS lookup function.

View File

@@ -87,7 +87,7 @@ const (
 // should be relatively, so this should rarely be an issue.
 ErrKeyTooLarge
-// ErrValueTooLarge indicates an attmpt to insert a value that is larger
+// ErrValueTooLarge indicates an attempt to insert a value that is larger
 // than max allowed value size. The max key size depends on the
 // specific backend driver being used.
 ErrValueTooLarge

View File

@@ -78,7 +78,7 @@ func init() {
 UseLogger: useLogger,
 }
 if err := database.RegisterDriver(driver); err != nil {
-panic(fmt.Sprintf("Failed to regiser database driver '%s': %v",
+panic(fmt.Sprintf("Failed to register database driver '%s': %v",
 dbType, err))
 }
 }

View File

@@ -255,7 +255,7 @@ func testDeleteValues(tc *testContext, bucket database.Bucket, values []keyPair)
 return true
 }
-// testCursorInterface ensures the cursor itnerface is working properly by
+// testCursorInterface ensures the cursor interface is working properly by
 // exercising all of its functions on the passed bucket.
 func testCursorInterface(tc *testContext, bucket database.Bucket) bool {
 // Ensure a cursor can be obtained for the bucket.
@@ -639,7 +639,7 @@ func rollbackOnPanic(t *testing.T, tx database.Tx) {
 func testMetadataManualTxInterface(tc *testContext) bool {
 // populateValues tests that populating values works as expected.
 //
-// When the writable flag is false, a read-only tranasction is created,
+// When the writable flag is false, a read-only transaction is created,
 // standard bucket tests for read-only transactions are performed, and
 // the Commit function is checked to ensure it fails as expected.
 //

View File

@@ -218,7 +218,7 @@ func TestCornerCases(t *testing.T) {
 ldb := idb.(*db).cache.ldb
 ldb.Close()
-// Ensure initilization errors in the underlying database work as
+// Ensure initialization errors in the underlying database work as
 // expected.
 testName = "initDB: reinitialization"
 wantErrCode = database.ErrDbNotOpen

View File

@@ -390,7 +390,7 @@ type Tx interface {
 FetchBlockRegions(regions []BlockRegion) ([][]byte, error)
 // PruneBlocks deletes the block files until it reaches the target size
-// (specificed in bytes).
+// (specified in bytes).
 //
 // The interface contract guarantees at least the following errors will
 // be returned (other implementation-specific errors are possible):

View File

@@ -472,7 +472,7 @@ Example Return|`{`<br />&nbsp;&nbsp;`"bytes": 310768,`<br />&nbsp;&nbsp;`"size":
 |---|---|
 |Method|help|
 |Parameters|1. command (string, optional) - the command to get help for|
-|Description|Returns a list of all commands or help for a specified command.<br />When no `command` parameter is specified, a list of avaialable commands is returned<br />When `command` is a valid method, the help text for that method is returned.|
+|Description|Returns a list of all commands or help for a specified command.<br />When no `command` parameter is specified, a list of available commands is returned<br />When `command` is a valid method, the help text for that method is returned.|
 |Returns|string|
 |Example Return|getblockcount<br />Returns a numeric for the number of blocks in the longest block chain.|
 [Return to Overview](#MethodOverview)<br />

log.go
View File

@@ -36,7 +36,7 @@ func (logWriter) Write(p []byte) (n int, err error) {
 return len(p), nil
 }
-// Loggers per subsystem. A single backend logger is created and all subsytem
+// Loggers per subsystem. A single backend logger is created and all subsystem
 // loggers created from it will write to the backend. When adding new
 // subsystems, add the subsystem logger variable here and to the
 // subsystemLoggers map.

View File

@@ -861,7 +861,7 @@ mempoolLoop:
 }, nil
 }
-// AddWitnessCommitment adds the witness commitment as an OP_RETURN outpout
+// AddWitnessCommitment adds the witness commitment as an OP_RETURN output
 // within the coinbase tx. The raw commitment is returned.
 func AddWitnessCommitment(coinbaseTx *btcutil.Tx,
 blockTxns []*btcutil.Tx) []byte {

View File

@@ -112,7 +112,7 @@ func CalcPriority(tx *wire.MsgTx, utxoView *blockchain.UtxoViewpoint, nextBlockH
 // A compressed pubkey pay-to-script-hash redemption with a maximum len
 // signature is of the form:
 // [OP_DATA_73 <73-byte sig> + OP_DATA_35 + {OP_DATA_33
-// <33 byte compresed pubkey> + OP_CHECKSIG}]
+// <33 byte compressed pubkey> + OP_CHECKSIG}]
 //
 // Thus 1 + 73 + 1 + 1 + 33 + 1 = 110
 overhead := 0
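The 110-byte figure in the comment above is simply the sum of the pushed items and their push opcodes. A small standalone sketch of that arithmetic (illustration only, not btcd's CalcPriority code):

package main

import "fmt"

func main() {
	// Each term mirrors one element from the comment in the hunk above.
	overhead := 1 + // OP_DATA_73 push opcode
		73 + // maximum-length signature
		1 + // OP_DATA_35 push opcode for the serialized redeem script
		1 + // OP_DATA_33 push opcode inside the redeem script
		33 + // compressed public key
		1 // OP_CHECKSIG
	fmt.Println(overhead) // prints 110
}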

View File

@@ -16,7 +16,7 @@ import (
 )
 // mockRemotePeer creates a basic inbound peer listening on the simnet port for
-// use with Example_peerConnection. It does not return until the listner is
+// use with Example_peerConnection. It does not return until the listener is
 // active.
 func mockRemotePeer() error {
 // Configure peer to act as a simnet node that offers no services.

View File

@@ -744,7 +744,7 @@ func (p *Peer) LastRecv() time.Time {
 // LocalAddr returns the local address of the connection.
 //
-// This function is safe fo concurrent access.
+// This function is safe for concurrent access.
 func (p *Peer) LocalAddr() net.Addr {
 var localAddr net.Addr
 if atomic.LoadInt32(&p.connected) != 0 {

View File

@@ -54,7 +54,7 @@ func TestUnmarshalGetBlockChainInfoResultSoftForks(t *testing.T) {
 for _, test := range tests {
 success := t.Run(test.name, func(t *testing.T) {
-// We'll start by unmarshaling the JSON into a struct.
+// We'll start by unmarshalling the JSON into a struct.
 // The SoftForks and UnifiedSoftForks field should not
 // be set yet, as they are unmarshaled within a
 // different function.
@@ -226,7 +226,7 @@ func TestClientConnectedToWSServerRunner(t *testing.T) {
 response := <-ch
 if &expectedResponse != response {
-t.Fatalf("received unexepcted response")
+t.Fatalf("received unexpected response")
 }
 // ensure the goroutine created in this test exists,
@@ -236,7 +236,7 @@ func TestClientConnectedToWSServerRunner(t *testing.T) {
 },
 }
-// since these tests rely on concurrency, ensure there is a resonable timeout
+// since these tests rely on concurrency, ensure there is a reasonable timeout
 // that they should run within
 for _, testCase := range testTable {
 done := make(chan bool)

View File

@@ -116,7 +116,7 @@ type jsonRequest struct {
 type Client struct {
 id uint64 // atomic, so must stay 64-bit aligned
-// config holds the connection configuration assoiated with this client.
+// config holds the connection configuration associated with this client.
 config *ConnConfig
 // chainParams holds the params for the chain that this client is using,
@@ -351,7 +351,7 @@ type Response struct {
 }
 // result checks whether the unmarshaled response contains a non-nil error,
-// returning an unmarshaled btcjson.RPCError (or an unmarshaling error) if so.
+// returning an unmarshaled btcjson.RPCError (or an unmarshalling error) if so.
 // If the response is not an error, the raw bytes of the request are
 // returned for further unmashaling into specific result types.
 func (r rawResponse) result() (result []byte, err error) {
@@ -433,7 +433,7 @@ func (c *Client) handleMessage(msg []byte) {
 // to have come from reading from the websocket connection in wsInHandler,
 // should be logged.
 func (c *Client) shouldLogReadError(err error) bool {
-// No logging when the connetion is being forcibly disconnected.
+// No logging when the connection is being forcibly disconnected.
 select {
 case <-c.shutdown:
 return false

View File

@@ -472,13 +472,13 @@ func (c *Client) handleNotification(ntfn *rawNotification) {
 }
 }
-// wrongNumParams is an error type describing an unparseable JSON-RPC
-// notificiation due to an incorrect number of parameters for the
+// wrongNumParams is an error type describing an unparsable JSON-RPC
+// notification due to an incorrect number of parameters for the
 // expected notification type. The value is the number of parameters
 // of the invalid notification.
 type wrongNumParams int
-// Error satisifies the builtin error interface.
+// Error satisfies the builtin error interface.
 func (e wrongNumParams) Error() string {
 return fmt.Sprintf("wrong number of parameters (%d)", e)
 }
@@ -599,7 +599,7 @@ func parseFilteredBlockDisconnectedParams(params []json.RawMessage) (int32,
 return 0, nil, err
 }
-// Unmarshal second parmeter as a slice of bytes.
+// Unmarshal second parameter as a slice of bytes.
 blockHeaderBytes, err := parseHexParam(params[1])
 if err != nil {
 return 0, nil, err

View File

@@ -719,7 +719,7 @@ func (c *Client) SignRawTransactionWithWallet3Async(tx *wire.MsgTx,
 //
 // This function should only used if a non-default signature hash type is
 // desired. Otherwise, see SignRawTransactionWithWallet if the RPC server already
-// knows the input transactions, or SignRawTransactionWihWallet2 if it does not.
+// knows the input transactions, or SignRawTransactionWithWallet2 if it does not.
 func (c *Client) SignRawTransactionWithWallet3(tx *wire.MsgTx,
 inputs []btcjson.RawTxWitnessInput, hashType SigHashType) (*wire.MsgTx, bool, error) {

View File

@@ -2661,7 +2661,7 @@ func (c *Client) WalletCreateFundedPsbt(
 type FutureWalletProcessPsbtResult chan *Response
 // Receive waits for the Response promised by the future and returns an updated
-// PSBT with signed inputs from the wallet and a boolen indicating if the
+// PSBT with signed inputs from the wallet and a boolean indicating if the
 // transaction has a complete set of signatures.
 func (r FutureWalletProcessPsbtResult) Receive() (*btcjson.WalletProcessPsbtResult, error) {
 res, err := ReceiveFuture(r)

View File

@@ -846,7 +846,7 @@ func handleDecodeScript(s *rpcServer, cmd interface{}, closeChan <-chan struct{}
 // Get information about the script.
 // Ignore the error here since an error means the script couldn't parse
-// and there is no additinal information about it anyways.
+// and there is no additional information about it anyways.
 scriptClass, addrs, reqSigs, _ := txscript.ExtractPkScriptAddrs(script,
 s.cfg.ChainParams)
 addresses := make([]string, len(addrs))
@@ -3220,7 +3220,7 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan
 addressTxns := make([]retrievedTx, 0, numRequested)
 if reverse {
 // Transactions in the mempool are not in a block header yet,
-// so the block header field in the retieved transaction struct
+// so the block header field in the retrieved transaction struct
 // is left nil.
 mpTxns, mpSkipped := fetchMempoolTxnsForAddress(s, addr,
 uint32(numToSkip), uint32(numRequested))
@@ -3274,7 +3274,7 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan
 // order and the number of results is still under the number requested.
 if !reverse && len(addressTxns) < numRequested {
 // Transactions in the mempool are not in a block header yet,
-// so the block header field in the retieved transaction struct
+// so the block header field in the retrieved transaction struct
 // is left nil.
 mpTxns, mpSkipped := fetchMempoolTxnsForAddress(s, addr,
 uint32(numToSkip)-numSkipped, uint32(numRequested-
@@ -4336,7 +4336,7 @@ func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, isAdmin
 // change the read deadline for the new connection and having one breaks
 // long polling. However, not having a read deadline on the initial
 // connection would mean clients can connect and idle forever. Thus,
-// hijack the connecton from the HTTP server, clear the read deadline,
+// hijack the connection from the HTTP server, clear the read deadline,
 // and handle writing the response manually.
 hj, ok := w.(http.Hijacker)
 if !ok {
@@ -4359,7 +4359,7 @@ func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, isAdmin
 // Attempt to parse the raw body into a JSON-RPC request.
 // Setup a close notifier. Since the connection is hijacked,
-// the CloseNotifer on the ResponseWriter is not available.
+// the CloseNotifier on the ResponseWriter is not available.
 closeChan := make(chan struct{}, 1)
 go func() {
 _, err = conn.Read(make([]byte, 1))
@@ -4409,7 +4409,7 @@ func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, isAdmin
 // Btcd does not respond to any request without and "id" or "id":null,
 // regardless the indicated JSON-RPC protocol version unless RPC quirks
 // are enabled. With RPC quirks enabled, such requests will be responded
-// to if the reqeust does not indicate JSON-RPC version.
+// to if the request does not indicate JSON-RPC version.
 //
 // RPC quirks can be enabled by the user to avoid compatibility issues
 // with software relying on Core's behavior.

View File

@@ -376,7 +376,7 @@ var helpDescsEnUS = map[string]string{
 // GetCurrentNetCmd help.
 "getcurrentnet--synopsis": "Get bitcoin network the server is running on.",
-"getcurrentnet--result0": "The network identifer",
+"getcurrentnet--result0": "The network identifier",
 // GetDifficultyCmd help.
 "getdifficulty--synopsis": "Returns the proof-of-work difficulty as a multiple of the minimum difficulty.",

View File

@@ -132,8 +132,8 @@ type wsNotificationManager struct {
 queueNotification chan interface{}
 // notificationMsgs feeds notificationHandler with notifications
-// and client (un)registeration requests from a queue as well as
-// registeration and unregisteration requests from clients.
+// and client (un)registration requests from a queue as well as
+// registration and unregistration requests from clients.
 notificationMsgs chan interface{}
 // Access channel for current number of connected clients.
@@ -1236,7 +1236,7 @@ type wsResponse struct {
 // requested notifications to all connected websocket clients. Inbound
 // messages are read via the inHandler goroutine and generally dispatched to
 // their own handler. However, certain potentially long-running operations such
-// as rescans, are sent to the asyncHander goroutine and are limited to one at a
+// as rescans, are sent to the asyncHandler goroutine and are limited to one at a
 // time. There are two outbound message types - one for responding to client
 // requests and another for async notifications. Responses to client requests
 // use SendMessage which employs a buffered channel thereby limiting the number
@@ -2144,7 +2144,7 @@ func handleNotifySpent(wsc *wsClient, icmd interface{}) (interface{}, error) {
 return nil, nil
 }
-// handleNotifyNewTransations implements the notifynewtransactions command
+// handleNotifyNewTransactions implements the notifynewtransactions command
 // extension for websocket connections.
 func handleNotifyNewTransactions(wsc *wsClient, icmd interface{}) (interface{}, error) {
 cmd, ok := icmd.(*btcjson.NotifyNewTransactionsCmd)
@@ -2157,7 +2157,7 @@ func handleNotifyNewTransactions(wsc *wsClient, icmd interface{}) (interface{},
 return nil, nil
 }
-// handleStopNotifyNewTransations implements the stopnotifynewtransactions
+// handleStopNotifyNewTransactions implements the stopnotifynewtransactions
 // command extension for websocket connections.
 func handleStopNotifyNewTransactions(wsc *wsClient, icmd interface{}) (interface{}, error) {
 wsc.server.ntfnMgr.UnregisterNewMempoolTxsUpdates(wsc)
@@ -2724,7 +2724,7 @@ fetchRange:
 // was any) still exists in the database. If it
 // doesn't, we error.
 //
-// A goto is used to branch executation back to
+// A goto is used to branch execution back to
 // before the range was evaluated, as it must be
 // reevaluated for the new hashList.
 minBlock += int32(i)
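The second hunk above corrects the name of the asyncHandler goroutine; the surrounding comment describes the dispatch pattern: quick requests are handled inline, while potentially long-running operations such as rescans are handed to a single async worker so only one runs at a time. A generic, self-contained sketch of that pattern (illustrative names only, not btcd's websocket client code):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	inbound := make(chan string)
	longRunning := make(chan string, 8) // queue for the single async worker

	var wg sync.WaitGroup
	wg.Add(2)

	// Single async worker: queued long-running requests run one at a time.
	go func() {
		defer wg.Done()
		for req := range longRunning {
			fmt.Println("async start:", req)
			time.Sleep(10 * time.Millisecond) // stands in for a rescan
			fmt.Println("async done:", req)
		}
	}()

	// Inbound handler: quick requests are handled inline, slow ones deferred.
	go func() {
		defer wg.Done()
		for msg := range inbound {
			if msg == "rescan" {
				longRunning <- msg
				continue
			}
			fmt.Println("handled inline:", msg)
		}
		close(longRunning)
	}()

	for _, m := range []string{"ping", "rescan", "getinfo", "rescan"} {
		inbound <- m
	}
	close(inbound)
	wg.Wait()
}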

View File

@@ -153,7 +153,7 @@ func installService() error {
 // Support events to the event log using the standard "standard" Windows
 // EventCreate.exe message file. This allows easy logging of custom
-// messges instead of needing to create our own message catalog.
+// messages instead of needing to create our own message catalog.
 eventlog.Remove(svcName)
 eventsSupported := uint32(eventlog.Error | eventlog.Warning | eventlog.Info)
 return eventlog.InstallAsEventCreate(svcName, eventsSupported)

View File

@@ -666,7 +666,7 @@
 ["0 0x02 0x0000 0", "CHECKMULTISIGVERIFY 1", "", "OK"],
 ["While not really correctly DER encoded, the empty signature is allowed by"],
-["STRICTENC to provide a compact way to provide a delibrately invalid signature."],
+["STRICTENC to provide a compact way to provide a deliberately invalid signature."],
 ["0", "0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 CHECKSIG NOT", "STRICTENC", "OK"],
 ["0 0", "1 0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 1 CHECKMULTISIG NOT", "STRICTENC", "OK"],

View File

@@ -199,7 +199,7 @@
 [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4259839 CHECKSEQUENCEVERIFY 1"]],
 "020000000100010000000000000000000000000000000000000000000000000000000000000000000000feff40000100000000000000000000000000", "P2SH,CHECKSEQUENCEVERIFY"],
-["By-time locks, with argument just beyond txin.nSequence (but within numerical boundries)"],
+["By-time locks, with argument just beyond txin.nSequence (but within numerical boundaries)"],
 [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4194305 CHECKSEQUENCEVERIFY 1"]],
 "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000040000100000000000000000000000000", "P2SH,CHECKSEQUENCEVERIFY"],
 [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4259839 CHECKSEQUENCEVERIFY 1"]],

View File

@@ -1414,7 +1414,7 @@ func (vm *Engine) checkSignatureEncoding(sig []byte) error {
 func getStack(stack *stack) [][]byte {
 array := make([][]byte, stack.Depth())
 for i := range array {
-// PeekByteArry can't fail due to overflow, already checked
+// PeekByteArray can't fail due to overflow, already checked
 array[len(array)-i-1], _ = stack.PeekByteArray(int32(i))
 }
 return array

View File

@@ -13,7 +13,7 @@ import (
 "github.com/stretchr/testify/require"
 )
-// TestDebugEngine checks that the StepCallbck called during debug script
+// TestDebugEngine checks that the StepCallback called during debug script
 // execution contains the expected data.
 func TestDebugEngine(t *testing.T) {
 t.Parallel()

View File

@@ -123,12 +123,12 @@ func TestCheckErrorCondition(t *testing.T) {
 t.Fatalf("failed to step %dth time: %v", i, err)
 }
 if done {
-t.Fatalf("finshed early on %dth time", i)
+t.Fatalf("finished early on %dth time", i)
 }
 err = vm.CheckErrorCondition(false)
 if !IsErrorCode(err, ErrScriptUnfinished) {
-t.Fatalf("got unexepected error %v on %dth iteration",
+t.Fatalf("got unexpected error %v on %dth iteration",
 err, i)
 }
 }

View File

@@ -267,7 +267,7 @@ const (
 ErrPubKeyType
 // ErrCleanStack is returned when the ScriptVerifyCleanStack flag
-// is set, and after evalution, the stack does not contain only a
+// is set, and after evaluation, the stack does not contain only a
 // single element.
 ErrCleanStack

View File

@@ -244,7 +244,7 @@ func isCanonicalPush(opcode byte, data []byte) bool {
 // removeOpcodeByData will return the script minus any opcodes that perform a
 // canonical push of data that contains the passed data to remove. This
 // function assumes it is provided a version 0 script as any future version of
-// script should avoid this functionality since it is unncessary due to the
+// script should avoid this functionality since it is unnecessary due to the
 // signature scripts not being part of the witness-free transaction hash.
 //
 // WARNING: This will return the passed script unmodified unless a modification

View File

@@ -331,7 +331,7 @@ func newTaprootSigVerifier(pkBytes []byte, fullSigBytes []byte,
 // key and signature, and the passed sigHash as the message digest.
 func (t *taprootSigVerifier) verifySig(sigHash []byte) bool {
     // At this point, we can check to see if this signature is already
-    // included in the sigCcahe and is valid or not (if one was passed in).
+    // included in the sigCache and is valid or not (if one was passed in).
     cacheKey, _ := chainhash.NewHash(sigHash)
     if t.sigCache != nil {
         if t.sigCache.Exists(*cacheKey, t.fullSigBytes, t.pkBytes) {
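The hunk above checks a signature cache before doing any expensive verification. The following is a minimal, self-contained stand-in (not btcd's SigCache type) showing the same lookup-then-verify-then-remember pattern; all names here are hypothetical.

// sigCacheSketch remembers (sigHash, signature, pubkey) triples that
// already verified so repeat validations can skip the costly check.
type sigCacheSketch struct {
    entries map[[32]byte]string
}

func newSigCacheSketch() *sigCacheSketch {
    return &sigCacheSketch{entries: make(map[[32]byte]string)}
}

func (c *sigCacheSketch) Exists(sigHash [32]byte, sig, pubKey []byte) bool {
    return c.entries[sigHash] == string(sig)+"|"+string(pubKey)
}

func (c *sigCacheSketch) Add(sigHash [32]byte, sig, pubKey []byte) {
    c.entries[sigHash] = string(sig) + "|" + string(pubKey)
}

// verifyWithCache consults the cache first, falls back to the supplied
// full verification, and records successful results for next time.
func verifyWithCache(c *sigCacheSketch, sigHash [32]byte, sig, pubKey []byte,
    verify func() bool) bool {

    if c != nil && c.Exists(sigHash, sig, pubKey) {
        return true
    }
    if !verify() {
        return false
    }
    if c != nil {
        c.Add(sigHash, sig, pubKey)
    }
    return true
}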

View File

@@ -884,7 +884,7 @@ func TestMultiSigScript(t *testing.T) {
     }
 }

-// TestCalcMultiSigStats ensures the CalcMutliSigStats function returns the
+// TestCalcMultiSigStats ensures the CalcMultiSigStats function returns the
 // expected errors.
 func TestCalcMultiSigStats(t *testing.T) {
     t.Parallel()
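For context, a hedged usage fragment for the function this test exercises. It assumes rawScript holds a raw multisig public key script and that the exported helper reports the public key and signature counts in that order (an assumption on my part); fmt, log and the txscript package are assumed to be imported.

// Ask txscript how many public keys and required signatures the
// multisig script encodes (counts assumed to be returned in this order).
numPubKeys, numSigs, err := txscript.CalcMultiSigStats(rawScript)
if err != nil {
    log.Fatalf("not a standard multisig script: %v", err)
}
fmt.Printf("%d-of-%d multisig\n", numSigs, numPubKeys)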

View File

@@ -255,7 +255,7 @@ func ComputeTaprootOutputKey(pubKey *btcec.PublicKey,
         scriptRoot,
     )

-    // With the tap tweek computed, we'll need to convert the merkle root
+    // With the tap tweak computed, we'll need to convert the merkle root
     // into something in the domain we can manipulate: a scalar value mod
     // N.
     var tweakScalar btcec.ModNScalar
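For reference, the BIP-341 relationship computed here is Q = P + t*G, where t is the "TapTweak" tagged hash of the internal key P and the script merkle root, reduced mod the curve order N. A hedged caller-side sketch follows; internalKey and scriptRoot are placeholders the caller is assumed to supply, and fmt plus the txscript package are assumed to be imported.

// Derive the taproot output key from an internal key and an optional
// script merkle root. An empty scriptRoot corresponds to a
// key-path-only output.
outputKey := txscript.ComputeTaprootOutputKey(internalKey, scriptRoot)
fmt.Printf("taproot output key: %x\n", outputKey.SerializeCompressed())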

View File

@@ -224,7 +224,7 @@ func TestTaprootTweakNoMutation(t *testing.T) {
             return false
         }

-        // We shuold be able to re-derive the private key from raw
+        // We should be able to re-derive the private key from raw
         // bytes and have that match up again.
         privKeyCopy, _ := btcec.PrivKeyFromBytes(privBytes[:])
         if *privKey != *privKeyCopy {
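A small self-contained sketch of the same serialize-and-re-derive round trip the test relies on, using the btcec v2 API:

package main

import (
    "bytes"
    "fmt"

    "github.com/btcsuite/btcd/btcec/v2"
)

func main() {
    // Generate a key, serialize it, then re-derive it from the raw
    // bytes, mirroring the round trip checked in the test above.
    privKey, err := btcec.NewPrivateKey()
    if err != nil {
        panic(err)
    }
    raw := privKey.Serialize()
    privKeyCopy, _ := btcec.PrivKeyFromBytes(raw)
    fmt.Println("round trip ok:", bytes.Equal(raw, privKeyCopy.Serialize()))
}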

View File

@@ -549,7 +549,7 @@ func BenchmarkDeserializeTxSmall(b *testing.B) {
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // // Previous output hash
-        0xff, 0xff, 0xff, 0xff, // Prevous output index
+        0xff, 0xff, 0xff, 0xff, // Previous output index
         0x07, // Varint for length of signature script
         0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script
         0xff, 0xff, 0xff, 0xff, // Sequence
@@ -671,7 +671,7 @@ func BenchmarkSerializeTxSmall(b *testing.B) {
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // // Previous output hash
-        0xff, 0xff, 0xff, 0xff, // Prevous output index
+        0xff, 0xff, 0xff, 0xff, // Previous output index
         0x07, // Varint for length of signature script
         0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script
         0xff, 0xff, 0xff, 0xff, // Sequence

View File

@@ -9,7 +9,7 @@ import (
     "io"
 )

-// fixedWriter implements the io.Writer interface and intentially allows
+// fixedWriter implements the io.Writer interface and intentionally allows
 // testing of error paths by forcing short writes.
 type fixedWriter struct {
     b []byte
@@ -44,7 +44,7 @@ func newFixedWriter(max int) io.Writer {
     return &fw
 }

-// fixedReader implements the io.Reader interface and intentially allows
+// fixedReader implements the io.Reader interface and intentionally allows
 // testing of error paths by forcing short reads.
 type fixedReader struct {
     buf []byte
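As a minimal sketch of the short-write idea these helpers implement (this is a stand-in, not the package's own fixedWriter), an io.Writer that accepts at most a fixed number of bytes and then reports io.ErrShortWrite can be written as:

package main

import "io"

// shortWriter accepts at most max bytes in total and then reports
// io.ErrShortWrite, which is handy for exercising serialization error
// paths.
type shortWriter struct {
    max int
    n   int
}

func (w *shortWriter) Write(p []byte) (int, error) {
    remaining := w.max - w.n
    if remaining <= 0 {
        return 0, io.ErrShortWrite
    }
    if len(p) > remaining {
        w.n += remaining
        return remaining, io.ErrShortWrite
    }
    w.n += len(p)
    return len(p), nil
}

// Compile-time check that shortWriter satisfies io.Writer.
var _ io.Writer = (*shortWriter)(nil)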

View File

@@ -83,7 +83,7 @@ const maxAlertSize = MaxMessagePayload - maxSignatureSize - MaxVarIntPayload - 1
 // fit into a maximum size alert.
 //
 // maxAlertSize = fixedAlertSize + max(SetCancel) + max(SetSubVer) + 3*(string)
-// for caculating maximum number of cancel IDs, set all other var sizes to 0
+// for calculating maximum number of cancel IDs, set all other var sizes to 0
 // maxAlertSize = fixedAlertSize + (MaxVarIntPayload-1) + x*sizeOf(int32)
 // x = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / 4
 const maxCountSetCancel = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / 4
@@ -92,7 +92,7 @@ const maxCountSetCancel = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1)
 // fit into a maximum size alert.
 //
 // maxAlertSize = fixedAlertSize + max(SetCancel) + max(SetSubVer) + 3*(string)
-// for caculating maximum number of subversions, set all other var sizes to 0
+// for calculating maximum number of subversions, set all other var sizes to 0
 // maxAlertSize = fixedAlertSize + (MaxVarIntPayload-1) + x*sizeOf(string)
 // x = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / sizeOf(string)
 // subversion would typically be something like "/Satoshi:0.7.2/" (15 bytes)
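To make the derivation above concrete, here is an illustrative Go fragment that plugs made-up sizes into the same formula; the constant values are placeholders, not wire's actual (mostly unexported) values.

// Placeholder sizes, chosen only to show how the bound falls out.
const (
    exampleMaxAlertSize   = 100000 // not wire's real value
    exampleFixedAlertSize = 45     // not wire's real value
    exampleMaxVarIntLen   = 9      // not wire's real value
)

// x = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / sizeOf(int32)
// With the placeholders above this evaluates to 24986 cancel IDs.
const exampleMaxCountSetCancel = (exampleMaxAlertSize -
    exampleFixedAlertSize - exampleMaxVarIntLen + 1) / 4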

View File

@@ -245,7 +245,7 @@ func (msg *MsgBlock) Serialize(w io.Writer) error {
 // SerializeNoWitness encodes a block to w using an identical format to
 // Serialize, with all (if any) witness data stripped from all transactions.
-// This method is provided in additon to the regular Serialize, in order to
+// This method is provided in addition to the regular Serialize, in order to
 // allow one to selectively encode transaction witness data to non-upgraded
 // peers which are unaware of the new encoding.
 func (msg *MsgBlock) SerializeNoWitness(w io.Writer) error {
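A brief usage sketch contrasting the two encodings mentioned in the comment; block is assumed to be a populated *wire.MsgBlock, and bytes, fmt and log are assumed to be imported.

var withWitness, withoutWitness bytes.Buffer

// Full encoding, including any segwit witness data.
if err := block.Serialize(&withWitness); err != nil {
    log.Fatal(err)
}

// The same block with witness data stripped, e.g. for non-upgraded peers.
if err := block.SerializeNoWitness(&withoutWitness); err != nil {
    log.Fatal(err)
}

fmt.Printf("full: %d bytes, stripped: %d bytes\n",
    withWitness.Len(), withoutWitness.Len())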

View File

@@ -562,7 +562,7 @@ var blockOneBytes = []byte{
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
-    0xff, 0xff, 0xff, 0xff, // Prevous output index
+    0xff, 0xff, 0xff, 0xff, // Previous output index
     0x07, // Varint for length of signature script
     0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script (coinbase)
     0xff, 0xff, 0xff, 0xff, // Sequence

View File

@@ -23,7 +23,7 @@ import (
 //
 // The algorithm for building the block locator hashes should be to add the
 // hashes in reverse order until you reach the genesis block. In order to keep
-// the list of locator hashes to a resonable number of entries, first add the
+// the list of locator hashes to a reasonable number of entries, first add the
 // most recent 10 block hashes, then double the step each loop iteration to
 // exponentially decrease the number of hashes the further away from head and
 // closer to the genesis block you get.
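A minimal sketch of the height-selection pattern this comment describes (ten most recent heights, then exponentially widening steps back to genesis). It produces heights rather than hashes and is not the library's implementation.

// locatorHeights returns the block heights a locator would reference,
// starting from the tip: the 10 most recent, then a step that doubles
// on every further iteration until height 0 is reached.
func locatorHeights(tip int64) []int64 {
    heights := make([]int64, 0, 32)
    step := int64(1)
    for height := tip; height > 0; height -= step {
        heights = append(heights, height)
        // After the first 10 entries, widen the gap exponentially.
        if len(heights) > 10 {
            step *= 2
        }
    }
    // Always include the genesis block as the final anchor.
    return append(heights, 0)
}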

View File

@@ -672,7 +672,7 @@ func TestTxOverflowErrors(t *testing.T) {
                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
-                0xff, 0xff, 0xff, 0xff, // Prevous output index
+                0xff, 0xff, 0xff, 0xff, // Previous output index
                 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                 0xff, // Varint for length of signature script
             }, pver, BaseEncoding, txVer, &MessageError{},
@@ -688,7 +688,7 @@ func TestTxOverflowErrors(t *testing.T) {
                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
-                0xff, 0xff, 0xff, 0xff, // Prevous output index
+                0xff, 0xff, 0xff, 0xff, // Previous output index
                 0x00, // Varint for length of signature script
                 0xff, 0xff, 0xff, 0xff, // Sequence
                 0x01, // Varint for number of output transactions
@@ -733,7 +733,7 @@ func TestTxSerializeSizeStripped(t *testing.T) {
         in   *MsgTx // Tx to encode
         size int    // Expected serialized size
     }{
-        // No inputs or outpus.
+        // No inputs or outputs.
         {noTx, 10},

         // Transcaction with an input and an output.
@@ -938,7 +938,7 @@ var multiTxEncoded = []byte{
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
-    0xff, 0xff, 0xff, 0xff, // Prevous output index
+    0xff, 0xff, 0xff, 0xff, // Previous output index
     0x07, // Varint for length of signature script
     0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, // Signature script
     0xff, 0xff, 0xff, 0xff, // Sequence
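The annotated byte dumps above spell out the transaction wire layout (previous output hash, output index, script length varint, script, sequence). A hedged decoding fragment follows; txBytes is assumed to hold a complete serialized transaction, and bytes, fmt, log and the wire package are assumed to be imported.

var tx wire.MsgTx
if err := tx.Deserialize(bytes.NewReader(txBytes)); err != nil {
    log.Fatalf("failed to decode transaction: %v", err)
}
for _, in := range tx.TxIn {
    // Each input references a previous output and carries a sequence.
    fmt.Printf("spends %s:%d (sequence %x)\n",
        in.PreviousOutPoint.Hash, in.PreviousOutPoint.Index, in.Sequence)
}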

View File

@@ -46,7 +46,7 @@ type MsgVersion struct {
     // connections.
     Nonce uint64

-    // The user agent that generated messsage. This is a encoded as a varString
+    // The user agent that generated message. This is a encoded as a varString
     // on the wire. This has a max length of MaxUserAgentLen.
     UserAgent string