Merge pull request #1856 from maurycy/typos

multi: fix various typos in comments
Olaoluwa Osuntokun 2018-09-27 20:38:10 -07:00 committed by GitHub
commit 6afee3d099
24 changed files with 37 additions and 37 deletions

@@ -419,7 +419,7 @@ func (b *breachArbiter) waitForSpendEvent(breachInfo *retributionInfo,
// We'll wait for any of the outputs to be spent, or that we are
// signalled to exit.
select {
-// A goroutine have signalled that a spend occured.
+// A goroutine have signalled that a spend occurred.
case <-anySpend:
// Signal for the remaining goroutines to exit.
close(exit)
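The comment above describes a fan-in: several watcher goroutines share a single `anySpend` channel, and closing `exit` stands the rest down. A minimal self-contained sketch of that pattern (hypothetical names, not lnd's actual breachArbiter code):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const numOutputs = 3

	// Buffered so every watcher can report without blocking.
	anySpend := make(chan struct{}, numOutputs)
	exit := make(chan struct{})

	var wg sync.WaitGroup
	spends := make([]chan struct{}, numOutputs)
	for i := range spends {
		spends[i] = make(chan struct{})
		wg.Add(1)
		go func(id int, spent <-chan struct{}) {
			defer wg.Done()
			select {
			case <-spent:
				anySpend <- struct{}{} // this watcher saw a spend
			case <-exit:
				fmt.Printf("watcher %d told to exit\n", id)
			}
		}(i, spends[i])
	}

	close(spends[1]) // simulate output 1 being spent

	<-anySpend  // any spend wakes us up...
	close(exit) // ...and the remaining goroutines are signalled to exit
	wg.Wait()
}
```

Buffering `anySpend` to the number of watchers lets every goroutine report without blocking, even after the first spend has already won the race.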

@@ -496,7 +496,7 @@ func (b *BitcoindNotifier) confDetailsFromTxIndex(txid *chainhash.Hash,
// then we may be able to dispatch it immediately.
tx, err := b.chainConn.GetRawTransactionVerbose(txid)
if err != nil {
-// If the transaction lookup was succesful, but it wasn't found
+// If the transaction lookup was successful, but it wasn't found
// within the index itself, then we can exit early. We'll also
// need to look at the error message returned as the error code
// is used for multiple errors.
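Since the RPC error code is shared between several failures, the comment says the error *message* has to be inspected. A sketch of that disambiguation (the sentinel and the matched message text are assumptions for illustration, not the notifier's actual code):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// errTxNotFoundInIndex is a hypothetical sentinel for "the lookup worked but
// the tx isn't in the index", as opposed to a genuine RPC failure.
var errTxNotFoundInIndex = errors.New("tx not found in index")

// classifyLookupErr sketches the disambiguation described above: the RPC
// error code is shared, so the message text must be inspected. The message
// string below is an assumption, not necessarily the backend's exact wording.
func classifyLookupErr(err error) error {
	if strings.Contains(err.Error(), "No such mempool or blockchain transaction") {
		return errTxNotFoundInIndex // safe to exit early and fall back
	}
	return err // a genuine RPC failure
}

func main() {
	rpcErr := errors.New("-5: No such mempool or blockchain transaction")
	fmt.Println(classifyLookupErr(rpcErr) == errTxNotFoundInIndex) // true
}
```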

@@ -566,7 +566,7 @@ func (b *BtcdNotifier) confDetailsFromTxIndex(txid *chainhash.Hash,
// then we may be able to dispatch it immediately.
tx, err := b.chainConn.GetRawTransactionVerbose(txid)
if err != nil {
-// If the transaction lookup was succesful, but it wasn't found
+// If the transaction lookup was successful, but it wasn't found
// within the index itself, then we can exit early. We'll also
// need to look at the error message returned as the error code
// is used for multiple errors.

@@ -211,7 +211,7 @@ func (tcn *TxConfNotifier) UpdateConfDetails(txid chainhash.Hash,
ntfn.details = details
// Now, we'll examine whether the transaction of this notification
-// request has reched its required number of confirmations. If it has,
+// request has reached its required number of confirmations. If it has,
// we'll disaptch a confirmation notification to the caller.
confHeight := details.BlockHeight + ntfn.NumConfirmations - 1
if confHeight <= tcn.currentHeight {
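The `- 1` accounts for the inclusion block itself counting as the first confirmation. A worked example of the check:

```go
package main

import "fmt"

func main() {
	// A tx already has 1 confirmation in its inclusion block, hence the -1:
	// mined at height 500000 and needing 3 confirmations, it is fully
	// confirmed once the chain reaches 500000 + 3 - 1 = 500002.
	var (
		blockHeight      uint32 = 500000
		numConfirmations uint32 = 3
		currentHeight    uint32 = 500002
	)

	confHeight := blockHeight + numConfirmations - 1
	fmt.Println(confHeight)                  // 500002
	fmt.Println(confHeight <= currentHeight) // true: dispatch the notification
}
```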

@@ -450,7 +450,7 @@ func (d *DB) FetchWaitingCloseChannels() ([]*OpenChannel, error) {
// fetchChannels attempts to retrieve channels currently stored in the
// database. The pending parameter determines whether only pending channels
// will be returned, or only open channels will be returned. The waitingClose
-// parameter determines wheter only channels waiting for a closing transaction
+// parameter determines whether only channels waiting for a closing transaction
// to be confirmed should be returned. If no active channels exist within the
// network, then ErrNoActiveChannels is returned.
func fetchChannels(d *DB, pending, waitingClose bool) ([]*OpenChannel, error) {
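As a sketch of the parameter semantics described in that doc comment (toy types for illustration, not the real channeldb API):

```go
package main

import "fmt"

// channel is a toy stand-in for channeldb.OpenChannel.
type channel struct {
	id           int
	pending      bool
	waitingClose bool
}

// filterChannels mirrors the doc comment's semantics: pending selects
// unconfirmed channels, waitingClose selects channels whose closing
// transaction has yet to confirm.
func filterChannels(all []channel, pending, waitingClose bool) []channel {
	var out []channel
	for _, c := range all {
		if c.pending == pending && c.waitingClose == waitingClose {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	chans := []channel{
		{id: 1, pending: true},      // funding tx unconfirmed
		{id: 2},                     // open and active
		{id: 3, waitingClose: true}, // close tx unconfirmed
	}
	fmt.Println(filterChannels(chans, false, true)) // [{3 false true}]
}
```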

@@ -1549,7 +1549,7 @@ func TestNodeUpdatesInHorizon(t *testing.T) {
len(nodeUpdates))
}
-// We'll create 10 node announcements, each with an update timestmap 10
+// We'll create 10 node announcements, each with an update timestamp 10
// seconds after the other.
const numNodes = 10
nodeAnns := make([]LightningNode, 0, numNodes)
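The staggered-timestamp setup the comment describes might look like this sketch (placeholder types, not the actual test code):

```go
package main

import (
	"fmt"
	"time"
)

// ann is a placeholder for channeldb.LightningNode, for illustration only.
type ann struct {
	alias      string
	lastUpdate time.Time
}

func main() {
	const numNodes = 10
	base := time.Unix(1538000000, 0)

	// Each announcement's update timestamp is 10 seconds after the previous
	// one, giving a deterministic ordering for update-horizon queries.
	anns := make([]ann, 0, numNodes)
	for i := 0; i < numNodes; i++ {
		anns = append(anns, ann{
			alias:      fmt.Sprintf("node-%d", i),
			lastUpdate: base.Add(time.Duration(10*i) * time.Second),
		})
	}
	fmt.Println(anns[9].lastUpdate.Sub(anns[0].lastUpdate)) // 1m30s
}
```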

@@ -264,7 +264,7 @@ func (c *ChannelArbitrator) Start() error {
// If the channel has been marked pending close in the database, and we
// haven't transitioned the state machine to StateContractClosed (or a
-// suceeding state), then a state transition most likely failed. We'll
+// succeeding state), then a state transition most likely failed. We'll
// try to recover from this by manually advancing the state by setting
// the corresponding close trigger.
trigger := chainTrigger
@@ -1530,7 +1530,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
// logged, we can safely close the channel. After this
// succeeds we won't be getting chain events anymore,
// so we must make sure we can recover on restart after
-// it is marked closed. If the next state transation
+// it is marked closed. If the next state transition
// fails, we'll start up in the prior state again, and
// we won't be longer getting chain events. In this
// case we must manually re-trigger the state
@@ -1593,7 +1593,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
// logged, we can safely close the channel. After this
// succeeds we won't be getting chain events anymore,
// so we must make sure we can recover on restart after
-// it is marked closed. If the next state transation
+// it is marked closed. If the next state transition
// fails, we'll start up in the prior state again, and
// we won't be longer getting chain events. In this
// case we must manually re-trigger the state

@@ -204,7 +204,7 @@ func TestGossipSyncerFilterGossipMsgsAllInMemory(t *testing.T) {
// through the gossiper to the target peer. Our message will consist of
// one node announcement above the horizon, one below. Additionally,
// we'll include a chan ann with an update below the horizon, one
-// with an update timestmap above the horizon, and one without any
+// with an update timestamp above the horizon, and one without any
// channel updates at all.
msgs := []msgWithSenders{
{

@@ -30,7 +30,7 @@ $ lnd --debuglevel=<subsystem>=<level>,<subsystem2>=<level>,...
`lnd` has a built-in feature which allows you to capture profiling data at
runtime using [pprof](https://golang.org/pkg/runtime/pprof/), a profiler for
Go. The profiler has negligible performance overhead during normal operations
-(unless you have explictly enabled CPU profiling).
+(unless you have explicitly enabled CPU profiling).
To enable this ability, start `lnd` with the `--profile` option using a free port.
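Under the hood this is the standard `net/http/pprof` pattern; a generic sketch of how such an endpoint is typically wired up (the port is an arbitrary choice here, not necessarily lnd's default):

```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* handlers on the default mux
)

func main() {
	// Serving the default mux on a side port exposes the profiler; the
	// overhead is negligible until a profile is actually requested.
	go func() {
		log.Println(http.ListenAndServe("localhost:9736", nil))
	}()

	select {} // stand-in for the daemon's real work
}
```

A profile can then be pulled with, e.g., `go tool pprof http://localhost:9736/debug/pprof/heap`.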

@@ -825,7 +825,7 @@ func assertAddedToRouterGraph(t *testing.T, alice, bob *testNode,
// confirmed. The last arguments can be set if we expect the nodes to advertise
// custom min_htlc values as part of their ChannelUpdate. We expect Alice to
// advertise the value required by Bob and vice versa. If they are not set the
-// advertised value will be checked againts the other node's default min_htlc
+// advertised value will be checked against the other node's default min_htlc
// value.
func assertChannelAnnouncements(t *testing.T, alice, bob *testNode,
customMinHtlc ...lnwire.MilliSatoshi) {

@@ -88,7 +88,7 @@ func newPaymentCircuit(hash *[32]byte, pkt *htlcPacket) *PaymentCircuit {
}
}
-// makePaymentCircuit initalizes a payment circuit on the stack using the
+// makePaymentCircuit initializes a payment circuit on the stack using the
// payment hash and an in-memory htlc packet.
func makePaymentCircuit(hash *[32]byte, pkt *htlcPacket) PaymentCircuit {
var addRef channeldb.AddRef

@@ -2856,7 +2856,7 @@ func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) {
assertLinkBandwidth(t, alice.link, aliceStartingBandwidth)
// Now, try to commit the last two payment circuits, which are unused
-// thus far. These should succeed without hestiation.
+// thus far. These should succeed without hesitation.
fwdActions = alice.commitCircuits(circuits[halfHtlcs:])
if len(fwdActions.Adds) != halfHtlcs {
t.Fatalf("expected %d packets to be added", halfHtlcs)
@@ -4528,7 +4528,7 @@ func TestChannelLinkCleanupSpuriousResponses(t *testing.T) {
// We start with he following scenario: Bob sends Alice two HTLCs, and a
// commitment dance ensures, leaving two HTLCs that Alice can respond
// to. Since Alice is in ExitSettle mode, we will then take over and
-// provide targetted fail messages to test the link's ability to cleanup
+// provide targeted fail messages to test the link's ability to cleanup
// spurious responses.
//
// Bob Alice

@@ -4492,7 +4492,7 @@ func testInvoiceRoutingHints(net *lntest.NetworkHarness, t *harnessTest) {
timeout := time.Duration(15 * time.Second)
const chanAmt = btcutil.Amount(100000)
-// Throughout this test, we'll be opening a channel betwen Alice and
+// Throughout this test, we'll be opening a channel between Alice and
// several other parties.
//
// First, we'll create a private channel between Alice and Bob. This
@@ -9544,7 +9544,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
assertTxInBlock(t, block, commitHash)
// After the force close transacion is mined, Carol should broadcast
-// her second level HTLC transacion. Bob will braodcast a sweep tx to
+// her second level HTLC transacion. Bob will broadcast a sweep tx to
// sweep his output in the channel with Carol. He can do this
// immediately, as the output is not timelocked since Carol was the one
// force closing.
@@ -9872,7 +9872,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
assertTxInBlock(t, block, commitHash)
// After the force close transacion is mined, Carol should broadcast
-// her second level HTLC transacion. Bob will braodcast a sweep tx to
+// her second level HTLC transacion. Bob will broadcast a sweep tx to
// sweep his output in the channel with Carol. He can do this
// immediately, as the output is not timelocked since Carol was the one
// force closing.
@@ -11494,7 +11494,7 @@ func testQueryRoutes(net *lntest.NetworkHarness, t *harnessTest) {
}
// For all hops except the last, we check that fee equals feePerHop
-// and amount to foward deducts feePerHop on each hop.
+// and amount to forward deducts feePerHop on each hop.
expectedAmtToForwardMSat := expectedTotalAmtMSat
for j, hop := range route.Hops[:len(route.Hops)-1] {
expectedAmtToForwardMSat -= feePerHopMSat
@@ -11520,7 +11520,7 @@ func testQueryRoutes(net *lntest.NetworkHarness, t *harnessTest) {
i, j, expectedAmtToForwardMSat, hop.AmtToForwardMsat)
}
}
-// Last hop should have zero fee and amount to foward should equal
+// Last hop should have zero fee and amount to forward should equal
// payment amount.
hop := route.Hops[len(route.Hops)-1]
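A worked example of the invariant these checks encode: every hop but the last charges `feePerHop`, the amount to forward shrinks by that fee at each hop, and the final hop forwards exactly the payment amount at zero fee (values chosen for illustration):

```go
package main

import "fmt"

func main() {
	const (
		paymentAmtMSat = 100_000_000 // 100,000 satoshis
		feePerHopMSat  = 1_000
		numHops        = 3
	)

	// Total sent = payment amount plus one fee per intermediate hop.
	expectedTotal := paymentAmtMSat + feePerHopMSat*(numHops-1)
	amtToForward := expectedTotal

	for hop := 0; hop < numHops-1; hop++ {
		amtToForward -= feePerHopMSat
		fmt.Printf("hop %d: fee=%d amt_to_forward=%d\n", hop, feePerHopMSat, amtToForward)
	}
	// Last hop: zero fee, forwards the payment amount itself.
	fmt.Printf("hop %d: fee=0 amt_to_forward=%d\n", numHops-1, paymentAmtMSat)
}
```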
@@ -11671,7 +11671,7 @@ func testRouteFeeCutoff(net *lntest.NetworkHarness, t *harnessTest) {
}
}
-// The payments should only be succesful across the route:
+// The payments should only be successful across the route:
// Alice -> Bob -> Dave
// Therefore, we'll update the fee policy on Carol's side for the
// channel between her and Dave to invalidate the route:

@@ -409,7 +409,7 @@ func (*ChangePasswordResponse) Descriptor() ([]byte, []int) { return fileDescrip
type Transaction struct {
// / The transaction hash
TxHash string `protobuf:"bytes,1,opt,name=tx_hash" json:"tx_hash,omitempty"`
-// / The transaction ammount, denominated in satoshis
+// / The transaction amount, denominated in satoshis
Amount int64 `protobuf:"varint,2,opt,name=amount" json:"amount,omitempty"`
// / The number of confirmations
NumConfirmations int32 `protobuf:"varint,3,opt,name=num_confirmations" json:"num_confirmations,omitempty"`

@@ -656,7 +656,7 @@ message Transaction {
/// The transaction hash
string tx_hash = 1 [ json_name = "tx_hash" ];
-/// The transaction ammount, denominated in satoshis
+/// The transaction amount, denominated in satoshis
int64 amount = 2 [ json_name = "amount" ];
/// The number of confirmations

@@ -2716,7 +2716,7 @@
"amount": {
"type": "string",
"format": "int64",
"title": "/ The transaction ammount, denominated in satoshis"
"title": "/ The transaction amount, denominated in satoshis"
},
"num_confirmations": {
"type": "integer",

@@ -705,7 +705,7 @@ type OpenChannelParams struct {
// unconfirmed outputs to fund the channel.
SpendUnconfirmed bool
-// MinHtlc is the htlc_minumum_msat value set when opening the channel.
+// MinHtlc is the htlc_minimum_msat value set when opening the channel.
MinHtlc lnwire.MilliSatoshi
}

@@ -3354,7 +3354,7 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// We owe them a commitment if the tip of their chain (from our Pov) is
// equal to what they think their next commit height should be. We'll
-// re-send all the updates neccessary to recreate this state, along
+// re-send all the updates necessary to recreate this state, along
// with the commit sig.
case msg.NextLocalCommitHeight == remoteTipHeight:
walletLog.Debugf("ChannelPoint(%v), sync: remote's next "+

@@ -1237,7 +1237,7 @@ func testTransactionSubscriptions(miner *rpctest.Harness,
select {
case <-time.After(time.Second * 10):
t.Fatalf("transactions not received after 10 seconds")
-case <-unconfirmedNtfns: // Fall through on successs
+case <-unconfirmedNtfns: // Fall through on success
}
}

@@ -493,7 +493,7 @@ func findPath(tx *bolt.Tx, graph *channeldb.ChannelGraph,
// For each node in the graph, we create an entry in the distance map
// for the node set with a distance of "infinity". graph.ForEachNode
// also returns the source node, so there is no need to add the source
-// node explictly.
+// node explicitly.
distance := make(map[Vertex]nodeWithDist)
if err := graph.ForEachNode(tx, func(_ *bolt.Tx, node *channeldb.LightningNode) error {
// TODO(roasbeef): with larger graph can just use disk seeks
@@ -597,7 +597,7 @@ func findPath(tx *bolt.Tx, graph *channeldb.ChannelGraph,
// Compute fee that fromNode is charging. It is based on the
// amount that needs to be sent to the next node in the route.
//
-// Source node has no precedessor to pay a fee. Therefore set
+// Source node has no predecessor to pay a fee. Therefore set
// fee to zero, because it should not be included in the fee
// limit check and edge weight.
//
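A sketch of the initialization these comments describe: every node starts at infinity, and the source is seeded at zero so it never needs explicit insertion and pays no predecessor fee (toy types, not the router's actual code):

```go
package main

import (
	"fmt"
	"math"
)

type vertex string

func main() {
	nodes := []vertex{"source", "a", "b"}

	// Every node starts at "infinity"; relaxing edges can only improve this.
	distance := make(map[vertex]float64, len(nodes))
	for _, n := range nodes {
		distance[n] = math.Inf(1)
	}

	// The source is its own starting point: zero distance, and having no
	// predecessor edge means no fee contributes to its weight.
	distance["source"] = 0

	fmt.Println(distance)
}
```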

@@ -709,7 +709,7 @@ func TestSendPaymentErrorNonFinalTimeLockErrors(t *testing.T) {
}
// Send off the payment request to the router, this payment should
-// suceed as we should actually go through Pham Nuwen in order to get
+// succeed as we should actually go through Pham Nuwen in order to get
// to Sophon, even though he has higher fees.
paymentPreImage, route, err := ctx.router.SendPayment(&payment)
if err != nil {

@@ -1053,7 +1053,7 @@ func (s *server) Stopped() bool {
return atomic.LoadInt32(&s.shutdown) != 0
}
-// configurePortForwarding attempts to set up port forwarding for the diffrent
+// configurePortForwarding attempts to set up port forwarding for the different
// ports that the server will be listening on.
//
// NOTE: This should only be used when using some kind of NAT traversal to
@@ -1094,7 +1094,7 @@ func (s *server) removePortForwarding() {
}
}
-// watchExternalIP continously checks for an updated external IP address every
+// watchExternalIP continuously checks for an updated external IP address every
// 15 minutes. Once a new IP address has been detected, it will automatically
// handle port forwarding rules and send updated node announcements to the
// currently connected peers.
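A generic sketch of the 15-minute polling loop described here (the discovery function is a placeholder, not lnd's actual mechanism):

```go
package main

import (
	"fmt"
	"time"
)

// fetchExternalIP is a placeholder for whatever discovery mechanism is used.
func fetchExternalIP() string { return "203.0.113.7" }

func watchExternalIP(quit <-chan struct{}) {
	ticker := time.NewTicker(15 * time.Minute)
	defer ticker.Stop()

	lastIP := fetchExternalIP()
	for {
		select {
		case <-ticker.C:
			if ip := fetchExternalIP(); ip != lastIP {
				lastIP = ip
				// On a change: refresh forwarding rules and re-announce
				// the node's addresses to connected peers.
				fmt.Println("external IP changed to", ip)
			}
		case <-quit:
			return
		}
	}
}

func main() {
	quit := make(chan struct{})
	go watchExternalIP(quit)
	close(quit)
	time.Sleep(10 * time.Millisecond) // let the watcher exit cleanly
}
```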
@@ -1165,7 +1165,7 @@ out:
}
// Now, we'll need to update the addresses in our node's
-// announcement in order to propogate the update
+// announcement in order to propagate the update
// throughout the network. We'll only include addresses
// that have a different IP from the previous one, as
// the previous IP is no longer valid.
@@ -2341,7 +2341,7 @@ func (s *server) peerInitializer(p *peer) {
s.wg.Add(1)
go s.peerTerminationWatcher(p, ready)
-// Start teh peer! If an error occurs, we Disconnect the peer, which
+// Start the peer! If an error occurs, we Disconnect the peer, which
// will unblock the peerTerminationWatcher.
if err := p.Start(); err != nil {
p.Disconnect(fmt.Errorf("unable to start peer: %v", err))

@@ -133,8 +133,8 @@ func (c *Controller) sendCommand(command string) (int, string, error) {
return code, reply, nil
}
-// parseTorReply parses the reply from the Tor server after receving a command
-// from a controller. This will parse the relevent reply parameters into a map
+// parseTorReply parses the reply from the Tor server after receiving a command
+// from a controller. This will parse the relevant reply parameters into a map
// of keys and values.
func parseTorReply(reply string) map[string]string {
params := make(map[string]string)
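A simplified sketch of the key=value parsing the comment describes; real Tor control-port replies also carry status codes and quoted values, which this illustration ignores:

```go
package main

import (
	"fmt"
	"strings"
)

// parseReplyParams splits space-separated KEY=VALUE pairs into a map. This
// is an illustration of the idea, not the controller's actual parser.
func parseReplyParams(reply string) map[string]string {
	params := make(map[string]string)
	for _, field := range strings.Fields(reply) {
		k, v, ok := strings.Cut(field, "=")
		if !ok {
			continue // skip fields without a key=value shape
		}
		params[k] = v
	}
	return params
}

func main() {
	fmt.Println(parseReplyParams("SERVICEID=abc123 PRIVATEKEY=ED25519-V3:xyz"))
	// map[PRIVATEKEY:ED25519-V3:xyz SERVICEID:abc123]
}
```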

@@ -233,7 +233,7 @@ func IsOnionHost(host string) bool {
// We'll now attempt to decode the host without its suffix, as the
// suffix includes invalid characters. This will tell us if the host is
-// actually valid if succesful.
+// actually valid if successful.
host = host[:suffixIndex]
if _, err := Base32Encoding.DecodeString(host); err != nil {
return false