multi: fix canceled spelling

Joost Jager 2019-10-03 17:22:43 +02:00
parent 989de44a69
commit b58dbb2d70
No known key found for this signature in database
GPG Key ID: A61B9D4C393C59C7
31 changed files with 603 additions and 603 deletions

View File

@@ -186,7 +186,7 @@ out:
// safely close the channel used to send epoch
// notifications, in order to notify any
// listeners that the intent has been
-// cancelled.
+// canceled.
close(b.blockEpochClients[msg.epochID].epochChan)
delete(b.blockEpochClients, msg.epochID)
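
The pattern in this hunk recurs throughout the commit: an epoch intent is canceled by closing the client's notification channel, which wakes every pending receiver at once. Below is a minimal, self-contained Go sketch of that idiom using hypothetical names (registry, cancel), not lnd's actual API.

package main

import (
	"fmt"
	"sync"
)

// registry tracks notification clients by ID, loosely mirroring the
// blockEpochClients map in the hunk above.
type registry struct {
	mu      sync.Mutex
	clients map[uint64]chan int
}

// cancel closes the client's channel to notify any listeners that the
// intent has been canceled, then removes the client from the map.
func (r *registry) cancel(id uint64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if ch, ok := r.clients[id]; ok {
		close(ch)
		delete(r.clients, id)
	}
}

func main() {
	r := &registry{clients: map[uint64]chan int{7: make(chan int)}}
	ch := r.clients[7]
	done := make(chan struct{})
	go func() {
		defer close(done)
		// A receive on a closed channel returns immediately with
		// ok == false; that is how listeners observe cancellation.
		if _, ok := <-ch; !ok {
			fmt.Println("epoch intent canceled")
		}
	}()
	r.cancel(7)
	<-done
}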

View File

@@ -300,7 +300,7 @@ out:
// safely close the channel used to send epoch
// notifications, in order to notify any
// listeners that the intent has been
-// cancelled.
+// canceled.
close(b.blockEpochClients[msg.epochID].epochChan)
delete(b.blockEpochClients, msg.epochID)
}

View File

@@ -141,7 +141,7 @@ type ChainNotifier interface {
// Stops the concrete ChainNotifier. Once stopped, the ChainNotifier
// should disallow any future requests from potential clients.
-// Additionally, all pending client notifications will be cancelled
+// Additionally, all pending client notifications will be canceled
// by closing the related channels on the *Event's.
Stop() error
}

View File

@@ -919,7 +919,7 @@ func testCancelSpendNtfn(node *rpctest.Harness,
notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
// We'd like to test that once a spend notification is registered, it
-// can be cancelled before the notification is dispatched.
+// can be canceled before the notification is dispatched.
// First, we'll start by creating a new output that we can spend
// ourselves.
@@ -1006,10 +1006,10 @@ func testCancelSpendNtfn(node *rpctest.Harness,
select {
case _, ok := <-spendClients[1].Spend:
if ok {
-t.Fatalf("spend ntfn should have been cancelled")
+t.Fatalf("spend ntfn should have been canceled")
}
case <-time.After(20 * time.Second):
-t.Fatalf("spend ntfn never cancelled")
+t.Fatalf("spend ntfn never canceled")
}
}
@@ -1045,7 +1045,7 @@ func testCancelEpochNtfn(node *rpctest.Harness,
select {
case _, ok := <-epochClients[0].Epochs:
if ok {
-t.Fatalf("epoch notification should have been cancelled")
+t.Fatalf("epoch notification should have been canceled")
}
case <-time.After(2 * time.Second):
t.Fatalf("epoch notification not sent")
@@ -1056,7 +1056,7 @@ func testCancelEpochNtfn(node *rpctest.Harness,
select {
case _, ok := <-epochClients[1].Epochs:
if !ok {
-t.Fatalf("epoch was cancelled")
+t.Fatalf("epoch was canceled")
}
case <-time.After(20 * time.Second):
t.Fatalf("epoch notification not sent")

View File

@@ -302,7 +302,7 @@ out:
// safely close the channel used to send epoch
// notifications, in order to notify any
// listeners that the intent has been
-// cancelled.
+// canceled.
close(n.blockEpochClients[msg.epochID].epochChan)
delete(n.blockEpochClients, msg.epochID)
}

View File

@@ -230,7 +230,7 @@ type Invoice struct {
AmtPaid lnwire.MilliSatoshi
// Htlcs records all htlcs that paid to this invoice. Some of these
-// htlcs may have been marked as cancelled.
+// htlcs may have been marked as canceled.
Htlcs map[CircuitKey]*InvoiceHTLC
}
@@ -241,9 +241,9 @@ const (
// HtlcStateAccepted indicates the htlc is locked-in, but not resolved.
HtlcStateAccepted HtlcState = iota
-// HtlcStateCancelled indicates the htlc is cancelled back to the
+// HtlcStateCanceled indicates the htlc is canceled back to the
// sender.
-HtlcStateCancelled
+HtlcStateCanceled
// HtlcStateSettled indicates the htlc is settled.
HtlcStateSettled
@@ -271,7 +271,7 @@ type InvoiceHTLC struct {
Expiry uint32
// State indicates the state the invoice htlc is currently in. A
-// cancelled htlc isn't just removed from the invoice htlcs map, because
+// canceled htlc isn't just removed from the invoice htlcs map, because
// we need AcceptHeight to properly cancel the htlc back.
State HtlcState
}
@@ -296,7 +296,7 @@ type InvoiceUpdateDesc struct {
// Htlcs describes the changes that need to be made to the invoice htlcs
// in the database. Htlc map entries with their value set should be
-// added. If the map value is nil, the htlc should be cancelled.
+// added. If the map value is nil, the htlc should be canceled.
Htlcs map[CircuitKey]*HtlcAcceptDesc
// Preimage must be set to the preimage when state is settled.
@@ -1219,7 +1219,7 @@ func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex *bbolt.Bucke
for key, htlcUpdate := range update.Htlcs {
htlc, ok := invoice.Htlcs[key]
-// No update means the htlc needs to be cancelled.
+// No update means the htlc needs to be canceled.
if htlcUpdate == nil {
if !ok {
return nil, fmt.Errorf("unknown htlc %v", key)
@@ -1229,7 +1229,7 @@ func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex *bbolt.Bucke
"accepted htlcs")
}
-htlc.State = HtlcStateCancelled
+htlc.State = HtlcStateCanceled
htlc.ResolveTime = now
invoice.AmtPaid -= htlc.Amt
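
The two updateInvoice hunks above encode a subtle rule: a canceled htlc is not deleted from the invoice's htlc map (its AcceptHeight is still needed to cancel the htlc back), but its state flips and its amount stops counting toward AmtPaid. A rough sketch of that update step, using trimmed-down stand-in types rather than the real channeldb schema:

package main

import (
	"fmt"
	"time"
)

// HtlcState mirrors the renamed channeldb constants.
type HtlcState uint8

const (
	HtlcStateAccepted HtlcState = iota
	HtlcStateCanceled
	HtlcStateSettled
)

// invoiceHTLC is a trimmed-down stand-in for channeldb.InvoiceHTLC.
type invoiceHTLC struct {
	Amt          int64
	AcceptHeight uint32
	ResolveTime  time.Time
	State        HtlcState
}

// cancelHtlc applies the same update the hunk above performs: the htlc
// stays in the map, its state flips to canceled, its resolve time is
// recorded, and its amount no longer counts toward the paid total.
func cancelHtlc(amtPaid *int64, htlc *invoiceHTLC) error {
	if htlc.State == HtlcStateSettled {
		return fmt.Errorf("cannot cancel a settled htlc")
	}
	htlc.State = HtlcStateCanceled
	htlc.ResolveTime = time.Now()
	*amtPaid -= htlc.Amt
	return nil
}

func main() {
	amtPaid := int64(1000)
	htlc := &invoiceHTLC{Amt: 1000, AcceptHeight: 600000, State: HtlcStateAccepted}
	if err := cancelHtlc(&amtPaid, htlc); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("state=%d amtPaid=%d\n", htlc.State, amtPaid)
}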

View File

@@ -180,7 +180,7 @@ func (s *server) ConnectPeer(nodePub *btcec.PublicKey, addrs []net.Addr) error {
// For each of the known addresses, we'll attempt to launch a
// persistent connection to the (pub, addr) pair. In the event that any
-// of them connect, all the other stale requests will be cancelled.
+// of them connect, all the other stale requests will be canceled.
for _, addr := range addrs {
netAddr := &lnwire.NetAddress{
IdentityKey: nodePub,

View File

@@ -2025,7 +2025,7 @@ func closedChannels(ctx *cli.Context) error {
LocalForce: ctx.Bool("local_force"),
RemoteForce: ctx.Bool("remote_force"),
Breach: ctx.Bool("breach"),
-FundingCanceled: ctx.Bool("funding_cancelled"),
+FundingCanceled: ctx.Bool("funding_canceled"),
Abandoned: ctx.Bool("abandoned"),
}

View File

@@ -34,7 +34,7 @@ type ResolutionMsg struct {
// commitment trace.
HtlcIndex uint64
-// Failure will be non-nil if the incoming contract should be cancelled
+// Failure will be non-nil if the incoming contract should be canceled
// altogether. This can happen if the outgoing contract was dust, or
// if the outgoing HTLC timed out.
Failure lnwire.FailureMessage

View File

@@ -1785,7 +1785,7 @@ func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) {
)
// Now that we've sent this signal, we should have that
-// HTLC be cancelled back immediately.
+// HTLC be canceled back immediately.
select {
case msgs := <-chanArbCtx.resolutions:
if len(msgs) != 1 {

View File

@@ -152,7 +152,7 @@ func (h *htlcOutgoingContestResolver) Resolve() (ContractResolver, error) {
return h.claimCleanUp(commitSpend)
case <-h.Quit:
-return nil, fmt.Errorf("resolver cancelled")
+return nil, fmt.Errorf("resolver canceled")
}
}
}

View File

@@ -1434,7 +1434,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
// With the HTLC settled, we'll need to populate the wire
// message to target the specific channel and HTLC to be
-// cancelled.
+// canceled.
htlc.ChanID = l.ChanID()
htlc.ID = pkt.incomingHTLCID
@@ -1491,7 +1491,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
// With the HTLC removed, we'll need to populate the wire
// message to target the specific channel and HTLC to be
-// cancelled. The "Reason" field will have already been set
+// canceled. The "Reason" field will have already been set
// within the switch.
htlc.ChanID = l.ChanID()
htlc.ID = pkt.incomingHTLCID
@@ -2489,7 +2489,7 @@ func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg,
continue
}
-// Fetch the reason the HTLC was cancelled so we can
+// Fetch the reason the HTLC was canceled so we can
// continue to propagate it.
failPacket := &htlcPacket{
outgoingChanID: l.ShortChanID(),
@@ -2650,7 +2650,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
// If we're unable to process the onion payload, or we
// received a malformed TLV stream, then we should
// send an error back to the caller so the HTLC can be
-// cancelled.
+// canceled.
l.sendHTLCError(
pd.HtlcIndex,
lnwire.NewInvalidOnionVersion(onionBlob[:]),

View File

@@ -36,7 +36,7 @@ type PaymentResult struct {
Preimage [32]byte
// Error is non-nil in case a HTLC send failed, and the HTLC is now
-// irrevocably cancelled. If the payment failed during forwarding, this
+// irrevocably canceled. If the payment failed during forwarding, this
// error will be a *ForwardingError.
Error error
}

View File

@@ -954,7 +954,7 @@ func (s *Switch) parseFailedPayment(deobfuscator ErrorDecrypter,
// go on chain.
case isResolution && htlc.Reason == nil:
userErr := fmt.Sprintf("payment was resolved "+
-"on-chain, then cancelled back (hash=%v, pid=%d)",
+"on-chain, then canceled back (hash=%v, pid=%d)",
paymentHash, paymentID)
return &ForwardingError{
@@ -1861,7 +1861,7 @@ func (s *Switch) reforwardSettleFails(fwdPkgs []*channeldb.FwdPkg) {
// commitment state, so we'll forward this to the switch so the
// backwards undo can continue.
case lnwallet.Fail:
-// Fetch the reason the HTLC was cancelled so we can
+// Fetch the reason the HTLC was canceled so we can
// continue to propagate it.
failPacket := &htlcPacket{
outgoingChanID: fwdPkg.Source,

View File

@@ -449,7 +449,7 @@ func (i *InvoiceRegistry) NotifyExitHopHtlc(rHash lntypes.Hash,
htlc, ok := inv.Htlcs[circuitKey]
if ok {
switch htlc.State {
-case channeldb.HtlcStateCancelled:
+case channeldb.HtlcStateCanceled:
debugLog("replayed htlc to canceled invoice")
case channeldb.HtlcStateAccepted:
@@ -567,7 +567,7 @@ func (i *InvoiceRegistry) NotifyExitHopHtlc(rHash lntypes.Hash,
acceptHeight := int32(invoiceHtlc.AcceptHeight)
switch invoiceHtlc.State {
-case channeldb.HtlcStateCancelled:
+case channeldb.HtlcStateCanceled:
return &HodlEvent{
CircuitKey: circuitKey,
AcceptHeight: acceptHeight,
@@ -662,7 +662,7 @@ func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) error {
return nil, channeldb.ErrInvoiceAlreadyCanceled
}
-// Mark individual held htlcs as cancelled.
+// Mark individual held htlcs as canceled.
canceledHtlcs := make(
map[channeldb.CircuitKey]*channeldb.HtlcAcceptDesc,
)
@@ -674,10 +674,10 @@ func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) error {
return nil, errors.New("cannot cancel " +
"invoice with settled htlc(s)")
-// Don't cancel htlcs that were already cancelled,
+// Don't cancel htlcs that were already canceled,
// because it would incorrectly modify the invoice paid
// amt.
-case channeldb.HtlcStateCancelled:
+case channeldb.HtlcStateCanceled:
continue
}
@@ -711,7 +711,7 @@ func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) error {
// before, will be notified again. This isn't necessary but doesn't hurt
// either.
for key, htlc := range invoice.Htlcs {
-if htlc.State != channeldb.HtlcStateCancelled {
+if htlc.State != channeldb.HtlcStateCanceled {
continue
}
@@ -749,7 +749,7 @@ type invoiceSubscriptionKit struct {
inv *InvoiceRegistry
ntfnQueue *queue.ConcurrentQueue
-cancelled uint32 // To be used atomically.
+canceled uint32 // To be used atomically.
cancelChan chan struct{}
wg sync.WaitGroup
}
@@ -801,7 +801,7 @@ type SingleInvoiceSubscription struct {
// Cancel unregisters the InvoiceSubscription, freeing any previously allocated
// resources.
func (i *invoiceSubscriptionKit) Cancel() {
-if !atomic.CompareAndSwapUint32(&i.cancelled, 0, 1) {
+if !atomic.CompareAndSwapUint32(&i.canceled, 0, 1) {
return
}
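
The Cancel method above uses an atomic compare-and-swap on the canceled flag to stay idempotent: concurrent or repeated calls must not close cancelChan twice. A minimal sketch of the same guard:

package main

import (
	"fmt"
	"sync/atomic"
)

// subscription mirrors the invoiceSubscriptionKit fields touched
// above: an atomically-updated canceled flag plus a cancel channel.
type subscription struct {
	canceled   uint32 // To be used atomically.
	cancelChan chan struct{}
}

// Cancel is safe to call from multiple goroutines: the compare-and-swap
// lets exactly one caller through, so cancelChan is closed only once.
func (s *subscription) Cancel() {
	if !atomic.CompareAndSwapUint32(&s.canceled, 0, 1) {
		return
	}
	close(s.cancelChan)
}

func main() {
	s := &subscription{cancelChan: make(chan struct{})}
	s.Cancel()
	s.Cancel() // A second call is a no-op rather than a double close.
	<-s.cancelChan
	fmt.Println("subscription canceled exactly once")
}

In more recent Go, sync.Once or atomic.Bool expresses the same run-exactly-once guarantee more directly.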

View File

@@ -461,7 +461,7 @@ func TestSettleHoldInvoice(t *testing.T) {
t.Fatalf("expected settle to succeed but got %v", err)
}
if event == nil || event.Preimage != nil {
-t.Fatalf("expected htlc to be cancelled")
+t.Fatalf("expected htlc to be canceled")
}
// We expect the accepted state to be sent to the single invoice

View File

@@ -68,8 +68,8 @@ func CreateRPCInvoice(invoice *channeldb.Invoice,
state = lnrpc.InvoiceHTLCState_ACCEPTED
case channeldb.HtlcStateSettled:
state = lnrpc.InvoiceHTLCState_SETTLED
-case channeldb.HtlcStateCancelled:
-state = lnrpc.InvoiceHTLCState_CANCELLED
+case channeldb.HtlcStateCanceled:
+state = lnrpc.InvoiceHTLCState_CANCELED
default:
return nil, fmt.Errorf("unknown state %v", htlc.State)
}

File diff suppressed because it is too large

View File

@@ -2204,7 +2204,7 @@ message Invoice {
enum InvoiceHTLCState {
ACCEPTED = 0;
SETTLED = 1;
-CANCELLED = 2;
+CANCELED = 2;
}
/// Details of an HTLC that paid to an invoice
@@ -2224,7 +2224,7 @@ message InvoiceHTLC {
/// Time at which this htlc was accepted.
int64 accept_time = 5 [json_name = "accept_time"];
-/// Time at which this htlc was settled or cancelled.
+/// Time at which this htlc was settled or canceled.
int64 resolve_time = 6 [json_name = "resolve_time"];
/// Block height at which this htlc expires.
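
Worth noting about the two proto hunks above: renaming CANCELLED to CANCELED keeps the tag number 2, and protobuf identifies enum values by number on the wire, so existing binary clients are unaffected. What does change are the generated identifiers (the Go constant becomes lnrpc.InvoiceHTLCState_CANCELED, as the hunk earlier in this commit shows) and the string form emitted by JSON encodings, so any caller matching on the old name has to be updated alongside.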

View File

@@ -2605,7 +2605,7 @@
"resolve_time": {
"type": "string",
"format": "int64",
-"description": "/ Time at which this htlc was settled or cancelled."
+"description": "/ Time at which this htlc was settled or canceled."
},
"expiry_height": {
"type": "integer",
@@ -2624,7 +2624,7 @@
"enum": [
"ACCEPTED",
"SETTLED",
-"CANCELLED"
+"CANCELED"
],
"default": "ACCEPTED"
},

View File

@@ -8818,7 +8818,7 @@ out:
}
// The balances of all parties should be the same as initially since
-// the HTLC was cancelled.
+// the HTLC was canceled.
assertBaseBalance()
// Next, we'll test the case of a recognized payHash but an incorrect
@@ -8856,7 +8856,7 @@
}
// The balances of all parties should be the same as initially since
-// the HTLC was cancelled.
+// the HTLC was canceled.
assertBaseBalance()
// Next we'll test an error that occurs mid-route due to an outgoing
@@ -10212,7 +10212,7 @@ func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness,
// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the
// outgoing HTLC is about to time out, then we'll go to chain in order to claim
-// it. Any dust HTLC's should be immediately cancelled backwards. Once the
+// it. Any dust HTLC's should be immediately canceled backwards. Once the
// timeout has been reached, then we should sweep it on-chain, and cancel the
// HTLC backwards.
func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) {
@@ -10316,7 +10316,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) {
// Mine a block to confirm the closing transaction.
mineBlocks(t, net, 1, 1)
-// At this point, Bob should have cancelled backwards the dust HTLC
+// At this point, Bob should have canceled backwards the dust HTLC
// that we sent earlier. This means Alice should now only have a single
// HTLC on her channel.
nodes = []*lntest.HarnessNode{net.Alice}
@@ -10611,7 +10611,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
assertTxInBlock(t, block, timeoutTx)
// With the second layer timeout transaction confirmed, Bob should have
-// cancelled backwards the HTLC that carol sent.
+// canceled backwards the HTLC that carol sent.
nodes = []*lntest.HarnessNode{net.Alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)

View File

@@ -325,12 +325,12 @@ type PaymentDescriptor struct {
// NOTE: Populated only in payment descriptor with MalformedFail type.
ShaOnionBlob [sha256.Size]byte
-// FailReason stores the reason why a particular payment was cancelled.
+// FailReason stores the reason why a particular payment was canceled.
//
// NOTE: Populated only in fail payment descriptor entry types.
FailReason []byte
-// FailCode stores the code why a particular payment was cancelled.
+// FailCode stores the code why a particular payment was canceled.
//
// NOTE: Populated only in payment descriptor with MalformedFail type.
FailCode lnwire.FailCode

View File

@@ -707,7 +707,7 @@ func testCancelNonExistentReservation(miner *rpctest.Harness,
// Attempt to cancel this reservation. This should fail, we know
// nothing of it.
if err := res.Cancel(); err == nil {
-t.Fatalf("cancelled non-existent reservation")
+t.Fatalf("canceled non-existent reservation")
}
}

View File

@@ -56,7 +56,7 @@ func (c *ChannelContribution) toChanConfig() channeldb.ChannelConfig {
// reservation workflow, resources consumed by a contribution are "locked"
// themselves. This prevents a number of race conditions such as two funding
// transactions double-spending the same input. A reservation can also be
-// cancelled, which removes the resources from limbo, allowing another
+// canceled, which removes the resources from limbo, allowing another
// reservation to claim them.
//
// The reservation workflow consists of the following three steps:

View File

@@ -363,7 +363,7 @@ func (l *LightningWallet) ResetReservations() {
}
// ActiveReservations returns a slice of all the currently active
-// (non-cancelled) reservations.
+// (non-canceled) reservations.
func (l *LightningWallet) ActiveReservations() []*ChannelReservation {
reservations := make([]*ChannelReservation, 0, len(l.fundingLimbo))
for _, reservation := range l.fundingLimbo {
@@ -520,7 +520,7 @@ func (l *LightningWallet) handleFundingReserveRequest(req *InitFundingReserveMsg
// Funding reservation request successfully handled. The funding inputs
// will be marked as unavailable until the reservation is either
-// completed, or cancelled.
+// completed, or canceled.
req.resp <- reservation
req.err <- nil
}

View File

@@ -46,7 +46,7 @@ const (
FlagUpdate FailCode = 0x1000
)
-// FailCode specifies the precise reason that an upstream HTLC was cancelled.
+// FailCode specifies the precise reason that an upstream HTLC was canceled.
// Each UpdateFailHTLC message carries a FailCode which is to be passed
// backwards, encrypted at each step back to the source of the HTLC within the
// route.

View File

@@ -712,7 +712,7 @@ func TestNodeUpdateNotification(t *testing.T) {
}
}
-// TestNotificationCancellation tests that notifications are properly cancelled
+// TestNotificationCancellation tests that notifications are properly canceled
// when the client wishes to exit.
func TestNotificationCancellation(t *testing.T) {
t.Parallel()
@@ -800,7 +800,7 @@ func TestNotificationCancellation(t *testing.T) {
t.Fatal("notification sent but shouldn't have been")
case <-time.After(time.Second * 5):
-t.Fatal("notification client never cancelled")
+t.Fatal("notification client never canceled")
}
}

View File

@@ -4134,7 +4134,7 @@ func (r *rpcServer) SubscribeChannelGraph(req *lnrpc.GraphTopologySubscription,
case topChange, ok := <-client.TopologyChanges:
// If the second value from the channel read is nil,
// then this means that the channel router is exiting
-// or the notification client was cancelled. So we'll
+// or the notification client was canceled. So we'll
// exit early.
if !ok {
return errors.New("server shutting down")

View File

@@ -2551,7 +2551,7 @@ func (s *server) OutboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn)
return
}
if _, ok := s.persistentConnReqs[pubStr]; !ok && connReq != nil {
-srvrLog.Debugf("Ignoring cancelled outbound connection")
+srvrLog.Debugf("Ignoring canceled outbound connection")
s.connMgr.Remove(connReq.ID())
conn.Close()
return

View File

@@ -8,7 +8,7 @@ import (
)
// TestSubscribe tests that the subscription clients receive the updates sent
-// to them after they subscribe, and that cancelled clients don't get more
+// to them after they subscribe, and that canceled clients don't get more
// updates.
func TestSubscribe(t *testing.T) {
t.Parallel()
@@ -69,13 +69,13 @@ func TestSubscribe(t *testing.T) {
switch {
// We expect the first third of the clients to quit, since they
-// were cancelled.
+// were canceled.
case i < numClients/3:
select {
case <-c.Quit():
continue
case <-time.After(1 * time.Second):
-t.Fatalf("cancelled client %v did not quit", i)
+t.Fatalf("canceled client %v did not quit", i)
}
// The next third should receive all updates.

View File

@@ -166,7 +166,7 @@ func (m *MockNotifier) RegisterBlockEpochNtfn(
return &chainntnfs.BlockEpochEvent{
Epochs: epochChan,
Cancel: func() {
-log.Tracef("Mock block ntfn cancelled")
+log.Tracef("Mock block ntfn canceled")
m.mutex.Lock()
delete(m.epochChan, epochChan)
m.mutex.Unlock()