Merge pull request #328 from halseth/funding-double-fundinglocked

FundingLocked improvements
Commit 4153712ba7 by Olaoluwa Osuntokun, 2017-10-02 16:20:33 -07:00 (committed by GitHub).
8 changed files with 854 additions and 314 deletions


@@ -190,6 +190,12 @@ type fundingConfig struct {
// channel's funding transaction and initial commitment transaction.
SendToPeer func(target *btcec.PublicKey, msgs ...lnwire.Message) error
// NotifyWhenOnline allows the FundingManager to register with a
// subsystem that will notify it when the peer comes online.
// This is used when sending the fundingLocked message, since it MUST be
// delivered after the funding transaction is confirmed.
NotifyWhenOnline func(peer *btcec.PublicKey, connectedChan chan<- struct{})
// FindPeer searches the list of peers connected to the node so that
// the FundingManager can notify other daemon subsystems as necessary
// during the funding process.
@@ -286,6 +292,9 @@ type fundingManager struct {
localDiscoveryMtx sync.Mutex
localDiscoverySignals map[lnwire.ChannelID]chan struct{}
handleFundingLockedMtx sync.RWMutex
handleFundingLockedBarriers map[lnwire.ChannelID]struct{}
quit chan struct{}
wg sync.WaitGroup
}
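Taken together, handleFundingLockedMtx and handleFundingLockedBarriers form a per-channel latch: a handler first claims the channel ID, bails out if another goroutine already holds the claim, and releases it on exit. A condensed sketch of the pattern; tryClaimBarrier and releaseBarrier are hypothetical helper names, not functions from this diff.

// tryClaimBarrier claims the per-channel barrier, returning false if
// another handleFundingLocked call for this channel is in flight.
func (f *fundingManager) tryClaimBarrier(chanID lnwire.ChannelID) bool {
	f.handleFundingLockedMtx.Lock()
	defer f.handleFundingLockedMtx.Unlock()
	if _, ok := f.handleFundingLockedBarriers[chanID]; ok {
		return false
	}
	f.handleFundingLockedBarriers[chanID] = struct{}{}
	return true
}

// releaseBarrier lifts the claim so a later fundingLocked (e.g. a
// legitimate retransmission) can be processed.
func (f *fundingManager) releaseBarrier(chanID lnwire.ChannelID) {
	f.handleFundingLockedMtx.Lock()
	delete(f.handleFundingLockedBarriers, chanID)
	f.handleFundingLockedMtx.Unlock()
}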
@@ -323,16 +332,17 @@ var (
// fundingManager.
func newFundingManager(cfg fundingConfig) (*fundingManager, error) {
return &fundingManager{
cfg: &cfg,
chanIDKey: cfg.TempChanIDSeed,
activeReservations: make(map[serializedPubKey]pendingChannels),
signedReservations: make(map[lnwire.ChannelID][32]byte),
newChanBarriers: make(map[lnwire.ChannelID]chan struct{}),
fundingMsgs: make(chan interface{}, msgBufferSize),
fundingRequests: make(chan *initFundingMsg, msgBufferSize),
localDiscoverySignals: make(map[lnwire.ChannelID]chan struct{}),
queries: make(chan interface{}, 1),
quit: make(chan struct{}),
cfg: &cfg,
chanIDKey: cfg.TempChanIDSeed,
activeReservations: make(map[serializedPubKey]pendingChannels),
signedReservations: make(map[lnwire.ChannelID][32]byte),
newChanBarriers: make(map[lnwire.ChannelID]chan struct{}),
fundingMsgs: make(chan interface{}, msgBufferSize),
fundingRequests: make(chan *initFundingMsg, msgBufferSize),
localDiscoverySignals: make(map[lnwire.ChannelID]chan struct{}),
handleFundingLockedBarriers: make(map[lnwire.ChannelID]struct{}),
queries: make(chan interface{}, 1),
quit: make(chan struct{}),
}, nil
}
@@ -420,10 +430,22 @@ func (f *fundingManager) Start() error {
return err
}
fndgLog.Debugf("channel with opening state %v found",
channelState)
chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint)
fndgLog.Debugf("channel (%v) with opening state %v found",
chanID, channelState)
// Set up the channel barriers again, to make sure
// waitUntilChannelOpen correctly waits until the opening
// process is completely over.
f.barrierMtx.Lock()
fndgLog.Tracef("Loading pending ChannelPoint(%v), "+
"creating chan barrier", channel.FundingOutpoint)
f.newChanBarriers[chanID] = make(chan struct{})
f.barrierMtx.Unlock()
// Set up a localDiscoverySignals entry to make sure we finish sending
// our own fundingLocked and channel announcements before
// processing a received fundingLocked.
f.localDiscoverySignals[chanID] = make(chan struct{})
// If we did find the channel in the opening state database, we
@@ -587,6 +609,7 @@ func (f *fundingManager) reservationCoordinator() {
case *fundingSignedMsg:
f.handleFundingSigned(fmsg)
case *fundingLockedMsg:
f.wg.Add(1)
go f.handleFundingLocked(fmsg)
case *fundingErrorMsg:
f.handleErrorMsg(fmsg)
@@ -1436,18 +1459,46 @@ func (f *fundingManager) sendFundingLockedAndAnnounceChannel(
}
fundingLockedMsg := lnwire.NewFundingLocked(chanID, nextRevocation)
err = f.cfg.SendToPeer(completeChan.IdentityPub, fundingLockedMsg)
if err != nil {
fndgLog.Errorf("unable to send fundingLocked to peer: %v", err)
return
// If the peer has disconnected before we reach this point, we will
// need to wait for it to come back online before sending the
// fundingLocked message. This is a special case for fundingLocked:
// failing to send any of the earlier messages in the funding flow
// simply cancels the flow. But now that the funding transaction is
// confirmed, the channel is open, and we have to make sure the peer
// gets the fundingLocked message when it comes back online. This is
// also crucial during a restart of lnd, where we might try to resend
// the fundingLocked message before the server has had time to connect
// to the peer. We keep trying to send fundingLocked until we succeed,
// or the fundingManager is shut down.
for {
err = f.cfg.SendToPeer(completeChan.IdentityPub,
fundingLockedMsg)
if err == nil {
// Sending succeeded, we can break out and continue
// the funding flow.
break
}
fndgLog.Warnf("unable to send fundingLocked to peer %x: "+
"%v. Will retry when online",
completeChan.IdentityPub.SerializeCompressed(), err)
connected := make(chan struct{})
f.cfg.NotifyWhenOnline(completeChan.IdentityPub, connected)
select {
case <-connected:
// Retry sending.
case <-f.quit:
return
}
}
// Now that the fundingLocked message has been sent to the peer, the
// channel is moved to the next state of the state machine. It will be
// moved to the last state (actually deleted from the database) after
// the channel is finally announced.
err = f.saveChannelOpeningState(&completeChan.FundingOutpoint, fundingLockedSent,
shortChanID)
err = f.saveChannelOpeningState(&completeChan.FundingOutpoint,
fundingLockedSent, shortChanID)
if err != nil {
fndgLog.Errorf("error setting channel state to "+
"fundingLockedSent: %v", err)
@@ -1520,17 +1571,46 @@ func (f *fundingManager) processFundingLocked(msg *lnwire.FundingLocked,
// handleFundingLocked finalizes the channel funding process and enables the
// channel to enter normal operating mode.
func (f *fundingManager) handleFundingLocked(fmsg *fundingLockedMsg) {
defer f.wg.Done()
// If we are currently in the process of handling a funding locked
// message for this channel, ignore.
f.handleFundingLockedMtx.Lock()
_, ok := f.handleFundingLockedBarriers[fmsg.msg.ChanID]
if ok {
fndgLog.Infof("Already handling fundingLocked for "+
"ChannelID(%v), ignoring.", fmsg.msg.ChanID)
f.handleFundingLockedMtx.Unlock()
return
}
// If we're not already handling fundingLocked for this channel, set up
// the barrier and move on.
f.handleFundingLockedBarriers[fmsg.msg.ChanID] = struct{}{}
f.handleFundingLockedMtx.Unlock()
defer func() {
f.handleFundingLockedMtx.Lock()
delete(f.handleFundingLockedBarriers, fmsg.msg.ChanID)
f.handleFundingLockedMtx.Unlock()
}()
f.localDiscoveryMtx.Lock()
localDiscoverySignal, ok := f.localDiscoverySignals[fmsg.msg.ChanID]
f.localDiscoveryMtx.Unlock()
if ok {
// Before we proceed with processing the funding locked
// message, we'll wait for the lcoal waitForFundingConfirmation
// message, we'll wait for the local waitForFundingConfirmation
// goroutine to signal that it has the necessary state in
// place. Otherwise, we may be missing critical information
// required to handle forwarded HTLC's.
<-localDiscoverySignal
select {
case <-localDiscoverySignal:
// Fallthrough
case <-f.quit:
return
}
// With the signal received, we can now safely delete the entry
// from the map.
@@ -1550,7 +1630,14 @@ func (f *fundingManager) handleFundingLocked(fmsg *fundingLockedMsg) {
return
}
// TODO(roasbeef): done nothing if repeat message sent
// If the RemoteNextRevocation is non-nil, it means that we have
// already processed fundingLocked for this channel, so ignore.
if channel.RemoteNextRevocation() != nil {
fndgLog.Infof("Received duplicate fundingLocked for "+
"ChannelID(%v), ignoring.", chanID)
channel.Stop()
return
}
// The funding locked message contains the next commitment point we'll
// need to create the next commitment state for the remote party. So
@@ -1565,7 +1652,11 @@ func (f *fundingManager) handleFundingLocked(fmsg *fundingLockedMsg) {
// With the channel retrieved, we'll send the breach arbiter the new
// channel so it can watch for attempts to breach the channel's
// contract by the remote party.
f.cfg.ArbiterChan <- channel
select {
case f.cfg.ArbiterChan <- channel:
case <-f.quit:
return
}
// Launch a defer so we _ensure_ that the channel barrier is properly
// closed even if the target peer is no longer online at this point.
@@ -1574,9 +1665,13 @@ func (f *fundingManager) handleFundingLocked(fmsg *fundingLockedMsg) {
// that commitment related modifications to this channel can
// now proceed.
f.barrierMtx.Lock()
fndgLog.Tracef("Closing chan barrier for ChanID(%v)", chanID)
close(f.newChanBarriers[chanID])
delete(f.newChanBarriers, chanID)
chanBarrier, ok := f.newChanBarriers[chanID]
if ok {
fndgLog.Tracef("Closing chan barrier for ChanID(%v)",
chanID)
close(chanBarrier)
delete(f.newChanBarriers, chanID)
}
f.barrierMtx.Unlock()
}()
@@ -1592,7 +1687,12 @@ func (f *fundingManager) handleFundingLocked(fmsg *fundingLockedMsg) {
channel: channel,
done: newChanDone,
}
peer.newChannels <- newChanMsg
select {
case peer.newChannels <- newChanMsg:
case <-f.quit:
return
}
// We pause here to wait for the peer to recognize the new channel
// before we close the channel barrier corresponding to the channel.

File diff suppressed because it is too large.


@@ -322,7 +322,26 @@ func (l *channelLink) htlcManager() {
// TODO(roasbeef): fail chan in case of protocol violation
// TODO(roasbeef): resend funding locked if state zero
// If the number of updates on this channel is zero, we should resend
// the fundingLocked message. In this case we cannot be sure that the
// peer actually received the last fundingLocked we sent, so we resend
// it now.
if l.channel.StateSnapshot().NumUpdates == 0 {
log.Debugf("Resending fundingLocked message to peer.")
nextRevocation, err := l.channel.NextRevocationKey()
if err != nil {
log.Errorf("unable to create next revocation: %v", err)
}
fundingLockedMsg := lnwire.NewFundingLocked(l.ChanID(),
nextRevocation)
err = l.cfg.Peer.SendMessage(fundingLockedMsg)
if err != nil {
log.Errorf("failed resending fundingLocked to peer: %v",
err)
}
}
out:
for {


@@ -280,6 +280,9 @@ func (s *mockServer) readHandler(message lnwire.Message) error {
targetChan = msg.ChanID
case *lnwire.CommitSig:
targetChan = msg.ChanID
case *lnwire.FundingLocked:
// Ignore
return nil
default:
return errors.New("unknown message type")
}

lnd.go (9 changed lines)

@@ -186,10 +186,11 @@ func lndMain() error {
idPrivKey.PubKey())
return <-errChan
},
ArbiterChan: server.breachArbiter.newContracts,
SendToPeer: server.SendToPeer,
FindPeer: server.FindPeer,
TempChanIDSeed: chanIDSeed,
ArbiterChan: server.breachArbiter.newContracts,
SendToPeer: server.SendToPeer,
NotifyWhenOnline: server.NotifyWhenOnline,
FindPeer: server.FindPeer,
TempChanIDSeed: chanIDSeed,
FindChannel: func(chanID lnwire.ChannelID) (*lnwallet.LightningChannel, error) {
dbChannels, err := chanDB.FetchAllChannels()
if err != nil {


@@ -4000,3 +4000,11 @@ func CreateCooperativeCloseTx(fundingTxIn *wire.TxIn,
func (lc *LightningChannel) CalcFee(feeRate uint64) uint64 {
return (feeRate * uint64(commitWeight)) / 1000
}
// RemoteNextRevocation returns the channelState's RemoteNextRevocation.
func (lc *LightningChannel) RemoteNextRevocation() *btcec.PublicKey {
lc.Lock()
defer lc.Unlock()
return lc.channelState.RemoteNextRevocation
}

peer.go (13 changed lines)

@@ -1003,10 +1003,19 @@ out:
chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
newChan := newChanReq.channel
// First, we'll add this channel to the set of active
// Make sure this channel is not already active.
p.activeChanMtx.Lock()
if _, ok := p.activeChannels[chanID]; ok {
peerLog.Infof("Already have ChannelPoint(%v), ignoring.", chanPoint)
p.activeChanMtx.Unlock()
close(newChanReq.done)
newChanReq.channel.Stop()
continue
}
// If not already active, we'll add this channel to the set of active
// channels so we can easily look it up later according to its
// channel ID.
p.activeChanMtx.Lock()
p.activeChannels[chanID] = newChan
p.activeChanMtx.Unlock()


@@ -57,6 +57,8 @@ type server struct {
inboundPeers map[string]*peer
outboundPeers map[string]*peer
peerConnectedListeners map[string][]chan<- struct{}
persistentPeers map[string]struct{}
persistentConnReqs map[string][]*connmgr.ConnReq
@@ -134,10 +136,11 @@ func newServer(listenAddrs []string, chanDB *channeldb.DB, cc *chainControl,
persistentPeers: make(map[string]struct{}),
persistentConnReqs: make(map[string][]*connmgr.ConnReq),
peersByID: make(map[int32]*peer),
peersByPub: make(map[string]*peer),
inboundPeers: make(map[string]*peer),
outboundPeers: make(map[string]*peer),
peersByID: make(map[int32]*peer),
peersByPub: make(map[string]*peer),
inboundPeers: make(map[string]*peer),
outboundPeers: make(map[string]*peer),
peerConnectedListeners: make(map[string][]chan<- struct{}),
globalFeatures: globalFeatures,
localFeatures: localFeatures,
@@ -860,6 +863,33 @@ func (s *server) SendToPeer(target *btcec.PublicKey,
return s.sendToPeer(target, msgs)
}
// NotifyWhenOnline can be called by other subsystems to get notified when a
// particular peer comes online.
//
// NOTE: This function is safe for concurrent access.
func (s *server) NotifyWhenOnline(peer *btcec.PublicKey,
connectedChan chan<- struct{}) {
s.mu.Lock()
defer s.mu.Unlock()
// Compute the target peer's identifier.
pubStr := string(peer.SerializeCompressed())
// Check if peer is connected.
_, ok := s.peersByPub[pubStr]
if ok {
// Connected, can return early.
srvrLog.Debugf("Notifying that peer %v is online", pubStr)
close(connectedChan)
return
}
// Not connected, store this listener such that it can be notified when
// the peer comes online.
s.peerConnectedListeners[pubStr] = append(
s.peerConnectedListeners[pubStr], connectedChan)
}
// sendToPeer is an internal method that delivers messages to the specified
// `target` peer.
func (s *server) sendToPeer(target *btcec.PublicKey,
@@ -1272,6 +1302,12 @@ func (s *server) addPeer(p *peer) {
// channel router so we can synchronize our view of the channel graph
// with this new peer.
go s.authGossiper.SynchronizeNode(p.addr.IdentityKey)
// Check if there are listeners waiting for this peer to come online.
for _, con := range s.peerConnectedListeners[pubStr] {
close(con)
}
delete(s.peerConnectedListeners, pubStr)
}
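The notification mechanism relies on Go's close-to-broadcast idiom: closing a channel wakes every blocked receiver at once, and any later receive on an already-closed channel returns immediately. That is also why the fast path in NotifyWhenOnline can simply close connectedChan for an already-connected peer. A self-contained illustration with purely illustrative names:

package main

import "fmt"

func main() {
	// Two subsystems register interest in the same peer.
	listeners := []chan struct{}{
		make(chan struct{}),
		make(chan struct{}),
	}
	done := make(chan struct{})
	for _, l := range listeners {
		go func(c chan struct{}) {
			<-c // Unblocks the moment c is closed.
			done <- struct{}{}
		}(l)
	}

	// "Peer came online": closing each registered channel wakes
	// its listener. A channel may only be closed once, hence the
	// delete from the listener map in addPeer above.
	for _, l := range listeners {
		close(l)
	}
	<-done
	<-done
	fmt.Println("all listeners notified")
}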
// removePeer removes the passed peer from the server's state of all active