diff --git a/chainio/consumer.go b/chainio/consumer.go index a9ec25745b..eb059a6245 100644 --- a/chainio/consumer.go +++ b/chainio/consumer.go @@ -79,7 +79,7 @@ func (b *BeatConsumer) ProcessBlock(beat Blockbeat) error { // `beat.NotifyBlockProcessed` to send the error back here. select { case err := <-b.errChan: - beat.logger().Debugf("[%s] processed beat: err=%v", b.name, err) + beat.logger().Tracef("[%s] processed beat: err=%v", b.name, err) return err @@ -99,11 +99,11 @@ func (b *BeatConsumer) ProcessBlock(beat Blockbeat) error { // processing the block. func (b *BeatConsumer) NotifyBlockProcessed(beat Blockbeat, err error) { // Update the current height. - beat.logger().Debugf("[%s]: notifying beat processed", b.name) + beat.logger().Tracef("[%s]: notifying beat processed", b.name) select { case b.errChan <- err: - beat.logger().Debugf("[%s]: notified beat processed, err=%v", + beat.logger().Tracef("[%s]: notified beat processed, err=%v", b.name, err) case <-b.quit: diff --git a/chainio/dispatcher.go b/chainio/dispatcher.go index 87bc21fbaa..269bb1892c 100644 --- a/chainio/dispatcher.go +++ b/chainio/dispatcher.go @@ -9,6 +9,7 @@ import ( "github.com/btcsuite/btclog/v2" "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/lnutils" "golang.org/x/sync/errgroup" ) @@ -136,6 +137,10 @@ func (b *BlockbeatDispatcher) dispatchBlocks( return } + // Log a separator so it's easier to identify when a + // new block arrives for subsystems. 
+ clog.Debugf("%v", lnutils.NewSeparatorClosure()) + clog.Infof("Received new block %v at height %d, "+ "notifying consumers...", blockEpoch.Hash, blockEpoch.Height) diff --git a/contractcourt/channel_arbitrator.go b/contractcourt/channel_arbitrator.go index f193ee68de..cfefea4573 100644 --- a/contractcourt/channel_arbitrator.go +++ b/contractcourt/channel_arbitrator.go @@ -479,7 +479,7 @@ func (c *ChannelArbitrator) Start(state *chanArbStartState, } } - log.Debugf("Starting ChannelArbitrator(%v), htlc_set=%v, state=%v", + log.Tracef("Starting ChannelArbitrator(%v), htlc_set=%v, state=%v", c.cfg.ChanPoint, lnutils.SpewLogClosure(c.activeHTLCs), state.currentState) @@ -2618,14 +2618,14 @@ func (c *ChannelArbitrator) replaceResolver(oldResolver, func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver) { defer c.wg.Done() - log.Debugf("ChannelArbitrator(%v): attempting to resolve %T", + log.Tracef("ChannelArbitrator(%v): attempting to resolve %T", c.cfg.ChanPoint, currentContract) // Until the contract is fully resolved, we'll continue to iteratively // resolve the contract one step at a time. for !currentContract.IsResolved() { - log.Debugf("ChannelArbitrator(%v): contract %T not yet resolved", - c.cfg.ChanPoint, currentContract) + log.Tracef("ChannelArbitrator(%v): contract %T not yet "+ + "resolved", c.cfg.ChanPoint, currentContract) select { @@ -2994,12 +2994,18 @@ func (c *ChannelArbitrator) handleBlockbeat(beat chainio.Blockbeat) error { // If the state is StateContractClosed, StateWaitingFullResolution, or // StateFullyResolved, there's no need to read the close event channel - // and launch the resolvers since the arbitrator can only get to this - // state after processing a previous close event and launched all its - // resolvers. + // since the arbitrator can only get to this state after processing a + // previous close event and launched all its resolvers. 
if c.state.IsContractClosed() { - log.Infof("ChannelArbitrator(%v): skipping launching "+ - "resolvers in state=%v", c.cfg.ChanPoint, c.state) + log.Infof("ChannelArbitrator(%v): skipping reading close "+ + "events in state=%v", c.cfg.ChanPoint, c.state) + + // Launch all active resolvers when a new blockbeat is + // received, even when the contract is closed, we still need + // this as the resolvers may transform into new ones. For + // already launched resolvers this will be NOOP as they track + // their own `launched` states. + c.launchResolvers() return nil } diff --git a/contractcourt/htlc_outgoing_contest_resolver.go b/contractcourt/htlc_outgoing_contest_resolver.go index b66a3fdf0b..9e94587ccc 100644 --- a/contractcourt/htlc_outgoing_contest_resolver.go +++ b/contractcourt/htlc_outgoing_contest_resolver.go @@ -165,9 +165,8 @@ func (h *htlcOutgoingContestResolver) Resolve() (ContractResolver, error) { if newHeight >= expiry-1 { h.log.Infof("HTLC about to expire "+ "(height=%v, expiry=%v), transforming "+ - "into timeout resolver", h, - h.htlcResolution.ClaimOutpoint, - newHeight, h.htlcResolution.Expiry) + "into timeout resolver", newHeight, + h.htlcResolution.Expiry) return h.htlcTimeoutResolver, nil } diff --git a/itest/list_on_test.go b/itest/list_on_test.go index fe6c373855..a192a423f4 100644 --- a/itest/list_on_test.go +++ b/itest/list_on_test.go @@ -13,10 +13,6 @@ var allTestCases = []*lntest.TestCase{ Name: "basic funding flow", TestFunc: testBasicChannelFunding, }, - { - Name: "multi hop receiver chain claim", - TestFunc: testMultiHopReceiverChainClaim, - }, { Name: "external channel funding", TestFunc: testExternalFundingChanPoint, @@ -153,18 +149,6 @@ var allTestCases = []*lntest.TestCase{ Name: "addpeer config", TestFunc: testAddPeerConfig, }, - { - Name: "multi hop htlc local timeout", - TestFunc: testMultiHopHtlcLocalTimeout, - }, - { - Name: "multi hop local force close on-chain htlc timeout", - TestFunc: 
testMultiHopLocalForceCloseOnChainHtlcTimeout, - }, - { - Name: "multi hop remote force close on-chain htlc timeout", - TestFunc: testMultiHopRemoteForceCloseOnChainHtlcTimeout, - }, { Name: "private channel update policy", TestFunc: testUpdateChannelPolicyForPrivateChannel, @@ -226,11 +210,15 @@ var allTestCases = []*lntest.TestCase{ TestFunc: testChannelUnsettledBalance, }, { - Name: "channel force closure", - TestFunc: testChannelForceClosure, + Name: "channel force closure anchor", + TestFunc: testChannelForceClosureAnchor, + }, + { + Name: "channel force closure simple taproot", + TestFunc: testChannelForceClosureSimpleTaproot, }, { - Name: "failing link", + Name: "failing channel", TestFunc: testFailingChannel, }, { @@ -313,18 +301,6 @@ var allTestCases = []*lntest.TestCase{ Name: "REST API", TestFunc: testRestAPI, }, - { - Name: "multi hop htlc local chain claim", - TestFunc: testMultiHopHtlcLocalChainClaim, - }, - { - Name: "multi hop htlc remote chain claim", - TestFunc: testMultiHopHtlcRemoteChainClaim, - }, - { - Name: "multi hop htlc aggregation", - TestFunc: testMultiHopHtlcAggregation, - }, { Name: "revoked uncooperative close retribution", TestFunc: testRevokedCloseRetribution, @@ -574,10 +550,6 @@ var allTestCases = []*lntest.TestCase{ Name: "lookup htlc resolution", TestFunc: testLookupHtlcResolution, }, - { - Name: "watchtower", - TestFunc: testWatchtower, - }, { Name: "channel fundmax", TestFunc: testChannelFundMax, @@ -715,3 +687,9 @@ var allTestCases = []*lntest.TestCase{ TestFunc: testQuiescence, }, } + +func init() { + // Register subtests. + allTestCases = append(allTestCases, multiHopForceCloseTestCases...) + allTestCases = append(allTestCases, watchtowerTestCases...) 
+} diff --git a/itest/lnd_channel_backup_test.go b/itest/lnd_channel_backup_test.go index 5140de5056..4ea5f6933b 100644 --- a/itest/lnd_channel_backup_test.go +++ b/itest/lnd_channel_backup_test.go @@ -637,8 +637,8 @@ func runChanRestoreScenarioCommitTypes(ht *lntest.HarnessTest, minerHeight := ht.CurrentHeight() thawHeight := minerHeight + thawHeightDelta - fundingShim, _ = deriveFundingShim( - ht, dave, carol, crs.params.Amt, thawHeight, true, ct, + fundingShim, _ = ht.DeriveFundingShim( + dave, carol, crs.params.Amt, thawHeight, true, ct, ) crs.params.FundingShim = fundingShim } @@ -1320,12 +1320,20 @@ func testDataLossProtection(ht *lntest.HarnessTest) { // information Dave needs to sweep his funds. require.NoError(ht, restartDave(), "unable to restart Eve") + // Mine a block to trigger Dave's chain watcher to process Carol's sweep + // tx. + // + // TODO(yy): remove this block once the blockbeat starts remembering + // its last processed block and can handle looking for spends in the + // past blocks. + ht.MineEmptyBlocks(1) + + // Make sure Dave still has the pending force close channel. + ht.AssertNumPendingForceClose(dave, 1) + // Dave should have a pending sweep. ht.AssertNumPendingSweeps(dave, 1) - // Mine a block to trigger the sweep. - ht.MineBlocks(1) - // Dave should sweep his funds. ht.AssertNumTxsInMempool(1) @@ -1482,7 +1490,6 @@ func assertTimeLockSwept(ht *lntest.HarnessTest, carol, dave *node.HarnessNode, expectedTxes := 1 // Mine a block to trigger the sweeps. - ht.MineBlocks(1) ht.AssertNumTxsInMempool(expectedTxes) // Carol should consider the channel pending force close (since she is @@ -1512,7 +1519,7 @@ func assertTimeLockSwept(ht *lntest.HarnessTest, carol, dave *node.HarnessNode, // The commit sweep resolver publishes the sweep tx at defaultCSV-1 and // we already mined one block after the commitment was published, and // one block to trigger Carol's sweeps, so take that into account. 
- ht.MineEmptyBlocks(1) + ht.MineBlocks(2) ht.AssertNumPendingSweeps(dave, 2) // Mine a block to trigger the sweeps. @@ -1615,8 +1622,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest, // output and the other for her anchor. ht.AssertNumPendingSweeps(carol, 2) - // Mine a block to trigger the sweep. - ht.MineEmptyBlocks(1) ht.MineBlocksAndAssertNumTxes(1, 1) // Now the channel should be fully closed also from Carol's POV. @@ -1635,8 +1640,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest, // output and the other for his anchor. ht.AssertNumPendingSweeps(dave, 2) - // Mine a block to trigger the sweep. - ht.MineEmptyBlocks(1) ht.MineBlocksAndAssertNumTxes(1, 1) // Now Dave should consider the channel fully closed. @@ -1652,10 +1655,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest, ht.AssertNumPendingSweeps(dave, 1) } - // Mine one block to trigger the sweeper to sweep. - ht.MineEmptyBlocks(1) - blocksMined++ - // Expect one tx - the commitment sweep from Dave. For anchor // channels, we expect the two anchor sweeping txns to be // failed due they are uneconomical. @@ -1673,9 +1672,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest, // commitmment was published, so take that into account. ht.MineEmptyBlocks(int(defaultCSV - blocksMined)) - // Mine one block to trigger the sweeper to sweep. - ht.MineEmptyBlocks(1) - // Carol should have two pending sweeps: // 1. her commit output. // 2. her anchor output, if this is anchor channel. 
diff --git a/itest/lnd_channel_balance_test.go b/itest/lnd_channel_balance_test.go index 72dd16ea34..8ab276d2e6 100644 --- a/itest/lnd_channel_balance_test.go +++ b/itest/lnd_channel_balance_test.go @@ -156,7 +156,7 @@ func testChannelUnsettledBalance(ht *lntest.HarnessTest) { TimeoutSeconds: 60, FeeLimitMsat: noFeeLimitMsat, } - alice.RPC.SendPayment(req) + ht.SendPaymentAssertInflight(alice, req) }() } diff --git a/itest/lnd_channel_force_close_test.go b/itest/lnd_channel_force_close_test.go index 81bdfad431..e5ef6b7cab 100644 --- a/itest/lnd_channel_force_close_test.go +++ b/itest/lnd_channel_force_close_test.go @@ -3,115 +3,97 @@ package itest import ( "bytes" "fmt" - "testing" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/go-errors/errors" "github.com/lightningnetwork/lnd" - "github.com/lightningnetwork/lnd/chainreg" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/node" "github.com/lightningnetwork/lnd/lntest/wait" - "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/routing" "github.com/stretchr/testify/require" ) -// testChannelForceClosure performs a test to exercise the behavior of "force" -// closing a channel or unilaterally broadcasting the latest local commitment -// state on-chain. The test creates a new channel between Alice and Carol, then -// force closes the channel after some cursory assertions. Within the test, a -// total of 3 + n transactions will be broadcast, representing the commitment -// transaction, a transaction sweeping the local CSV delayed output, a -// transaction sweeping the CSV delayed 2nd-layer htlcs outputs, and n -// htlc timeout transactions, where n is the number of payments Alice attempted -// to send to Carol. 
This test includes several restarts to ensure that the -// transaction output states are persisted throughout the forced closure -// process. -// -// TODO(roasbeef): also add an unsettled HTLC before force closing. -func testChannelForceClosure(ht *lntest.HarnessTest) { - // We'll test the scenario for some of the commitment types, to ensure - // outputs can be swept. - commitTypes := []lnrpc.CommitmentType{ - lnrpc.CommitmentType_ANCHORS, - lnrpc.CommitmentType_SIMPLE_TAPROOT, +const pushAmt = btcutil.Amount(5e5) + +// testChannelForceClosureAnchor runs `runChannelForceClosureTest` with anchor +// channels. +func testChannelForceClosureAnchor(ht *lntest.HarnessTest) { + // Create a simple network: Alice -> Carol, using anchor channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{ + Amt: chanAmt, + PushAmt: pushAmt, + CommitmentType: lnrpc.CommitmentType_ANCHORS, } - for _, channelType := range commitTypes { - testName := fmt.Sprintf("committype=%v", channelType) - - channelType := channelType - success := ht.Run(testName, func(t *testing.T) { - st := ht.Subtest(t) - - args := lntest.NodeArgsForCommitType(channelType) - alice := st.NewNode("Alice", args) - defer st.Shutdown(alice) - - // Since we'd like to test failure scenarios with - // outstanding htlcs, we'll introduce another node into - // our test network: Carol. - carolArgs := []string{"--hodl.exit-settle"} - carolArgs = append(carolArgs, args...) - carol := st.NewNode("Carol", carolArgs) - defer st.Shutdown(carol) - - // Each time, we'll send Alice new set of coins in - // order to fund the channel. - st.FundCoins(btcutil.SatoshiPerBitcoin, alice) - - // NOTE: Alice needs 3 more UTXOs to sweep her - // second-layer txns after a restart - after a restart - // all the time-sensitive sweeps are swept immediately - // without being aggregated. - // - // TODO(yy): remove this once the can recover its state - // from restart. 
- st.FundCoins(btcutil.SatoshiPerBitcoin, alice) - st.FundCoins(btcutil.SatoshiPerBitcoin, alice) - st.FundCoins(btcutil.SatoshiPerBitcoin, alice) - st.FundCoins(btcutil.SatoshiPerBitcoin, alice) - - // Also give Carol some coins to allow her to sweep her - // anchor. - st.FundCoins(btcutil.SatoshiPerBitcoin, carol) - - channelForceClosureTest(st, alice, carol, channelType) - }) - if !success { - return - } + cfg := node.CfgAnchor + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfgCarol} + + runChannelForceClosureTest(ht, cfgs, openChannelParams) +} + +// testChannelForceClosureSimpleTaproot runs `runChannelForceClosureTest` with +// simple taproot channels. +func testChannelForceClosureSimpleTaproot(ht *lntest.HarnessTest) { + // Create a simple network: Alice -> Carol, using simple taproot + // channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{ + Amt: chanAmt, + PushAmt: pushAmt, + // If the channel is a taproot channel, then we'll need to + // create a private channel. + // + // TODO(roasbeef): lift after G175 + CommitmentType: lnrpc.CommitmentType_SIMPLE_TAPROOT, + Private: true, } + + cfg := node.CfgSimpleTaproot + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfgCarol} + + runChannelForceClosureTest(ht, cfgs, openChannelParams) } -func channelForceClosureTest(ht *lntest.HarnessTest, - alice, carol *node.HarnessNode, channelType lnrpc.CommitmentType) { +// runChannelForceClosureTest performs a test to exercise the behavior of +// "force" closing a channel or unilaterally broadcasting the latest local +// commitment state on-chain. The test creates a new channel between Alice and +// Carol, then force closes the channel after some cursory assertions. 
Within +// the test, a total of 3 + n transactions will be broadcast, representing the +// commitment transaction, a transaction sweeping the local CSV delayed output, +// a transaction sweeping the CSV delayed 2nd-layer htlcs outputs, and n htlc +// timeout transactions, where n is the number of payments Alice attempted +// to send to Carol. This test includes several restarts to ensure that the +// transaction output states are persisted throughout the forced closure +// process. +func runChannelForceClosureTest(ht *lntest.HarnessTest, + cfgs [][]string, params lntest.OpenChannelParams) { const ( - chanAmt = btcutil.Amount(10e6) - pushAmt = btcutil.Amount(5e6) - paymentAmt = 100000 - numInvoices = 6 + numInvoices = 6 + commitFeeRate = 20000 ) - const commitFeeRate = 20000 ht.SetFeeEstimate(commitFeeRate) - // TODO(roasbeef): should check default value in config here - // instead, or make delay a param - defaultCLTV := uint32(chainreg.DefaultBitcoinTimeLockDelta) - - // We must let Alice have an open channel before she can send a node - // announcement, so we open a channel with Carol, - ht.ConnectNodes(alice, carol) + // Create a three hop network: Alice -> Carol. + chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, carol := nodes[0], nodes[1] + chanPoint := chanPoints[0] // We need one additional UTXO for sweeping the remote anchor. - ht.FundCoins(btcutil.SatoshiPerBitcoin, alice) + if ht.IsNeutrinoBackend() { + ht.FundCoins(btcutil.SatoshiPerBitcoin, alice) + } // Before we start, obtain Carol's current wallet balance, we'll check // to ensure that at the end of the force closure by Alice, Carol @@ -119,24 +101,6 @@ func channelForceClosureTest(ht *lntest.HarnessTest, carolBalResp := carol.RPC.WalletBalance() carolStartingBalance := carolBalResp.ConfirmedBalance - // If the channel is a taproot channel, then we'll need to create a - // private channel. 
- // - // TODO(roasbeef): lift after G175 - var privateChan bool - if channelType == lnrpc.CommitmentType_SIMPLE_TAPROOT { - privateChan = true - } - - chanPoint := ht.OpenChannel( - alice, carol, lntest.OpenChannelParams{ - Private: privateChan, - Amt: chanAmt, - PushAmt: pushAmt, - CommitmentType: channelType, - }, - ) - // Send payments from Alice to Carol, since Carol is htlchodl mode, the // htlc outputs should be left unsettled, and should be swept by the // utxo nursery. @@ -146,11 +110,11 @@ func channelForceClosureTest(ht *lntest.HarnessTest, Dest: carolPubKey, Amt: int64(paymentAmt), PaymentHash: ht.Random32Bytes(), - FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta, + FinalCltvDelta: finalCltvDelta, TimeoutSeconds: 60, FeeLimitMsat: noFeeLimitMsat, } - alice.RPC.SendPayment(req) + ht.SendPaymentAssertInflight(alice, req) } // Once the HTLC has cleared, all the nodes n our mini network should @@ -163,13 +127,13 @@ func channelForceClosureTest(ht *lntest.HarnessTest, curHeight := int32(ht.CurrentHeight()) // Using the current height of the chain, derive the relevant heights - // for incubating two-stage htlcs. + // for sweeping two-stage htlcs. var ( startHeight = uint32(curHeight) commCsvMaturityHeight = startHeight + 1 + defaultCSV - htlcExpiryHeight = padCLTV(startHeight + defaultCLTV) + htlcExpiryHeight = padCLTV(startHeight + finalCltvDelta) htlcCsvMaturityHeight = padCLTV( - startHeight + defaultCLTV + 1 + defaultCSV, + startHeight + finalCltvDelta + 1 + defaultCSV, ) ) @@ -200,21 +164,15 @@ func channelForceClosureTest(ht *lntest.HarnessTest, ) // The several restarts in this test are intended to ensure that when a - // channel is force-closed, the UTXO nursery has persisted the state of - // the channel in the closure process and will recover the correct + // channel is force-closed, the contract court has persisted the state + // of the channel in the closure process and will recover the correct // state when the system comes back on line. 
This restart tests state // persistence at the beginning of the process, when the commitment // transaction has been broadcast but not yet confirmed in a block. ht.RestartNode(alice) - // To give the neutrino backend some time to catch up with the chain, - // we wait here until we have enough UTXOs to actually sweep the local - // and remote anchor. - const expectedUtxos = 6 - ht.AssertNumUTXOs(alice, expectedUtxos) - // We expect to see Alice's force close tx in the mempool. - ht.GetNumTxsFromMempool(1) + ht.AssertNumTxsInMempool(1) // Mine a block which should confirm the commitment transaction // broadcast as a result of the force closure. Once mined, we also @@ -259,46 +217,34 @@ func channelForceClosureTest(ht *lntest.HarnessTest, // The following restart is intended to ensure that outputs from the // force close commitment transaction have been persisted once the - // transaction has been confirmed, but before the outputs are spendable - // (the "kindergarten" bucket.) + // transaction has been confirmed, but before the outputs are + // spendable. ht.RestartNode(alice) // Carol should offer her commit and anchor outputs to the sweeper. sweepTxns := ht.AssertNumPendingSweeps(carol, 2) - // Find Carol's anchor sweep. + // Identify Carol's pending sweeps. var carolAnchor, carolCommit = sweepTxns[0], sweepTxns[1] if carolAnchor.AmountSat != uint32(anchorSize) { carolAnchor, carolCommit = carolCommit, carolAnchor } - // Mine a block to trigger Carol's sweeper to make decisions on the - // anchor sweeping. - ht.MineEmptyBlocks(1) - // Carol's sweep tx should be in the mempool already, as her output is - // not timelocked. + // not timelocked. This sweep tx should spend her to_local output as + // the anchor output is not economical to spend. carolTx := ht.GetNumTxsFromMempool(1)[0] - // Carol's sweeping tx should have 2-input-1-output shape. - require.Len(ht, carolTx.TxIn, 2) + // Carol's sweeping tx should have 1-input-1-output shape. 
+ require.Len(ht, carolTx.TxIn, 1) require.Len(ht, carolTx.TxOut, 1) // Calculate the total fee Carol paid. totalFeeCarol := ht.CalculateTxFee(carolTx) - // If we have anchors, add an anchor resolution for carol. - op := fmt.Sprintf("%v:%v", carolAnchor.Outpoint.TxidStr, - carolAnchor.Outpoint.OutputIndex) - carolReports[op] = &lnrpc.Resolution{ - ResolutionType: lnrpc.ResolutionType_ANCHOR, - Outcome: lnrpc.ResolutionOutcome_CLAIMED, - SweepTxid: carolTx.TxHash().String(), - AmountSat: anchorSize, - Outpoint: carolAnchor.Outpoint, - } - - op = fmt.Sprintf("%v:%v", carolCommit.Outpoint.TxidStr, + // Carol's anchor report won't be created since it's uneconomical to + // sweep. So we expect to see only the commit sweep report. + op := fmt.Sprintf("%v:%v", carolCommit.Outpoint.TxidStr, carolCommit.Outpoint.OutputIndex) carolReports[op] = &lnrpc.Resolution{ ResolutionType: lnrpc.ResolutionType_COMMIT, @@ -320,9 +266,9 @@ func channelForceClosureTest(ht *lntest.HarnessTest, // Alice should still have the anchor sweeping request. ht.AssertNumPendingSweeps(alice, 1) - // The following restart checks to ensure that outputs in the - // kindergarten bucket are persisted while waiting for the required - // number of confirmations to be reported. + // The following restart checks to ensure that outputs in the contract + // court are persisted while waiting for the required number of + // confirmations to be reported. ht.RestartNode(alice) // Alice should see the channel in her set of pending force closed @@ -345,12 +291,12 @@ func channelForceClosureTest(ht *lntest.HarnessTest, aliceBalance = forceClose.Channel.LocalBalance // At this point, the nursery should show that the commitment - // output has 2 block left before its CSV delay expires. In + // output has 3 block left before its CSV delay expires. In // total, we have mined exactly defaultCSV blocks, so the htlc // outputs should also reflect that this many blocks have // passed. 
err = checkCommitmentMaturity( - forceClose, commCsvMaturityHeight, 2, + forceClose, commCsvMaturityHeight, 3, ) if err != nil { return err @@ -369,9 +315,9 @@ func channelForceClosureTest(ht *lntest.HarnessTest, }, defaultTimeout) require.NoError(ht, err, "timeout while checking force closed channel") - // Generate an additional block, which should cause the CSV delayed - // output from the commitment txn to expire. - ht.MineEmptyBlocks(1) + // Generate two blocks, which should cause the CSV delayed output from + // the commitment txn to expire. + ht.MineBlocks(2) // At this point, the CSV will expire in the next block, meaning that // the output should be offered to the sweeper. @@ -381,14 +327,9 @@ func channelForceClosureTest(ht *lntest.HarnessTest, commitSweep, anchorSweep = anchorSweep, commitSweep } - // Restart Alice to ensure that she resumes watching the finalized - // commitment sweep txid. - ht.RestartNode(alice) - // Mine one block and the sweeping transaction should now be broadcast. // So we fetch the node's mempool to ensure it has been properly // broadcast. - ht.MineEmptyBlocks(1) sweepingTXID := ht.AssertNumTxsInMempool(1)[0] // Fetch the sweep transaction, all input it's spending should be from @@ -399,7 +340,12 @@ func channelForceClosureTest(ht *lntest.HarnessTest, "sweep transaction not spending from commit") } - // We expect a resolution which spends our commit output. + // Restart Alice to ensure that she resumes watching the finalized + // commitment sweep txid. + ht.RestartNode(alice) + + // Alice's anchor report won't be created since it's uneconomical to + // sweep. We expect a resolution which spends our commit output. op = fmt.Sprintf("%v:%v", commitSweep.Outpoint.TxidStr, commitSweep.Outpoint.OutputIndex) aliceReports[op] = &lnrpc.Resolution{ @@ -410,17 +356,6 @@ func channelForceClosureTest(ht *lntest.HarnessTest, AmountSat: uint64(aliceBalance), } - // Add alice's anchor to our expected set of reports. 
- op = fmt.Sprintf("%v:%v", aliceAnchor.Outpoint.TxidStr, - aliceAnchor.Outpoint.OutputIndex) - aliceReports[op] = &lnrpc.Resolution{ - ResolutionType: lnrpc.ResolutionType_ANCHOR, - Outcome: lnrpc.ResolutionOutcome_CLAIMED, - SweepTxid: sweepingTXID.String(), - Outpoint: aliceAnchor.Outpoint, - AmountSat: uint64(anchorSize), - } - // Check that we can find the commitment sweep in our set of known // sweeps, using the simple transaction id ListSweeps output. ht.AssertSweepFound(alice, sweepingTXID.String(), false, 0) @@ -490,17 +425,13 @@ func channelForceClosureTest(ht *lntest.HarnessTest, // Advance the blockchain until just before the CLTV expires, nothing // exciting should have happened during this time. - ht.MineEmptyBlocks(cltvHeightDelta) + ht.MineBlocks(cltvHeightDelta) // We now restart Alice, to ensure that she will broadcast the // presigned htlc timeout txns after the delay expires after // experiencing a while waiting for the htlc outputs to incubate. ht.RestartNode(alice) - // To give the neutrino backend some time to catch up with the chain, - // we wait here until we have enough UTXOs to - // ht.AssertNumUTXOs(alice, expectedUtxos) - // Alice should now see the channel in her set of pending force closed // channels with one pending HTLC. err = wait.NoError(func() error { @@ -535,24 +466,23 @@ func channelForceClosureTest(ht *lntest.HarnessTest, // Now, generate the block which will cause Alice to offer the // presigned htlc timeout txns to the sweeper. - ht.MineEmptyBlocks(1) + ht.MineBlocks(1) // Since Alice had numInvoices (6) htlcs extended to Carol before force // closing, we expect Alice to broadcast an htlc timeout txn for each - // one. - ht.AssertNumPendingSweeps(alice, numInvoices) + // one. We also expect Alice to still have her anchor since it's not + // swept. 
+ ht.AssertNumPendingSweeps(alice, numInvoices+1) // Wait for them all to show up in the mempool - // - // NOTE: after restart, all the htlc timeout txns will be offered to - // the sweeper with `Immediate` set to true, so they won't be - // aggregated. - htlcTxIDs := ht.AssertNumTxsInMempool(numInvoices) + htlcTxIDs := ht.AssertNumTxsInMempool(1) // Retrieve each htlc timeout txn from the mempool, and ensure it is - // well-formed. This entails verifying that each only spends from - // output, and that output is from the commitment txn. - numInputs := 2 + // well-formed. The sweeping tx should spend all the htlc outputs. + // + // NOTE: We also add 1 output as the outgoing HTLC is swept using twice + // its value as its budget, so a wallet utxo is used. + numInputs := 6 + 1 // Construct a map of the already confirmed htlc timeout outpoints, // that will count the number of times each is spent by the sweep txn. @@ -561,6 +491,8 @@ func channelForceClosureTest(ht *lntest.HarnessTest, var htlcTxOutpointSet = make(map[wire.OutPoint]int) var htlcLessFees uint64 + + //nolint:ll for _, htlcTxID := range htlcTxIDs { // Fetch the sweep transaction, all input it's spending should // be from the commitment transaction which was broadcast @@ -653,10 +585,10 @@ func channelForceClosureTest(ht *lntest.HarnessTest, // Generate a block that mines the htlc timeout txns. Doing so now // activates the 2nd-stage CSV delayed outputs. - ht.MineBlocksAndAssertNumTxes(1, numInvoices) + ht.MineBlocksAndAssertNumTxes(1, 1) - // Alice is restarted here to ensure that she promptly moved the crib - // outputs to the kindergarten bucket after the htlc timeout txns were + // Alice is restarted here to ensure that her contract court properly + // handles the 2nd-stage sweeps after the htlc timeout txns were // confirmed. 
ht.RestartNode(alice) @@ -665,12 +597,19 @@ func channelForceClosureTest(ht *lntest.HarnessTest, currentHeight = int32(ht.CurrentHeight()) ht.Logf("current height: %v, htlcCsvMaturityHeight=%v", currentHeight, htlcCsvMaturityHeight) - numBlocks := int(htlcCsvMaturityHeight - uint32(currentHeight) - 2) - ht.MineEmptyBlocks(numBlocks) + numBlocks := int(htlcCsvMaturityHeight - uint32(currentHeight) - 1) + ht.MineBlocks(numBlocks) - // Restart Alice to ensure that she can recover from a failure before - // having graduated the htlc outputs in the kindergarten bucket. - ht.RestartNode(alice) + ht.AssertNumPendingSweeps(alice, numInvoices+1) + + // Restart Alice to ensure that she can recover from a failure. + // + // TODO(yy): Skip this step for neutrino as it cannot recover the + // sweeping txns from the mempool. We need to also store the txns in + // the sweeper store to make it work for the neutrino case. + if !ht.IsNeutrinoBackend() { + ht.RestartNode(alice) + } // Now that the channel has been fully swept, it should no longer show // incubated, check to see that Alice's node still reports the channel @@ -688,55 +627,13 @@ func channelForceClosureTest(ht *lntest.HarnessTest, }, defaultTimeout) require.NoError(ht, err, "timeout while checking force closed channel") - // Generate a block that causes Alice to sweep the htlc outputs in the - // kindergarten bucket. - ht.MineEmptyBlocks(1) - ht.AssertNumPendingSweeps(alice, numInvoices) - - // Mine a block to trigger the sweep. - ht.MineEmptyBlocks(1) - - // A temp hack to ensure the CI is not blocking the current - // development. There's a known issue in block sync among different - // subsystems, which is scheduled to be fixed in 0.18.1. - if ht.IsNeutrinoBackend() { - // We expect the htlcs to be aggregated into one tx. However, - // due to block sync issue, they may end up in two txns. 
Here - // we assert that there are two txns found in the mempool - if - // succeeded, it means the aggregation failed, and we won't - // continue the test. - // - // NOTE: we don't check `len(mempool) == 1` because it will - // give us false positive. - err := wait.NoError(func() error { - mempool := ht.Miner().GetRawMempool() - if len(mempool) == 2 { - return nil - } - - return fmt.Errorf("expected 2 txes in mempool, found "+ - "%d", len(mempool)) - }, lntest.DefaultTimeout) - ht.Logf("Assert num of txns got %v", err) - - // If there are indeed two txns found in the mempool, we won't - // continue the test. - if err == nil { - ht.Log("Neutrino backend failed to aggregate htlc " + - "sweeps!") - - // Clean the mempool. - ht.MineBlocksAndAssertNumTxes(1, 2) - - return - } - } + ht.AssertNumPendingSweeps(alice, numInvoices+1) // Wait for the single sweep txn to appear in the mempool. - htlcSweepTxID := ht.AssertNumTxsInMempool(1)[0] + htlcSweepTxid := ht.AssertNumTxsInMempool(1)[0] // Fetch the htlc sweep transaction from the mempool. - htlcSweepTx := ht.GetRawTransaction(htlcSweepTxID) + htlcSweepTx := ht.GetRawTransaction(htlcSweepTxid) // Ensure the htlc sweep transaction only has one input for each htlc // Alice extended before force closing. @@ -748,6 +645,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest, // Ensure that each output spends from exactly one htlc timeout output. for _, txIn := range htlcSweepTx.MsgTx().TxIn { outpoint := txIn.PreviousOutPoint + // Check that the input is a confirmed htlc timeout txn. _, ok := htlcTxOutpointSet[outpoint] require.Truef(ht, ok, "htlc sweep output not spending from "+ @@ -785,11 +683,11 @@ func channelForceClosureTest(ht *lntest.HarnessTest, // Check that we can find the htlc sweep in our set of sweeps using // the verbose output of the listsweeps output. 
- ht.AssertSweepFound(alice, htlcSweepTx.Hash().String(), true, 0) + ht.AssertSweepFound(alice, htlcSweepTxid.String(), true, 0) - // The following restart checks to ensure that the nursery store is - // storing the txid of the previously broadcast htlc sweep txn, and - // that it begins watching that txid after restarting. + // The following restart checks to ensure that the sweeper is storing + // the txid of the previously broadcast htlc sweep txn, and that it + // begins watching that txid after restarting. ht.RestartNode(alice) // Now that the channel has been fully swept, it should no longer show @@ -805,7 +703,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest, } err = checkPendingHtlcStageAndMaturity( - forceClose, 2, htlcCsvMaturityHeight-1, -1, + forceClose, 2, htlcCsvMaturityHeight-1, 0, ) if err != nil { return err @@ -818,7 +716,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest, // Generate the final block that sweeps all htlc funds into the user's // wallet, and make sure the sweep is in this block. block := ht.MineBlocksAndAssertNumTxes(1, 1)[0] - ht.AssertTxInBlock(block, htlcSweepTxID) + ht.AssertTxInBlock(block, htlcSweepTxid) // Now that the channel has been fully swept, it should no longer show // up within the pending channels RPC. @@ -847,12 +745,6 @@ func channelForceClosureTest(ht *lntest.HarnessTest, carolExpectedBalance := btcutil.Amount(carolStartingBalance) + pushAmt - totalFeeCarol - // In addition, if this is an anchor-enabled channel, further add the - // anchor size. - if lntest.CommitTypeHasAnchors(channelType) { - carolExpectedBalance += btcutil.Amount(anchorSize) - } - require.Equal(ht, carolExpectedBalance, btcutil.Amount(carolBalResp.ConfirmedBalance), "carol's balance is incorrect") @@ -873,8 +765,6 @@ func padCLTV(cltv uint32) uint32 { // in the case where a counterparty tries to settle an HTLC with the wrong // preimage. 
func testFailingChannel(ht *lntest.HarnessTest) { - const paymentAmt = 10000 - chanAmt := lnd.MaxFundingAmount // We'll introduce Carol, which will settle any incoming invoice with a @@ -893,7 +783,7 @@ func testFailingChannel(ht *lntest.HarnessTest) { invoice := &lnrpc.Invoice{ Memo: "testing", RPreimage: preimage, - Value: paymentAmt, + Value: invoiceAmt, } resp := carol.RPC.AddInvoice(invoice) @@ -926,12 +816,12 @@ func testFailingChannel(ht *lntest.HarnessTest) { // Carol will use the correct preimage to resolve the HTLC on-chain. ht.AssertNumPendingSweeps(carol, 1) - // Bring down the fee rate estimation, otherwise the following sweep - // won't happen. - ht.SetFeeEstimate(chainfee.FeePerKwFloor) - - // Mine a block to trigger Carol's sweeper to broadcast the sweeping - // tx. + // Mine a block to trigger the sweep. This is needed because the + // preimage extraction logic from the link is not managed by the + // blockbeat, which means the preimage may be sent to the contest + // resolver after it's launched. + // + // TODO(yy): Expose blockbeat to the link layer. ht.MineEmptyBlocks(1) // Carol should have broadcast her sweeping tx. @@ -944,9 +834,6 @@ func testFailingChannel(ht *lntest.HarnessTest) { // Alice's should have one pending sweep request for her commit output. ht.AssertNumPendingSweeps(alice, 1) - // Mine a block to trigger the sweep. - ht.MineEmptyBlocks(1) - // Mine Alice's sweeping tx. 
ht.MineBlocksAndAssertNumTxes(1, 1) diff --git a/itest/lnd_forward_interceptor_test.go b/itest/lnd_forward_interceptor_test.go index 0edf1e1098..a58dc34154 100644 --- a/itest/lnd_forward_interceptor_test.go +++ b/itest/lnd_forward_interceptor_test.go @@ -508,8 +508,7 @@ func testForwardInterceptorWireRecords(ht *lntest.HarnessTest) { FeeLimitMsat: noFeeLimitMsat, FirstHopCustomRecords: customRecords, } - - _ = alice.RPC.SendPayment(sendReq) + ht.SendPaymentAssertInflight(alice, sendReq) // We start the htlc interceptor with a simple implementation that saves // all intercepted packets. These packets are held to simulate a @@ -635,8 +634,7 @@ func testForwardInterceptorRestart(ht *lntest.HarnessTest) { FeeLimitMsat: noFeeLimitMsat, FirstHopCustomRecords: customRecords, } - - _ = alice.RPC.SendPayment(sendReq) + ht.SendPaymentAssertInflight(alice, sendReq) // We start the htlc interceptor with a simple implementation that saves // all intercepted packets. These packets are held to simulate a diff --git a/itest/lnd_funding_test.go b/itest/lnd_funding_test.go index 0b08da32b3..4c045776e0 100644 --- a/itest/lnd_funding_test.go +++ b/itest/lnd_funding_test.go @@ -6,18 +6,13 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/chainreg" - "github.com/lightningnetwork/lnd/fn/v2" "github.com/lightningnetwork/lnd/funding" - "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/labels" "github.com/lightningnetwork/lnd/lnrpc" - "github.com/lightningnetwork/lnd/lnrpc/signrpc" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/node" "github.com/lightningnetwork/lnd/lnwallet/chainfee" @@ -583,8 +578,8 @@ func runExternalFundingScriptEnforced(ht *lntest.HarnessTest) { // a transaction that will never be published. 
const thawHeight uint32 = 10 const chanSize = funding.MaxBtcFundingAmount - fundingShim1, chanPoint1 := deriveFundingShim( - ht, carol, dave, chanSize, thawHeight, false, commitmentType, + fundingShim1, chanPoint1 := ht.DeriveFundingShim( + carol, dave, chanSize, thawHeight, false, commitmentType, ) ht.OpenChannelAssertPending( carol, dave, lntest.OpenChannelParams{ @@ -599,8 +594,8 @@ func runExternalFundingScriptEnforced(ht *lntest.HarnessTest) { // externally funded, we should still be able to open another one. Let's // do exactly that now. For this one we publish the transaction so we // can mine it later. - fundingShim2, chanPoint2 := deriveFundingShim( - ht, carol, dave, chanSize, thawHeight, true, commitmentType, + fundingShim2, chanPoint2 := ht.DeriveFundingShim( + carol, dave, chanSize, thawHeight, true, commitmentType, ) // At this point, we'll now carry out the normal basic channel funding @@ -699,8 +694,8 @@ func runExternalFundingTaproot(ht *lntest.HarnessTest) { // a transaction that will never be published. const thawHeight uint32 = 10 const chanSize = funding.MaxBtcFundingAmount - fundingShim1, chanPoint1 := deriveFundingShim( - ht, carol, dave, chanSize, thawHeight, false, commitmentType, + fundingShim1, chanPoint1 := ht.DeriveFundingShim( + carol, dave, chanSize, thawHeight, false, commitmentType, ) ht.OpenChannelAssertPending(carol, dave, lntest.OpenChannelParams{ Amt: chanSize, @@ -715,8 +710,8 @@ func runExternalFundingTaproot(ht *lntest.HarnessTest) { // externally funded, we should still be able to open another one. Let's // do exactly that now. For this one we publish the transaction so we // can mine it later. 
- fundingShim2, chanPoint2 := deriveFundingShim( - ht, carol, dave, chanSize, thawHeight, true, commitmentType, + fundingShim2, chanPoint2 := ht.DeriveFundingShim( + carol, dave, chanSize, thawHeight, true, commitmentType, ) // At this point, we'll now carry out the normal basic channel funding @@ -1163,122 +1158,6 @@ func ensurePolicy(ht *lntest.HarnessTest, alice, peer *node.HarnessNode, require.EqualValues(ht, expectedFeeRate, alicePolicy.FeeRateMilliMsat) } -// deriveFundingShim creates a channel funding shim by deriving the necessary -// keys on both sides. -func deriveFundingShim(ht *lntest.HarnessTest, carol, dave *node.HarnessNode, - chanSize btcutil.Amount, thawHeight uint32, publish bool, - commitType lnrpc.CommitmentType) (*lnrpc.FundingShim, - *lnrpc.ChannelPoint) { - - keyLoc := &signrpc.KeyLocator{KeyFamily: 9999} - carolFundingKey := carol.RPC.DeriveKey(keyLoc) - daveFundingKey := dave.RPC.DeriveKey(keyLoc) - - // Now that we have the multi-sig keys for each party, we can manually - // construct the funding transaction. We'll instruct the backend to - // immediately create and broadcast a transaction paying out an exact - // amount. Normally this would reside in the mempool, but we just - // confirm it now for simplicity. 
- var ( - fundingOutput *wire.TxOut - musig2 bool - err error - ) - if commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT { - var carolKey, daveKey *btcec.PublicKey - carolKey, err = btcec.ParsePubKey(carolFundingKey.RawKeyBytes) - require.NoError(ht, err) - daveKey, err = btcec.ParsePubKey(daveFundingKey.RawKeyBytes) - require.NoError(ht, err) - - _, fundingOutput, err = input.GenTaprootFundingScript( - carolKey, daveKey, int64(chanSize), - fn.None[chainhash.Hash](), - ) - require.NoError(ht, err) - - musig2 = true - } else { - _, fundingOutput, err = input.GenFundingPkScript( - carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes, - int64(chanSize), - ) - require.NoError(ht, err) - } - - var txid *chainhash.Hash - targetOutputs := []*wire.TxOut{fundingOutput} - if publish { - txid = ht.SendOutputsWithoutChange(targetOutputs, 5) - } else { - tx := ht.CreateTransaction(targetOutputs, 5) - - txHash := tx.TxHash() - txid = &txHash - } - - // At this point, we can being our external channel funding workflow. - // We'll start by generating a pending channel ID externally that will - // be used to track this new funding type. - pendingChanID := ht.Random32Bytes() - - // Now that we have the pending channel ID, Dave (our responder) will - // register the intent to receive a new channel funding workflow using - // the pending channel ID. 
- chanPoint := &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: txid[:], - }, - } - chanPointShim := &lnrpc.ChanPointShim{ - Amt: int64(chanSize), - ChanPoint: chanPoint, - LocalKey: &lnrpc.KeyDescriptor{ - RawKeyBytes: daveFundingKey.RawKeyBytes, - KeyLoc: &lnrpc.KeyLocator{ - KeyFamily: daveFundingKey.KeyLoc.KeyFamily, - KeyIndex: daveFundingKey.KeyLoc.KeyIndex, - }, - }, - RemoteKey: carolFundingKey.RawKeyBytes, - PendingChanId: pendingChanID, - ThawHeight: thawHeight, - Musig2: musig2, - } - fundingShim := &lnrpc.FundingShim{ - Shim: &lnrpc.FundingShim_ChanPointShim{ - ChanPointShim: chanPointShim, - }, - } - dave.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{ - Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{ - ShimRegister: fundingShim, - }, - }) - - // If we attempt to register the same shim (has the same pending chan - // ID), then we should get an error. - dave.RPC.FundingStateStepAssertErr(&lnrpc.FundingTransitionMsg{ - Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{ - ShimRegister: fundingShim, - }, - }) - - // We'll take the chan point shim we just registered for Dave (the - // responder), and swap the local/remote keys before we feed it in as - // Carol's funding shim as the initiator. - fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{ - RawKeyBytes: carolFundingKey.RawKeyBytes, - KeyLoc: &lnrpc.KeyLocator{ - KeyFamily: carolFundingKey.KeyLoc.KeyFamily, - KeyIndex: carolFundingKey.KeyLoc.KeyIndex, - }, - } - fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes - - return fundingShim, chanPoint -} - // testChannelFundingWithUnstableUtxos tests channel openings with restricted // utxo selection. 
Internal wallet utxos might be restricted due to another // subsystems still using it therefore it would be unsecure to use them for @@ -1297,6 +1176,7 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) { // First, we'll create two new nodes that we'll use to open channel // between for this test. carol := ht.NewNode("carol", nil) + // We'll attempt at max 2 pending channels, so Dave will need to accept // two pending ones. dave := ht.NewNode("dave", []string{ @@ -1375,9 +1255,6 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) { // Make sure Carol sees her to_remote output from the force close tx. ht.AssertNumPendingSweeps(carol, 1) - // Mine one block to trigger the sweep transaction. - ht.MineEmptyBlocks(1) - // We need to wait for carol initiating the sweep of the to_remote // output of chanPoint2. utxo := ht.AssertNumUTXOsUnconfirmed(carol, 1)[0] @@ -1435,9 +1312,6 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) { // Make sure Carol sees her to_remote output from the force close tx. ht.AssertNumPendingSweeps(carol, 1) - // Mine one block to trigger the sweep transaction. - ht.MineEmptyBlocks(1) - // Wait for the to_remote sweep tx to show up in carol's wallet. ht.AssertNumUTXOsUnconfirmed(carol, 1) diff --git a/itest/lnd_hold_invoice_force_test.go b/itest/lnd_hold_invoice_force_test.go index 7c616e7a50..d9b8517107 100644 --- a/itest/lnd_hold_invoice_force_test.go +++ b/itest/lnd_hold_invoice_force_test.go @@ -44,14 +44,18 @@ func testHoldInvoiceForceClose(ht *lntest.HarnessTest) { TimeoutSeconds: 60, FeeLimitMsat: noFeeLimitMsat, } - alice.RPC.SendPayment(req) + ht.SendPaymentAssertInflight(alice, req) ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) // Once the HTLC has cleared, alice and bob should both have a single // htlc locked in. - ht.AssertActiveHtlcs(alice, payHash[:]) - ht.AssertActiveHtlcs(bob, payHash[:]) + // + // Alice should have one outgoing HTLCs on channel Alice -> Bob. 
+ ht.AssertOutgoingHTLCActive(alice, chanPoint, payHash[:]) + + // Bob should have one incoming HTLC on channel Alice -> Bob. + ht.AssertIncomingHTLCActive(bob, chanPoint, payHash[:]) // Get our htlc expiry height and current block height so that we // can mine the exact number of blocks required to expire the htlc. diff --git a/itest/lnd_htlc_timeout_resolver_test.go b/itest/lnd_htlc_timeout_resolver_test.go new file mode 100644 index 0000000000..cd1dd3d483 --- /dev/null +++ b/itest/lnd_htlc_timeout_resolver_test.go @@ -0,0 +1,381 @@ +package itest + +import ( + "github.com/btcsuite/btcd/btcutil" + "github.com/lightningnetwork/lnd/chainreg" + "github.com/lightningnetwork/lnd/lncfg" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" + "github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/node" + "github.com/lightningnetwork/lnd/routing" + "github.com/stretchr/testify/require" +) + +const ( + finalCltvDelta = routing.MinCLTVDelta // 18. + thawHeightDelta = finalCltvDelta * 2 // 36. +) + +// makeRouteHints creates a route hints that will allow Carol to be reached +// using an unadvertised channel created by Bob (Bob -> Carol). If the zeroConf +// bool is set, then the scid alias of Bob will be used in place. 
+func makeRouteHints(bob, carol *node.HarnessNode,
+	zeroConf bool) []*lnrpc.RouteHint {
+
+	carolChans := carol.RPC.ListChannels(
+		&lnrpc.ListChannelsRequest{},
+	)
+
+	carolChan := carolChans.Channels[0]
+
+	hopHint := &lnrpc.HopHint{
+		NodeId:      carolChan.RemotePubkey,
+		ChanId:      carolChan.ChanId,
+		FeeBaseMsat: uint32(
+			chainreg.DefaultBitcoinBaseFeeMSat,
+		),
+		FeeProportionalMillionths: uint32(
+			chainreg.DefaultBitcoinFeeRate,
+		),
+		CltvExpiryDelta: chainreg.DefaultBitcoinTimeLockDelta,
+	}
+
+	if zeroConf {
+		bobChans := bob.RPC.ListChannels(
+			&lnrpc.ListChannelsRequest{},
+		)
+
+		// Now that we have Bob's channels, scan for the channel he has
+		// open to Carol so we can use the proper scid.
+		var found bool
+		for _, bobChan := range bobChans.Channels {
+			if bobChan.RemotePubkey == carol.PubKeyStr {
+				hopHint.ChanId = bobChan.AliasScids[0]
+
+				found = true
+
+				break
+			}
+		}
+		if !found {
+			bob.Fatalf("unable to create route hint")
+		}
+	}
+
+	return []*lnrpc.RouteHint{
+		{
+			HopHints: []*lnrpc.HopHint{hopHint},
+		},
+	}
+}
+
+// testHtlcTimeoutResolverExtractPreimageRemote tests that in the multi-hop
+// setting, Alice->Bob->Carol, when Bob's outgoing HTLC is swept by Carol
+// using the 2nd level success tx, Bob's timeout resolver will extract the
+// preimage from the sweep tx found in mempool. The 2nd level success tx is
+// broadcast by Carol and spends the outpoint on her commit tx.
+func testHtlcTimeoutResolverExtractPreimageRemote(ht *lntest.HarnessTest) {
+	// For neutrino backend there's no mempool source so we skip it. The
+	// test of extracting preimage from blocks has already been covered in
+	// other tests.
+	if ht.IsNeutrinoBackend() {
+		ht.Skip("skipping neutrino")
+	}
+
+	// Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+	// is never swept.
+	//
+	// TODO(yy): delete this line once the normal anchor sweeping is
+	// removed.
+ ht.SetMinRelayFeerate(10_000) + + // Create a three hop network: Alice -> Bob -> Carol, using + // anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{Amt: chanAmt} + cfg := node.CfgAnchor + cfgs := [][]string{cfg, cfg, cfg} + + // First, we'll create a three hop network: Alice -> Bob -> Carol, with + // Carol refusing to actually settle or directly cancel any HTLC's + // self. + chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, bob, carol := nodes[0], nodes[1], nodes[2] + aliceChanPoint, bobChanPoint := chanPoints[0], chanPoints[1] + + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) + + // With the network active, we'll now add a new hodl invoice at Carol's + // end. Make sure the cltv expiry delta is large enough, otherwise Bob + // won't send out the outgoing htlc. + preimage := ht.RandomPreimage() + payHash := preimage.Hash() + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Value: 100_000, + CltvExpiry: finalCltvDelta, + Hash: payHash[:], + } + eveInvoice := carol.RPC.AddHoldInvoice(invoiceReq) + + // Subscribe the invoice. + stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) + + // Now that we've created the invoice, we'll send a single payment from + // Alice to Carol. We won't wait for the response however, as Carol + // will not immediately settle the payment. + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: eveInvoice.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + ht.SendPaymentAssertInflight(alice, req) + + // Once the payment sent, Alice should have one outgoing HTLC active. + ht.AssertOutgoingHTLCActive(alice, aliceChanPoint, payHash[:]) + + // Bob should have two HTLCs active. One incoming HTLC from Alice, and + // one outgoing to Carol. + ht.AssertIncomingHTLCActive(bob, aliceChanPoint, payHash[:]) + htlc := ht.AssertOutgoingHTLCActive(bob, bobChanPoint, payHash[:]) + + // Carol should have one incoming HTLC from Bob. 
+ ht.AssertIncomingHTLCActive(carol, bobChanPoint, payHash[:]) + + // Wait for Carol to mark invoice as accepted. There is a small gap to + // bridge between adding the htlc to the channel and executing the exit + // hop logic. + ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) + + // Bob now goes offline so the link between Bob and Carol is broken. + restartBob := ht.SuspendNode(bob) + + // Carol now settles the invoice, since her link with Bob is broken, + // Bob won't know the preimage. + carol.RPC.SettleInvoice(preimage[:]) + + // We'll now mine enough blocks to trigger Carol's broadcast of her + // commitment transaction due to the fact that the HTLC is about to + // timeout. With the default incoming broadcast delta of 10, this + // will be the htlc expiry height minus 10. + numBlocks := padCLTV(uint32( + invoiceReq.CltvExpiry - incomingBroadcastDelta, + )) + ht.MineBlocks(int(numBlocks)) + + // Mine the two txns made from Carol, + // - the force close tx. + // - the anchor sweeping tx. + ht.MineBlocksAndAssertNumTxes(1, 2) + + // With the closing transaction confirmed, we should expect Carol's + // HTLC success transaction to be offered to the sweeper. along with her + // anchor output. Note that the anchor output is uneconomical to sweep. + ht.AssertNumPendingSweeps(carol, 1) + + // We should now have Carol's htlc success tx in the mempool. + ht.AssertNumTxsInMempool(1) + + // Restart Bob. Once he finishes syncing the channel state, he should + // notice the force close from Carol. + require.NoError(ht, restartBob()) + + // Get the current height to compute number of blocks to mine to + // trigger the htlc timeout resolver from Bob. + height := ht.CurrentHeight() + + // We'll now mine enough blocks to trigger Bob's timeout resolver. + numBlocks = htlc.ExpirationHeight - height - + lncfg.DefaultOutgoingBroadcastDelta + + // Mine empty blocks so Carol's htlc success tx stays in mempool. 
Once
+	// the height is reached, Bob's timeout resolver will resolve the htlc
+	// by extracting the preimage from the mempool.
+	ht.MineEmptyBlocks(int(numBlocks))
+
+	// Finally, check that Alice's payment is marked as succeeded as
+	// Bob has settled the htlc using the preimage extracted from Carol's
+	// 2nd level success tx.
+	ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
+
+	// Mine a block to clean the mempool.
+	ht.MineBlocksAndAssertNumTxes(1, 2)
+
+	// NOTE: for non-standby nodes there's no need to clean up the force
+	// close as long as the mempool is cleaned.
+	ht.CleanShutDown()
+}
+
+// testHtlcTimeoutResolverExtractPreimageLocal tests that in the multi-hop
+// setting, Alice->Bob->Carol, when Bob's outgoing HTLC is swept by Carol
+// using the direct preimage spend, Bob's timeout resolver will extract the
+// preimage from the sweep tx found in mempool. The direct spend tx is
+// broadcast by Carol and spends the outpoint on Bob's commit tx.
+func testHtlcTimeoutResolverExtractPreimageLocal(ht *lntest.HarnessTest) {
+	// For neutrino backend there's no mempool source so we skip it. The
+	// test of extracting preimage from blocks has already been covered in
+	// other tests.
+	if ht.IsNeutrinoBackend() {
+		ht.Skip("skipping neutrino")
+	}
+
+	// Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+	// is never swept.
+	//
+	// TODO(yy): delete this line once the normal anchor sweeping is
+	// removed.
+	ht.SetMinRelayFeerate(10_000)
+
+	// Create a three hop network: Alice -> Bob -> Carol, using
+	// anchor channels.
+	//
+	// Prepare params.
+	params := lntest.OpenChannelParams{Amt: chanAmt}
+	cfg := node.CfgAnchor
+	cfgs := [][]string{cfg, cfg, cfg}
+
+	// First, we'll create a three hop network: Alice -> Bob -> Carol, with
+	// Carol refusing to actually settle or directly cancel any HTLC's
+	// self.
+	// Create a three hop network: Alice -> Bob -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, bob, carol := nodes[0], nodes[1], nodes[2] + aliceChanPoint, bobChanPoint := chanPoints[0], chanPoints[1] + + // With the network active, we'll now add a new hodl invoice at Carol's + // end. Make sure the cltv expiry delta is large enough, otherwise Bob + // won't send out the outgoing htlc. + preimage := ht.RandomPreimage() + payHash := preimage.Hash() + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Value: 100_000, + CltvExpiry: finalCltvDelta, + Hash: payHash[:], + } + carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) + + // Record the height which the invoice will expire. + invoiceExpiry := ht.CurrentHeight() + uint32(invoiceReq.CltvExpiry) + + // Subscribe the invoice. + stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) + + // Now that we've created the invoice, we'll send a single payment from + // Alice to Carol. We won't wait for the response however, as Carol + // will not immediately settle the payment. + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: carolInvoice.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + ht.SendPaymentAssertInflight(alice, req) + + // Once the payment sent, Alice should have one outgoing HTLC active. + ht.AssertOutgoingHTLCActive(alice, aliceChanPoint, payHash[:]) + + // Bob should have two HTLCs active. One incoming HTLC from Alice, and + // one outgoing to Carol. + ht.AssertIncomingHTLCActive(bob, aliceChanPoint, payHash[:]) + ht.AssertOutgoingHTLCActive(bob, bobChanPoint, payHash[:]) + + // Carol should have one incoming HTLC from Bob. + ht.AssertIncomingHTLCActive(carol, bobChanPoint, payHash[:]) + + // Wait for Carol to mark invoice as accepted. There is a small gap to + // bridge between adding the htlc to the channel and executing the exit + // hop logic. + ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) + + // Bob now goes offline so the link between Bob and Carol is broken. 
+	restartBob := ht.SuspendNode(bob)
+
+	// Carol now settles the invoice, since her link with Bob is broken,
+	// Bob won't know the preimage.
+	carol.RPC.SettleInvoice(preimage[:])
+
+	// Stop Carol so it's easier to check the mempool's state since she
+	// will broadcast the anchor sweeping once Bob force closes.
+	restartCarol := ht.SuspendNode(carol)
+
+	// Restart Bob to force close the channel.
+	require.NoError(ht, restartBob())
+
+	// Bob force closes the channel, which gets his commitment tx into the
+	// mempool.
+	ht.CloseChannelAssertPending(bob, bobChanPoint, true)
+
+	// Mine Bob's force close tx.
+	ht.MineClosingTx(bobChanPoint)
+
+	// Once Bob's force closing tx is confirmed, he will re-offer the
+	// anchor output to his sweeper, which won't be swept due to it being
+	// uneconomical.
+	ht.AssertNumPendingSweeps(bob, 1)
+
+	// Mine 3 blocks so the output will be offered to the sweeper.
+	ht.MineBlocks(defaultCSV - 1)
+
+	// Bob should have two pending sweeps now,
+	// - the commit output.
+	// - the anchor output, uneconomical.
+	ht.AssertNumPendingSweeps(bob, 2)
+
+	// Mine a block to confirm Bob's sweeping tx.
+	ht.MineBlocksAndAssertNumTxes(1, 1)
+
+	ht.Logf("Invoice expire height: %d, current: %d", invoiceExpiry,
+		ht.CurrentHeight())
+
+	// We'll now mine enough blocks to trigger Carol's sweeping of the htlc
+	// via the direct spend.
+	numBlocks := padCLTV(
+		invoiceExpiry - ht.CurrentHeight() - incomingBroadcastDelta,
+	)
+	ht.MineBlocks(int(numBlocks))
+
+	// Restart Carol to sweep the htlc output.
+	require.NoError(ht, restartCarol())
+
+	// With the above blocks mined, we should expect Carol to offer the
+	// htlc output on Bob's commitment to the sweeper.
+	//
+	// Carol should have two pending sweeps,
+	// - htlc output.
+	// - anchor output, uneconomical.
+	ht.AssertNumPendingSweeps(carol, 2)
+
+	// Check the current mempool state and we should see,
+	// - Carol's direct spend tx, which contains the preimage.
+	// - Carol's anchor sweep tx cannot be broadcast as it's uneconomical.
+	ht.AssertNumTxsInMempool(1)
+
+	// We'll now mine enough blocks to trigger Bob's htlc timeout resolver
+	// to act. Once his timeout resolver starts, it will extract the
+	// preimage from Carol's direct spend tx found in the mempool.
+	resp := ht.AssertNumPendingForceClose(bob, 1)[0]
+	require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+	ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+		"htlc=%v", resp.BlocksTilMaturity,
+		resp.PendingHtlcs[0].BlocksTilMaturity)
+
+	// Mine empty blocks so Carol's direct spend tx stays in mempool. Once
+	// the height is reached, Bob's timeout resolver will resolve the htlc
+	// by extracting the preimage from the mempool.
+	//
+	// TODO(yy): there's no need to wait till the HTLC's CLTV is reached,
+	// Bob's outgoing contest resolver can also monitor the mempool and
+	// resolve the payment even earlier.
+	ht.MineEmptyBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+	// Finally, check that Alice's payment is marked as succeeded as
+	// Bob has settled the htlc using the preimage extracted from Carol's
+	// direct spend tx.
+	ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
+
+	// NOTE: for non-standby nodes there's no need to clean up the force
+	// close as long as the mempool is cleaned.
+ ht.CleanShutDown() +} diff --git a/itest/lnd_misc_test.go b/itest/lnd_misc_test.go index 02e3386e6a..33a7067552 100644 --- a/itest/lnd_misc_test.go +++ b/itest/lnd_misc_test.go @@ -632,8 +632,10 @@ func testRejectHTLC(ht *lntest.HarnessTest) { TimeoutSeconds: 60, FeeLimitMsat: noFeeLimitMsat, } - payStream := alice.RPC.SendPayment(paymentReq) - ht.AssertPaymentStatusFromStream(payStream, lnrpc.Payment_FAILED) + ht.SendPaymentAssertFail( + alice, paymentReq, + lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE, + ) ht.AssertLastHTLCError(alice, lnrpc.Failure_CHANNEL_DISABLED) diff --git a/itest/lnd_multi-hop_force_close_test.go b/itest/lnd_multi-hop_force_close_test.go new file mode 100644 index 0000000000..fc417c5d74 --- /dev/null +++ b/itest/lnd_multi-hop_force_close_test.go @@ -0,0 +1,3218 @@ +package itest + +import ( + "testing" + + "github.com/btcsuite/btcd/btcutil" + "github.com/lightningnetwork/lnd/lncfg" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" + "github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/node" + "github.com/lightningnetwork/lnd/lntest/rpc" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/stretchr/testify/require" +) + +const ( + chanAmt = 1_000_000 + invoiceAmt = 100_000 + htlcAmt = btcutil.Amount(300_000) + + incomingBroadcastDelta = lncfg.DefaultIncomingBroadcastDelta +) + +var leasedType = lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE + +// multiHopForceCloseTestCases defines a set of tests that focuses on the +// behavior of the force close in a multi-hop scenario. 
+// +//nolint:ll +var multiHopForceCloseTestCases = []*lntest.TestCase{ + { + Name: "multihop local claim outgoing htlc anchor", + TestFunc: testLocalClaimOutgoingHTLCAnchor, + }, + { + Name: "multihop local claim outgoing htlc simple taproot", + TestFunc: testLocalClaimOutgoingHTLCSimpleTaproot, + }, + { + Name: "multihop local claim outgoing htlc leased", + TestFunc: testLocalClaimOutgoingHTLCLeased, + }, + { + Name: "multihop receiver preimage claim anchor", + TestFunc: testMultiHopReceiverPreimageClaimAnchor, + }, + { + Name: "multihop receiver preimage claim simple taproot", + TestFunc: testMultiHopReceiverPreimageClaimSimpleTaproot, + }, + { + Name: "multihop receiver preimage claim leased", + TestFunc: testMultiHopReceiverPreimageClaimLeased, + }, + { + Name: "multihop local force close before timeout anchor", + TestFunc: testLocalForceCloseBeforeTimeoutAnchor, + }, + { + Name: "multihop local force close before timeout simple taproot", + TestFunc: testLocalForceCloseBeforeTimeoutSimpleTaproot, + }, + { + Name: "multihop local force close before timeout leased", + TestFunc: testLocalForceCloseBeforeTimeoutLeased, + }, + { + Name: "multihop remote force close before timeout anchor", + TestFunc: testRemoteForceCloseBeforeTimeoutAnchor, + }, + { + Name: "multihop remote force close before timeout simple taproot", + TestFunc: testRemoteForceCloseBeforeTimeoutSimpleTaproot, + }, + { + Name: "multihop remote force close before timeout leased", + TestFunc: testRemoteForceCloseBeforeTimeoutLeased, + }, + { + Name: "multihop local claim incoming htlc anchor", + TestFunc: testLocalClaimIncomingHTLCAnchor, + }, + { + Name: "multihop local claim incoming htlc simple taproot", + TestFunc: testLocalClaimIncomingHTLCSimpleTaproot, + }, + { + Name: "multihop local claim incoming htlc leased", + TestFunc: testLocalClaimIncomingHTLCLeased, + }, + { + Name: "multihop local preimage claim anchor", + TestFunc: testLocalPreimageClaimAnchor, + }, + { + Name: "multihop local 
preimage claim simple taproot", + TestFunc: testLocalPreimageClaimSimpleTaproot, + }, + { + Name: "multihop local preimage claim leased", + TestFunc: testLocalPreimageClaimLeased, + }, + { + Name: "multihop htlc aggregation anchor", + TestFunc: testHtlcAggregaitonAnchor, + }, + { + Name: "multihop htlc aggregation simple taproot", + TestFunc: testHtlcAggregaitonSimpleTaproot, + }, + { + Name: "multihop htlc aggregation leased", + TestFunc: testHtlcAggregaitonLeased, + }, +} + +// testLocalClaimOutgoingHTLCAnchor tests `runLocalClaimOutgoingHTLC` with +// anchor channel. +func testLocalClaimOutgoingHTLCAnchor(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // anchor channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{Amt: chanAmt} + + cfg := node.CfgAnchor + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalClaimOutgoingHTLC(st, cfgs, openChannelParams) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: lnrpc.CommitmentType_ANCHORS, + } + + // Prepare Carol's node config to enable zero-conf and anchor. + cfg := node.CfgZeroConf + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalClaimOutgoingHTLC(st, cfgs, openChannelParams) + }) +} + +// testLocalClaimOutgoingHTLCSimpleTaproot tests `runLocalClaimOutgoingHTLC` +// with simple taproot channel. 
+func testLocalClaimOutgoingHTLCSimpleTaproot(ht *lntest.HarnessTest) { + c := lnrpc.CommitmentType_SIMPLE_TAPROOT + + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // simple taproot channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: c, + Private: true, + } + + cfg := node.CfgSimpleTaproot + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalClaimOutgoingHTLC(st, cfgs, openChannelParams) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf simple taproot channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: c, + Private: true, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgSimpleTaproot + cfg = append(cfg, node.CfgZeroConf...) + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalClaimOutgoingHTLC(st, cfgs, openChannelParams) + }) +} + +// testLocalClaimOutgoingHTLCLeased tests `runLocalClaimOutgoingHTLC` with +// script enforced lease channel. +func testLocalClaimOutgoingHTLCLeased(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // leased channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: leasedType, + } + + cfg := node.CfgLeased + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) 
+ cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalClaimOutgoingHTLC(st, cfgs, openChannelParams) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: leasedType, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgLeased + cfg = append(cfg, node.CfgZeroConf...) + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalClaimOutgoingHTLC(st, cfgs, openChannelParams) + }) +} + +// runLocalClaimOutgoingHTLC tests that in a multi-hop scenario, if the +// outgoing HTLC is about to time out, then we'll go to chain in order to claim +// it using the HTLC timeout transaction. Any dust HTLC's should be immediately +// canceled backwards. Once the timeout has been reached, then we should sweep +// it on-chain, and cancel the HTLC backwards. +func runLocalClaimOutgoingHTLC(ht *lntest.HarnessTest, + cfgs [][]string, params lntest.OpenChannelParams) { + + // Create a three hop network: Alice -> Bob -> Carol. + _, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, bob, carol := nodes[0], nodes[1], nodes[2] + + // For neutrino backend, we need to fund one more UTXO for Bob so he + // can sweep his outputs. + if ht.IsNeutrinoBackend() { + ht.FundCoins(btcutil.SatoshiPerBitcoin, bob) + } + + // Bob should have enough wallet UTXOs here to sweep the HTLC in the + // end of this test. However, due to a known issue, Bob's wallet may + // report there's no UTXO available. For details, + // - https://github.com/lightningnetwork/lnd/issues/8786 + // + // TODO(yy): remove this step once the issue is resolved. 
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, bob) + + // Now that our channels are set up, we'll send two HTLC's from Alice + // to Carol. The first HTLC will be universally considered "dust", + // while the second will be a proper fully valued HTLC. + const dustHtlcAmt = btcutil.Amount(100) + + // We'll create two random payment hashes unknown to carol, then send + // each of them by manually specifying the HTLC details. + carolPubKey := carol.PubKey[:] + dustPayHash := ht.Random32Bytes() + payHash := ht.Random32Bytes() + + // If this is a taproot channel, then we'll need to make some manual + // route hints so Alice can actually find a route. + var routeHints []*lnrpc.RouteHint + if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT { + routeHints = makeRouteHints(bob, carol, params.ZeroConf) + } + + req := &routerrpc.SendPaymentRequest{ + Dest: carolPubKey, + Amt: int64(dustHtlcAmt), + PaymentHash: dustPayHash, + FinalCltvDelta: finalCltvDelta, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + RouteHints: routeHints, + } + ht.SendPaymentAssertInflight(alice, req) + + req = &routerrpc.SendPaymentRequest{ + Dest: carolPubKey, + Amt: int64(htlcAmt), + PaymentHash: payHash, + FinalCltvDelta: finalCltvDelta, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + RouteHints: routeHints, + } + ht.SendPaymentAssertInflight(alice, req) + + // At this point, all 3 nodes should now have an active channel with + // the created HTLC pending on all of them. + // + // Alice should have two outgoing HTLCs on channel Alice -> Bob. + ht.AssertNumActiveHtlcs(alice, 2) + + // Bob should have two incoming HTLCs on channel Alice -> Bob, and two + // outgoing HTLCs on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(bob, 4) + + // Carol should have two incoming HTLCs on channel Bob -> Carol. 
+	ht.AssertNumActiveHtlcs(carol, 2)
+
+	// We'll now mine enough blocks to trigger Bob's force close of the
+	// channel Bob=>Carol due to the fact that the HTLC is about to
+	// timeout. With the default outgoing broadcast delta of zero, this
+	// will be the same height as the htlc expiry height.
+	numBlocks := padCLTV(
+		uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
+	)
+	ht.MineBlocks(int(numBlocks))
+
+	// Bob's force close tx should have the following outputs,
+	// 1. anchor output.
+	// 2. to_local output, which is CSV locked.
+	// 3. outgoing HTLC output, which has expired.
+	//
+	// Bob's anchor output should be offered to his sweeper since Bob has
+	// time-sensitive HTLCs - we expect both anchors to be offered, while
+	// the sweeping of the remote anchor will be marked as failed due to
+	// `testmempoolaccept` check.
+	//
+	// For neutrino backend, there's no way to know the sweeping of the
+	// remote anchor is failed, so Bob still sees two pending sweeps.
+	if ht.IsNeutrinoBackend() {
+		ht.AssertNumPendingSweeps(bob, 2)
+	} else {
+		ht.AssertNumPendingSweeps(bob, 1)
+	}
+
+	// We expect to see two txns in the mempool,
+	// 1. Bob's force close tx.
+	// 2. Bob's anchor sweep tx.
+	ht.AssertNumTxsInMempool(2)
+
+	// Mine a block to confirm the closing tx and the anchor sweeping tx.
+	ht.MineBlocksAndAssertNumTxes(1, 2)
+
+	// At this point, Bob should have canceled backwards the dust HTLC that
+	// we sent earlier. This means Alice should now only have a single HTLC
+	// on her channel.
+	ht.AssertNumActiveHtlcs(alice, 1)
+
+	// With the closing transaction confirmed, we should expect Bob's HTLC
+	// timeout transaction to be offered to the sweeper due to the expiry
+	// being reached. We also expect Carol's anchor sweeps.
+	ht.AssertNumPendingSweeps(bob, 1)
+	ht.AssertNumPendingSweeps(carol, 1)
+
+	// Bob's sweeper should sweep his outgoing HTLC immediately since it's
+	// expired. His to_local output cannot be swept due to the CSV lock.
+ // Carol's anchor sweep should be failed due to output being dust. + ht.AssertNumTxsInMempool(1) + + // Mine a block to confirm Bob's outgoing HTLC sweeping tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + + // With Bob's HTLC timeout transaction confirmed, there should be no + // active HTLC's on the commitment transaction from Alice -> Bob. + ht.AssertNumActiveHtlcs(alice, 0) + + // At this point, Bob should show that the pending HTLC has advanced to + // the second stage and is ready to be swept once the timelock is up. + resp := ht.AssertNumPendingForceClose(bob, 1)[0] + require.NotZero(ht, resp.LimboBalance) + require.Positive(ht, resp.BlocksTilMaturity) + require.Equal(ht, 1, len(resp.PendingHtlcs)) + require.Equal(ht, uint32(2), resp.PendingHtlcs[0].Stage) + + ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+ + "htlc=%v", resp.BlocksTilMaturity, + resp.PendingHtlcs[0].BlocksTilMaturity) + + if params.CommitmentType == leasedType { + // Since Bob is the initiator of the script-enforced leased + // channel between him and Carol, he will incur an additional + // CLTV on top of the usual CSV delay on any outputs that he + // can sweep back to his wallet. + // + // We now mine enough blocks so the CLTV lock expires, which + // will trigger the sweep of the to_local and outgoing HTLC + // outputs. + ht.MineBlocks(int(resp.BlocksTilMaturity)) + + // Check that Bob has a pending sweeping tx which sweeps his + // to_local and outgoing HTLC outputs. + ht.AssertNumPendingSweeps(bob, 2) + + // Mine a block to confirm the sweeping tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + } else { + // Since Bob force closed the channel between him and Carol, he + // will incur the usual CSV delay on any outputs that he can + // sweep back to his wallet. We'll subtract one block from our + // current maturity period to assert on the mempool. 
+ ht.MineBlocks(int(resp.BlocksTilMaturity - 1)) + + // Check that Bob has a pending sweeping tx which sweeps his + // to_local output. + ht.AssertNumPendingSweeps(bob, 1) + + // Mine a block to confirm the to_local sweeping tx, which also + // triggers the sweeping of the second stage HTLC output. + ht.MineBlocksAndAssertNumTxes(1, 1) + + // Bob's sweeper should now broadcast his second layer sweep + // due to the CSV on the HTLC timeout output. + ht.AssertNumTxsInMempool(1) + + // Next, we'll mine a final block that should confirm the + // sweeping transactions left. + ht.MineBlocksAndAssertNumTxes(1, 1) + } + + // Once this transaction has been confirmed, Bob should detect that he + // no longer has any pending channels. + ht.AssertNumPendingForceClose(bob, 0) +} + +// testMultiHopReceiverPreimageClaimAnchor tests +// `runMultiHopReceiverPreimageClaim` with anchor channels. +func testMultiHopReceiverPreimageClaimAnchor(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // anchor channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{Amt: chanAmt} + + cfg := node.CfgAnchor + cfgs := [][]string{cfg, cfg, cfg} + + runMultiHopReceiverPreimageClaim(st, cfgs, openChannelParams) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: lnrpc.CommitmentType_ANCHORS, + } + + // Prepare Carol's node config to enable zero-conf and anchor. 
+ cfg := node.CfgZeroConf + cfgs := [][]string{cfg, cfg, cfg} + + runMultiHopReceiverPreimageClaim(st, cfgs, openChannelParams) + }) +} + +// testMultiHopReceiverPreimageClaimSimpleTaproot tests +// `runMultiHopReceiverPreimageClaim` with simple taproot channels. +func testMultiHopReceiverPreimageClaimSimpleTaproot(ht *lntest.HarnessTest) { + c := lnrpc.CommitmentType_SIMPLE_TAPROOT + + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // simple taproot channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: c, + Private: true, + } + + cfg := node.CfgSimpleTaproot + cfgs := [][]string{cfg, cfg, cfg} + + runMultiHopReceiverPreimageClaim(st, cfgs, openChannelParams) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf simple taproot channels. + // + // Prepare params. + openChannelParams := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: c, + Private: true, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgSimpleTaproot + cfg = append(cfg, node.CfgZeroConf...) + cfgs := [][]string{cfg, cfg, cfg} + + runMultiHopReceiverPreimageClaim(st, cfgs, openChannelParams) + }) +} + +// testMultiHopReceiverPreimageClaimLeased tests +// `runMultiHopReceiverPreimageClaim` with script enforce lease channels. +func testMultiHopReceiverPreimageClaimLeased(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // leased channels. + // + // Prepare params. 
+		openChannelParams := lntest.OpenChannelParams{
+			Amt:            chanAmt,
+			CommitmentType: leasedType,
+		}
+
+		cfg := node.CfgLeased
+		cfgs := [][]string{cfg, cfg, cfg}
+
+		runMultiHopReceiverPreimageClaim(st, cfgs, openChannelParams)
+	})
+	if !success {
+		return
+	}
+
+	ht.Run("zero conf", func(t *testing.T) {
+		st := ht.Subtest(t)
+
+		// Create a three hop network: Alice -> Bob -> Carol, using
+		// zero-conf leased channels.
+		//
+		// Prepare params.
+		openChannelParams := lntest.OpenChannelParams{
+			Amt:            chanAmt,
+			ZeroConf:       true,
+			CommitmentType: leasedType,
+		}
+
+		// Prepare Carol's node config to enable zero-conf and leased
+		// channel.
+		cfg := node.CfgLeased
+		cfg = append(cfg, node.CfgZeroConf...)
+		cfgs := [][]string{cfg, cfg, cfg}
+
+		runMultiHopReceiverPreimageClaim(st, cfgs, openChannelParams)
+	})
+}
+
+// runMultiHopReceiverPreimageClaim tests that in the multi-hop setting, if
+// the receiver of an HTLC knows the preimage, but wasn't able to settle the
+// HTLC off-chain, then it goes on chain to claim the HTLC using the HTLC
+// success transaction. In this scenario, the node that sent the outgoing HTLC
+// should extract the preimage from the sweep transaction, and finish settling
+// the HTLC backwards into the route.
+func runMultiHopReceiverPreimageClaim(ht *lntest.HarnessTest,
+	cfgs [][]string, params lntest.OpenChannelParams) {
+
+	// Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+	// is never swept.
+	//
+	// TODO(yy): delete this line once the normal anchor sweeping is
+	// removed.
+	ht.SetMinRelayFeerate(10_000)
+
+	// Create a three hop network: Alice -> Bob -> Carol.
+	chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+	alice, bob, carol := nodes[0], nodes[1], nodes[2]
+	bobChanPoint := chanPoints[1]
+
+	ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+
+	// For neutrino backend, we need to fund one more UTXO for Carol so
+	// she can sweep her outputs.
+ if ht.IsNeutrinoBackend() { + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) + } + + // Fund Carol one UTXO so she can sweep outputs. + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) + + // If this is a taproot channel, then we'll need to make some manual + // route hints so Alice can actually find a route. + var routeHints []*lnrpc.RouteHint + if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT { + routeHints = makeRouteHints(bob, carol, params.ZeroConf) + } + + // With the network active, we'll now add a new hodl invoice at Carol's + // end. Make sure the cltv expiry delta is large enough, otherwise Bob + // won't send out the outgoing htlc. + var preimage lntypes.Preimage + copy(preimage[:], ht.Random32Bytes()) + payHash := preimage.Hash() + + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Value: invoiceAmt, + CltvExpiry: finalCltvDelta, + Hash: payHash[:], + RouteHints: routeHints, + } + carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) + + // Subscribe the invoice. + stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) + + // Now that we've created the invoice, we'll send a single payment from + // Alice to Carol. We won't wait for the response however, as Carol + // will not immediately settle the payment. + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: carolInvoice.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + ht.SendPaymentAssertInflight(alice, req) + + // At this point, all 3 nodes should now have an active channel with + // the created HTLC pending on all of them. + // At this point, all 3 nodes should now have an active channel with + // the created HTLCs pending on all of them. + // + // Alice should have one outgoing HTLCs on channel Alice -> Bob. + ht.AssertNumActiveHtlcs(alice, 1) + + // Bob should have one incoming HTLC on channel Alice -> Bob, and one + // outgoing HTLC on channel Bob -> Carol. 
+ ht.AssertNumActiveHtlcs(bob, 2) + + // Carol should have one incoming HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(carol, 1) + + // Wait for Carol to mark invoice as accepted. There is a small gap to + // bridge between adding the htlc to the channel and executing the exit + // hop logic. + ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) + + // Stop Bob so he won't be able to settle the incoming htlc. + restartBob := ht.SuspendNode(bob) + + // Settle invoice. This will just mark the invoice as settled, as there + // is no link anymore to remove the htlc from the commitment tx. For + // this test, it is important to actually settle and not leave the + // invoice in the accepted state, because without a known preimage, the + // channel arbitrator won't go to chain. + carol.RPC.SettleInvoice(preimage[:]) + + // We now advance the block height to the point where Carol will force + // close her channel with Bob, broadcast the closing tx but keep it + // unconfirmed. + numBlocks := padCLTV(uint32( + invoiceReq.CltvExpiry - incomingBroadcastDelta, + )) + + // Now we'll mine enough blocks to prompt Carol to actually go to the + // chain in order to sweep her HTLC since the value is high enough. + ht.MineBlocks(int(numBlocks)) + + // Carol's force close tx should have the following outputs, + // 1. anchor output. + // 2. to_local output, which is CSV locked. + // 3. incoming HTLC output, which she has the preimage to settle. + // + // Carol's anchor output should be offered to her sweeper since she has + // time-sensitive HTLCs - we expect both anchors to be offered, while + // the sweeping of the remote anchor will be marked as failed due to + // `testmempoolaccept` check. + // + // For neutrino backend, there's no way to know the sweeping of the + // remote anchor is failed, so Carol still sees two pending sweeps. 
+	if ht.IsNeutrinoBackend() {
+		ht.AssertNumPendingSweeps(carol, 2)
+	} else {
+		ht.AssertNumPendingSweeps(carol, 1)
+	}
+
+	// We expect to see two txns in the mempool,
+	// 1. Carol's force close tx.
+	// 2. Carol's anchor sweep tx.
+	ht.AssertNumTxsInMempool(2)
+
+	// Mine a block to confirm the closing tx and the anchor sweeping tx.
+	ht.MineBlocksAndAssertNumTxes(1, 2)
+
+	ht.Log("Current height", ht.CurrentHeight())
+
+	// After the force close tx is mined, Carol should offer her second
+	// level HTLC tx to the sweeper.
+	ht.AssertNumPendingSweeps(carol, 1)
+
+	// Restart Bob again.
+	require.NoError(ht, restartBob())
+
+	// Once Bob is online, he should notice Carol's second level tx in the
+	// mempool, he will extract the preimage and settle the HTLC back
+	// off-chain. He will also try to sweep his anchor and to_local
+	// outputs, with the anchor output being skipped due to it being
+	// uneconomical.
+	if params.CommitmentType == leasedType {
+		// For leased channels, Bob cannot sweep his to_local output
+		// yet since it's timelocked, so we only see his anchor input.
+		ht.AssertNumPendingSweeps(bob, 1)
+	} else {
+		// For non-leased channels, Bob should have two pending sweeps,
+		// 1. to_local output.
+		// 2. anchor output, tho it won't be swept due to it being
+		//    uneconomical.
+		ht.AssertNumPendingSweeps(bob, 2)
+	}
+
+	// Mine an empty block for the neutrino backend. We need this step to
+	// trigger Bob's chain watcher to detect the force close tx. Deep down,
+	// this happens because the notification system for neutrino is very
+	// different from others. Specifically, when a block containing the
+	// force close tx is notified, these two calls,
+	// - RegisterBlockEpochNtfn, will notify the block first.
+	// - RegisterSpendNtfn, will wait for the neutrino notifier to sync to
+	//   the block, then perform a GetUtxo, which, by the time the spend
+	//   details are sent, the blockbeat is considered processed in Bob's
+	//   chain watcher.
+ // + // TODO(yy): refactor txNotifier to fix the above issue. + if ht.IsNeutrinoBackend() { + ht.MineEmptyBlocks(1) + } + + if params.CommitmentType == leasedType { + // We expect to see 1 txns in the mempool, + // - Carol's second level HTLC sweep tx. + // We now mine a block to confirm it. + ht.MineBlocksAndAssertNumTxes(1, 1) + } else { + // We expect to see 2 txns in the mempool, + // - Bob's to_local sweep tx. + // - Carol's second level HTLC sweep tx. + // We now mine a block to confirm the sweeping txns. + ht.MineBlocksAndAssertNumTxes(1, 2) + } + + // Once the second-level transaction confirmed, Bob should have + // extracted the preimage from the chain, and sent it back to Alice, + // clearing the HTLC off-chain. + ht.AssertNumActiveHtlcs(alice, 0) + + // Check that the Alice's payment is correctly marked succeeded. + ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED) + + // Carol's pending channel report should now show two outputs under + // limbo: her commitment output, as well as the second-layer claim + // output, and the pending HTLC should also now be in stage 2. + ht.AssertNumHTLCsAndStage(carol, bobChanPoint, 1, 2) + + // If we mine 4 additional blocks, then Carol can sweep the second + // level HTLC output once the CSV expires. + ht.MineBlocks(defaultCSV - 1) + + // Assert Carol has the pending HTLC sweep. + ht.AssertNumPendingSweeps(carol, 1) + + // We should have a new transaction in the mempool. + ht.AssertNumTxsInMempool(1) + + // Finally, if we mine an additional block to confirm Carol's second + // level success transaction. Carol should not show a pending channel + // in her report afterwards. + ht.MineBlocksAndAssertNumTxes(1, 1) + ht.AssertNumPendingForceClose(carol, 0) + + // The invoice should show as settled for Carol, indicating that it was + // swept on-chain. 
+ ht.AssertInvoiceSettled(carol, carolInvoice.PaymentAddr) + + // For leased channels, Bob still has his commit output to sweep to + // since he incurred an additional CLTV from being the channel + // initiator. + if params.CommitmentType == leasedType { + resp := ht.AssertNumPendingForceClose(bob, 1)[0] + require.Positive(ht, resp.LimboBalance) + require.Positive(ht, resp.BlocksTilMaturity) + + // Mine enough blocks for Bob's commit output's CLTV to expire + // and sweep it. + ht.MineBlocks(int(resp.BlocksTilMaturity)) + + // Bob should have two pending inputs to be swept, the commit + // output and the anchor output. + ht.AssertNumPendingSweeps(bob, 2) + + // Mine a block to confirm the commit output sweep. + ht.MineBlocksAndAssertNumTxes(1, 1) + } + + // Assert Bob also sees the channel as closed. + ht.AssertNumPendingForceClose(bob, 0) +} + +// testLocalForceCloseBeforeTimeoutAnchor tests +// `runLocalForceCloseBeforeHtlcTimeout` with anchor channel. +func testLocalForceCloseBeforeTimeoutAnchor(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{Amt: chanAmt} + + cfg := node.CfgAnchor + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: lnrpc.CommitmentType_ANCHORS, + } + + // Prepare Carol's node config to enable zero-conf and anchor. + cfg := node.CfgZeroConf + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) 
+ cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) +} + +// testLocalForceCloseBeforeTimeoutSimpleTaproot tests +// `runLocalForceCloseBeforeHtlcTimeout` with simple taproot channel. +func testLocalForceCloseBeforeTimeoutSimpleTaproot(ht *lntest.HarnessTest) { + c := lnrpc.CommitmentType_SIMPLE_TAPROOT + + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // simple taproot channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: c, + Private: true, + } + + cfg := node.CfgSimpleTaproot + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf simple taproot channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: c, + Private: true, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgSimpleTaproot + cfg = append(cfg, node.CfgZeroConf...) + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) +} + +// testLocalForceCloseBeforeTimeoutLeased tests +// `runLocalForceCloseBeforeHtlcTimeout` with script enforced lease channel. +func testLocalForceCloseBeforeTimeoutLeased(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // leased channels. + // + // Prepare params. 
+ params := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: leasedType, + } + + cfg := node.CfgLeased + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: leasedType, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgLeased + cfg = append(cfg, node.CfgZeroConf...) + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runLocalForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) +} + +// runLocalForceCloseBeforeHtlcTimeout tests that in a multi-hop HTLC scenario, +// if the node that extended the HTLC to the final node closes their commitment +// on-chain early, then it eventually recognizes this HTLC as one that's timed +// out. At this point, the node should timeout the HTLC using the HTLC timeout +// transaction, then cancel it backwards as normal. +func runLocalForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest, + cfgs [][]string, params lntest.OpenChannelParams) { + + // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor + // is never swept. + // + // TODO(yy): delete this line once the normal anchor sweeping is + // removed. + ht.SetMinRelayFeerate(10_000) + + // Create a three hop network: Alice -> Bob -> Carol. + chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, bob, carol := nodes[0], nodes[1], nodes[2] + bobChanPoint := chanPoints[1] + + // With our channels set up, we'll then send a single HTLC from Alice + // to Carol. 
As Carol is in hodl mode, she won't settle this HTLC which + // opens up the base for out tests. + + // If this is a taproot channel, then we'll need to make some manual + // route hints so Alice can actually find a route. + var routeHints []*lnrpc.RouteHint + if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT { + routeHints = makeRouteHints(bob, carol, params.ZeroConf) + } + + // We'll now send a single HTLC across our multi-hop network. + carolPubKey := carol.PubKey[:] + payHash := ht.Random32Bytes() + req := &routerrpc.SendPaymentRequest{ + Dest: carolPubKey, + Amt: int64(htlcAmt), + PaymentHash: payHash, + FinalCltvDelta: finalCltvDelta, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + RouteHints: routeHints, + } + ht.SendPaymentAssertInflight(alice, req) + + // At this point, all 3 nodes should now have an active channel with + // the created HTLC pending on all of them. + // At this point, all 3 nodes should now have an active channel with + // the created HTLCs pending on all of them. + // + // Alice should have one outgoing HTLC on channel Alice -> Bob. + ht.AssertNumActiveHtlcs(alice, 1) + + // Bob should have one incoming HTLC on channel Alice -> Bob, and one + // outgoing HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(bob, 2) + + // Carol should have one incoming HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(carol, 1) + + // Now that all parties have the HTLC locked in, we'll immediately + // force close the Bob -> Carol channel. This should trigger contract + // resolution mode for both of them. + stream, _ := ht.CloseChannelAssertPending(bob, bobChanPoint, true) + ht.AssertStreamChannelForceClosed(bob, bobChanPoint, true, stream) + + // Bob's force close tx should have the following outputs, + // 1. anchor output. + // 2. to_local output, which is CSV locked. + // 3. outgoing HTLC output, which hasn't expired yet. 
+	//
+	// The channel close has anchors, we should expect to see both Bob and
+	// Carol have a pending sweep request for the anchor sweep.
+	ht.AssertNumPendingSweeps(carol, 1)
+	anchorSweep := ht.AssertNumPendingSweeps(bob, 1)[0]
+
+	// We expect Bob's anchor sweep to be a non-CPFP anchor sweep now.
+	// Although he has time-sensitive outputs, which means initially his
+	// anchor output was used for CPFP, this anchor will be replaced by a
+	// new anchor sweeping request once his force close tx is confirmed in
+	// the above block. The timeline goes as follows:
+	// 1. At block 447, Bob force closes his channel with Carol, which
+	//    caused the channel arbitrator to create a CPFP anchor sweep.
+	// 2. This force close tx was mined in AssertStreamChannelForceClosed,
+	//    and we are now in block 448.
+	// 3. Since the blockbeat is processed via the chain [ChainArbitrator
+	//    -> chainWatcher -> channelArbitrator -> Sweeper -> TxPublisher],
+	//    when it reaches `chainWatcher`, Bob will detect the confirmed
+	//    force close tx and notify `channelArbitrator`. In response,
+	//    `channelArbitrator` will advance to `StateContractClosed`, in
+	//    which it will prepare an anchor resolution that's non-CPFP, send
+	//    it to the sweeper to replace the CPFP anchor sweep.
+	// 4. By the time block 448 reaches `Sweeper`, the old CPFP anchor
+	//    sweep has already been replaced with the new non-CPFP anchor
+	//    sweep.
+	require.EqualValues(ht, 330, anchorSweep.Budget, "expected 330 sat "+
+		"budget, got %v", anchorSweep.Budget)
+
+	// Before the HTLC times out, we'll need to assert that Bob broadcasts
+	// a sweep tx for his commit output. Note that if the channel has a
+	// script-enforced lease, then Bob will have to wait for an additional
+	// CLTV before sweeping it.
+	if params.CommitmentType != leasedType {
+		// The sweeping tx is broadcast on the block CSV-1 so mine one
+		// block less than defaultCSV in order to perform mempool
+		// assertions.
+ ht.MineBlocks(int(defaultCSV - 1)) + + // Mine a block to confirm Bob's to_local sweep. + ht.MineBlocksAndAssertNumTxes(1, 1) + } + + // We'll now mine enough blocks for the HTLC to expire. After this, Bob + // should hand off the now expired HTLC output to the sweeper. + resp := ht.AssertNumPendingForceClose(bob, 1)[0] + require.Equal(ht, 1, len(resp.PendingHtlcs)) + + ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+ + "htlc=%v", resp.BlocksTilMaturity, + resp.PendingHtlcs[0].BlocksTilMaturity) + + ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity)) + + // Bob's pending channel report should show that he has a single HTLC + // that's now in stage one. + ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 1) + + // Bob should have two pending sweep requests, + // 1. the anchor sweep. + // 2. the outgoing HTLC sweep. + ht.AssertNumPendingSweeps(bob, 2) + + // Bob's outgoing HTLC sweep should be broadcast now. Mine a block to + // confirm it. + ht.MineBlocksAndAssertNumTxes(1, 1) + + // With the second layer timeout tx confirmed, Bob should have canceled + // backwards the HTLC that Carol sent. + ht.AssertNumActiveHtlcs(bob, 0) + + // Additionally, Bob should now show that HTLC as being advanced to the + // second stage. + ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2) + + // Get the expiry height of the CSV-locked HTLC. + resp = ht.AssertNumPendingForceClose(bob, 1)[0] + require.Equal(ht, 1, len(resp.PendingHtlcs)) + pendingHtlc := resp.PendingHtlcs[0] + require.Positive(ht, pendingHtlc.BlocksTilMaturity) + + ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+ + "htlc=%v", resp.BlocksTilMaturity, + resp.PendingHtlcs[0].BlocksTilMaturity) + + // Mine enough blocks for the HTLC to expire. + ht.MineBlocks(int(pendingHtlc.BlocksTilMaturity)) + + // Based on this is a leased channel or not, Bob may still need to + // sweep his to_local output. 
+ if params.CommitmentType == leasedType { + // Bob should have three pending sweep requests, + // 1. the anchor sweep. + // 2. the second-level HTLC sweep. + // 3. the to_local output sweep, which is CSV+CLTV locked, is + // now mature. + // + // The test is setup such that the to_local and the + // second-level HTLC sweeps share the same deadline, which + // means they will be swept in the same tx. + ht.AssertNumPendingSweeps(bob, 3) + } else { + // Bob should have two pending sweeps, + // 1. the anchor sweep. + // 2. the second-level HTLC sweep. + ht.AssertNumPendingSweeps(bob, 2) + } + + // Now that the CSV timelock has expired, mine a block to confirm the + // sweep. + ht.MineBlocksAndAssertNumTxes(1, 1) + + // At this point, Bob should no longer show any channels as pending + // close. + ht.AssertNumPendingForceClose(bob, 0) +} + +// testRemoteForceCloseBeforeTimeoutAnchor tests +// `runRemoteForceCloseBeforeHtlcTimeout` with anchor channel. +func testRemoteForceCloseBeforeTimeoutAnchor(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{Amt: chanAmt} + + cfg := node.CfgAnchor + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runRemoteForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: lnrpc.CommitmentType_ANCHORS, + } + + // Prepare Carol's node config to enable zero-conf and anchor. + cfg := node.CfgZeroConf + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) 
+ cfgs := [][]string{cfg, cfg, cfgCarol} + + runRemoteForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) +} + +// testRemoteForceCloseBeforeTimeoutSimpleTaproot tests +// `runLocalForceCloseBeforeHtlcTimeout` with simple taproot channel. +func testRemoteForceCloseBeforeTimeoutSimpleTaproot(ht *lntest.HarnessTest) { + c := lnrpc.CommitmentType_SIMPLE_TAPROOT + + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // simple taproot channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: c, + Private: true, + } + + cfg := node.CfgSimpleTaproot + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runRemoteForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf simple taproot channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: c, + Private: true, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgSimpleTaproot + cfg = append(cfg, node.CfgZeroConf...) + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runRemoteForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) +} + +// testRemoteForceCloseBeforeTimeoutLeased tests +// `runRemoteForceCloseBeforeHtlcTimeout` with script enforced lease channel. +func testRemoteForceCloseBeforeTimeoutLeased(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // leased channels. + // + // Prepare params. 
+ params := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: leasedType, + } + + cfg := node.CfgLeased + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runRemoteForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: leasedType, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgLeased + cfg = append(cfg, node.CfgZeroConf...) + cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...) + cfgs := [][]string{cfg, cfg, cfgCarol} + + runRemoteForceCloseBeforeHtlcTimeout(st, cfgs, params) + }) +} + +// runRemoteForceCloseBeforeHtlcTimeout tests that if we extend a multi-hop +// HTLC, and the final destination of the HTLC force closes the channel, then +// we properly timeout the HTLC directly on *their* commitment transaction once +// the timeout has expired. Once we sweep the transaction, we should also +// cancel back the initial HTLC. +func runRemoteForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest, + cfgs [][]string, params lntest.OpenChannelParams) { + + // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor + // is never swept. + // + // TODO(yy): delete this line once the normal anchor sweeping is + // removed. + ht.SetMinRelayFeerate(10_000) + + // Create a three hop network: Alice -> Bob -> Carol. + chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, bob, carol := nodes[0], nodes[1], nodes[2] + bobChanPoint := chanPoints[1] + + // If this is a taproot channel, then we'll need to make some manual + // route hints so Alice can actually find a route. 
+ var routeHints []*lnrpc.RouteHint + if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT { + routeHints = makeRouteHints(bob, carol, params.ZeroConf) + } + + // With our channels set up, we'll then send a single HTLC from Alice + // to Carol. As Carol is in hodl mode, she won't settle this HTLC which + // opens up the base for out tests. + var preimage lntypes.Preimage + copy(preimage[:], ht.Random32Bytes()) + payHash := preimage.Hash() + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Value: int64(htlcAmt), + CltvExpiry: finalCltvDelta, + Hash: payHash[:], + RouteHints: routeHints, + } + carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) + + // Subscribe the invoice. + stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) + + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: carolInvoice.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + ht.SendPaymentAssertInflight(alice, req) + + // At this point, all 3 nodes should now have an active channel with + // the created HTLCs pending on all of them. + // + // Alice should have one outgoing HTLC on channel Alice -> Bob. + ht.AssertNumActiveHtlcs(alice, 1) + + // Bob should have one incoming HTLC on channel Alice -> Bob, and one + // outgoing HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(bob, 2) + + // Carol should have one incoming HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(carol, 1) + + // At this point, we'll now instruct Carol to force close the tx. This + // will let us exercise that Bob is able to sweep the expired HTLC on + // Carol's version of the commitment tx. + closeStream, _ := ht.CloseChannelAssertPending( + carol, bobChanPoint, true, + ) + + // For anchor channels, the anchor won't be used for CPFP because + // channel arbitrator thinks Carol doesn't have preimage for her + // incoming HTLC on the commitment transaction Bob->Carol. 
Although
+	// Carol created this invoice, because it's a hold invoice, the
+	// preimage won't be generated automatically.
+	ht.AssertStreamChannelForceClosed(
+		carol, bobChanPoint, true, closeStream,
+	)
+
+	// At this point, Bob should have a pending force close channel as
+	// Carol has gone directly to chain.
+	ht.AssertNumPendingForceClose(bob, 1)
+
+	// Carol will offer her anchor to her sweeper.
+	ht.AssertNumPendingSweeps(carol, 1)
+
+	// Bob should have offered the anchor output to his sweeper.
+	if params.CommitmentType == leasedType {
+		// For script enforced lease channels, Bob can sweep his anchor
+		// output immediately although it will be skipped due to it
+		// being uneconomical. His to_local output is CLTV locked so it
+		// cannot be swept yet.
+		ht.AssertNumPendingSweeps(bob, 1)
+	} else {
+		// For non-leased channels, Bob can sweep his commit and anchor
+		// outputs immediately.
+		ht.AssertNumPendingSweeps(bob, 2)
+
+		// We expect to see only one sweeping tx to be published from
+		// Bob, which sweeps his to_local output. His anchor output
+		// won't be swept due to it being uneconomical. For Carol,
+		// since her anchor is not used for CPFP, it'd be also
+		// uneconomical to sweep so it will fail.
+		ht.MineBlocksAndAssertNumTxes(1, 1)
+	}
+
+	// Next, we'll mine enough blocks for the HTLC to expire. At this
+	// point, Bob should hand off the output to his sweeper, which will
+	// broadcast a sweep transaction.
+	resp := ht.AssertNumPendingForceClose(bob, 1)[0]
+	require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+	ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+		"htlc=%v", resp.BlocksTilMaturity,
+		resp.PendingHtlcs[0].BlocksTilMaturity)
+
+	ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+	// If we check Bob's pending channel report, it should show that he has
+	// a single HTLC that's now in the second stage, as it skipped the
+	// initial first stage since this is a direct HTLC.
+ ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2) + + // Bob should have two pending sweep requests, + // 1. the uneconomical anchor sweep. + // 2. the direct timeout sweep. + ht.AssertNumPendingSweeps(bob, 2) + + // Bob's sweeping tx should now be found in the mempool. + sweepTx := ht.AssertNumTxsInMempool(1)[0] + + // If we mine an additional block, then this should confirm Bob's tx + // which sweeps the direct HTLC output. + block := ht.MineBlocksAndAssertNumTxes(1, 1)[0] + ht.AssertTxInBlock(block, sweepTx) + + // Now that the sweeping tx has been confirmed, Bob should cancel back + // that HTLC. As a result, Alice should not know of any active HTLC's. + ht.AssertNumActiveHtlcs(alice, 0) + + // For script enforced lease channels, Bob still need to wait for the + // CLTV lock to expire before he can sweep his to_local output. + if params.CommitmentType == leasedType { + // Get the remaining blocks to mine. + resp = ht.AssertNumPendingForceClose(bob, 1)[0] + ht.MineBlocks(int(resp.BlocksTilMaturity)) + + // Assert the commit output has been offered to the sweeper. + // Bob should have two pending sweep requests - one for the + // commit output and one for the anchor output. + ht.AssertNumPendingSweeps(bob, 2) + + // Mine the to_local sweep tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + } + + // Now we'll check Bob's pending channel report. Since this was Carol's + // commitment, he doesn't have to wait for any CSV delays, but he may + // still need to wait for a CLTV on his commit output to expire + // depending on the commitment type. + ht.AssertNumPendingForceClose(bob, 0) + + // While we're here, we assert that our expired invoice's state is + // correctly updated, and can no longer be settled. + ht.AssertInvoiceState(stream, lnrpc.Invoice_CANCELED) +} + +// testLocalClaimIncomingHTLCAnchor tests `runLocalClaimIncomingHTLC` with +// anchor channel. 
+func testLocalClaimIncomingHTLCAnchor(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{Amt: chanAmt} + + cfg := node.CfgAnchor + cfgs := [][]string{cfg, cfg, cfg} + + runLocalClaimIncomingHTLC(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: lnrpc.CommitmentType_ANCHORS, + } + + // Prepare Carol's node config to enable zero-conf and anchor. + cfg := node.CfgZeroConf + cfgs := [][]string{cfg, cfg, cfg} + + runLocalClaimIncomingHTLC(st, cfgs, params) + }) +} + +// testLocalClaimIncomingHTLCSimpleTaproot tests `runLocalClaimIncomingHTLC` +// with simple taproot channel. +func testLocalClaimIncomingHTLCSimpleTaproot(ht *lntest.HarnessTest) { + c := lnrpc.CommitmentType_SIMPLE_TAPROOT + + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // simple taproot channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: c, + Private: true, + } + + cfg := node.CfgSimpleTaproot + cfgs := [][]string{cfg, cfg, cfg} + + runLocalClaimIncomingHTLC(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf simple taproot channels. + // + // Prepare params. 
+ params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: c, + Private: true, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgSimpleTaproot + cfg = append(cfg, node.CfgZeroConf...) + cfgs := [][]string{cfg, cfg, cfg} + + runLocalClaimIncomingHTLC(st, cfgs, params) + }) +} + +// runLocalClaimIncomingHTLC tests that in a multi-hop HTLC scenario, if we +// force close a channel with an incoming HTLC, and later find out the preimage +// via the witness beacon, we properly settle the HTLC on-chain using the HTLC +// success transaction in order to ensure we don't lose any funds. +func runLocalClaimIncomingHTLC(ht *lntest.HarnessTest, + cfgs [][]string, params lntest.OpenChannelParams) { + + // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor + // is never swept. + // + // TODO(yy): delete this line once the normal anchor sweeping is + // removed. + ht.SetMinRelayFeerate(10_000) + + // Create a three hop network: Alice -> Bob -> Carol. + chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, bob, carol := nodes[0], nodes[1], nodes[2] + aliceChanPoint := chanPoints[0] + + // Fund Carol one UTXO so she can sweep outputs. + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) + + // If this is a taproot channel, then we'll need to make some manual + // route hints so Alice can actually find a route. + var routeHints []*lnrpc.RouteHint + if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT { + routeHints = makeRouteHints(bob, carol, params.ZeroConf) + } + + // With the network active, we'll now add a new hodl invoice at Carol's + // end. Make sure the cltv expiry delta is large enough, otherwise Bob + // won't send out the outgoing htlc. 
+ preimage := ht.RandomPreimage() + payHash := preimage.Hash() + + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Value: invoiceAmt, + CltvExpiry: finalCltvDelta, + Hash: payHash[:], + RouteHints: routeHints, + } + carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) + + // Subscribe the invoice. + stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) + + // Now that we've created the invoice, we'll send a single payment from + // Alice to Carol. We won't wait for the response however, as Carol + // will not immediately settle the payment. + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: carolInvoice.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + ht.SendPaymentAssertInflight(alice, req) + + // At this point, all 3 nodes should now have an active channel with + // the created HTLC pending on all of them. + // At this point, all 3 nodes should now have an active channel with + // the created HTLCs pending on all of them. + // + // Alice should have one outgoing HTLC on channel Alice -> Bob. + ht.AssertNumActiveHtlcs(alice, 1) + + // Bob should have one incoming HTLC on channel Alice -> Bob, and one + // outgoing HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(bob, 2) + + // Carol should have one incoming HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(carol, 1) + + // Wait for carol to mark invoice as accepted. There is a small gap to + // bridge between adding the htlc to the channel and executing the exit + // hop logic. + ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) + + // At this point, Bob decides that he wants to exit the channel + // Alice=>Bob immediately, so he force closes his commitment tx. + closeStream, _ := ht.CloseChannelAssertPending( + bob, aliceChanPoint, true, + ) + + // For anchor channels, the anchor won't be used for CPFP as there's no + // deadline pressure for Bob on the channel Alice->Bob at the moment. 
+ // For Bob's local commitment tx, there's only one incoming HTLC which + // he doesn't have the preimage yet. + hasAnchorSweep := false + bobForceClose := ht.AssertStreamChannelForceClosed( + bob, aliceChanPoint, hasAnchorSweep, closeStream, + ) + + // Alice will offer her to_local and anchor outputs to her sweeper. + ht.AssertNumPendingSweeps(alice, 2) + + // Bob will offer his anchor to his sweeper. + ht.AssertNumPendingSweeps(bob, 1) + + // Assert the expected num of txns are found in the mempool. + // + // We expect to see only one sweeping tx to be published from Alice, + // which sweeps her to_local output (which is to to_remote on Bob's + // commit tx). Her anchor output won't be swept as it's uneconomical. + // For Bob, since his anchor is not used for CPFP, it'd be uneconomical + // to sweep so it will fail. + ht.AssertNumTxsInMempool(1) + + // Mine a block to confirm Alice's sweeping tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + + // Suspend Bob to force Carol to go to chain. + restartBob := ht.SuspendNode(bob) + + // Settle invoice. This will just mark the invoice as settled, as there + // is no link anymore to remove the htlc from the commitment tx. For + // this test, it is important to actually settle and not leave the + // invoice in the accepted state, because without a known preimage, the + // channel arbitrator won't go to chain. + carol.RPC.SettleInvoice(preimage[:]) + + // We now advance the block height to the point where Carol will force + // close her channel with Bob, broadcast the closing tx but keep it + // unconfirmed. + numBlocks := padCLTV( + uint32(invoiceReq.CltvExpiry - incomingBroadcastDelta), + ) + + // We've already mined 2 blocks at this point, so we only need to mine + // CLTV-2 blocks. + ht.MineBlocks(int(numBlocks - 2)) + + // Expect two txns in the mempool, + // - Carol's force close tx. + // - Carol's CPFP anchor sweeping tx. + // Mine a block to confirm them. 
+ ht.MineBlocksAndAssertNumTxes(1, 2) + + // After the force close tx is mined, Carol should offer her + // second-level success HTLC tx to her sweeper. + ht.AssertNumPendingSweeps(carol, 1) + + // Restart bob again. + require.NoError(ht, restartBob()) + + // Once Bob is online and sees the force close tx Bob=>Carol, he will + // create a tx to sweep his commitment output. His anchor outputs will + // not be swept due to uneconomical. We expect to see three sweeping + // requests, + // - the commitment output. + // - the anchor output from channel Alice=>Bob. + // - the anchor output from channel Bob=>Carol. + ht.AssertNumPendingSweeps(bob, 3) + + // Mine an empty block the for neutrino backend. We need this step to + // trigger Bob's chain watcher to detect the force close tx. Deep down, + // this happens because the notification system for neutrino is very + // different from others. Specifically, when a block contains the force + // close tx is notified, these two calls, + // - RegisterBlockEpochNtfn, will notify the block first. + // - RegisterSpendNtfn, will wait for the neutrino notifier to sync to + // the block, then perform a GetUtxo, which, by the time the spend + // details are sent, the blockbeat is considered processed in Bob's + // chain watcher. + // + // TODO(yy): refactor txNotifier to fix the above issue. + if ht.IsNeutrinoBackend() { + ht.MineEmptyBlocks(1) + } + + // Assert txns can be found in the mempool. + // + // Carol will broadcast her sweeping tx and Bob will sweep his + // commitment anchor output, we'd expect to see two txns, + // - Carol's second level HTLC tx. + // - Bob's commitment output sweeping tx. + ht.AssertNumTxsInMempool(2) + + // At this point we suspend Alice to make sure she'll handle the + // on-chain settle after a restart. + restartAlice := ht.SuspendNode(alice) + + // Mine a block to confirm the sweeping txns made by Bob and Carol. 
+ ht.MineBlocksAndAssertNumTxes(1, 2) + + // When Bob notices Carol's second level tx in the block, he will + // extract the preimage and broadcast a second level tx to claim the + // HTLC in his (already closed) channel with Alice, which means Bob has + // three sweeping requests, + // - the second level HTLC tx from channel Alice=>Bob. + // - the anchor output from channel Alice=>Bob. + // - the anchor output from channel Bob=>Carol. + ht.AssertNumPendingSweeps(bob, 3) + + // Mine a block to trigger the sweep. This is needed because the + // preimage extraction logic from the link is not managed by the + // blockbeat, which means the preimage may be sent to the contest + // resolver after it's launched. + // + // TODO(yy): Expose blockbeat to the link layer. + ht.MineEmptyBlocks(1) + + // At this point, Bob should have broadcast his second layer success + // tx, and should have sent it to his sweeper. + // + // Check Bob's second level tx. + bobSecondLvlTx := ht.GetNumTxsFromMempool(1)[0] + + // It should spend from the commitment in the channel with Alice. + ht.AssertTxSpendFrom(bobSecondLvlTx, bobForceClose) + + // We'll now mine a block which should confirm Bob's second layer tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + + // Bob should consider the channel Bob=>Carol closed, and channel + // Alice=>Bob pending close. + ht.AssertNumPendingForceClose(bob, 1) + + // Now that the preimage from Bob has hit the chain, restart Alice to + // ensure she'll pick it up. + require.NoError(ht, restartAlice()) + + // If we then mine 1 additional block, Carol's second level tx should + // mature, and she can pull the funds from it with a sweep tx. 
+	resp := ht.AssertNumPendingForceClose(carol, 1)[0]
+	require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+	ht.Logf("Carol's timelock to_local output=%v, timelock on second "+
+		"stage htlc=%v", resp.BlocksTilMaturity,
+		resp.PendingHtlcs[0].BlocksTilMaturity)
+
+	ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+	// Carol should have a sweep request for her second level tx.
+	ht.AssertNumPendingSweeps(carol, 1)
+
+	// Carol's sweep tx should be broadcast, assert it's in the mempool and
+	// mine it.
+	ht.MineBlocksAndAssertNumTxes(1, 1)
+
+	// We now mine blocks till the CSV lock on Bob's success HTLC on
+	// commitment Alice=>Bob expires.
+	resp = ht.AssertNumPendingForceClose(bob, 1)[0]
+	require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+	ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+		"htlc=%v", resp.BlocksTilMaturity,
+		resp.PendingHtlcs[0].BlocksTilMaturity)
+
+	ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+	// Bob should have three requests in his sweeper.
+	// - the second level HTLC tx.
+	// - the anchor output from channel Alice=>Bob.
+	// - the anchor output from channel Bob=>Carol.
+	ht.AssertNumPendingSweeps(bob, 3)
+
+	// When we mine one additional block, that will confirm Bob's sweep.
+	// Now Bob should have no pending channels anymore, as this just
+	// resolved it by the confirmation of the sweep transaction.
+	ht.MineBlocksAndAssertNumTxes(1, 1)
+
+	// All nodes should show zero pending and open channels.
+	for _, node := range []*node.HarnessNode{alice, bob, carol} {
+		ht.AssertNumPendingForceClose(node, 0)
+		ht.AssertNodeNumChannels(node, 0)
+	}
+
+	// Finally, check that Alice's payment is correctly marked
+	// succeeded.
+	ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
+}
+
+// testLocalClaimIncomingHTLCLeased tests `runLocalClaimIncomingHTLCLeased`
+// with script enforced lease channel.
+func testLocalClaimIncomingHTLCLeased(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // leased channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: leasedType, + } + + cfg := node.CfgLeased + cfgs := [][]string{cfg, cfg, cfg} + + runLocalClaimIncomingHTLCLeased(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: leasedType, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgLeased + cfg = append(cfg, node.CfgZeroConf...) + cfgs := [][]string{cfg, cfg, cfg} + + runLocalClaimIncomingHTLCLeased(st, cfgs, params) + }) +} + +// runLocalClaimIncomingHTLCLeased tests that in a multi-hop HTLC scenario, if +// we force close a channel with an incoming HTLC, and later find out the +// preimage via the witness beacon, we properly settle the HTLC on-chain using +// the HTLC success transaction in order to ensure we don't lose any funds. +// +// TODO(yy): simplify or remove this test as it's too complicated. +func runLocalClaimIncomingHTLCLeased(ht *lntest.HarnessTest, + cfgs [][]string, params lntest.OpenChannelParams) { + + // Set the min relay feerate to be 5 sat/vbyte so the non-CPFP anchor + // is never swept. + // + // TODO(yy): delete this line once the normal anchor sweeping is + // removed. + ht.SetMinRelayFeerate(5000) + + // Create a three hop network: Alice -> Bob -> Carol. 
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, bob, carol := nodes[0], nodes[1], nodes[2] + aliceChanPoint, bobChanPoint := chanPoints[0], chanPoints[1] + + // Fund Carol one UTXO so she can sweep outputs. + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) + + // Carol should have enough wallet UTXOs here to sweep the HTLC in the + // end of this test. However, due to a known issue, Carol's wallet may + // report there's no UTXO available. For details, + // - https://github.com/lightningnetwork/lnd/issues/8786 + // + // TODO(yy): remove this step once the issue is resolved. + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) + + // With the network active, we'll now add a new hodl invoice at Carol's + // end. Make sure the cltv expiry delta is large enough, otherwise Bob + // won't send out the outgoing htlc. + preimage := ht.RandomPreimage() + payHash := preimage.Hash() + + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Value: invoiceAmt, + CltvExpiry: finalCltvDelta, + Hash: payHash[:], + } + carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) + + // Subscribe the invoice. + stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) + + // Now that we've created the invoice, we'll send a single payment from + // Alice to Carol. We won't wait for the response however, as Carol + // will not immediately settle the payment. + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: carolInvoice.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + ht.SendPaymentAssertInflight(alice, req) + + // At this point, all 3 nodes should now have an active channel with + // the created HTLC pending on all of them. + // At this point, all 3 nodes should now have an active channel with + // the created HTLCs pending on all of them. + // + // Alice should have one outgoing HTLC on channel Alice -> Bob. 
+ ht.AssertNumActiveHtlcs(alice, 1) + + // Bob should have one incoming HTLC on channel Alice -> Bob, and one + // outgoing HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(bob, 2) + + // Carol should have one incoming HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(carol, 1) + + // Wait for carol to mark invoice as accepted. There is a small gap to + // bridge between adding the htlc to the channel and executing the exit + // hop logic. + ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) + + // At this point, Bob decides that he wants to exit the channel + // Alice=>Bob immediately, so he force closes his commitment tx. + closeStream, _ := ht.CloseChannelAssertPending( + bob, aliceChanPoint, true, + ) + + // For anchor channels, the anchor won't be used for CPFP as there's no + // deadline pressure for Bob on the channel Alice->Bob at the moment. + // For Bob's local commitment tx, there's only one incoming HTLC which + // he doesn't have the preimage yet. + hasAnchorSweep := false + bobForceClose := ht.AssertStreamChannelForceClosed( + bob, aliceChanPoint, hasAnchorSweep, closeStream, + ) + + // Alice will offer her anchor output to her sweeper. Her commitment + // output cannot be swept yet as it has incurred an additional CLTV due + // to being the initiator of a script-enforced leased channel. + // + // This anchor output cannot be swept due to it being uneconomical. + ht.AssertNumPendingSweeps(alice, 1) + + // Bob will offer his anchor to his sweeper. + // + // This anchor output cannot be swept due to it being uneconomical. + ht.AssertNumPendingSweeps(bob, 1) + + // Suspend Bob to force Carol to go to chain. + restartBob := ht.SuspendNode(bob) + + // Settle invoice. This will just mark the invoice as settled, as there + // is no link anymore to remove the htlc from the commitment tx. 
For
+	// this test, it is important to actually settle and not leave the
+	// invoice in the accepted state, because without a known preimage, the
+	// channel arbitrator won't go to chain.
+	carol.RPC.SettleInvoice(preimage[:])
+
+	// We now advance the block height to the point where Carol will force
+	// close her channel with Bob, broadcast the closing tx but keep it
+	// unconfirmed.
+	numBlocks := padCLTV(
+		uint32(invoiceReq.CltvExpiry - incomingBroadcastDelta),
+	)
+	ht.MineBlocks(int(numBlocks) - 1)
+
+	// Expect two txns in the mempool,
+	// - Carol's force close tx.
+	// - Carol's CPFP anchor sweeping tx.
+	// Mine a block to confirm them.
+	ht.MineBlocksAndAssertNumTxes(1, 2)
+
+	// After the force close tx is mined, Carol should offer her
+	// second-level success HTLC tx to her sweeper.
+	ht.AssertNumPendingSweeps(carol, 1)
+
+	// Restart bob again.
+	require.NoError(ht, restartBob())
+
+	// Once Bob is online and sees the force close tx Bob=>Carol, he will
+	// offer his commitment output to his sweeper, which will be skipped
+	// due to it being timelocked. His anchor outputs will not be swept due
+	// to uneconomical. We expect to see two sweeping requests,
+	// - the anchor output from channel Alice=>Bob.
+	// - the anchor output from channel Bob=>Carol.
+	ht.AssertNumPendingSweeps(bob, 2)
+
+	// Assert txns can be found in the mempool.
+	//
+	// Carol will broadcast her second-level HTLC sweeping txns. Bob cannot
+	// sweep his commitment anchor output yet due to it being CLTV locked.
+	ht.AssertNumTxsInMempool(1)
+
+	// At this point we suspend Alice to make sure she'll handle the
+	// on-chain settle after a restart.
+	restartAlice := ht.SuspendNode(alice)
+
+	// Mine a block to confirm the sweeping tx from Carol.
+ ht.MineBlocksAndAssertNumTxes(1, 1) + + // When Bob notices Carol's second level tx in the block, he will + // extract the preimage and broadcast a second level tx to claim the + // HTLC in his (already closed) channel with Alice, which means Bob has + // three sweeping requests, + // - the second level HTLC tx from channel Alice=>Bob. + // - the anchor output from channel Alice=>Bob. + // - the anchor output from channel Bob=>Carol. + ht.AssertNumPendingSweeps(bob, 3) + + // Mine a block to trigger the sweep. This is needed because the + // preimage extraction logic from the link is not managed by the + // blockbeat, which means the preimage may be sent to the contest + // resolver after it's launched. + // + // TODO(yy): Expose blockbeat to the link layer. + ht.MineEmptyBlocks(1) + + // At this point, Bob should have broadcast his second layer success + // tx, and should have sent it to his sweeper. + // + // Check Bob's second level tx. + bobSecondLvlTx := ht.GetNumTxsFromMempool(1)[0] + + // It should spend from the commitment in the channel with Alice. + ht.AssertTxSpendFrom(bobSecondLvlTx, bobForceClose) + + // The channel between Bob and Carol will still be pending force close + // if this is a leased channel. We'd also check the HTLC stages are + // correct in both channels. + ht.AssertNumPendingForceClose(bob, 2) + ht.AssertNumHTLCsAndStage(bob, aliceChanPoint, 1, 1) + ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 1) + + // We'll now mine a block which should confirm Bob's second layer tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + + // Now that the preimage from Bob has hit the chain, restart Alice to + // ensure she'll pick it up. + require.NoError(ht, restartAlice()) + + // If we then mine 1 additional block, Carol's second level tx should + // mature, and she can pull the funds from it with a sweep tx. 
+ resp := ht.AssertNumPendingForceClose(carol, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Carol's timelock to_local output=%v, timelock on second "+
+ "stage htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+ // Carol should have a sweep request for her second level tx.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Carol's sweep tx should be broadcast, assert it's in the mempool and
+ // mine it.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // We now mine blocks till the CSV lock on Bob's success HTLC on
+ // commitment Alice=>Bob expires.
+ resp = ht.AssertChannelPendingForceClose(bob, aliceChanPoint)
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+ htlcExpiry := resp.PendingHtlcs[0].BlocksTilMaturity
+
+ ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity, htlcExpiry)
+ ht.MineBlocks(int(htlcExpiry))
+
+ // When we mine one additional block, that will confirm Bob's second
+ // level HTLC sweep on channel Alice=>Bob.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // We now mine blocks till the CLTV lock on Bob's to_local output HTLC
+ // on commitment Bob=>Carol expires.
+ resp = ht.AssertChannelPendingForceClose(bob, bobChanPoint)
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+ htlcExpiry = resp.PendingHtlcs[0].BlocksTilMaturity
+
+ ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity, htlcExpiry)
+ ht.MineBlocks(int(resp.BlocksTilMaturity))
+
+ // Bob should have three requests in his sweeper.
+ // - to_local output from channel Bob=>Carol.
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Alice should have two requests in her sweeper,
+ // - the anchor output from channel Alice=>Bob, uneconomical. 
+ // - her commitment output, now mature. + ht.AssertNumPendingSweeps(alice, 2) + + // Mine a block to confirm Bob's to_local output sweep. + ht.MineBlocksAndAssertNumTxes(1, 2) + + // All nodes should show zero pending and open channels. + for _, node := range []*node.HarnessNode{alice, bob, carol} { + ht.AssertNumPendingForceClose(node, 0) + ht.AssertNodeNumChannels(node, 0) + } + + // Finally, check that the Alice's payment is correctly marked + // succeeded. + ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED) +} + +// testLocalPreimageClaimAnchor tests `runLocalPreimageClaim` with anchor +// channel. +func testLocalPreimageClaimAnchor(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{Amt: chanAmt} + + cfg := node.CfgAnchor + cfgs := [][]string{cfg, cfg, cfg} + + runLocalPreimageClaim(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: lnrpc.CommitmentType_ANCHORS, + } + + // Prepare Carol's node config to enable zero-conf and anchor. + cfg := node.CfgZeroConf + cfgs := [][]string{cfg, cfg, cfg} + + runLocalPreimageClaim(st, cfgs, params) + }) +} + +// testLocalPreimageClaimSimpleTaproot tests `runLocalClaimIncomingHTLC` with +// simple taproot channel. +func testLocalPreimageClaimSimpleTaproot(ht *lntest.HarnessTest) { + c := lnrpc.CommitmentType_SIMPLE_TAPROOT + + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // simple taproot channels. + // + // Prepare params. 
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ cfg := node.CfgSimpleTaproot
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalPreimageClaim(st, cfgs, params)
+ })
+ if !success {
+ return
+ }
+
+ ht.Run("zero conf", func(t *testing.T) {
+ st := ht.Subtest(t)
+
+ // Create a three hop network: Alice -> Bob -> Carol, using
+ // zero-conf simple taproot channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and taproot
+ // channel.
+ cfg := node.CfgSimpleTaproot
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalPreimageClaim(st, cfgs, params)
+ })
+}
+
+// runLocalPreimageClaim tests that in the multi-hop HTLC scenario, if the
+// remote party goes to chain while we have an incoming HTLC, then when we
+// find out the preimage via the witness beacon, we properly settle the HTLC
+// directly on-chain using the preimage in order to ensure that we don't lose
+// any funds.
+func runLocalPreimageClaim(ht *lntest.HarnessTest,
+ cfgs [][]string, params lntest.OpenChannelParams) {
+
+ // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+ // is never swept.
+ //
+ // TODO(yy): delete this line once the normal anchor sweeping is
+ // removed.
+ ht.SetMinRelayFeerate(10_000)
+
+ // Create a three hop network: Alice -> Bob -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ aliceChanPoint := chanPoints[0]
+
+ // Fund Carol one UTXO so she can sweep outputs.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+
+ // Carol should have enough wallet UTXOs here to sweep the HTLC in the
+ // end of this test. However, due to a known issue, Carol's wallet may
+ // report there's no UTXO available. 
For details, + // - https://github.com/lightningnetwork/lnd/issues/8786 + // + // TODO(yy): remove this step once the issue is resolved. + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) + + // If this is a taproot channel, then we'll need to make some manual + // route hints so Alice can actually find a route. + var routeHints []*lnrpc.RouteHint + if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT { + routeHints = makeRouteHints(bob, carol, params.ZeroConf) + } + + // With the network active, we'll now add a new hodl invoice at Carol's + // end. Make sure the cltv expiry delta is large enough, otherwise Bob + // won't send out the outgoing htlc. + preimage := ht.RandomPreimage() + payHash := preimage.Hash() + + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Value: invoiceAmt, + CltvExpiry: finalCltvDelta, + Hash: payHash[:], + RouteHints: routeHints, + } + carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) + + // Subscribe the invoice. + stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) + + // Now that we've created the invoice, we'll send a single payment from + // Alice to Carol. We won't wait for the response however, as Carol + // will not immediately settle the payment. + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: carolInvoice.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + ht.SendPaymentAssertInflight(alice, req) + + // At this point, all 3 nodes should now have an active channel with + // the created HTLCs pending on all of them. + // + // Alice should have one outgoing HTLC on channel Alice -> Bob. + ht.AssertNumActiveHtlcs(alice, 1) + + // Bob should have one incoming HTLC on channel Alice -> Bob, and one + // outgoing HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(bob, 2) + + // Carol should have one incoming HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(carol, 1) + + // Wait for carol to mark invoice as accepted. 
There is a small gap to + // bridge between adding the htlc to the channel and executing the exit + // hop logic. + ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) + + // Record the height which the invoice will expire. + invoiceExpiry := ht.CurrentHeight() + uint32(invoiceReq.CltvExpiry) + + // Next, Alice decides that she wants to exit the channel, so she'll + // immediately force close the channel by broadcast her commitment + // transaction. + closeStream, _ := ht.CloseChannelAssertPending( + alice, aliceChanPoint, true, + ) + aliceForceClose := ht.AssertStreamChannelForceClosed( + alice, aliceChanPoint, true, closeStream, + ) + + // Wait for the channel to be marked pending force close. + ht.AssertChannelPendingForceClose(alice, aliceChanPoint) + + // Once the force closing tx is mined, Alice should offer the anchor + // output to her sweeper. + ht.AssertNumPendingSweeps(alice, 1) + + // Bob should offer his anchor output to his sweeper. + ht.AssertNumPendingSweeps(bob, 1) + + // Mine enough blocks for Alice to sweep her funds from the force + // closed channel. AssertStreamChannelForceClosed() already mined a + // block, so mine one less than defaultCSV in order to perform mempool + // assertions. + ht.MineBlocks(defaultCSV - 1) + + // Mine Alice's commit sweeping tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + + // Suspend bob, so Carol is forced to go on chain. + restartBob := ht.SuspendNode(bob) + + // Settle invoice. This will just mark the invoice as settled, as there + // is no link anymore to remove the htlc from the commitment tx. For + // this test, it is important to actually settle and not leave the + // invoice in the accepted state, because without a known preimage, the + // channel arbitrator won't go to chain. 
+ carol.RPC.SettleInvoice(preimage[:])
+
+ ht.Logf("Invoice expire height: %d, current: %d", invoiceExpiry,
+ ht.CurrentHeight())
+
+ // We'll now mine enough blocks so Carol decides that she needs to go
+ // on-chain to claim the HTLC as Bob has been inactive.
+ numBlocks := padCLTV(
+ invoiceExpiry - ht.CurrentHeight() - incomingBroadcastDelta,
+ )
+ ht.MineBlocks(int(numBlocks))
+
+ // Since Carol has time-sensitive HTLCs, she will use the anchor for
+ // CPFP purpose. Assert the anchor output is offered to the sweeper.
+ //
+ // For neutrino backend, Carol still has the two anchors - one from
+ // local commitment and the other from the remote.
+ if ht.IsNeutrinoBackend() {
+ ht.AssertNumPendingSweeps(carol, 2)
+ } else {
+ ht.AssertNumPendingSweeps(carol, 1)
+ }
+
+ // We should see two txns in the mempool, now mine a block to confirm,
+ // - Carol's force close tx.
+ // - Carol's anchor sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // Once the force close tx is confirmed, Carol should offer her
+ // incoming HTLC to her sweeper.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Restart bob again.
+ require.NoError(ht, restartBob())
+
+ // Bob should have three sweeping requests,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ // - the commit output sweep from the channel with Carol, no timelock.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Mine an empty block for the neutrino backend. We need this step to
+ // trigger Bob's chain watcher to detect the force close tx. Deep down,
+ // this happens because the notification system for neutrino is very
+ // different from others. Specifically, when a block containing the
+ // force close tx is notified, these two calls,
+ // - RegisterBlockEpochNtfn, will notify the block first. 
+ // - RegisterSpendNtfn, will wait for the neutrino notifier to sync to
+ // the block, then perform a GetUtxo, which, by the time the spend
+ // details are sent, the blockbeat is considered processed in Bob's
+ // chain watcher.
+ //
+ // TODO(yy): refactor txNotifier to fix the above issue.
+ if ht.IsNeutrinoBackend() {
+ ht.MineEmptyBlocks(1)
+ }
+
+ // We mine one block to confirm,
+ // - Carol's sweeping tx of the incoming HTLC.
+ // - Bob's sweeping tx of his commit output.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // When Bob notices Carol's second level tx in the block, he will
+ // extract the preimage and offer the HTLC to his sweeper. So he has,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ // - the htlc sweeping tx.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Mine an empty block for the neutrino backend. We need this step to
+ // trigger Bob's chain watcher to detect the force close tx. Deep down,
+ // this happens because the notification system for neutrino is very
+ // different from others. Specifically, when a block containing the
+ // force close tx is notified, these two calls,
+ // - RegisterBlockEpochNtfn, will notify the block first.
+ // - RegisterSpendNtfn, will wait for the neutrino notifier to sync to
+ // the block, then perform a GetUtxo, which, by the time the spend
+ // details are sent, the blockbeat is considered processed in Bob's
+ // chain watcher.
+ //
+ // TODO(yy): refactor txNotifier to fix the above issue.
+ if ht.IsNeutrinoBackend() {
+ ht.MineEmptyBlocks(1)
+ }
+
+ // Mine a block to trigger the sweep. This is needed because the
+ // preimage extraction logic from the link is not managed by the
+ // blockbeat, which means the preimage may be sent to the contest
+ // resolver after it's launched.
+ //
+ // TODO(yy): Expose blockbeat to the link layer. 
+ ht.MineEmptyBlocks(1) + + // Bob should broadcast the sweeping of the direct preimage spent now. + bobHtlcSweep := ht.GetNumTxsFromMempool(1)[0] + + // It should spend from the commitment in the channel with Alice. + ht.AssertTxSpendFrom(bobHtlcSweep, aliceForceClose) + + // We'll now mine a block which should confirm Bob's HTLC sweep tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + + // Now that the sweeping tx has been confirmed, Bob should recognize + // that all contracts for the Bob-Carol channel have been fully + // resolved. + ht.AssertNumPendingForceClose(bob, 0) + + // Mine blocks till Carol's second level tx matures. + resp := ht.AssertNumPendingForceClose(carol, 1)[0] + require.Equal(ht, 1, len(resp.PendingHtlcs)) + + ht.Logf("Carol's timelock to_local output=%v, timelock on second "+ + "stage htlc=%v", resp.BlocksTilMaturity, + resp.PendingHtlcs[0].BlocksTilMaturity) + + ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity)) + + // Carol should offer the htlc output to her sweeper. + ht.AssertNumPendingSweeps(carol, 1) + + // Mine a block to confirm Carol's sweeping tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + + // When Carol's sweep gets confirmed, she should have no more pending + // channels. + ht.AssertNumPendingForceClose(carol, 0) + + // The invoice should show as settled for Carol, indicating that it was + // swept on-chain. + ht.AssertInvoiceState(stream, lnrpc.Invoice_SETTLED) + + // Finally, check that the Alice's payment is correctly marked + // succeeded. + ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED) +} + +// testLocalPreimageClaimLeased tests `runLocalPreimageClaim` with script +// enforced lease channel. +func testLocalPreimageClaimLeased(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // leased channels. + // + // Prepare params. 
+ params := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: leasedType, + } + + cfg := node.CfgLeased + cfgs := [][]string{cfg, cfg, cfg} + + runLocalPreimageClaimLeased(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: leasedType, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgLeased + cfg = append(cfg, node.CfgZeroConf...) + cfgs := [][]string{cfg, cfg, cfg} + + runLocalPreimageClaimLeased(st, cfgs, params) + }) +} + +// runLocalPreimageClaimLeased tests that in the multi-hop HTLC scenario, if +// the remote party goes to chain while we have an incoming HTLC, then when we +// found out the preimage via the witness beacon, we properly settle the HTLC +// directly on-chain using the preimage in order to ensure that we don't lose +// any funds. +func runLocalPreimageClaimLeased(ht *lntest.HarnessTest, + cfgs [][]string, params lntest.OpenChannelParams) { + + // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor + // is never swept. + // + // TODO(yy): delete this line once the normal anchor sweeping is + // removed. + ht.SetMinRelayFeerate(10_000) + + // Create a three hop network: Alice -> Bob -> Carol. + chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, bob, carol := nodes[0], nodes[1], nodes[2] + aliceChanPoint, bobChanPoint := chanPoints[0], chanPoints[1] + + // Fund Carol one UTXO so she can sweep outputs. + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) + + // With the network active, we'll now add a new hodl invoice at Carol's + // end. Make sure the cltv expiry delta is large enough, otherwise Bob + // won't send out the outgoing htlc. 
+ preimage := ht.RandomPreimage() + payHash := preimage.Hash() + + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Value: invoiceAmt, + CltvExpiry: finalCltvDelta, + Hash: payHash[:], + } + carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) + + // Subscribe the invoice. + stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) + + // Now that we've created the invoice, we'll send a single payment from + // Alice to Carol. We won't wait for the response however, as Carol + // will not immediately settle the payment. + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: carolInvoice.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + ht.SendPaymentAssertInflight(alice, req) + + // At this point, all 3 nodes should now have an active channel with + // the created HTLCs pending on all of them. + // + // Alice should have one outgoing HTLC on channel Alice -> Bob. + ht.AssertNumActiveHtlcs(alice, 1) + + // Bob should have one incoming HTLC on channel Alice -> Bob, and one + // outgoing HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(bob, 2) + + // Carol should have one incoming HTLC on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(carol, 1) + + // Wait for carol to mark invoice as accepted. There is a small gap to + // bridge between adding the htlc to the channel and executing the exit + // hop logic. + ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) + + // Record the height which the invoice will expire. + invoiceExpiry := ht.CurrentHeight() + uint32(invoiceReq.CltvExpiry) + + // Next, Alice decides that she wants to exit the channel, so she'll + // immediately force close the channel by broadcast her commitment + // transaction. + closeStream, _ := ht.CloseChannelAssertPending( + alice, aliceChanPoint, true, + ) + aliceForceClose := ht.AssertStreamChannelForceClosed( + alice, aliceChanPoint, true, closeStream, + ) + + // Wait for the channel to be marked pending force close. 
+ ht.AssertChannelPendingForceClose(alice, aliceChanPoint)
+
+ // Once the force closing tx is mined, Alice should offer the anchor
+ // output to her sweeper.
+ ht.AssertNumPendingSweeps(alice, 1)
+
+ // Bob should offer his anchor output to his sweeper.
+ ht.AssertNumPendingSweeps(bob, 1)
+
+ // Suspend bob, so Carol is forced to go on chain.
+ restartBob := ht.SuspendNode(bob)
+
+ // Settle invoice. This will just mark the invoice as settled, as there
+ // is no link anymore to remove the htlc from the commitment tx. For
+ // this test, it is important to actually settle and not leave the
+ // invoice in the accepted state, because without a known preimage, the
+ // channel arbitrator won't go to chain.
+ carol.RPC.SettleInvoice(preimage[:])
+
+ ht.Logf("Invoice expire height: %d, current: %d", invoiceExpiry,
+ ht.CurrentHeight())
+
+ // We'll now mine enough blocks so Carol decides that she needs to go
+ // on-chain to claim the HTLC as Bob has been inactive.
+ numBlocks := padCLTV(
+ invoiceExpiry - ht.CurrentHeight() - incomingBroadcastDelta - 1,
+ )
+ ht.MineBlocks(int(numBlocks))
+
+ // Since Carol has time-sensitive HTLCs, she will use the anchor for
+ // CPFP purpose. Assert the anchor output is offered to the sweeper.
+ //
+ // For neutrino backend, there's no way to know the sweeping of the
+ // remote anchor has failed, so Carol still sees two pending sweeps.
+ if ht.IsNeutrinoBackend() {
+ ht.AssertNumPendingSweeps(carol, 2)
+ } else {
+ ht.AssertNumPendingSweeps(carol, 1)
+ }
+
+ // We should see two txns in the mempool, now mine a block to confirm,
+ // - Carol's force close tx.
+ // - Carol's anchor sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // Once the force close tx is confirmed, Carol should offer her
+ // incoming HTLC to her sweeper.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Restart bob again. 
+ require.NoError(ht, restartBob())
+
+ // Bob should have two sweeping requests,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ // - the commit output sweep from the channel with Carol, which is CLTV
+ // locked so it won't show up in the pending sweeps.
+ ht.AssertNumPendingSweeps(bob, 2)
+
+ // We mine one block to confirm,
+ // - Carol's sweeping tx of the incoming HTLC.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // When Bob notices Carol's second level tx in the block, he will
+ // extract the preimage and offer the HTLC to his sweeper. So he has,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ // - the htlc sweeping tx.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Mine a block to trigger the sweep. This is needed because the
+ // preimage extraction logic from the link is not managed by the
+ // blockbeat, which means the preimage may be sent to the contest
+ // resolver after it's launched.
+ //
+ // TODO(yy): Expose blockbeat to the link layer.
+ ht.MineEmptyBlocks(1)
+
+ // Bob should broadcast the sweeping of the direct preimage spent now.
+ bobHtlcSweep := ht.GetNumTxsFromMempool(1)[0]
+
+ // It should spend from the commitment in the channel with Alice.
+ ht.AssertTxSpendFrom(bobHtlcSweep, aliceForceClose)
+
+ // We'll now mine a block which should confirm Bob's HTLC sweep tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // Now that the sweeping tx has been confirmed, Bob should recognize
+ // that all contracts for the Bob-Carol channel have been fully
+ // resolved.
+ ht.AssertNumPendingForceClose(bob, 1)
+ ht.AssertChannelPendingForceClose(bob, bobChanPoint)
+
+ // Mine blocks till Carol's second level tx matures. 
+ resp := ht.AssertNumPendingForceClose(carol, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Carol's timelock to_local output=%v, timelock on second "+
+ "stage htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+ // Carol should offer the htlc output to her sweeper.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Mine a block to confirm Carol's sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // When Carol's sweep gets confirmed, she should have no more pending
+ // channels.
+ ht.AssertNumPendingForceClose(carol, 0)
+
+ // The invoice should show as settled for Carol, indicating that it was
+ // swept on-chain.
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_SETTLED)
+
+ // Check that the Alice's payment is correctly marked succeeded.
+ ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
+
+ // With the script-enforced lease commitment type, Alice and Bob still
+ // haven't been able to sweep their respective commit outputs due to
+ // the additional CLTV. We'll need to mine enough blocks for the
+ // timelock to expire and prompt their sweep.
+ //
+ // Get num of blocks to mine.
+ resp = ht.AssertNumPendingForceClose(alice, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Alice's timelock to_local output=%v, timelock on second "+
+ "stage htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.BlocksTilMaturity))
+
+ // Alice should have two sweeping requests,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the commit output sweep from the channel with Bob.
+ ht.AssertNumPendingSweeps(alice, 2)
+
+ // Bob should have three sweeping requests,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ // - the commit output sweep from the channel with Carol. 
+ ht.AssertNumPendingSweeps(bob, 3) + + // Confirm their sweeps. + ht.MineBlocksAndAssertNumTxes(1, 2) + + // Both nodes should consider the channel fully closed. + ht.AssertNumPendingForceClose(alice, 0) + ht.AssertNumPendingForceClose(bob, 0) +} + +// testHtlcAggregaitonAnchor tests `runHtlcAggregation` with anchor channel. +func testHtlcAggregaitonAnchor(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{Amt: chanAmt} + + cfg := node.CfgAnchor + cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: lnrpc.CommitmentType_ANCHORS, + } + + // Prepare Carol's node config to enable zero-conf and anchor. + cfg := node.CfgZeroConf + cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) +} + +// testHtlcAggregaitonSimpleTaproot tests `runHtlcAggregation` with simple +// taproot channel. +func testHtlcAggregaitonSimpleTaproot(ht *lntest.HarnessTest) { + c := lnrpc.CommitmentType_SIMPLE_TAPROOT + + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // simple taproot channels. + // + // Prepare params. 
+ params := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: c, + Private: true, + } + + cfg := node.CfgSimpleTaproot + cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf simple taproot channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: c, + Private: true, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgSimpleTaproot + cfg = append(cfg, node.CfgZeroConf...) + cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) +} + +// testHtlcAggregaitonLeased tests `runHtlcAggregation` with script enforced +// lease channel. +func testHtlcAggregaitonLeased(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // leased channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: leasedType, + } + + cfg := node.CfgLeased + cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: leasedType, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgLeased + cfg = append(cfg, node.CfgZeroConf...) 
+ cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) +} + +// runHtlcAggregation tests that in a multi-hop HTLC scenario, if we force +// close a channel with both incoming and outgoing HTLCs, we can properly +// resolve them using the second level timeout and success transactions. In +// case of anchor channels, the second-level spends can also be aggregated and +// properly feebumped, so we'll check that as well. +func runHtlcAggregation(ht *lntest.HarnessTest, + cfgs [][]string, params lntest.OpenChannelParams) { + + // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor + // is never swept. + // + // TODO(yy): delete this line once the normal anchor sweeping is + // removed. + ht.SetMinRelayFeerate(10_000) + + // Create a three hop network: Alice -> Bob -> Carol. + chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, bob, carol := nodes[0], nodes[1], nodes[2] + _, bobChanPoint := chanPoints[0], chanPoints[1] + + // We need one additional UTXO to create the sweeping tx for the + // second-level success txes. + ht.FundCoins(btcutil.SatoshiPerBitcoin, bob) + + // Bob should have enough wallet UTXOs here to sweep the HTLC in the + // end of this test. However, due to a known issue, Bob's wallet may + // report there's no UTXO available. For details, + // - https://github.com/lightningnetwork/lnd/issues/8786 + // + // TODO(yy): remove this step once the issue is resolved. + ht.FundCoins(btcutil.SatoshiPerBitcoin, bob) + + // If this is a taproot channel, then we'll need to make some manual + // route hints so Alice+Carol can actually find a route. 
+ var ( + carolRouteHints []*lnrpc.RouteHint + aliceRouteHints []*lnrpc.RouteHint + ) + + if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT { + carolRouteHints = makeRouteHints(bob, carol, params.ZeroConf) + aliceRouteHints = makeRouteHints(bob, alice, params.ZeroConf) + } + + // To ensure we have capacity in both directions of the route, we'll + // make a fairly large payment Alice->Carol and settle it. + const reBalanceAmt = 500_000 + invoice := &lnrpc.Invoice{ + Value: reBalanceAmt, + RouteHints: carolRouteHints, + } + invResp := carol.RPC.AddInvoice(invoice) + ht.CompletePaymentRequests(alice, []string{invResp.PaymentRequest}) + + // Make sure Carol has settled the invoice. + ht.AssertInvoiceSettled(carol, invResp.PaymentAddr) + + // With the network active, we'll now add a new hodl invoices at both + // Alice's and Carol's end. Make sure the cltv expiry delta is large + // enough, otherwise Bob won't send out the outgoing htlc. + const numInvoices = 5 + const invoiceAmt = 50_000 + + var ( + carolInvoices []*invoicesrpc.AddHoldInvoiceResp + aliceInvoices []*invoicesrpc.AddHoldInvoiceResp + alicePreimages []lntypes.Preimage + payHashes [][]byte + invoiceStreamsCarol []rpc.SingleInvoiceClient + invoiceStreamsAlice []rpc.SingleInvoiceClient + ) + + // Add Carol invoices. + for i := 0; i < numInvoices; i++ { + preimage := ht.RandomPreimage() + payHash := preimage.Hash() + + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Value: invoiceAmt, + CltvExpiry: finalCltvDelta, + Hash: payHash[:], + RouteHints: carolRouteHints, + } + carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) + + carolInvoices = append(carolInvoices, carolInvoice) + payHashes = append(payHashes, payHash[:]) + + // Subscribe the invoice. + stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) + invoiceStreamsCarol = append(invoiceStreamsCarol, stream) + } + + // We'll give Alice's invoices a longer CLTV expiry, to ensure the + // channel Bob<->Carol will be closed first. 
+ for i := 0; i < numInvoices; i++ { + preimage := ht.RandomPreimage() + payHash := preimage.Hash() + + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Value: invoiceAmt, + CltvExpiry: thawHeightDelta - 4, + Hash: payHash[:], + RouteHints: aliceRouteHints, + } + aliceInvoice := alice.RPC.AddHoldInvoice(invoiceReq) + + aliceInvoices = append(aliceInvoices, aliceInvoice) + alicePreimages = append(alicePreimages, preimage) + payHashes = append(payHashes, payHash[:]) + + // Subscribe the invoice. + stream := alice.RPC.SubscribeSingleInvoice(payHash[:]) + invoiceStreamsAlice = append(invoiceStreamsAlice, stream) + } + + // Now that we've created the invoices, we'll pay them all from + // Alice<->Carol, going through Bob. We won't wait for the response + // however, as neither will immediately settle the payment. + // + // Alice will pay all of Carol's invoices. + for _, carolInvoice := range carolInvoices { + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: carolInvoice.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + ht.SendPaymentAssertInflight(alice, req) + } + + // And Carol will pay Alice's. + for _, aliceInvoice := range aliceInvoices { + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: aliceInvoice.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + ht.SendPaymentAssertInflight(carol, req) + } + + // At this point, all 3 nodes should now have an active channel with + // the created HTLCs pending on all of them. + // + // Alice sent numInvoices and received numInvoices payments, she should + // have numInvoices*2 HTLCs. + ht.AssertNumActiveHtlcs(alice, numInvoices*2) + + // Bob should have 2*numInvoices HTLCs on channel Alice -> Bob, and + // numInvoices*2 HTLCs on channel Bob -> Carol. + ht.AssertNumActiveHtlcs(bob, numInvoices*4) + + // Carol sent numInvoices and received numInvoices payments, she should + // have numInvoices*2 HTLCs. 
+ ht.AssertNumActiveHtlcs(carol, numInvoices*2) + + // Wait for Alice and Carol to mark the invoices as accepted. There is + // a small gap to bridge between adding the htlc to the channel and + // executing the exit hop logic. + for _, stream := range invoiceStreamsCarol { + ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) + } + + for _, stream := range invoiceStreamsAlice { + ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) + } + + // We want Carol's htlcs to expire off-chain to demonstrate bob's force + // close. However, Carol will cancel her invoices to prevent force + // closes, so we shut her down for now. + restartCarol := ht.SuspendNode(carol) + + // We'll now mine enough blocks to trigger Bob's broadcast of his + // commitment transaction due to the fact that Carol's HTLCs are + // about to time out. With the default outgoing broadcast delta of zero, + // this will be the same height as the htlc expiry height. + numBlocks := padCLTV( + uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta), + ) + ht.MineBlocks(int(numBlocks)) + + // Bob should have one anchor sweep request. + // + // For neutrino backend, there's no way to know the sweeping of the + // remote anchor has failed, so Bob still sees two pending sweeps. + if ht.IsNeutrinoBackend() { + ht.AssertNumPendingSweeps(bob, 2) + } else { + ht.AssertNumPendingSweeps(bob, 1) + } + + // Bob's force close tx and anchor sweeping tx should now be found in + // the mempool. + ht.AssertNumTxsInMempool(2) + + // Mine a block to confirm Bob's force close tx and anchor sweeping tx. + ht.MineBlocksAndAssertNumTxes(1, 2) + + // Bob should have `numInvoices` for HTLC timeout txns. + ht.AssertNumPendingSweeps(bob, numInvoices) + + // Once bob has force closed, we can restart carol. + require.NoError(ht, restartCarol()) + + // Carol should have commit and anchor outputs. + ht.AssertNumPendingSweeps(carol, 2) + + // Let Alice settle her invoices. 
When Bob now gets the preimages, he + // will broadcast his second-level txns to claim the htlcs. + for _, preimage := range alicePreimages { + alice.RPC.SettleInvoice(preimage[:]) + } + + // Bob should have `numInvoices` for both HTLC success and timeout + // txns. + ht.AssertNumPendingSweeps(bob, numInvoices*2) + + // Mine a block to trigger the sweep. This is needed because the + // preimage extraction logic from the link is not managed by the + // blockbeat, which means the preimage may be sent to the contest + // resolver after it's launched. + // + // TODO(yy): Expose blockbeat to the link layer. + ht.MineEmptyBlocks(1) + + // We expect to see three sweeping txns: + // 1. Bob's sweeping tx for all timeout HTLCs. + // 2. Bob's sweeping tx for all success HTLCs. + // 3. Carol's sweeping tx for her commit output. + // Mine a block to confirm them. + ht.MineBlocksAndAssertNumTxes(1, 3) + + // For this channel, we also check the number of HTLCs and the stage + // are correct. + ht.AssertNumHTLCsAndStage(bob, bobChanPoint, numInvoices*2, 2) + + // For non-leased channels, we can now mine one block so Bob will sweep + // his to_local output. + if params.CommitmentType != leasedType { + // Mine one block so Bob's to_local becomes mature. + ht.MineBlocks(1) + + // Bob should offer the to_local output to his sweeper now. + ht.AssertNumPendingSweeps(bob, 1) + + // Mine a block to confirm Bob's sweeping of his to_local + // output. + ht.MineBlocksAndAssertNumTxes(1, 1) + } + + // Mine blocks till the CSV expires on Bob's HTLC output. 
+ resp := ht.AssertNumPendingForceClose(bob, 1)[0] + require.Equal(ht, numInvoices*2, len(resp.PendingHtlcs)) + + ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+ + "htlc=%v", resp.BlocksTilMaturity, + resp.PendingHtlcs[0].BlocksTilMaturity) + + ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity)) + + // With the above mined block, Bob's HTLCs should now all be offered to + // his sweeper since the CSV lock is now expired. + // + // For leased channel, due to the test setup, Bob's to_local output is + // now also mature and can be swept together with his HTLCs. + if params.CommitmentType == leasedType { + ht.AssertNumPendingSweeps(bob, numInvoices*2+1) + } else { + ht.AssertNumPendingSweeps(bob, numInvoices*2) + } + + // When we mine one additional block, that will confirm Bob's second + // level sweep. Now Bob should have no pending channels anymore, as + // this just resolved it by the confirmation of the sweep tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + ht.AssertNumPendingForceClose(bob, 0) + + // Carol should have no channels left. 
+ ht.AssertNumPendingForceClose(carol, 0) +} diff --git a/itest/lnd_multi-hop_test.go b/itest/lnd_multi-hop_test.go deleted file mode 100644 index f3a906c141..0000000000 --- a/itest/lnd_multi-hop_test.go +++ /dev/null @@ -1,2697 +0,0 @@ -package itest - -import ( - "context" - "fmt" - "testing" - - "github.com/btcsuite/btcd/btcutil" - "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/btcsuite/btcd/wire" - "github.com/lightningnetwork/lnd/chainreg" - "github.com/lightningnetwork/lnd/lncfg" - "github.com/lightningnetwork/lnd/lnrpc" - "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" - "github.com/lightningnetwork/lnd/lnrpc/routerrpc" - "github.com/lightningnetwork/lnd/lntest" - "github.com/lightningnetwork/lnd/lntest/node" - "github.com/lightningnetwork/lnd/lntest/rpc" - "github.com/lightningnetwork/lnd/lntest/wait" - "github.com/lightningnetwork/lnd/lntypes" - "github.com/lightningnetwork/lnd/lnwallet/chainfee" - "github.com/lightningnetwork/lnd/routing" - "github.com/stretchr/testify/require" -) - -const ( - finalCltvDelta = routing.MinCLTVDelta // 18. - thawHeightDelta = finalCltvDelta * 2 // 36. -) - -var commitWithZeroConf = []struct { - commitType lnrpc.CommitmentType - zeroConf bool -}{ - { - commitType: lnrpc.CommitmentType_ANCHORS, - zeroConf: false, - }, - { - commitType: lnrpc.CommitmentType_ANCHORS, - zeroConf: true, - }, - { - commitType: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE, - zeroConf: false, - }, - { - commitType: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE, - zeroConf: true, - }, - { - commitType: lnrpc.CommitmentType_SIMPLE_TAPROOT, - zeroConf: false, - }, - { - commitType: lnrpc.CommitmentType_SIMPLE_TAPROOT, - zeroConf: true, - }, -} - -// makeRouteHints creates a route hints that will allow Carol to be reached -// using an unadvertised channel created by Bob (Bob -> Carol). If the zeroConf -// bool is set, then the scid alias of Bob will be used in place. 
-func makeRouteHints(bob, carol *node.HarnessNode, - zeroConf bool) []*lnrpc.RouteHint { - - carolChans := carol.RPC.ListChannels( - &lnrpc.ListChannelsRequest{}, - ) - - carolChan := carolChans.Channels[0] - - hopHint := &lnrpc.HopHint{ - NodeId: carolChan.RemotePubkey, - ChanId: carolChan.ChanId, - FeeBaseMsat: uint32( - chainreg.DefaultBitcoinBaseFeeMSat, - ), - FeeProportionalMillionths: uint32( - chainreg.DefaultBitcoinFeeRate, - ), - CltvExpiryDelta: chainreg.DefaultBitcoinTimeLockDelta, - } - - if zeroConf { - bobChans := bob.RPC.ListChannels( - &lnrpc.ListChannelsRequest{}, - ) - - // Now that we have Bob's channels, scan for the channel he has - // open to Carol so we can use the proper scid. - var found bool - for _, bobChan := range bobChans.Channels { - if bobChan.RemotePubkey == carol.PubKeyStr { - hopHint.ChanId = bobChan.AliasScids[0] - - found = true - - break - } - } - if !found { - bob.Fatalf("unable to create route hint") - } - } - - return []*lnrpc.RouteHint{ - { - HopHints: []*lnrpc.HopHint{hopHint}, - }, - } -} - -// caseRunner defines a single test case runner. -type caseRunner func(ht *lntest.HarnessTest, alice, bob *node.HarnessNode, - c lnrpc.CommitmentType, zeroConf bool) - -// runMultiHopHtlcClaimTest is a helper method to build test cases based on -// different commitment types and zero-conf config and run them. -// -// TODO(yy): flatten this test. -func runMultiHopHtlcClaimTest(ht *lntest.HarnessTest, tester caseRunner) { - for _, typeAndConf := range commitWithZeroConf { - typeAndConf := typeAndConf - name := fmt.Sprintf("zeroconf=%v/committype=%v", - typeAndConf.zeroConf, typeAndConf.commitType.String()) - - // Create the nodes here so that separate logs will be created - // for Alice and Bob. 
- args := lntest.NodeArgsForCommitType(typeAndConf.commitType) - if typeAndConf.zeroConf { - args = append( - args, "--protocol.option-scid-alias", - "--protocol.zero-conf", - ) - } - - s := ht.Run(name, func(t1 *testing.T) { - st := ht.Subtest(t1) - - alice := st.NewNode("Alice", args) - bob := st.NewNode("Bob", args) - st.ConnectNodes(alice, bob) - - // Start each test with the default static fee estimate. - st.SetFeeEstimate(12500) - - // Add test name to the logs. - alice.AddToLogf("Running test case: %s", name) - bob.AddToLogf("Running test case: %s", name) - - tester( - st, alice, bob, - typeAndConf.commitType, typeAndConf.zeroConf, - ) - }) - if !s { - return - } - } -} - -// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the -// outgoing HTLC is about to time out, then we'll go to chain in order to claim -// it using the HTLC timeout transaction. Any dust HTLC's should be immediately -// canceled backwards. Once the timeout has been reached, then we should sweep -// it on-chain, and cancel the HTLC backwards. -func testMultiHopHtlcLocalTimeout(ht *lntest.HarnessTest) { - runMultiHopHtlcClaimTest(ht, runMultiHopHtlcLocalTimeout) -} - -func runMultiHopHtlcLocalTimeout(ht *lntest.HarnessTest, - alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) { - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - ht, alice, bob, true, c, zeroConf, - ) - - // For neutrino backend, we need to fund one more UTXO for Bob so he - // can sweep his outputs. - if ht.IsNeutrinoBackend() { - ht.FundCoins(btcutil.SatoshiPerBitcoin, bob) - } - - // Now that our channels are set up, we'll send two HTLC's from Alice - // to Carol. The first HTLC will be universally considered "dust", - // while the second will be a proper fully valued HTLC. 
- const ( - dustHtlcAmt = btcutil.Amount(100) - htlcAmt = btcutil.Amount(300_000) - ) - - // We'll create two random payment hashes unknown to carol, then send - // each of them by manually specifying the HTLC details. - carolPubKey := carol.PubKey[:] - dustPayHash := ht.Random32Bytes() - payHash := ht.Random32Bytes() - - // If this is a taproot channel, then we'll need to make some manual - // route hints so Alice can actually find a route. - var routeHints []*lnrpc.RouteHint - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - routeHints = makeRouteHints(bob, carol, zeroConf) - } - - alice.RPC.SendPayment(&routerrpc.SendPaymentRequest{ - Dest: carolPubKey, - Amt: int64(dustHtlcAmt), - PaymentHash: dustPayHash, - FinalCltvDelta: finalCltvDelta, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - RouteHints: routeHints, - }) - - alice.RPC.SendPayment(&routerrpc.SendPaymentRequest{ - Dest: carolPubKey, - Amt: int64(htlcAmt), - PaymentHash: payHash, - FinalCltvDelta: finalCltvDelta, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - RouteHints: routeHints, - }) - - // Verify that all nodes in the path now have two HTLC's with the - // proper parameters. - ht.AssertActiveHtlcs(alice, dustPayHash, payHash) - ht.AssertActiveHtlcs(bob, dustPayHash, payHash) - ht.AssertActiveHtlcs(carol, dustPayHash, payHash) - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed. - ht.SetFeeEstimate(30000) - - // We'll now mine enough blocks to trigger Bob's broadcast of his - // commitment transaction due to the fact that the HTLC is about to - // timeout. With the default outgoing broadcast delta of zero, this will - // be the same height as the htlc expiry height. - numBlocks := padCLTV( - uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta), - ) - ht.MineBlocks(int(numBlocks)) - - // Bob's force close transaction should now be found in the mempool. 
- ht.AssertNumTxsInMempool(1) - op := ht.OutPointFromChannelPoint(bobChanPoint) - closeTx := ht.AssertOutpointInMempool(op) - - // Dust HTLCs are immediately canceled backwards as soon as the local - // commitment tx is successfully broadcasted to the local mempool. - ht.AssertActiveHtlcs(alice, payHash) - - // Bob's anchor output should be offered to his sweep since Bob has - // time-sensitive HTLCs - we expect both anchors are offered. - ht.AssertNumPendingSweeps(bob, 2) - - // Mine a block to confirm the closing transaction. - ht.MineBlocksAndAssertNumTxes(1, 1) - - // With the closing transaction confirmed, we should expect Bob's HTLC - // timeout transaction to be offered to the sweeper due to the expiry - // being reached. we also expect Bon and Carol's anchor sweeps. - ht.AssertNumPendingSweeps(bob, 2) - ht.AssertNumPendingSweeps(carol, 1) - - // Mine a block to trigger Bob's sweeper to sweep. - ht.MineEmptyBlocks(1) - - // The above mined block would trigger Bob and Carol's sweepers to take - // action. We now expect two txns: - // 1. Bob's sweeping tx anchor sweep should now be found in the mempool. - // 2. Bob's HTLC timeout tx sweep should now be found in the mempool. - // Carol's anchor sweep should be failed due to output being dust. - ht.AssertNumTxsInMempool(2) - - htlcOutpoint := wire.OutPoint{Hash: closeTx.TxHash(), Index: 2} - commitOutpoint := wire.OutPoint{Hash: closeTx.TxHash(), Index: 3} - htlcTimeoutTxid := ht.AssertOutpointInMempool( - htlcOutpoint, - ).TxHash() - - // Mine a block to confirm the above two sweeping txns. - ht.MineBlocksAndAssertNumTxes(1, 2) - - // With Bob's HTLC timeout transaction confirmed, there should be no - // active HTLC's on the commitment transaction from Alice -> Bob. - ht.AssertNumActiveHtlcs(alice, 0) - - // At this point, Bob should show that the pending HTLC has advanced to - // the second stage and is ready to be swept once the timelock is up. 
- pendingChanResp := bob.RPC.PendingChannels() - require.Equal(ht, 1, len(pendingChanResp.PendingForceClosingChannels)) - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - require.NotZero(ht, forceCloseChan.LimboBalance) - require.Positive(ht, forceCloseChan.BlocksTilMaturity) - require.Equal(ht, 1, len(forceCloseChan.PendingHtlcs)) - require.Equal(ht, uint32(2), forceCloseChan.PendingHtlcs[0].Stage) - - ht.Logf("Bob's timelock on commit=%v, timelock on htlc=%v", - forceCloseChan.BlocksTilMaturity, - forceCloseChan.PendingHtlcs[0].BlocksTilMaturity) - - htlcTimeoutOutpoint := wire.OutPoint{Hash: htlcTimeoutTxid, Index: 0} - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - // Since Bob is the initiator of the script-enforced leased - // channel between him and Carol, he will incur an additional - // CLTV on top of the usual CSV delay on any outputs that he - // can sweep back to his wallet. - blocksTilMaturity := int(forceCloseChan.BlocksTilMaturity) - - // We now mine enough blocks to trigger the sweep of the HTLC - // timeout tx. - ht.MineEmptyBlocks(blocksTilMaturity - 1) - - // Check that Bob has one pending sweeping tx - the HTLC - // timeout tx. - ht.AssertNumPendingSweeps(bob, 1) - - // Mine one more blocks, then his commit output will mature. - // This will also trigger the sweeper to sweep his HTLC timeout - // tx. - ht.MineEmptyBlocks(1) - - // Check that Bob has two pending sweeping txns. - ht.AssertNumPendingSweeps(bob, 2) - - // Assert that the HTLC timeout tx is now in the mempool. - ht.AssertOutpointInMempool(htlcTimeoutOutpoint) - - // We now wait for 30 seconds to overcome the flake - there's a - // block race between contractcourt and sweeper, causing the - // sweep to be broadcast earlier. - // - // TODO(yy): remove this once `blockbeat` is in place. 
- numExpected := 1 - err := wait.NoError(func() error { - mem := ht.GetRawMempool() - if len(mem) == 2 { - numExpected = 2 - return nil - } - - return fmt.Errorf("want %d, got %v in mempool: %v", - numExpected, len(mem), mem) - }, wait.DefaultTimeout) - ht.Logf("Checking mempool got: %v", err) - - // Mine a block to trigger the sweep of his commit output and - // confirm his HTLC timeout sweep. - ht.MineBlocksAndAssertNumTxes(1, numExpected) - - // For leased channels, we need to mine one more block to - // confirm Bob's commit output sweep. - // - // NOTE: we mine this block conditionally, as the commit output - // may have already been swept one block earlier due to the - // race in block consumption among subsystems. - pendingChanResp := bob.RPC.PendingChannels() - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - // Check that the sweep spends the expected inputs. - ht.AssertOutpointInMempool(commitOutpoint) - ht.MineBlocksAndAssertNumTxes(1, 1) - } - } else { - // Since Bob force closed the channel between him and Carol, he - // will incur the usual CSV delay on any outputs that he can - // sweep back to his wallet. We'll subtract one block from our - // current maturity period to assert on the mempool. - numBlocks := int(forceCloseChan.BlocksTilMaturity - 1) - ht.MineEmptyBlocks(numBlocks) - - // Check that Bob has a pending sweeping tx. - ht.AssertNumPendingSweeps(bob, 1) - - // Mine a block the trigger the sweeping behavior. - ht.MineEmptyBlocks(1) - - // Check that the sweep spends from the mined commitment. - ht.AssertOutpointInMempool(commitOutpoint) - - // Mine one more block to trigger the timeout path. - ht.MineBlocksAndAssertNumTxes(1, 1) - - // Bob's sweeper should now broadcast his second layer sweep - // due to the CSV on the HTLC timeout output. - ht.AssertOutpointInMempool(htlcTimeoutOutpoint) - - // Next, we'll mine a final block that should confirm the - // sweeping transactions left. 
- ht.MineBlocksAndAssertNumTxes(1, 1) - } - - // Once this transaction has been confirmed, Bob should detect that he - // no longer has any pending channels. - ht.AssertNumPendingForceClose(bob, 0) - - // Coop close channel, expect no anchors. - ht.CloseChannel(alice, aliceChanPoint) -} - -// testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the -// receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC -// off-chain, then it goes on chain to claim the HTLC uing the HTLC success -// transaction. In this scenario, the node that sent the outgoing HTLC should -// extract the preimage from the sweep transaction, and finish settling the -// HTLC backwards into the route. -func testMultiHopReceiverChainClaim(ht *lntest.HarnessTest) { - runMultiHopHtlcClaimTest(ht, runMultiHopReceiverChainClaim) -} - -func runMultiHopReceiverChainClaim(ht *lntest.HarnessTest, - alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) { - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - ht, alice, bob, false, c, zeroConf, - ) - - // For neutrino backend, we need to fund one more UTXO for Carol so she - // can sweep her outputs. - if ht.IsNeutrinoBackend() { - ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) - } - - // If this is a taproot channel, then we'll need to make some manual - // route hints so Alice can actually find a route. - var routeHints []*lnrpc.RouteHint - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - routeHints = makeRouteHints(bob, carol, zeroConf) - } - - // With the network active, we'll now add a new hodl invoice at Carol's - // end. Make sure the cltv expiry delta is large enough, otherwise Bob - // won't send out the outgoing htlc. 
- const invoiceAmt = 100000 - var preimage lntypes.Preimage - copy(preimage[:], ht.Random32Bytes()) - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: invoiceAmt, - CltvExpiry: finalCltvDelta, - Hash: payHash[:], - RouteHints: routeHints, - } - carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) - - // Subscribe the invoice. - stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) - - // Now that we've created the invoice, we'll send a single payment from - // Alice to Carol. We won't wait for the response however, as Carol - // will not immediately settle the payment. - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - alice.RPC.SendPayment(req) - - // At this point, all 3 nodes should now have an active channel with - // the created HTLC pending on all of them. - ht.AssertActiveHtlcs(alice, payHash[:]) - ht.AssertActiveHtlcs(bob, payHash[:]) - ht.AssertActiveHtlcs(carol, payHash[:]) - - // Wait for carol to mark invoice as accepted. There is a small gap to - // bridge between adding the htlc to the channel and executing the exit - // hop logic. - ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) - - restartBob := ht.SuspendNode(bob) - - // Settle invoice. This will just mark the invoice as settled, as there - // is no link anymore to remove the htlc from the commitment tx. For - // this test, it is important to actually settle and not leave the - // invoice in the accepted state, because without a known preimage, the - // channel arbitrator won't go to chain. - carol.RPC.SettleInvoice(preimage[:]) - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed. - ht.SetFeeEstimate(30000) - - // We now advance the block height to the point where Carol will force - // close her channel with Bob, broadcast the closing tx but keep it - // unconfirmed. 
- numBlocks := padCLTV(uint32( - invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta, - )) - - // Now we'll mine enough blocks to prompt carol to actually go to the - // chain in order to sweep her HTLC since the value is high enough. - ht.MineEmptyBlocks(int(numBlocks)) - - // At this point, Carol should broadcast her active commitment - // transaction in order to go to the chain and sweep her HTLC. - ht.AssertNumTxsInMempool(1) - - closingTx := ht.AssertOutpointInMempool( - ht.OutPointFromChannelPoint(bobChanPoint), - ) - closingTxid := closingTx.TxHash() - - // Carol's anchor should have been offered to her sweeper as she has - // time-sensitive HTLCs. Assert that we have two anchors - one for the - // anchor on the local commitment and the other for the anchor on the - // remote commitment (invalid). - ht.AssertNumPendingSweeps(carol, 2) - - // Confirm the commitment. - ht.MineBlocksAndAssertNumTxes(1, 1) - - // The above mined block will trigger Carol's sweeper to publish the - // anchor sweeping tx. - // - // TODO(yy): should instead cancel the broadcast of the anchor sweeping - // tx to save fees since we know the force close tx has been confirmed? - // This is very difficult as it introduces more complicated RBF - // scenarios, as we are using a wallet utxo, which means any txns using - // that wallet utxo must pay more fees. On the other hand, there's no - // way to remove that anchor-CPFP tx from the mempool. - ht.AssertNumTxsInMempool(1) - - // After the force close transaction is mined, Carol should offer her - // second level HTLC tx to the sweeper, which means we should see two - // pending inputs now - the anchor and the htlc. - ht.AssertNumPendingSweeps(carol, 2) - - // Restart bob again. - require.NoError(ht, restartBob()) - - var expectedTxes int - - // After the force close transaction is mined, a series of transactions - // should be broadcast by Bob and Carol. 
When Bob notices Carol's - // second level transaction in the mempool, he will extract the - // preimage and settle the HTLC back off-chain. - switch c { - // We expect to see three txns in the mempool: - // 1. Carol should broadcast her second level HTLC tx. - // 2. Carol should broadcast her anchor sweeping tx. - // 3. Bob should broadcast a sweep tx to sweep his output in the - // channel with Carol, and in the same sweep tx to sweep his anchor - // output. - case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT: - expectedTxes = 3 - ht.AssertNumPendingSweeps(bob, 2) - - // We expect to see two txns in the mempool: - // 1. Carol should broadcast her second level HTLC tx. - // 2. Carol should broadcast her anchor sweeping tx. - // Bob would offer his anchor output to his sweeper, but it cannot be - // swept due to it being uneconomical. Bob's commit output can't be - // swept yet as he's incurring an additional CLTV from being the - // channel initiator of a script-enforced leased channel. - case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE: - expectedTxes = 2 - ht.AssertNumPendingSweeps(bob, 1) - - default: - ht.Fatalf("unhandled commitment type %v", c) - } - - // Mine one block to trigger the sweeper to sweep. - ht.MineEmptyBlocks(1) - - // All transactions should be spending from the commitment transaction. - txes := ht.GetNumTxsFromMempool(expectedTxes) - ht.AssertAllTxesSpendFrom(txes, closingTxid) - - // We'll now mine an additional block which should confirm both the - // second layer transactions. - ht.MineBlocksAndAssertNumTxes(1, expectedTxes) - - // Carol's pending channel report should now show two outputs under - // limbo: her commitment output, as well as the second-layer claim - // output, and the pending HTLC should also now be in stage 2. 
- ht.AssertNumHTLCsAndStage(carol, bobChanPoint, 1, 2) - - // Once the second-level transaction confirmed, Bob should have - // extracted the preimage from the chain, and sent it back to Alice, - // clearing the HTLC off-chain. - ht.AssertNumActiveHtlcs(alice, 0) - - // If we mine 4 additional blocks, then Carol can sweep the second - // level HTLC output once the CSV expires. - ht.MineEmptyBlocks(defaultCSV - 1) - - // Assert Carol has the pending HTLC sweep. - ht.AssertNumPendingSweeps(carol, 1) - - // Mine one block to trigger the sweeper to sweep. - ht.MineEmptyBlocks(1) - - // We should have a new transaction in the mempool. - ht.AssertNumTxsInMempool(1) - - // Finally, if we mine an additional block to confirm Carol's second - // level success transaction. Carol should not show a pending channel - // in her report afterwards. - ht.MineBlocksAndAssertNumTxes(1, 1) - ht.AssertNumPendingForceClose(carol, 0) - - // The invoice should show as settled for Carol, indicating that it was - // swept on-chain. - ht.AssertInvoiceSettled(carol, carolInvoice.PaymentAddr) - - // Finally, check that the Alice's payment is correctly marked - // succeeded. - ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED) - - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - // Bob still has his commit output to sweep to since he - // incurred an additional CLTV from being the channel initiator - // of a script-enforced leased channel, regardless of whether - // he forced closed the channel or not. - pendingChanResp := bob.RPC.PendingChannels() - - require.Len(ht, pendingChanResp.PendingForceClosingChannels, 1) - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - require.Positive(ht, forceCloseChan.LimboBalance) - require.Positive(ht, forceCloseChan.BlocksTilMaturity) - - // TODO: Bob still shows a pending HTLC at this point when he - // shouldn't, as he already extracted the preimage from Carol's - // claim. 
- // require.Len(t.t, forceCloseChan.PendingHtlcs, 0) - - // Mine enough blocks for Bob's commit output's CLTV to expire - // and sweep it. - numBlocks := int(forceCloseChan.BlocksTilMaturity) - ht.MineEmptyBlocks(numBlocks) - - // Bob should have two pending inputs to be swept, the commit - // output and the anchor output. - ht.AssertNumPendingSweeps(bob, 2) - ht.MineEmptyBlocks(1) - - commitOutpoint := wire.OutPoint{Hash: closingTxid, Index: 3} - ht.AssertOutpointInMempool(commitOutpoint) - ht.MineBlocksAndAssertNumTxes(1, 1) - } - - ht.AssertNumPendingForceClose(bob, 0) - - // We'll close out the channel between Alice and Bob, then shutdown - // carol to conclude the test. - ht.CloseChannel(alice, aliceChanPoint) -} - -// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC -// scenario, if the node that extended the HTLC to the final node closes their -// commitment on-chain early, then it eventually recognizes this HTLC as one -// that's timed out. At this point, the node should timeout the HTLC using the -// HTLC timeout transaction, then cancel it backwards as normal. -func testMultiHopLocalForceCloseOnChainHtlcTimeout(ht *lntest.HarnessTest) { - runMultiHopHtlcClaimTest( - ht, runMultiHopLocalForceCloseOnChainHtlcTimeout, - ) -} - -func runMultiHopLocalForceCloseOnChainHtlcTimeout(ht *lntest.HarnessTest, - alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) { - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - ht, alice, bob, true, c, zeroConf, - ) - - // With our channels set up, we'll then send a single HTLC from Alice - // to Carol. As Carol is in hodl mode, she won't settle this HTLC which - // opens up the base for out tests. 
- const htlcAmt = btcutil.Amount(300_000) - - // If this is a taproot channel, then we'll need to make some manual - // route hints so Alice can actually find a route. - var routeHints []*lnrpc.RouteHint - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - routeHints = makeRouteHints(bob, carol, zeroConf) - } - - // We'll now send a single HTLC across our multi-hop network. - carolPubKey := carol.PubKey[:] - payHash := ht.Random32Bytes() - req := &routerrpc.SendPaymentRequest{ - Dest: carolPubKey, - Amt: int64(htlcAmt), - PaymentHash: payHash, - FinalCltvDelta: finalCltvDelta, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - RouteHints: routeHints, - } - alice.RPC.SendPayment(req) - - // Once the HTLC has cleared, all channels in our mini network should - // have the it locked in. - ht.AssertActiveHtlcs(alice, payHash) - ht.AssertActiveHtlcs(bob, payHash) - ht.AssertActiveHtlcs(carol, payHash) - - // blocksMined records how many blocks have mined after the creation of - // the invoice so it can be used to calculate how many more blocks need - // to be mined to trigger a force close later on. - var blocksMined uint32 - - // Now that all parties have the HTLC locked in, we'll immediately - // force close the Bob -> Carol channel. This should trigger contract - // resolution mode for both of them. - stream, _ := ht.CloseChannelAssertPending(bob, bobChanPoint, true) - closeTx := ht.AssertStreamChannelForceClosed( - bob, bobChanPoint, true, stream, - ) - - // Increase the blocks mined. At the step - // AssertStreamChannelForceClosed mines one block. - blocksMined++ - - // The channel close has anchors, we should expect to see both Bob and - // Carol has a pending sweep request for the anchor sweep. - ht.AssertNumPendingSweeps(carol, 1) - ht.AssertNumPendingSweeps(bob, 1) - - // Mine a block to confirm Bob's anchor sweep - Carol's anchor sweep - // won't succeed because it's not used for CPFP, so there's no wallet - // utxo used, resulting it to be uneconomical. 
- ht.MineBlocksAndAssertNumTxes(1, 1) - blocksMined++ - - htlcOutpoint := wire.OutPoint{Hash: closeTx, Index: 2} - bobCommitOutpoint := wire.OutPoint{Hash: closeTx, Index: 3} - - // Before the HTLC times out, we'll need to assert that Bob broadcasts - // a sweep transaction for his commit output. Note that if the channel - // has a script-enforced lease, then Bob will have to wait for an - // additional CLTV before sweeping it. - if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - // The sweep is broadcast on the block immediately before the - // CSV expires and the commitment was already mined inside - // AssertStreamChannelForceClosed(), so mine one block less - // than defaultCSV in order to perform mempool assertions. - ht.MineEmptyBlocks(int(defaultCSV - blocksMined)) - blocksMined = defaultCSV - - // Assert Bob has the sweep and trigger it. - ht.AssertNumPendingSweeps(bob, 1) - ht.MineEmptyBlocks(1) - blocksMined++ - - commitSweepTx := ht.AssertOutpointInMempool( - bobCommitOutpoint, - ) - txid := commitSweepTx.TxHash() - block := ht.MineBlocksAndAssertNumTxes(1, 1)[0] - ht.AssertTxInBlock(block, txid) - - blocksMined++ - } - - // We'll now mine enough blocks for the HTLC to expire. After this, Bob - // should hand off the now expired HTLC output to the utxo nursery. - numBlocks := padCLTV(uint32(finalCltvDelta) - - lncfg.DefaultOutgoingBroadcastDelta) - ht.MineEmptyBlocks(int(numBlocks - blocksMined)) - - // Bob's pending channel report should show that he has a single HTLC - // that's now in stage one. - ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 1) - - // Bob should have a pending sweep request. - ht.AssertNumPendingSweeps(bob, 1) - - // Mine one block to trigger Bob's sweeper to sweep it. - ht.MineEmptyBlocks(1) - - // We should also now find a transaction in the mempool, as Bob should - // have broadcast his second layer timeout transaction. 
- timeoutTx := ht.AssertOutpointInMempool(htlcOutpoint).TxHash() - - // Next, we'll mine an additional block. This should serve to confirm - // the second layer timeout transaction. - block := ht.MineBlocksAndAssertNumTxes(1, 1)[0] - ht.AssertTxInBlock(block, timeoutTx) - - // With the second layer timeout transaction confirmed, Bob should have - // canceled backwards the HTLC that carol sent. - ht.AssertNumActiveHtlcs(bob, 0) - - // Additionally, Bob should now show that HTLC as being advanced to the - // second stage. - ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2) - - // Bob should now broadcast a transaction that sweeps certain inputs - // depending on the commitment type. We'll need to mine some blocks - // before the broadcast is possible. - resp := bob.RPC.PendingChannels() - - require.Len(ht, resp.PendingForceClosingChannels, 1) - forceCloseChan := resp.PendingForceClosingChannels[0] - require.Len(ht, forceCloseChan.PendingHtlcs, 1) - pendingHtlc := forceCloseChan.PendingHtlcs[0] - require.Positive(ht, pendingHtlc.BlocksTilMaturity) - numBlocks = uint32(pendingHtlc.BlocksTilMaturity) - - ht.MineEmptyBlocks(int(numBlocks)) - - var numExpected int - - // Now that the CSV/CLTV timelock has expired, the transaction should - // either only sweep the HTLC timeout transaction, or sweep both the - // HTLC timeout transaction and Bob's commit output depending on the - // commitment type. - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - // Assert the expected number of pending sweeps are found. - sweeps := ht.AssertNumPendingSweeps(bob, 2) - - numExpected = 1 - if sweeps[0].DeadlineHeight != sweeps[1].DeadlineHeight { - numExpected = 2 - } - } else { - ht.AssertNumPendingSweeps(bob, 1) - numExpected = 1 - } - - // Mine a block to trigger the sweep. - ht.MineEmptyBlocks(1) - - // Assert the sweeping tx is found in the mempool. 
- htlcTimeoutOutpoint := wire.OutPoint{Hash: timeoutTx, Index: 0} - ht.AssertOutpointInMempool(htlcTimeoutOutpoint) - - // Mine a block to confirm the sweep. - ht.MineBlocksAndAssertNumTxes(1, numExpected) - - // At this point, Bob should no longer show any channels as pending - // close. - ht.AssertNumPendingForceClose(bob, 0) - - // Coop close, no anchors. - ht.CloseChannel(alice, aliceChanPoint) -} - -// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a -// multi-hop HTLC, and the final destination of the HTLC force closes the -// channel, then we properly timeout the HTLC directly on *their* commitment -// transaction once the timeout has expired. Once we sweep the transaction, we -// should also cancel back the initial HTLC. -func testMultiHopRemoteForceCloseOnChainHtlcTimeout(ht *lntest.HarnessTest) { - runMultiHopHtlcClaimTest( - ht, runMultiHopRemoteForceCloseOnChainHtlcTimeout, - ) -} - -func runMultiHopRemoteForceCloseOnChainHtlcTimeout(ht *lntest.HarnessTest, - alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) { - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - ht, alice, bob, true, c, zeroConf, - ) - - // With our channels set up, we'll then send a single HTLC from Alice - // to Carol. As Carol is in hodl mode, she won't settle this HTLC which - // opens up the base for out tests. - const htlcAmt = btcutil.Amount(30000) - - // If this is a taproot channel, then we'll need to make some manual - // route hints so Alice can actually find a route. - var routeHints []*lnrpc.RouteHint - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - routeHints = makeRouteHints(bob, carol, zeroConf) - } - - // We'll now send a single HTLC across our multi-hop network. 
- var preimage lntypes.Preimage - copy(preimage[:], ht.Random32Bytes()) - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: int64(htlcAmt), - CltvExpiry: finalCltvDelta, - Hash: payHash[:], - RouteHints: routeHints, - } - carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) - - // Subscribe the invoice. - stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) - - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - alice.RPC.SendPayment(req) - - // blocksMined records how many blocks have mined after the creation of - // the invoice so it can be used to calculate how many more blocks need - // to be mined to trigger a force close later on. - var blocksMined uint32 - - // Once the HTLC has cleared, all the nodes in our mini network should - // show that the HTLC has been locked in. - ht.AssertActiveHtlcs(alice, payHash[:]) - ht.AssertActiveHtlcs(bob, payHash[:]) - ht.AssertActiveHtlcs(carol, payHash[:]) - - // At this point, we'll now instruct Carol to force close the - // transaction. This will let us exercise that Bob is able to sweep the - // expired HTLC on Carol's version of the commitment transaction. - closeStream, _ := ht.CloseChannelAssertPending( - carol, bobChanPoint, true, - ) - - // For anchor channels, the anchor won't be used for CPFP because - // channel arbitrator thinks Carol doesn't have preimage for her - // incoming HTLC on the commitment transaction Bob->Carol. Although - // Carol created this invoice, because it's a hold invoice, the - // preimage won't be generated automatically. - closeTx := ht.AssertStreamChannelForceClosed( - carol, bobChanPoint, true, closeStream, - ) - - // Increase the blocks mined. At this step - // AssertStreamChannelForceClosed mines one block. - blocksMined++ - - // At this point, Bob should have a pending force close channel as - // Carol has gone directly to chain. 
- ht.AssertNumPendingForceClose(bob, 1) - - var expectedTxes int - switch c { - // Bob can sweep his commit and anchor outputs immediately. Carol will - // also offer her anchor to her sweeper. - case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT: - ht.AssertNumPendingSweeps(bob, 2) - ht.AssertNumPendingSweeps(carol, 1) - - // We expect to see only one sweeping tx to be published from - // Bob, which sweeps his commit and anchor outputs in the same - // tx. For Carol, since her anchor is not used for CPFP, it'd - // be uneconomical to sweep so it will fail. - expectedTxes = 1 - - // Bob can't sweep his commit output yet as he was the initiator of a - // script-enforced leased channel, so he'll always incur the additional - // CLTV. He can still offer his anchor output to his sweeper however. - case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE: - ht.AssertNumPendingSweeps(bob, 1) - ht.AssertNumPendingSweeps(carol, 1) - - // We expect to see only no sweeping txns to be published, - // neither Bob's or Carol's anchor sweep can succeed due to - // it's uneconomical. - expectedTxes = 0 - - default: - ht.Fatalf("unhandled commitment type %v", c) - } - - // Mine one block to trigger the sweeps. - ht.MineEmptyBlocks(1) - blocksMined++ - - // We now mine a block to clear up the mempool. - ht.MineBlocksAndAssertNumTxes(1, expectedTxes) - blocksMined++ - - // Next, we'll mine enough blocks for the HTLC to expire. At this - // point, Bob should hand off the output to his internal utxo nursery, - // which will broadcast a sweep transaction. - numBlocks := padCLTV(uint32(finalCltvDelta) - - lncfg.DefaultOutgoingBroadcastDelta) - ht.MineEmptyBlocks(int(numBlocks - blocksMined)) - - // If we check Bob's pending channel report, it should show that he has - // a single HTLC that's now in the second stage, as it skipped the - // initial first stage since this is a direct HTLC. 
- ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2) - - // We need to generate an additional block to expire the CSV 1. - ht.MineEmptyBlocks(1) - - // For script-enforced leased channels, Bob has failed to sweep his - // anchor output before, so it's still pending. - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - ht.AssertNumPendingSweeps(bob, 2) - } else { - // Bob should have a pending sweep request. - ht.AssertNumPendingSweeps(bob, 1) - } - - // Mine a block to trigger the sweeper to sweep it. - ht.MineEmptyBlocks(1) - - // Bob's sweeping transaction should now be found in the mempool at - // this point. - sweepTx := ht.AssertNumTxsInMempool(1)[0] - - // If we mine an additional block, then this should confirm Bob's - // transaction which sweeps the direct HTLC output. - block := ht.MineBlocksAndAssertNumTxes(1, 1)[0] - ht.AssertTxInBlock(block, sweepTx) - - // Now that the sweeping transaction has been confirmed, Bob should - // cancel back that HTLC. As a result, Alice should not know of any - // active HTLC's. - ht.AssertNumActiveHtlcs(alice, 0) - - // Now we'll check Bob's pending channel report. Since this was Carol's - // commitment, he doesn't have to wait for any CSV delays, but he may - // still need to wait for a CLTV on his commit output to expire - // depending on the commitment type. - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - resp := bob.RPC.PendingChannels() - - require.Len(ht, resp.PendingForceClosingChannels, 1) - forceCloseChan := resp.PendingForceClosingChannels[0] - require.Positive(ht, forceCloseChan.BlocksTilMaturity) - - numBlocks := int(forceCloseChan.BlocksTilMaturity) - ht.MineEmptyBlocks(numBlocks) - - // Assert the commit output has been offered to the sweeper. - // Bob should have two pending sweep requests - one for the - // commit output and one for the anchor output. - ht.AssertNumPendingSweeps(bob, 2) - - // Mine a block to trigger the sweep. 
- ht.MineEmptyBlocks(1) - - bobCommitOutpoint := wire.OutPoint{Hash: closeTx, Index: 3} - bobCommitSweep := ht.AssertOutpointInMempool( - bobCommitOutpoint, - ) - bobCommitSweepTxid := bobCommitSweep.TxHash() - block := ht.MineBlocksAndAssertNumTxes(1, 1)[0] - ht.AssertTxInBlock(block, bobCommitSweepTxid) - } - ht.AssertNumPendingForceClose(bob, 0) - - // While we're here, we assert that our expired invoice's state is - // correctly updated, and can no longer be settled. - ht.AssertInvoiceState(stream, lnrpc.Invoice_CANCELED) - - // We'll close out the test by closing the channel from Alice to Bob, - // and then shutting down the new node we created as its no longer - // needed. Coop close, no anchors. - ht.CloseChannel(alice, aliceChanPoint) -} - -// testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if -// we force close a channel with an incoming HTLC, and later find out the -// preimage via the witness beacon, we properly settle the HTLC on-chain using -// the HTLC success transaction in order to ensure we don't lose any funds. -func testMultiHopHtlcLocalChainClaim(ht *lntest.HarnessTest) { - runMultiHopHtlcClaimTest(ht, runMultiHopHtlcLocalChainClaim) -} - -func runMultiHopHtlcLocalChainClaim(ht *lntest.HarnessTest, - alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) { - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - ht, alice, bob, false, c, zeroConf, - ) - - // For neutrino backend, we need to fund one more UTXO for Carol so she - // can sweep her outputs. - if ht.IsNeutrinoBackend() { - ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) - } - - // If this is a taproot channel, then we'll need to make some manual - // route hints so Alice can actually find a route. 
- var routeHints []*lnrpc.RouteHint - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - routeHints = makeRouteHints(bob, carol, zeroConf) - } - - // With the network active, we'll now add a new hodl invoice at Carol's - // end. Make sure the cltv expiry delta is large enough, otherwise Bob - // won't send out the outgoing htlc. - const invoiceAmt = 100000 - var preimage lntypes.Preimage - copy(preimage[:], ht.Random32Bytes()) - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: invoiceAmt, - CltvExpiry: finalCltvDelta, - Hash: payHash[:], - RouteHints: routeHints, - } - carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) - - // Subscribe the invoice. - stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) - - // Now that we've created the invoice, we'll send a single payment from - // Alice to Carol. We won't wait for the response however, as Carol - // will not immediately settle the payment. - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - alice.RPC.SendPayment(req) - - // At this point, all 3 nodes should now have an active channel with - // the created HTLC pending on all of them. - ht.AssertActiveHtlcs(alice, payHash[:]) - ht.AssertActiveHtlcs(bob, payHash[:]) - ht.AssertActiveHtlcs(carol, payHash[:]) - - // Wait for carol to mark invoice as accepted. There is a small gap to - // bridge between adding the htlc to the channel and executing the exit - // hop logic. - ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) - - // blocksMined records how many blocks have mined after the creation of - // the invoice so it can be used to calculate how many more blocks need - // to be mined to trigger a force close later on. - var blocksMined uint32 - - // At this point, Bob decides that he wants to exit the channel - // immediately, so he force closes his commitment transaction. 
- closeStream, _ := ht.CloseChannelAssertPending( - bob, aliceChanPoint, true, - ) - - // For anchor channels, the anchor won't be used for CPFP as there's no - // deadline pressure for Bob on the channel Alice->Bob at the moment. - // For Bob's local commitment tx, there's only one incoming HTLC which - // he doesn't have the preimage yet. Thus this anchor won't be - // force-swept. - hasAnchorSweep := false - bobForceClose := ht.AssertStreamChannelForceClosed( - bob, aliceChanPoint, hasAnchorSweep, closeStream, - ) - - // Increase the blocks mined. At this step - // AssertStreamChannelForceClosed mines one block. - blocksMined++ - - var expectedTxes int - switch c { - // Alice will sweep her commitment and anchor output immediately. Bob - // will also offer his anchor to his sweeper. - case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT: - ht.AssertNumPendingSweeps(alice, 2) - ht.AssertNumPendingSweeps(bob, 1) - - // We expect to see only one sweeping tx to be published from - // Alice, which sweeps her commit and anchor outputs in the - // same tx. For Bob, since his anchor is not used for CPFP, - // it'd be uneconomical to sweep so it will fail. - expectedTxes = 1 - - // Alice will offer her anchor output to her sweeper. Her commitment - // output cannot be swept yet as it has incurred an additional CLTV due - // to being the initiator of a script-enforced leased channel. - case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE: - ht.AssertNumPendingSweeps(alice, 1) - ht.AssertNumPendingSweeps(bob, 1) - - // We expect to see only no sweeping txns to be published, - // neither Alice's or Bob's anchor sweep can succeed due to - // it's uneconomical. - expectedTxes = 0 - - default: - ht.Fatalf("unhandled commitment type %v", c) - } - - // Mine a block to trigger the sweeps. - ht.MineEmptyBlocks(1) - blocksMined++ - - // Assert the expected num of txns are found in the mempool. 
- ht.AssertNumTxsInMempool(expectedTxes) - - // Mine a block to clean up the mempool for the rest of the test. - ht.MineBlocksAndAssertNumTxes(1, expectedTxes) - blocksMined++ - - // Suspend Bob to force Carol to go to chain. - restartBob := ht.SuspendNode(bob) - - // Settle invoice. This will just mark the invoice as settled, as there - // is no link anymore to remove the htlc from the commitment tx. For - // this test, it is important to actually settle and not leave the - // invoice in the accepted state, because without a known preimage, the - // channel arbitrator won't go to chain. - carol.RPC.SettleInvoice(preimage[:]) - - // We now advance the block height to the point where Carol will force - // close her channel with Bob, broadcast the closing tx but keep it - // unconfirmed. - numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry - - lncfg.DefaultIncomingBroadcastDelta)) - ht.MineEmptyBlocks(int(numBlocks - blocksMined)) - - // Carol's commitment transaction should now be in the mempool. - ht.AssertNumTxsInMempool(1) - - // Look up the closing transaction. It should be spending from the - // funding transaction, - closingTx := ht.AssertOutpointInMempool( - ht.OutPointFromChannelPoint(bobChanPoint), - ) - closingTxid := closingTx.TxHash() - - // Mine a block that should confirm the commit tx. - block := ht.MineBlocksAndAssertNumTxes(1, 1)[0] - ht.AssertTxInBlock(block, closingTxid) - - // After the force close transaction is mined, Carol should offer her - // second-level success HTLC tx and anchor to the sweeper. - ht.AssertNumPendingSweeps(carol, 2) - - // Restart bob again. - require.NoError(ht, restartBob()) - - // Lower the fee rate so Bob's two anchor outputs are economical to - // be swept in one tx. - ht.SetFeeEstimate(chainfee.FeePerKwFloor) - - // After the force close transaction is mined, transactions will be - // broadcast by both Bob and Carol. 
- switch c { - // Carol will broadcast her sweeping txns and Bob will sweep his - // commitment and anchor outputs, we'd expect to see three txns, - // - Carol's second level HTLC transaction. - // - Carol's anchor sweeping txns since it's used for CPFP. - // - Bob's sweep tx spending his commitment output, and two anchor - // outputs, one from channel Alice to Bob and the other from channel - // Bob to Carol. - case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT: - ht.AssertNumPendingSweeps(bob, 3) - expectedTxes = 3 - - // Carol will broadcast her sweeping txns and Bob will sweep his - // anchor outputs. Bob can't sweep his commitment output yet as it has - // incurred an additional CLTV due to being the initiator of a - // script-enforced leased channel: - // - Carol's second level HTLC transaction. - // - Carol's anchor sweeping txns since it's used for CPFP. - // - Bob's sweep tx spending his two anchor outputs, one from channel - // Alice to Bob and the other from channel Bob to Carol. - case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE: - ht.AssertNumPendingSweeps(bob, 2) - expectedTxes = 3 - - default: - ht.Fatalf("unhandled commitment type %v", c) - } - - // Mine a block to trigger the sweeps. - ht.MineEmptyBlocks(1) - - // Assert transactions can be found in the mempool. - ht.AssertNumTxsInMempool(expectedTxes) - - // At this point we suspend Alice to make sure she'll handle the - // on-chain settle after a restart. - restartAlice := ht.SuspendNode(alice) - - // Mine a block to confirm the expected transactions (+ the coinbase). - ht.MineBlocksAndAssertNumTxes(1, expectedTxes) - - // For a channel of the anchor type, we will subtract one block - // from the default CSV, as the Sweeper will handle the input, and the - // Sweeper sweeps the input as soon as the lock expires. - secondLevelMaturity := uint32(defaultCSV - 1) - - // Keep track of the second level tx maturity. 
- carolSecondLevelCSV := secondLevelMaturity - - // When Bob notices Carol's second level transaction in the block, he - // will extract the preimage and broadcast a second level tx to claim - // the HTLC in his (already closed) channel with Alice. - ht.AssertNumPendingSweeps(bob, 1) - - // Mine a block to trigger the sweep of the second level tx. - ht.MineEmptyBlocks(1) - carolSecondLevelCSV-- - - // Check Bob's second level tx. - bobSecondLvlTx := ht.GetNumTxsFromMempool(1)[0] - - // It should spend from the commitment in the channel with Alice. - ht.AssertTxSpendFrom(bobSecondLvlTx, bobForceClose) - - // At this point, Bob should have broadcast his second layer success - // transaction, and should have sent it to the nursery for incubation. - ht.AssertNumHTLCsAndStage(bob, aliceChanPoint, 1, 1) - - // The channel between Bob and Carol will still be pending force close - // if this is a leased channel. In that case, we'd also check the HTLC - // stages are correct in that channel. - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - ht.AssertNumPendingForceClose(bob, 2) - ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 1) - } else { - ht.AssertNumPendingForceClose(bob, 1) - } - - // We'll now mine a block which should confirm Bob's second layer - // transaction. - ht.MineBlocksAndAssertNumTxes(1, 1) - - // Keep track of Bob's second level maturity, and decrement our track - // of Carol's. - bobSecondLevelCSV := secondLevelMaturity - carolSecondLevelCSV-- - - // Now that the preimage from Bob has hit the chain, restart Alice to - // ensure she'll pick it up. - require.NoError(ht, restartAlice()) - - // If we then mine 1 additional blocks, Carol's second level tx should - // mature, and she can pull the funds from it with a sweep tx. - ht.MineEmptyBlocks(int(carolSecondLevelCSV)) - bobSecondLevelCSV -= carolSecondLevelCSV - - // Carol should have one a sweep request for her second level tx. 
- ht.AssertNumPendingSweeps(carol, 1) - - // Mine a block to trigger the sweep. - ht.MineEmptyBlocks(1) - bobSecondLevelCSV-- - - // Carol's sweep tx should be broadcast. - carolSweep := ht.AssertNumTxsInMempool(1)[0] - - // Bob should offer his second level tx to his sweeper. - ht.AssertNumPendingSweeps(bob, 1) - - // Mining one additional block, Bob's second level tx is mature, and he - // can sweep the output. - block = ht.MineBlocksAndAssertNumTxes(bobSecondLevelCSV, 1)[0] - ht.AssertTxInBlock(block, carolSweep) - - bobSweep := ht.GetNumTxsFromMempool(1)[0] - bobSweepTxid := bobSweep.TxHash() - - // When we mine one additional block, that will confirm Bob's sweep. - // Now Bob should have no pending channels anymore, as this just - // resolved it by the confirmation of the sweep transaction. - block = ht.MineBlocksAndAssertNumTxes(1, 1)[0] - ht.AssertTxInBlock(block, bobSweepTxid) - - // With the script-enforced lease commitment type, Alice and Bob still - // haven't been able to sweep their respective commit outputs due to the - // additional CLTV. We'll need to mine enough blocks for the timelock to - // expire and prompt their sweep. - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - for _, node := range []*node.HarnessNode{alice, bob} { - ht.AssertNumPendingForceClose(node, 1) - } - - // Due to the way the test is set up, Alice and Bob share the - // same CLTV for their commit outputs even though it's enforced - // on different channels (Alice-Bob and Bob-Carol). - resp := alice.RPC.PendingChannels() - require.Len(ht, resp.PendingForceClosingChannels, 1) - forceCloseChan := resp.PendingForceClosingChannels[0] - require.Positive(ht, forceCloseChan.BlocksTilMaturity) - - // Mine enough blocks for the timelock to expire. - numBlocks := uint32(forceCloseChan.BlocksTilMaturity) - ht.MineEmptyBlocks(int(numBlocks)) - - // Both Alice and Bob should now offer their commit outputs to - // the sweeper. 
For Alice, she still has her anchor output as - // pending sweep as it's not used for CPFP, thus it's - // uneconomical to sweep it alone. - ht.AssertNumPendingSweeps(alice, 2) - ht.AssertNumPendingSweeps(bob, 1) - - // Mine a block to trigger the sweeps. - ht.MineEmptyBlocks(1) - - // Both Alice and Bob show broadcast their commit sweeps. - aliceCommitOutpoint := wire.OutPoint{ - Hash: bobForceClose, Index: 3, - } - ht.AssertOutpointInMempool( - aliceCommitOutpoint, - ).TxHash() - bobCommitOutpoint := wire.OutPoint{Hash: closingTxid, Index: 3} - ht.AssertOutpointInMempool( - bobCommitOutpoint, - ).TxHash() - - // Confirm their sweeps. - ht.MineBlocksAndAssertNumTxes(1, 2) - } - - // All nodes should show zero pending and open channels. - for _, node := range []*node.HarnessNode{alice, bob, carol} { - ht.AssertNumPendingForceClose(node, 0) - ht.AssertNodeNumChannels(node, 0) - } - - // Finally, check that the Alice's payment is correctly marked - // succeeded. - ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED) -} - -// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario, -// if the remote party goes to chain while we have an incoming HTLC, then when -// we found out the preimage via the witness beacon, we properly settle the -// HTLC directly on-chain using the preimage in order to ensure that we don't -// lose any funds. -func testMultiHopHtlcRemoteChainClaim(ht *lntest.HarnessTest) { - runMultiHopHtlcClaimTest(ht, runMultiHopHtlcRemoteChainClaim) -} - -func runMultiHopHtlcRemoteChainClaim(ht *lntest.HarnessTest, - alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) { - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. 
- aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - ht, alice, bob, false, c, zeroConf, - ) - - // If this is a taproot channel, then we'll need to make some manual - // route hints so Alice can actually find a route. - var routeHints []*lnrpc.RouteHint - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - routeHints = makeRouteHints(bob, carol, zeroConf) - } - - // With the network active, we'll now add a new hodl invoice at Carol's - // end. Make sure the cltv expiry delta is large enough, otherwise Bob - // won't send out the outgoing htlc. - const invoiceAmt = 100000 - var preimage lntypes.Preimage - copy(preimage[:], ht.Random32Bytes()) - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: invoiceAmt, - CltvExpiry: finalCltvDelta, - Hash: payHash[:], - RouteHints: routeHints, - } - carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) - - // Subscribe the invoice. - stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) - - // Now that we've created the invoice, we'll send a single payment from - // Alice to Carol. We won't wait for the response however, as Carol - // will not immediately settle the payment. - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - alice.RPC.SendPayment(req) - - // At this point, all 3 nodes should now have an active channel with - // the created HTLC pending on all of them. - ht.AssertActiveHtlcs(alice, payHash[:]) - ht.AssertActiveHtlcs(bob, payHash[:]) - ht.AssertActiveHtlcs(carol, payHash[:]) - - // Wait for carol to mark invoice as accepted. There is a small gap to - // bridge between adding the htlc to the channel and executing the exit - // hop logic. 
- ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) - - // blocksMined records how many blocks have mined after the creation of - // the invoice so it can be used to calculate how many more blocks need - // to be mined to trigger a force close later on. - var blocksMined int - - // Lower the fee rate so Bob's two anchor outputs are economical to - // be swept in one tx. - ht.SetFeeEstimate(chainfee.FeePerKwFloor) - - // Next, Alice decides that she wants to exit the channel, so she'll - // immediately force close the channel by broadcast her commitment - // transaction. - closeStream, _ := ht.CloseChannelAssertPending( - alice, aliceChanPoint, true, - ) - aliceForceClose := ht.AssertStreamChannelForceClosed( - alice, aliceChanPoint, true, closeStream, - ) - - // Increase the blocks mined. At this step - // AssertStreamChannelForceClosed mines one block. - blocksMined++ - - // Wait for the channel to be marked pending force close. - ht.AssertChannelPendingForceClose(alice, aliceChanPoint) - - // After AssertStreamChannelForceClosed returns, it has mined a block - // so now bob will attempt to redeem his anchor output. Check the - // anchor is offered to the sweeper. - ht.AssertNumPendingSweeps(bob, 1) - ht.AssertNumPendingSweeps(alice, 1) - - // Mine enough blocks for Alice to sweep her funds from the force - // closed channel. AssertStreamChannelForceClosed() already mined a - // block containing the commitment tx and the commit sweep tx will be - // broadcast immediately before it can be included in a block, so mine - // one less than defaultCSV in order to perform mempool assertions. - if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - ht.MineEmptyBlocks(defaultCSV - blocksMined) - blocksMined = defaultCSV - - // Alice should now sweep her funds. - ht.AssertNumPendingSweeps(alice, 2) - - // Mine a block to trigger the sweep. - ht.MineEmptyBlocks(1) - blocksMined++ - - // Mine Alice's commit sweeping tx. 
- ht.MineBlocksAndAssertNumTxes(1, 1) - blocksMined++ - } - - // Suspend bob, so Carol is forced to go on chain. - restartBob := ht.SuspendNode(bob) - - // Settle invoice. This will just mark the invoice as settled, as there - // is no link anymore to remove the htlc from the commitment tx. For - // this test, it is important to actually settle and not leave the - // invoice in the accepted state, because without a known preimage, the - // channel arbitrator won't go to chain. - carol.RPC.SettleInvoice(preimage[:]) - - // We'll now mine enough blocks so Carol decides that she needs to go - // on-chain to claim the HTLC as Bob has been inactive. - numBlocks := padCLTV(uint32( - invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta, - )) - ht.MineEmptyBlocks(int(numBlocks) - blocksMined) - - // Carol's commitment transaction should now be in the mempool. - ht.AssertNumTxsInMempool(1) - - // The closing transaction should be spending from the funding - // transaction. - closingTx := ht.AssertOutpointInMempool( - ht.OutPointFromChannelPoint(bobChanPoint), - ) - closingTxid := closingTx.TxHash() - - // Since Carol has time-sensitive HTLCs, she will use the anchor for - // CPFP purpose. Assert she has two pending anchor sweep requests - one - // from local commit and the other from remote commit. - ht.AssertNumPendingSweeps(carol, 2) - - // Mine a block, which should contain: the commitment. - block := ht.MineBlocksAndAssertNumTxes(1, 1)[0] - ht.AssertTxInBlock(block, closingTxid) - - // After the force close transaction is mined, Carol should offer her - // second level HTLC tx to the sweeper, along with her anchor output. - ht.AssertNumPendingSweeps(carol, 2) - - // Restart bob again. - require.NoError(ht, restartBob()) - - // After the force close transaction is mined, we should expect Bob and - // Carol to broadcast some transactions depending on the channel - // commitment type. 
- switch c { - // Carol should broadcast her second level HTLC transaction and Bob - // should broadcast a sweeping tx to sweep his commitment output and - // anchor outputs from the two channels. - case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT: - ht.AssertNumPendingSweeps(bob, 3) - - // Carol should broadcast her second level HTLC transaction and Bob - // should broadcast a transaction to sweep his anchor outputs. Bob - // can't sweep his commitment output yet as he has incurred an - // additional CLTV due to being the channel initiator of a force closed - // script-enforced leased channel. - case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE: - ht.AssertNumPendingSweeps(bob, 2) - - default: - ht.Fatalf("unhandled commitment type %v", c) - } - - // Keep track of the second level tx maturity. - carolSecondLevelCSV := uint32(defaultCSV) - - // Mine a block to trigger the sweeps, also confirms Carol's CPFP - // anchor sweeping. - ht.MineBlocksAndAssertNumTxes(1, 1) - carolSecondLevelCSV-- - ht.AssertNumTxsInMempool(2) - - // Mine a block to confirm the expected transactions. - ht.MineBlocksAndAssertNumTxes(1, 2) - - // When Bob notices Carol's second level transaction in the block, he - // will extract the preimage and offer the HTLC to his sweeper. - ht.AssertNumPendingSweeps(bob, 1) - - // NOTE: after Bob is restarted, the sweeping of the direct preimage - // spent will happen immediately so we don't need to mine a block to - // trigger Bob's sweeper to sweep it. - bobHtlcSweep := ht.GetNumTxsFromMempool(1)[0] - bobHtlcSweepTxid := bobHtlcSweep.TxHash() - - // It should spend from the commitment in the channel with Alice. - ht.AssertTxSpendFrom(bobHtlcSweep, aliceForceClose) - - // We'll now mine a block which should confirm Bob's HTLC sweep - // transaction. 
- block = ht.MineBlocksAndAssertNumTxes(1, 1)[0] - ht.AssertTxInBlock(block, bobHtlcSweepTxid) - carolSecondLevelCSV-- - - // Now that the sweeping transaction has been confirmed, Bob should now - // recognize that all contracts for the Bob-Carol channel have been - // fully resolved - aliceBobPendingChansLeft := 0 - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - aliceBobPendingChansLeft = 1 - } - for _, node := range []*node.HarnessNode{alice, bob} { - ht.AssertNumPendingForceClose( - node, aliceBobPendingChansLeft, - ) - } - - // If we then mine 3 additional blocks, Carol's second level tx will - // mature, and she should pull the funds. - ht.MineEmptyBlocks(int(carolSecondLevelCSV)) - ht.AssertNumPendingSweeps(carol, 1) - - // Mine a block to trigger the sweep of the second level tx. - ht.MineEmptyBlocks(1) - carolSweep := ht.AssertNumTxsInMempool(1)[0] - - // When Carol's sweep gets confirmed, she should have no more pending - // channels. - block = ht.MineBlocksAndAssertNumTxes(1, 1)[0] - ht.AssertTxInBlock(block, carolSweep) - ht.AssertNumPendingForceClose(carol, 0) - - // With the script-enforced lease commitment type, Alice and Bob still - // haven't been able to sweep their respective commit outputs due to the - // additional CLTV. We'll need to mine enough blocks for the timelock to - // expire and prompt their sweep. - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - // Due to the way the test is set up, Alice and Bob share the - // same CLTV for their commit outputs even though it's enforced - // on different channels (Alice-Bob and Bob-Carol). - resp := alice.RPC.PendingChannels() - require.Len(ht, resp.PendingForceClosingChannels, 1) - forceCloseChan := resp.PendingForceClosingChannels[0] - require.Positive(ht, forceCloseChan.BlocksTilMaturity) - - // Mine enough blocks for the timelock to expire. 
- numBlocks := int(forceCloseChan.BlocksTilMaturity) - ht.MineEmptyBlocks(numBlocks) - - // Both Alice and Bob should offer their commit sweeps. - ht.AssertNumPendingSweeps(alice, 2) - ht.AssertNumPendingSweeps(bob, 1) - - // Mine a block to trigger the sweeps. - ht.MineEmptyBlocks(1) - - // Both Alice and Bob should broadcast their commit sweeps. - aliceCommitOutpoint := wire.OutPoint{ - Hash: aliceForceClose, Index: 3, - } - ht.AssertOutpointInMempool(aliceCommitOutpoint) - bobCommitOutpoint := wire.OutPoint{Hash: closingTxid, Index: 3} - ht.AssertOutpointInMempool(bobCommitOutpoint) - - // Confirm their sweeps. - ht.MineBlocksAndAssertNumTxes(1, 2) - - // Alice and Bob should not show any pending channels anymore as - // they have been fully resolved. - for _, node := range []*node.HarnessNode{alice, bob} { - ht.AssertNumPendingForceClose(node, 0) - } - } - - // The invoice should show as settled for Carol, indicating that it was - // swept on-chain. - invoice := ht.AssertInvoiceState(stream, lnrpc.Invoice_SETTLED) - require.Equal(ht, int64(invoiceAmt), invoice.AmtPaidSat) - - // Finally, check that the Alice's payment is correctly marked - // succeeded. - ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED) -} - -// testMultiHopHtlcAggregation tests that in a multi-hop HTLC scenario, if we -// force close a channel with both incoming and outgoing HTLCs, we can properly -// resolve them using the second level timeout and success transactions. In -// case of anchor channels, the second-level spends can also be aggregated and -// properly feebumped, so we'll check that as well. -func testMultiHopHtlcAggregation(ht *lntest.HarnessTest) { - runMultiHopHtlcClaimTest(ht, runMultiHopHtlcAggregation) -} - -func runMultiHopHtlcAggregation(ht *lntest.HarnessTest, - alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) { - - // We need one additional UTXO to create the sweeping tx for the - // second-level success txes. 
- ht.FundCoins(btcutil.SatoshiPerBitcoin, bob) - - // First, we'll create a three hop network: Alice -> Bob -> Carol. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - ht, alice, bob, false, c, zeroConf, - ) - - // If this is a taproot channel, then we'll need to make some manual - // route hints so Alice+Carol can actually find a route. - var ( - carolRouteHints []*lnrpc.RouteHint - aliceRouteHints []*lnrpc.RouteHint - ) - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - carolRouteHints = makeRouteHints(bob, carol, zeroConf) - aliceRouteHints = makeRouteHints(bob, alice, zeroConf) - } - - // To ensure we have capacity in both directions of the route, we'll - // make a fairly large payment Alice->Carol and settle it. - const reBalanceAmt = 500_000 - invoice := &lnrpc.Invoice{ - Value: reBalanceAmt, - RouteHints: carolRouteHints, - } - resp := carol.RPC.AddInvoice(invoice) - ht.CompletePaymentRequests(alice, []string{resp.PaymentRequest}) - - // Make sure Carol has settled the invoice. - ht.AssertInvoiceSettled(carol, resp.PaymentAddr) - - // With the network active, we'll now add a new hodl invoices at both - // Alice's and Carol's end. Make sure the cltv expiry delta is large - // enough, otherwise Bob won't send out the outgoing htlc. - const numInvoices = 5 - const invoiceAmt = 50_000 - - var ( - carolInvoices []*invoicesrpc.AddHoldInvoiceResp - aliceInvoices []*invoicesrpc.AddHoldInvoiceResp - alicePreimages []lntypes.Preimage - payHashes [][]byte - invoiceStreamsCarol []rpc.SingleInvoiceClient - invoiceStreamsAlice []rpc.SingleInvoiceClient - ) - - // Add Carol invoices. 
- for i := 0; i < numInvoices; i++ { - var preimage lntypes.Preimage - copy(preimage[:], ht.Random32Bytes()) - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: invoiceAmt, - CltvExpiry: finalCltvDelta, - Hash: payHash[:], - RouteHints: carolRouteHints, - } - carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) - - carolInvoices = append(carolInvoices, carolInvoice) - payHashes = append(payHashes, payHash[:]) - - // Subscribe the invoice. - stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) - invoiceStreamsCarol = append(invoiceStreamsCarol, stream) - } - - // We'll give Alice's invoices a longer CLTV expiry, to ensure the - // channel Bob<->Carol will be closed first. - for i := 0; i < numInvoices; i++ { - var preimage lntypes.Preimage - copy(preimage[:], ht.Random32Bytes()) - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: invoiceAmt, - CltvExpiry: thawHeightDelta - 4, - Hash: payHash[:], - RouteHints: aliceRouteHints, - } - aliceInvoice := alice.RPC.AddHoldInvoice(invoiceReq) - - aliceInvoices = append(aliceInvoices, aliceInvoice) - alicePreimages = append(alicePreimages, preimage) - payHashes = append(payHashes, payHash[:]) - - // Subscribe the invoice. - stream := alice.RPC.SubscribeSingleInvoice(payHash[:]) - invoiceStreamsAlice = append(invoiceStreamsAlice, stream) - } - - // Now that we've created the invoices, we'll pay them all from - // Alice<->Carol, going through Bob. We won't wait for the response - // however, as neither will immediately settle the payment. - - // Alice will pay all of Carol's invoices. - for _, carolInvoice := range carolInvoices { - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - alice.RPC.SendPayment(req) - } - - // And Carol will pay Alice's. 
- for _, aliceInvoice := range aliceInvoices { - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: aliceInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - carol.RPC.SendPayment(req) - } - - // At this point, all 3 nodes should now the HTLCs active on their - // channels. - ht.AssertActiveHtlcs(alice, payHashes...) - ht.AssertActiveHtlcs(bob, payHashes...) - ht.AssertActiveHtlcs(carol, payHashes...) - - // Wait for Alice and Carol to mark the invoices as accepted. There is - // a small gap to bridge between adding the htlc to the channel and - // executing the exit hop logic. - for _, stream := range invoiceStreamsCarol { - ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) - } - - for _, stream := range invoiceStreamsAlice { - ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) - } - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed. - ht.SetFeeEstimate(30000) - - // We want Carol's htlcs to expire off-chain to demonstrate bob's force - // close. However, Carol will cancel her invoices to prevent force - // closes, so we shut her down for now. - restartCarol := ht.SuspendNode(carol) - - // We'll now mine enough blocks to trigger Bob's broadcast of his - // commitment transaction due to the fact that the Carol's HTLCs are - // about to timeout. With the default outgoing broadcast delta of zero, - // this will be the same height as the htlc expiry height. - numBlocks := padCLTV( - uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta), - ) - ht.MineEmptyBlocks(int(numBlocks)) - - // Bob's force close transaction should now be found in the mempool. If - // there are anchors, we expect it to be offered to Bob's sweeper. - ht.AssertNumTxsInMempool(1) - - // Bob has two anchor sweep requests, one for remote (invalid) and the - // other for local. 
- ht.AssertNumPendingSweeps(bob, 2) - - closeTx := ht.AssertOutpointInMempool( - ht.OutPointFromChannelPoint(bobChanPoint), - ) - closeTxid := closeTx.TxHash() - - // Go through the closing transaction outputs, and make an index for - // the HTLC outputs. - successOuts := make(map[wire.OutPoint]struct{}) - timeoutOuts := make(map[wire.OutPoint]struct{}) - for i, txOut := range closeTx.TxOut { - op := wire.OutPoint{ - Hash: closeTxid, - Index: uint32(i), - } - - switch txOut.Value { - // If this HTLC goes towards Carol, Bob will claim it with a - // timeout Tx. In this case the value will be the invoice - // amount. - case invoiceAmt: - timeoutOuts[op] = struct{}{} - - // If the HTLC has direction towards Alice, Bob will claim it - // with the success TX when he learns the preimage. In this - // case one extra sat will be on the output, because of the - // routing fee. - case invoiceAmt + 1: - successOuts[op] = struct{}{} - } - } - - // Once bob has force closed, we can restart carol. - require.NoError(ht, restartCarol()) - - // Mine a block to confirm the closing transaction. - ht.MineBlocksAndAssertNumTxes(1, 1) - - // The above mined block will trigger Bob to sweep his anchor output. - ht.AssertNumTxsInMempool(1) - - // Let Alice settle her invoices. When Bob now gets the preimages, he - // has no other option than to broadcast his second-level transactions - // to claim the money. - for _, preimage := range alicePreimages { - alice.RPC.SettleInvoice(preimage[:]) - } - - expectedTxes := 0 - switch c { - // In case of anchors, all success transactions will be aggregated into - // one, the same is the case for the timeout transactions. In this case - // Carol will also sweep her commitment and anchor output in a single - // tx. - case lnrpc.CommitmentType_ANCHORS, - lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE, - lnrpc.CommitmentType_SIMPLE_TAPROOT: - - // Bob should have `numInvoices` for both HTLC success and - // timeout txns, plus one anchor sweep. 
- ht.AssertNumPendingSweeps(bob, numInvoices*2+1) - - // Carol should have commit and anchor outputs. - ht.AssertNumPendingSweeps(carol, 2) - - // We expect to see three sweeping txns: - // 1. Bob's sweeping tx for all timeout HTLCs. - // 2. Bob's sweeping tx for all success HTLCs. - // 3. Carol's sweeping tx for her commit and anchor outputs. - expectedTxes = 3 - - default: - ht.Fatalf("unhandled commitment type %v", c) - } - - // Mine a block to confirm Bob's anchor sweeping, which will also - // trigger his sweeper to sweep HTLCs. - ht.MineBlocksAndAssertNumTxes(1, 1) - - // Assert the sweeping txns are found in the mempool. - txes := ht.GetNumTxsFromMempool(expectedTxes) - - // Since Bob can aggregate the transactions, we expect a single - // transaction, that have multiple spends from the commitment. - var ( - timeoutTxs []*chainhash.Hash - successTxs []*chainhash.Hash - ) - for _, tx := range txes { - txid := tx.TxHash() - - for i := range tx.TxIn { - prevOp := tx.TxIn[i].PreviousOutPoint - if _, ok := successOuts[prevOp]; ok { - successTxs = append(successTxs, &txid) - - break - } - - if _, ok := timeoutOuts[prevOp]; ok { - timeoutTxs = append(timeoutTxs, &txid) - - break - } - } - } - - // In case of anchor we expect all the timeout and success second - // levels to be aggregated into one tx. For earlier channel types, they - // will be separate transactions. - if lntest.CommitTypeHasAnchors(c) { - require.Len(ht, timeoutTxs, 1) - require.Len(ht, successTxs, 1) - } else { - require.Len(ht, timeoutTxs, numInvoices) - require.Len(ht, successTxs, numInvoices) - } - - // All mempool transactions should be spending from the commitment - // transaction. - ht.AssertAllTxesSpendFrom(txes, closeTxid) - - // Mine a block to confirm the all the transactions, including Carol's - // commitment tx, anchor tx(optional), and Bob's second-level timeout - // and success txes. 
- ht.MineBlocksAndAssertNumTxes(1, expectedTxes) - - // At this point, Bob should have broadcast his second layer success - // transaction, and should have sent it to the nursery for incubation, - // or to the sweeper for sweeping. - forceCloseChan := ht.AssertNumPendingForceClose(bob, 1)[0] - ht.Logf("Bob's timelock on commit=%v, timelock on htlc=%v", - forceCloseChan.BlocksTilMaturity, - forceCloseChan.PendingHtlcs[0].BlocksTilMaturity) - - // For this channel, we also check the number of HTLCs and the stage - // are correct. - ht.AssertNumHTLCsAndStage(bob, bobChanPoint, numInvoices*2, 2) - - if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - // If we then mine additional blocks, Bob can sweep his - // commitment output. - ht.MineEmptyBlocks(1) - - // Assert the tx has been offered to the sweeper. - ht.AssertNumPendingSweeps(bob, 1) - - // Mine one block to trigger the sweep. - ht.MineEmptyBlocks(1) - - // Find the commitment sweep. - bobCommitSweep := ht.GetNumTxsFromMempool(1)[0] - ht.AssertTxSpendFrom(bobCommitSweep, closeTxid) - - // Also ensure it is not spending from any of the HTLC output. - for _, txin := range bobCommitSweep.TxIn { - for _, timeoutTx := range timeoutTxs { - require.NotEqual(ht, *timeoutTx, - txin.PreviousOutPoint.Hash, - "found unexpected spend of timeout tx") - } - - for _, successTx := range successTxs { - require.NotEqual(ht, *successTx, - txin.PreviousOutPoint.Hash, - "found unexpected spend of success tx") - } - } - } - - switch c { - // Mining one additional block, Bob's second level tx is mature, and he - // can sweep the output. Before the blocks are mined, we should expect - // to see Bob's commit sweep in the mempool. - case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT: - ht.MineBlocksAndAssertNumTxes(1, 1) - - // Since Bob is the initiator of the Bob-Carol script-enforced leased - // channel, he incurs an additional CLTV when sweeping outputs back to - // his wallet. 
We'll need to mine enough blocks for the timelock to - // expire to prompt his broadcast. - case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE: - resp := bob.RPC.PendingChannels() - require.Len(ht, resp.PendingForceClosingChannels, 1) - forceCloseChan := resp.PendingForceClosingChannels[0] - require.Positive(ht, forceCloseChan.BlocksTilMaturity) - numBlocks := uint32(forceCloseChan.BlocksTilMaturity) - - // Add debug log. - height := ht.CurrentHeight() - bob.AddToLogf("itest: now mine %d blocks at height %d", - numBlocks, height) - ht.MineEmptyBlocks(int(numBlocks) - 1) - - default: - ht.Fatalf("unhandled commitment type %v", c) - } - - // Make sure Bob's sweeper has received all the sweeping requests. - ht.AssertNumPendingSweeps(bob, numInvoices*2) - - // Mine one block to trigger the sweeps. - ht.MineEmptyBlocks(1) - - // For leased channels, Bob's commit output will mature after the above - // block. - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - ht.AssertNumPendingSweeps(bob, numInvoices*2+1) - } - - // We now wait for 30 seconds to overcome the flake - there's a block - // race between contractcourt and sweeper, causing the sweep to be - // broadcast earlier. - // - // TODO(yy): remove this once `blockbeat` is in place. - numExpected := 1 - err := wait.NoError(func() error { - mem := ht.GetRawMempool() - if len(mem) == numExpected { - return nil - } - - if len(mem) > 0 { - numExpected = len(mem) - } - - return fmt.Errorf("want %d, got %v in mempool: %v", numExpected, - len(mem), mem) - }, wait.DefaultTimeout) - ht.Logf("Checking mempool got: %v", err) - - // Make sure it spends from the second level tx. - secondLevelSweep := ht.GetNumTxsFromMempool(numExpected)[0] - bobSweep := secondLevelSweep.TxHash() - - // It should be sweeping all the second-level outputs. 
- var secondLvlSpends int - for _, txin := range secondLevelSweep.TxIn { - for _, timeoutTx := range timeoutTxs { - if *timeoutTx == txin.PreviousOutPoint.Hash { - secondLvlSpends++ - } - } - - for _, successTx := range successTxs { - if *successTx == txin.PreviousOutPoint.Hash { - secondLvlSpends++ - } - } - } - - // TODO(yy): bring the following check back when `blockbeat` is in - // place - atm we may have two sweeping transactions in the mempool. - // require.Equal(ht, 2*numInvoices, secondLvlSpends) - - // When we mine one additional block, that will confirm Bob's second - // level sweep. Now Bob should have no pending channels anymore, as - // this just resolved it by the confirmation of the sweep transaction. - block := ht.MineBlocksAndAssertNumTxes(1, numExpected)[0] - ht.AssertTxInBlock(block, bobSweep) - - // For leased channels, we need to mine one more block to confirm Bob's - // commit output sweep. - // - // NOTE: we mine this block conditionally, as the commit output may - // have already been swept one block earlier due to the race in block - // consumption among subsystems. - pendingChanResp := bob.RPC.PendingChannels() - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - ht.MineBlocksAndAssertNumTxes(1, 1) - } - ht.AssertNumPendingForceClose(bob, 0) - - // THe channel with Alice is still open. - ht.AssertNodeNumChannels(bob, 1) - - // Carol should have no channels left (open nor pending). - ht.AssertNumPendingForceClose(carol, 0) - ht.AssertNodeNumChannels(carol, 0) - - // Coop close, no anchors. - ht.CloseChannel(alice, aliceChanPoint) -} - -// createThreeHopNetwork creates a topology of `Alice -> Bob -> Carol`. -func createThreeHopNetwork(ht *lntest.HarnessTest, - alice, bob *node.HarnessNode, carolHodl bool, c lnrpc.CommitmentType, - zeroConf bool) (*lnrpc.ChannelPoint, - *lnrpc.ChannelPoint, *node.HarnessNode) { - - ht.EnsureConnected(alice, bob) - - // We'll create a new node "carol" and have Bob connect to her. 
- // If the carolHodl flag is set, we'll make carol always hold onto the - // HTLC, this way it'll force Bob to go to chain to resolve the HTLC. - carolFlags := lntest.NodeArgsForCommitType(c) - if carolHodl { - carolFlags = append(carolFlags, "--hodl.exit-settle") - } - - if zeroConf { - carolFlags = append( - carolFlags, "--protocol.option-scid-alias", - "--protocol.zero-conf", - ) - } - carol := ht.NewNode("Carol", carolFlags) - - ht.ConnectNodes(bob, carol) - - // Make sure there are enough utxos for anchoring. Because the anchor - // by itself often doesn't meet the dust limit, a utxo from the wallet - // needs to be attached as an additional input. This can still lead to - // a positively-yielding transaction. - for i := 0; i < 2; i++ { - ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, alice) - ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, bob) - ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, carol) - - // Mine 1 block to get the above coins confirmed. - ht.MineBlocksAndAssertNumTxes(1, 3) - } - - // We'll start the test by creating a channel between Alice and Bob, - // which will act as the first leg for out multi-hop HTLC. - const chanAmt = 1000000 - var aliceFundingShim *lnrpc.FundingShim - var thawHeight uint32 - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - minerHeight := ht.CurrentHeight() - thawHeight = minerHeight + thawHeightDelta - aliceFundingShim, _ = deriveFundingShim( - ht, alice, bob, chanAmt, thawHeight, true, c, - ) - } - - var privateChan bool - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - privateChan = true - } - - aliceParams := lntest.OpenChannelParams{ - Private: privateChan, - Amt: chanAmt, - CommitmentType: c, - FundingShim: aliceFundingShim, - ZeroConf: zeroConf, - } - - // If the channel type is taproot, then use an explicit channel type to - // open it. 
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - aliceParams.CommitmentType = lnrpc.CommitmentType_SIMPLE_TAPROOT - } - - // We'll create a channel from Bob to Carol. After this channel is - // open, our topology looks like: A -> B -> C. - var bobFundingShim *lnrpc.FundingShim - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - bobFundingShim, _ = deriveFundingShim( - ht, bob, carol, chanAmt, thawHeight, true, c, - ) - } - - // Prepare params for Bob. - bobParams := lntest.OpenChannelParams{ - Amt: chanAmt, - Private: privateChan, - CommitmentType: c, - FundingShim: bobFundingShim, - ZeroConf: zeroConf, - } - - // If the channel type is taproot, then use an explicit channel type to - // open it. - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - bobParams.CommitmentType = lnrpc.CommitmentType_SIMPLE_TAPROOT - } - - var ( - acceptStreamBob rpc.AcceptorClient - acceptStreamCarol rpc.AcceptorClient - cancelBob context.CancelFunc - cancelCarol context.CancelFunc - ) - - // If a zero-conf channel is being opened, the nodes are signalling the - // zero-conf feature bit. Setup a ChannelAcceptor for the fundee. - if zeroConf { - acceptStreamBob, cancelBob = bob.RPC.ChannelAcceptor() - go acceptChannel(ht.T, true, acceptStreamBob) - - acceptStreamCarol, cancelCarol = carol.RPC.ChannelAcceptor() - go acceptChannel(ht.T, true, acceptStreamCarol) - } - - // Open channels in batch to save blocks mined. - reqs := []*lntest.OpenChannelRequest{ - {Local: alice, Remote: bob, Param: aliceParams}, - {Local: bob, Remote: carol, Param: bobParams}, - } - resp := ht.OpenMultiChannelsAsync(reqs) - aliceChanPoint := resp[0] - bobChanPoint := resp[1] - - // Make sure alice and carol know each other's channels. - // - // We'll only do this though if it wasn't a private channel we opened - // earlier. 
- if !privateChan { - ht.AssertChannelInGraph(alice, bobChanPoint) - ht.AssertChannelInGraph(carol, aliceChanPoint) - } else { - // Otherwise, we want to wait for all the channels to be shown - // as active before we proceed. - ht.AssertChannelExists(alice, aliceChanPoint) - ht.AssertChannelExists(carol, bobChanPoint) - } - - // Remove the ChannelAcceptor for Bob and Carol. - if zeroConf { - cancelBob() - cancelCarol() - } - - return aliceChanPoint, bobChanPoint, carol -} - -// testHtlcTimeoutResolverExtractPreimageRemote tests that in the multi-hop -// setting, Alice->Bob->Carol, when Bob's outgoing HTLC is swept by Carol using -// the 2nd level success tx2nd level success tx, Bob's timeout resolver will -// extract the preimage from the sweep tx found in mempool or blocks(for -// neutrino). The 2nd level success tx is broadcast by Carol and spends the -// outpoint on her commit tx. -func testHtlcTimeoutResolverExtractPreimageRemote(ht *lntest.HarnessTest) { - runMultiHopHtlcClaimTest(ht, runExtraPreimageFromRemoteCommit) -} - -// runExtraPreimageFromRemoteCommit checks that Bob's htlc timeout resolver -// will extract the preimage from the 2nd level success tx broadcast by Carol -// which spends the htlc output on her commitment tx. -func runExtraPreimageFromRemoteCommit(ht *lntest.HarnessTest, - alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) { - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - ht, alice, bob, false, c, zeroConf, - ) - - if ht.IsNeutrinoBackend() { - ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) - } - - // If this is a taproot channel, then we'll need to make some manual - // route hints so Alice can actually find a route. 
- var routeHints []*lnrpc.RouteHint - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - routeHints = makeRouteHints(bob, carol, zeroConf) - } - - // With the network active, we'll now add a new hodl invoice at Carol's - // end. Make sure the cltv expiry delta is large enough, otherwise Bob - // won't send out the outgoing htlc. - preimage := ht.RandomPreimage() - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: 100_000, - CltvExpiry: finalCltvDelta, - Hash: payHash[:], - RouteHints: routeHints, - } - eveInvoice := carol.RPC.AddHoldInvoice(invoiceReq) - - // Subscribe the invoice. - stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) - - // Now that we've created the invoice, we'll send a single payment from - // Alice to Carol. We won't wait for the response however, as Carol - // will not immediately settle the payment. - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: eveInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - alice.RPC.SendPayment(req) - - // Once the payment sent, Alice should have one outgoing HTLC active. - ht.AssertOutgoingHTLCActive(alice, aliceChanPoint, payHash[:]) - - // Bob should have two HTLCs active. One incoming HTLC from Alice, and - // one outgoing to Carol. - ht.AssertIncomingHTLCActive(bob, aliceChanPoint, payHash[:]) - htlc := ht.AssertOutgoingHTLCActive(bob, bobChanPoint, payHash[:]) - - // Carol should have one incoming HTLC from Bob. - ht.AssertIncomingHTLCActive(carol, bobChanPoint, payHash[:]) - - // Wait for Carol to mark invoice as accepted. There is a small gap to - // bridge between adding the htlc to the channel and executing the exit - // hop logic. - ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) - - // Bob now goes offline so the link between Bob and Carol is broken. - restartBob := ht.SuspendNode(bob) - - // Carol now settles the invoice, since her link with Bob is broken, - // Bob won't know the preimage. 
- carol.RPC.SettleInvoice(preimage[:]) - - // We'll now mine enough blocks to trigger Carol's broadcast of her - // commitment transaction due to the fact that the HTLC is about to - // timeout. With the default incoming broadcast delta of 10, this - // will be the htlc expiry height minus 10. - numBlocks := padCLTV(uint32( - invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta, - )) - ht.MineEmptyBlocks(int(numBlocks)) - - // Carol's force close transaction should now be found in the mempool. - // If there are anchors, we also expect Carol's contractcourt to offer - // the anchors to her sweeper - one from the local commitment and the - // other from the remote. - ht.AssertNumPendingSweeps(carol, 2) - - // We now mine a block to confirm Carol's closing transaction, which - // will trigger her sweeper to sweep her CPFP anchor sweeping. - ht.MineClosingTx(bobChanPoint) - - // With the closing transaction confirmed, we should expect Carol's - // HTLC success transaction to be offered to the sweeper along with her - // anchor output. - ht.AssertNumPendingSweeps(carol, 2) - - // Mine a block to trigger the sweep, and clean up the anchor sweeping - // tx. - ht.MineBlocksAndAssertNumTxes(1, 1) - ht.AssertNumTxsInMempool(1) - - // Restart Bob. Once he finishes syncing the channel state, he should - // notice the force close from Carol. - require.NoError(ht, restartBob()) - - // Get the current height to compute number of blocks to mine to - // trigger the htlc timeout resolver from Bob. - height := ht.CurrentHeight() - - // We'll now mine enough blocks to trigger Bob's timeout resolver. - numBlocks = htlc.ExpirationHeight - height - - lncfg.DefaultOutgoingBroadcastDelta - - // We should now have Carol's htlc success tx in the mempool. - numTxesMempool := 1 - ht.AssertNumTxsInMempool(numTxesMempool) - - // For neutrino backend, the timeout resolver needs to extract the - // preimage from the blocks. 
- if ht.IsNeutrinoBackend() { - // Mine a block to confirm Carol's 2nd level success tx. - ht.MineBlocksAndAssertNumTxes(1, 1) - numBlocks-- - } - - // Mine empty blocks so Carol's htlc success tx stays in mempool. Once - // the height is reached, Bob's timeout resolver will resolve the htlc - // by extracing the preimage from the mempool. - ht.MineEmptyBlocks(int(numBlocks)) - - // Finally, check that the Alice's payment is marked as succeeded as - // Bob has settled the htlc using the preimage extracted from Carol's - // 2nd level success tx. - ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED) - - switch c { - // For anchor channel type, we should expect to see Bob's commit output - // and his anchor output be swept in a single tx in the mempool. - case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT: - numTxesMempool++ - - // For script-enforced leased channel, Bob's anchor sweep tx won't - // happen as it's not used for CPFP, hence no wallet utxo is used so - // it'll be uneconomical. - case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE: - } - - // For neutrino backend, Carol's second-stage sweep should be offered - // to her sweeper. - if ht.IsNeutrinoBackend() { - ht.AssertNumPendingSweeps(carol, 1) - - // Mine a block to trigger the sweep. - ht.MineEmptyBlocks(1) - } - - // Mine a block to clean the mempool. - ht.MineBlocksAndAssertNumTxes(1, numTxesMempool) - - // NOTE: for non-standby nodes there's no need to clean up the force - // close as long as the mempool is cleaned. - ht.CleanShutDown() -} - -// testHtlcTimeoutResolverExtractPreimage tests that in the multi-hop setting, -// Alice->Bob->Carol, when Bob's outgoing HTLC is swept by Carol using the -// direct preimage spend, Bob's timeout resolver will extract the preimage from -// the sweep tx found in mempool or blocks(for neutrino). The direct spend tx -// is broadcast by Carol and spends the outpoint on Bob's commit tx. 
-func testHtlcTimeoutResolverExtractPreimageLocal(ht *lntest.HarnessTest) { - runMultiHopHtlcClaimTest(ht, runExtraPreimageFromLocalCommit) -} - -// runExtraPreimageFromLocalCommit checks that Bob's htlc timeout resolver will -// extract the preimage from the direct spend broadcast by Carol which spends -// the htlc output on Bob's commitment tx. -func runExtraPreimageFromLocalCommit(ht *lntest.HarnessTest, - alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) { - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - ht, alice, bob, false, c, zeroConf, - ) - - // If this is a taproot channel, then we'll need to make some manual - // route hints so Alice can actually find a route. - var routeHints []*lnrpc.RouteHint - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - routeHints = makeRouteHints(bob, carol, zeroConf) - } - - // With the network active, we'll now add a new hodl invoice at Carol's - // end. Make sure the cltv expiry delta is large enough, otherwise Bob - // won't send out the outgoing htlc. - preimage := ht.RandomPreimage() - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: 100_000, - CltvExpiry: finalCltvDelta, - Hash: payHash[:], - RouteHints: routeHints, - } - carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) - - // Subscribe the invoice. - stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) - - // Now that we've created the invoice, we'll send a single payment from - // Alice to Carol. We won't wait for the response however, as Carol - // will not immediately settle the payment. - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - alice.RPC.SendPayment(req) - - // Once the payment sent, Alice should have one outgoing HTLC active. 
- ht.AssertOutgoingHTLCActive(alice, aliceChanPoint, payHash[:]) - - // Bob should have two HTLCs active. One incoming HTLC from Alice, and - // one outgoing to Carol. - ht.AssertIncomingHTLCActive(bob, aliceChanPoint, payHash[:]) - htlc := ht.AssertOutgoingHTLCActive(bob, bobChanPoint, payHash[:]) - - // Carol should have one incoming HTLC from Bob. - ht.AssertIncomingHTLCActive(carol, bobChanPoint, payHash[:]) - - // Wait for Carol to mark invoice as accepted. There is a small gap to - // bridge between adding the htlc to the channel and executing the exit - // hop logic. - ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) - - // Bob now goes offline so the link between Bob and Carol is broken. - restartBob := ht.SuspendNode(bob) - - // Carol now settles the invoice, since her link with Bob is broken, - // Bob won't know the preimage. - carol.RPC.SettleInvoice(preimage[:]) - - // Stop Carol so it's easier to check the mempool's state since she - // will broadcast the anchor sweeping once Bob force closes. - restartCarol := ht.SuspendNode(carol) - - // Restart Bob to force close the channel. - require.NoError(ht, restartBob()) - - // Bob force closes the channel, which gets his commitment tx into the - // mempool. - ht.CloseChannelAssertPending(bob, bobChanPoint, true) - - // Bob should now has offered his anchors to his sweeper - both local - // and remote versions. - ht.AssertNumPendingSweeps(bob, 2) - - // Mine Bob's force close tx. - closeTx := ht.MineClosingTx(bobChanPoint) - - // Mine Bob's anchor sweeping tx. - ht.MineBlocksAndAssertNumTxes(1, 1) - blocksMined := 1 - - // We'll now mine enough blocks to trigger Carol's sweeping of the htlc - // via the direct spend. With the default incoming broadcast delta of - // 10, this will be the htlc expiry height minus 10. - // - // NOTE: we need to mine 1 fewer block as we've already mined one to - // confirm Bob's force close tx. 
- numBlocks := padCLTV(uint32( - invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta - 1, - )) - - // If this is a nont script-enforced channel, Bob will be able to sweep - // his commit output after 4 blocks. - if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - // Mine 3 blocks so the output will be offered to the sweeper. - ht.MineEmptyBlocks(defaultCSV - blocksMined - 1) - - // Assert the commit output has been offered to the sweeper. - ht.AssertNumPendingSweeps(bob, 1) - - // Mine a block to trigger the sweep. - ht.MineEmptyBlocks(1) - blocksMined = defaultCSV - } - - // Mine empty blocks so it's easier to check Bob's sweeping txes below. - ht.MineEmptyBlocks(int(numBlocks) - blocksMined) - - // With the above blocks mined, we should expect Carol's to offer the - // htlc output on Bob's commitment to the sweeper. - // - // TODO(yy): it's not offered to the sweeper yet, instead, the utxo - // nursery is creating and broadcasting the sweep tx - we should unify - // this behavior and offer it to the sweeper. - // ht.AssertNumPendingSweeps(carol, 1) - - // Increase the fee rate used by the sweeper so Carol's direct spend tx - // won't be replaced by Bob's timeout tx. - ht.SetFeeEstimate(30000) - - // Restart Carol to sweep the htlc output. - require.NoError(ht, restartCarol()) - - ht.AssertNumPendingSweeps(carol, 2) - ht.MineEmptyBlocks(1) - - // Construct the htlc output on Bob's commitment tx, and decide its - // index based on the commit type below. - htlcOutpoint := wire.OutPoint{Hash: closeTx.TxHash()} - - // Check the current mempool state and we should see, - // - Carol's direct spend tx. - // - Bob's local output sweep tx, if this is NOT script enforced lease. - // - Carol's anchor sweep tx cannot be broadcast as it's uneconomical. 
- switch c { - case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT: - htlcOutpoint.Index = 2 - ht.AssertNumTxsInMempool(2) - - case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE: - htlcOutpoint.Index = 2 - ht.AssertNumTxsInMempool(1) - } - - // Get the current height to compute number of blocks to mine to - // trigger the timeout resolver from Bob. - height := ht.CurrentHeight() - - // We'll now mine enough blocks to trigger Bob's htlc timeout resolver - // to act. Once his timeout resolver starts, it will extract the - // preimage from Carol's direct spend tx found in the mempool. - numBlocks = htlc.ExpirationHeight - height - - lncfg.DefaultOutgoingBroadcastDelta - - // Decrease the fee rate used by the sweeper so Bob's timeout tx will - // not replace Carol's direct spend tx. - ht.SetFeeEstimate(1000) - - // Mine empty blocks so Carol's direct spend tx stays in mempool. Once - // the height is reached, Bob's timeout resolver will resolve the htlc - // by extracing the preimage from the mempool. - ht.MineEmptyBlocks(int(numBlocks)) - - // For neutrino backend, the timeout resolver needs to extract the - // preimage from the blocks. - if ht.IsNeutrinoBackend() { - // Make sure the direct spend tx is still in the mempool. - ht.AssertOutpointInMempool(htlcOutpoint) - - // Mine a block to confirm two txns, - // - Carol's direct spend tx. - // - Bob's to_local output sweep tx. - if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - ht.MineBlocksAndAssertNumTxes(1, 2) - } else { - ht.MineBlocksAndAssertNumTxes(1, 1) - } - } - - // Finally, check that the Alice's payment is marked as succeeded as - // Bob has settled the htlc using the preimage extracted from Carol's - // direct spend tx. - ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED) - - // NOTE: for non-standby nodes there's no need to clean up the force - // close as long as the mempool is cleaned. 
- ht.CleanShutDown() -} diff --git a/itest/lnd_payment_test.go b/itest/lnd_payment_test.go index 3515cbc946..4df148d95e 100644 --- a/itest/lnd_payment_test.go +++ b/itest/lnd_payment_test.go @@ -135,22 +135,32 @@ func testPaymentSucceededHTLCRemoteSwept(ht *lntest.HarnessTest) { // direct preimage spend. ht.AssertNumPendingSweeps(bob, 1) - // Mine a block to trigger the sweep. - // - // TODO(yy): remove it once `blockbeat` is implemented. - ht.MineEmptyBlocks(1) - - // Mine Bob's sweeping tx. - ht.MineBlocksAndAssertNumTxes(1, 1) - // Let Alice come back up. Since the channel is now closed, we expect // different behaviors based on whether the HTLC is a dust. // - For dust payment, it should be failed now as the HTLC won't go // onchain. // - For non-dust payment, it should be marked as succeeded since her // outgoing htlc is swept by Bob. + // + // TODO(yy): move the restart after Bob's sweeping tx being confirmed + // once the blockbeat starts remembering its last processed block and + // can handle looking for spends in the past blocks. require.NoError(ht, restartAlice()) + // Alice should have a pending force close channel. + ht.AssertNumPendingForceClose(alice, 1) + + // Mine a block to trigger the sweep. This is needed because the + // preimage extraction logic from the link is not managed by the + // blockbeat, which means the preimage may be sent to the contest + // resolver after it's launched. + // + // TODO(yy): Expose blockbeat to the link layer. + ht.MineEmptyBlocks(1) + + // Mine Bob's sweeping tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + // Since Alice is restarted, we need to track the payments again. payStream := alice.RPC.TrackPaymentV2(payHash[:]) dustPayStream := alice.RPC.TrackPaymentV2(dustPayHash[:]) @@ -325,9 +335,6 @@ func runTestPaymentHTLCTimeout(ht *lntest.HarnessTest, restartAlice bool) { // sweep her outgoing HTLC in next block. ht.MineBlocksAndAssertNumTxes(1, 1) - // Cleanup the channel. 
- ht.CleanupForceClose(alice) - // We expect the non-dust payment to marked as failed in Alice's // database and also from her stream. ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_FAILED) diff --git a/itest/lnd_psbt_test.go b/itest/lnd_psbt_test.go index 438661138b..a774b3aa70 100644 --- a/itest/lnd_psbt_test.go +++ b/itest/lnd_psbt_test.go @@ -1696,9 +1696,6 @@ func testPsbtChanFundingWithUnstableUtxos(ht *lntest.HarnessTest) { // Make sure Carol sees her to_remote output from the force close tx. ht.AssertNumPendingSweeps(carol, 1) - // Mine one block to trigger the sweep transaction. - ht.MineEmptyBlocks(1) - // We wait for the to_remote sweep tx. ht.AssertNumUTXOsUnconfirmed(carol, 1) @@ -1821,9 +1818,6 @@ func testPsbtChanFundingWithUnstableUtxos(ht *lntest.HarnessTest) { // Make sure Carol sees her to_remote output from the force close tx. ht.AssertNumPendingSweeps(carol, 1) - // Mine one block to trigger the sweep transaction. - ht.MineEmptyBlocks(1) - // We wait for the to_remote sweep tx of channelPoint2. utxos := ht.AssertNumUTXOsUnconfirmed(carol, 1) diff --git a/itest/lnd_route_blinding_test.go b/itest/lnd_route_blinding_test.go index fd378a25f7..85690da7d3 100644 --- a/itest/lnd_route_blinding_test.go +++ b/itest/lnd_route_blinding_test.go @@ -832,7 +832,6 @@ func testErrorHandlingOnChainFailure(ht *lntest.HarnessTest) { // we've already mined 1 block so we need one less than our CSV. ht.MineBlocks(node.DefaultCSV - 1) ht.AssertNumPendingSweeps(ht.Bob, 1) - ht.MineEmptyBlocks(1) ht.MineBlocksAndAssertNumTxes(1, 1) // Restart bob so that we can test that he's able to recover everything @@ -852,6 +851,7 @@ func testErrorHandlingOnChainFailure(ht *lntest.HarnessTest) { ht.AssertNumPendingSweeps(ht.Bob, 0) ht.MineBlocksAndAssertNumTxes(1, 1) + // Assert that the HTLC has cleared. 
ht.AssertHTLCNotActive(ht.Bob, testCase.channels[0], hash[:]) ht.AssertHTLCNotActive(ht.Alice, testCase.channels[0], hash[:]) @@ -866,8 +866,9 @@ func testErrorHandlingOnChainFailure(ht *lntest.HarnessTest) { ) // Clean up the rest of our force close: mine blocks so that Bob's CSV - // expires plus one block to trigger his sweep and then mine it. - ht.MineBlocks(node.DefaultCSV + 1) + // expires to trigger his sweep and then mine it. + ht.MineBlocks(node.DefaultCSV) + ht.AssertNumPendingSweeps(ht.Bob, 1) ht.MineBlocksAndAssertNumTxes(1, 1) // Bring carol back up so that we can close out the rest of our diff --git a/itest/lnd_sweep_test.go b/itest/lnd_sweep_test.go index 158e8768f9..130b8202d0 100644 --- a/itest/lnd_sweep_test.go +++ b/itest/lnd_sweep_test.go @@ -2,7 +2,6 @@ package itest import ( "fmt" - "math" "time" "github.com/btcsuite/btcd/btcutil" @@ -61,10 +60,7 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) { // Set up the fee estimator to return the testing fee rate when the // conf target is the deadline. - // - // TODO(yy): switch to conf when `blockbeat` is in place. - // ht.SetFeeEstimateWithConf(startFeeRateAnchor, deadlineDeltaAnchor) - ht.SetFeeEstimate(startFeeRateAnchor) + ht.SetFeeEstimateWithConf(startFeeRateAnchor, deadlineDeltaAnchor) // htlcValue is the outgoing HTLC's value. htlcValue := invoiceAmt @@ -171,16 +167,20 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) { )) ht.MineEmptyBlocks(int(numBlocks)) - // Assert Bob's force closing tx has been broadcast. - closeTxid := ht.AssertNumTxsInMempool(1)[0] + // Assert Bob's force closing tx has been broadcast. We should see two + // txns in the mempool: + // 1. Bob's force closing tx. + // 2. Bob's anchor sweeping tx CPFPing the force close tx. + _, sweepTx := ht.AssertForceCloseAndAnchorTxnsInMempool() // Remember the force close height so we can calculate the deadline // height. 
forceCloseHeight := ht.CurrentHeight() - // Bob should have two pending sweeps, + var anchorSweep *walletrpc.PendingSweep + + // Bob should have one pending sweep, // - anchor sweeping from his local commitment. - // - anchor sweeping from his remote commitment (invalid). // // TODO(yy): consider only sweeping the anchor from the local // commitment. Previously we would sweep up to three versions of @@ -189,34 +189,22 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) { // their commitment tx and replaces ours. With the new fee bumping, we // should be safe to only sweep our local anchor since we RBF it on // every new block, which destroys the remote's ability to pin us. - sweeps := ht.AssertNumPendingSweeps(bob, 2) - - // The two anchor sweeping should have the same deadline height. - deadlineHeight := forceCloseHeight + deadlineDeltaAnchor - require.Equal(ht, deadlineHeight, sweeps[0].DeadlineHeight) - require.Equal(ht, deadlineHeight, sweeps[1].DeadlineHeight) - - // Remember the deadline height for the CPFP anchor. - anchorDeadline := sweeps[0].DeadlineHeight + expectedNumSweeps := 1 - // Mine a block so Bob's force closing tx stays in the mempool, which - // also triggers the CPFP anchor sweep. - ht.MineEmptyBlocks(1) + // For neutrino backend, Bob would have two anchor sweeps - one from + // the local and the other from the remote. + if ht.IsNeutrinoBackend() { + expectedNumSweeps = 2 + } - // Bob should still have two pending sweeps, - // - anchor sweeping from his local commitment. - // - anchor sweeping from his remote commitment (invalid). - ht.AssertNumPendingSweeps(bob, 2) + anchorSweep = ht.AssertNumPendingSweeps(bob, expectedNumSweeps)[0] - // We now check the expected fee and fee rate are used for Bob's anchor - // sweeping tx. - // - // We should see Bob's anchor sweeping tx triggered by the above - // block, along with his force close tx. 
- txns := ht.GetNumTxsFromMempool(2) + // The anchor sweeping should have the expected deadline height. + deadlineHeight := forceCloseHeight + deadlineDeltaAnchor + require.Equal(ht, deadlineHeight, anchorSweep.DeadlineHeight) - // Find the sweeping tx. - sweepTx := ht.FindSweepingTxns(txns, 1, closeTxid)[0] + // Remember the deadline height for the CPFP anchor. + anchorDeadline := anchorSweep.DeadlineHeight // Get the weight for Bob's anchor sweeping tx. txWeight := ht.CalculateTxWeight(sweepTx) @@ -228,11 +216,10 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) { fee := uint64(ht.CalculateTxFee(sweepTx)) feeRate := uint64(ht.CalculateTxFeeRate(sweepTx)) - // feeFuncWidth is the width of the fee function. By the time we got - // here, we've already mined one block, and the fee function maxes - // out one block before the deadline, so the width is the original - // deadline minus 2. - feeFuncWidth := deadlineDeltaAnchor - 2 + // feeFuncWidth is the width of the fee function. The fee function + // maxes out one block before the deadline, so the width is the + // original deadline minus 1. + feeFuncWidth := deadlineDeltaAnchor - 1 // Calculate the expected delta increased per block. feeDelta := (cpfpBudget - startFeeAnchor).MulF64( @@ -258,20 +245,27 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) { // Bob's fee bumper should increase its fees. ht.MineEmptyBlocks(1) - // Bob should still have two pending sweeps, - // - anchor sweeping from his local commitment. - // - anchor sweeping from his remote commitment (invalid). - ht.AssertNumPendingSweeps(bob, 2) - - // Make sure Bob's old sweeping tx has been removed from the - // mempool. - ht.AssertTxNotInMempool(sweepTx.TxHash()) + // Bob should still have the anchor sweeping from his local + // commitment. His anchor sweeping from his remote commitment + // is invalid and should be removed. 
+ ht.AssertNumPendingSweeps(bob, expectedNumSweeps) // We expect to see two txns in the mempool, // - Bob's force close tx. // - Bob's anchor sweep tx. ht.AssertNumTxsInMempool(2) + // Make sure Bob's old sweeping tx has been removed from the + // mempool. + ht.AssertTxNotInMempool(sweepTx.TxHash()) + + // Assert the two txns are still in the mempool and grab the + // sweeping tx. + // + // NOTE: must call it again after `AssertTxNotInMempool` to + // make sure we get the replaced tx. + _, sweepTx = ht.AssertForceCloseAndAnchorTxnsInMempool() + // We expect the fees to increase by i*delta. expectedFee := startFeeAnchor + feeDelta.MulF64(float64(i)) expectedFeeRate := chainfee.NewSatPerKWeight( @@ -280,11 +274,6 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) { // We should see Bob's anchor sweeping tx being fee bumped // since it's not confirmed, along with his force close tx. - txns = ht.GetNumTxsFromMempool(2) - - // Find the sweeping tx. - sweepTx = ht.FindSweepingTxns(txns, 1, closeTxid)[0] - // Calculate the fee rate of Bob's new sweeping tx. feeRate = uint64(ht.CalculateTxFeeRate(sweepTx)) @@ -292,9 +281,9 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) { fee = uint64(ht.CalculateTxFee(sweepTx)) ht.Logf("Bob(position=%v): txWeight=%v, expected: [fee=%d, "+ - "feerate=%v], got: [fee=%v, feerate=%v]", + "feerate=%v], got: [fee=%v, feerate=%v] in tx %v", feeFuncWidth-i, txWeight, expectedFee, - expectedFeeRate, fee, feeRate) + expectedFeeRate, fee, feeRate, sweepTx.TxHash()) // Assert Bob's tx has the expected fee and fee rate. require.InEpsilonf(ht, uint64(expectedFee), fee, 0.01, @@ -314,22 +303,23 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) { // Mine one more block, we'd use up all the CPFP budget. ht.MineEmptyBlocks(1) + // We expect to see two txns in the mempool, + // - Bob's force close tx. + // - Bob's anchor sweep tx. 
+ ht.AssertNumTxsInMempool(2) + // Make sure Bob's old sweeping tx has been removed from the mempool. ht.AssertTxNotInMempool(sweepTx.TxHash()) // Get the last sweeping tx - we should see two txns here, Bob's anchor // sweeping tx and his force close tx. - txns = ht.GetNumTxsFromMempool(2) - - // Find the sweeping tx. - sweepTx = ht.FindSweepingTxns(txns, 1, closeTxid)[0] + // + // NOTE: must call it again after `AssertTxNotInMempool` to make sure + // we get the replaced tx. + _, sweepTx = ht.AssertForceCloseAndAnchorTxnsInMempool() - // Calculate the fee of Bob's new sweeping tx. - fee = uint64(ht.CalculateTxFee(sweepTx)) - - // Assert the budget is now used up. - require.InEpsilonf(ht, uint64(cpfpBudget), fee, 0.01, "want %d, got %d", - cpfpBudget, fee) + // Bob should have the anchor sweeping from his local commitment. + ht.AssertNumPendingSweeps(bob, expectedNumSweeps) // Mine one more block. Since Bob's budget has been used up, there // won't be any more sweeping attempts. We now assert this by checking @@ -340,10 +330,7 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) { // // We expect two txns here, one for the anchor sweeping, the other for // the force close tx. - txns = ht.GetNumTxsFromMempool(2) - - // Find the sweeping tx. - currentSweepTx := ht.FindSweepingTxns(txns, 1, closeTxid)[0] + _, currentSweepTx := ht.AssertForceCloseAndAnchorTxnsInMempool() // Assert the anchor sweep tx stays unchanged. require.Equal(ht, sweepTx.TxHash(), currentSweepTx.TxHash()) @@ -357,6 +344,7 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) { // the HTLC sweeping behaviors so we just perform a simple check and // exit the test. ht.AssertNumPendingSweeps(bob, 1) + ht.MineBlocksAndAssertNumTxes(1, 1) // Finally, clean the mempool for the next test. 
ht.CleanShutDown() @@ -404,10 +392,7 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) { // Set up the fee estimator to return the testing fee rate when the // conf target is the deadline. - // - // TODO(yy): switch to conf when `blockbeat` is in place. - // ht.SetFeeEstimateWithConf(startFeeRateAnchor, deadlineDeltaAnchor) - ht.SetFeeEstimate(startFeeRateAnchor) + ht.SetFeeEstimateWithConf(startFeeRateAnchor, deadlineDeltaAnchor) // Create a preimage, that will be held by Carol. var preimage lntypes.Preimage @@ -524,40 +509,30 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) { numBlocks := forceCloseHeight - currentHeight ht.MineEmptyBlocks(int(numBlocks)) - // Assert Bob's force closing tx has been broadcast. - closeTxid := ht.AssertNumTxsInMempool(1)[0] + // Assert Bob's force closing tx has been broadcast. We should see two + // txns in the mempool: + // 1. Bob's force closing tx. + // 2. Bob's anchor sweeping tx CPFPing the force close tx. + _, sweepTx := ht.AssertForceCloseAndAnchorTxnsInMempool() - // Bob should have two pending sweeps, + // Bob should have one pending sweep, // - anchor sweeping from his local commitment. - // - anchor sweeping from his remote commitment (invalid). - sweeps := ht.AssertNumPendingSweeps(bob, 2) - - // The two anchor sweeping should have the same deadline height. - deadlineHeight := forceCloseHeight + deadlineDeltaAnchor - require.Equal(ht, deadlineHeight, sweeps[0].DeadlineHeight) - require.Equal(ht, deadlineHeight, sweeps[1].DeadlineHeight) + expectedNumSweeps := 1 - // Remember the deadline height for the CPFP anchor. - anchorDeadline := sweeps[0].DeadlineHeight + // For neutrino backend, Bob would have two anchor sweeps - one from + // the local and the other from the remote. + if ht.IsNeutrinoBackend() { + expectedNumSweeps = 2 + } - // Mine a block so Bob's force closing tx stays in the mempool, which - // also triggers the CPFP anchor sweep. 
- ht.MineEmptyBlocks(1) + anchorSweep := ht.AssertNumPendingSweeps(bob, expectedNumSweeps)[0] - // Bob should still have two pending sweeps, - // - anchor sweeping from his local commitment. - // - anchor sweeping from his remote commitment (invalid). - ht.AssertNumPendingSweeps(bob, 2) - - // We now check the expected fee and fee rate are used for Bob's anchor - // sweeping tx. - // - // We should see Bob's anchor sweeping tx triggered by the above - // block, along with his force close tx. - txns := ht.GetNumTxsFromMempool(2) + // The anchor sweeping should have the expected deadline height. + deadlineHeight := forceCloseHeight + deadlineDeltaAnchor + require.Equal(ht, deadlineHeight, anchorSweep.DeadlineHeight) - // Find the sweeping tx. - sweepTx := ht.FindSweepingTxns(txns, 1, closeTxid)[0] + // Remember the deadline height for the CPFP anchor. + anchorDeadline := anchorSweep.DeadlineHeight // Get the weight for Bob's anchor sweeping tx. txWeight := ht.CalculateTxWeight(sweepTx) @@ -569,11 +544,10 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) { fee := uint64(ht.CalculateTxFee(sweepTx)) feeRate := uint64(ht.CalculateTxFeeRate(sweepTx)) - // feeFuncWidth is the width of the fee function. By the time we got - // here, we've already mined one block, and the fee function maxes - // out one block before the deadline, so the width is the original - // deadline minus 2. - feeFuncWidth := deadlineDeltaAnchor - 2 + // feeFuncWidth is the width of the fee function. The fee function + // maxes out one block before the deadline, so the width is the + // original deadline minus 1. + feeFuncWidth := deadlineDeltaAnchor - 1 // Calculate the expected delta increased per block. feeDelta := (cpfpBudget - startFeeAnchor).MulF64( @@ -599,10 +573,15 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) { // Bob's fee bumper should increase its fees. 
ht.MineEmptyBlocks(1) - // Bob should still have two pending sweeps, - // - anchor sweeping from his local commitment. - // - anchor sweeping from his remote commitment (invalid). - ht.AssertNumPendingSweeps(bob, 2) + // Bob should still have the anchor sweeping from his local + // commitment. His anchor sweeping from his remote commitment + // is invalid and should be removed. + ht.AssertNumPendingSweeps(bob, expectedNumSweeps) + + // We expect to see two txns in the mempool, + // - Bob's force close tx. + // - Bob's anchor sweep tx. + ht.AssertNumTxsInMempool(2) // Make sure Bob's old sweeping tx has been removed from the // mempool. @@ -611,7 +590,7 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) { // We expect to see two txns in the mempool, // - Bob's force close tx. // - Bob's anchor sweep tx. - ht.AssertNumTxsInMempool(2) + _, sweepTx = ht.AssertForceCloseAndAnchorTxnsInMempool() // We expect the fees to increase by i*delta. expectedFee := startFeeAnchor + feeDelta.MulF64(float64(i)) @@ -619,13 +598,6 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) { expectedFee, txWeight, ) - // We should see Bob's anchor sweeping tx being fee bumped - // since it's not confirmed, along with his force close tx. - txns = ht.GetNumTxsFromMempool(2) - - // Find the sweeping tx. - sweepTx = ht.FindSweepingTxns(txns, 1, closeTxid)[0] - // Calculate the fee rate of Bob's new sweeping tx. feeRate = uint64(ht.CalculateTxFeeRate(sweepTx)) @@ -633,9 +605,9 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) { fee = uint64(ht.CalculateTxFee(sweepTx)) ht.Logf("Bob(position=%v): txWeight=%v, expected: [fee=%d, "+ - "feerate=%v], got: [fee=%v, feerate=%v]", + "feerate=%v], got: [fee=%v, feerate=%v] in tx %v", feeFuncWidth-i, txWeight, expectedFee, - expectedFeeRate, fee, feeRate) + expectedFeeRate, fee, feeRate, sweepTx.TxHash()) // Assert Bob's tx has the expected fee and fee rate. 
require.InEpsilonf(ht, uint64(expectedFee), fee, 0.01, @@ -655,15 +627,17 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) { // Mine one more block, we'd use up all the CPFP budget. ht.MineEmptyBlocks(1) + // We expect to see two txns in the mempool, + // - Bob's force close tx. + // - Bob's anchor sweep tx. + ht.AssertNumTxsInMempool(2) + // Make sure Bob's old sweeping tx has been removed from the mempool. ht.AssertTxNotInMempool(sweepTx.TxHash()) // Get the last sweeping tx - we should see two txns here, Bob's anchor // sweeping tx and his force close tx. - txns = ht.GetNumTxsFromMempool(2) - - // Find the sweeping tx. - sweepTx = ht.FindSweepingTxns(txns, 1, closeTxid)[0] + _, sweepTx = ht.AssertForceCloseAndAnchorTxnsInMempool() // Calculate the fee of Bob's new sweeping tx. fee = uint64(ht.CalculateTxFee(sweepTx)) @@ -681,10 +655,7 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) { // // We expect two txns here, one for the anchor sweeping, the other for // the force close tx. - txns = ht.GetNumTxsFromMempool(2) - - // Find the sweeping tx. - currentSweepTx := ht.FindSweepingTxns(txns, 1, closeTxid)[0] + _, currentSweepTx := ht.AssertForceCloseAndAnchorTxnsInMempool() // Assert the anchor sweep tx stays unchanged. require.Equal(ht, sweepTx.TxHash(), currentSweepTx.TxHash()) @@ -698,6 +669,7 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) { // the HTLC sweeping behaviors so we just perform a simple check and // exit the test. ht.AssertNumPendingSweeps(bob, 1) + ht.MineBlocksAndAssertNumTxes(1, 1) // Finally, clean the mempool for the next test. ht.CleanShutDown() @@ -735,9 +707,9 @@ func testSweepHTLCs(ht *lntest.HarnessTest) { cltvDelta := routing.MinCLTVDelta // Start tracking the deadline delta of Bob's HTLCs. We need one block - // for the CSV lock, and another block to trigger the sweeper to sweep. 
- outgoingHTLCDeadline := int32(cltvDelta - 2) - incomingHTLCDeadline := int32(lncfg.DefaultIncomingBroadcastDelta - 2) + // to trigger the sweeper to sweep. + outgoingHTLCDeadline := int32(cltvDelta - 1) + incomingHTLCDeadline := int32(lncfg.DefaultIncomingBroadcastDelta - 1) // startFeeRate1 and startFeeRate2 are returned by the fee estimator in // sat/kw. They will be used as the starting fee rate for the linear @@ -894,34 +866,35 @@ func testSweepHTLCs(ht *lntest.HarnessTest) { // Bob should now have two pending sweeps, one for the anchor on the // local commitment, the other on the remote commitment. - ht.AssertNumPendingSweeps(bob, 2) + expectedNumSweeps := 1 - // Assert Bob's force closing tx has been broadcast. - ht.AssertNumTxsInMempool(1) + // For neutrino backend, we expect the anchor output from his remote + // commitment to be present. + if ht.IsNeutrinoBackend() { + expectedNumSweeps = 2 + } + + ht.AssertNumPendingSweeps(bob, expectedNumSweeps) - // Mine the force close tx, which triggers Bob's contractcourt to offer - // his outgoing HTLC to his sweeper. + // We expect to see two txns in the mempool: + // 1. Bob's force closing tx. + // 2. Bob's anchor CPFP sweeping tx. + ht.AssertNumTxsInMempool(2) + + // Mine the force close tx and CPFP sweeping tx, which triggers Bob's + // contractcourt to offer his outgoing HTLC to his sweeper. // // NOTE: HTLC outputs are only offered to sweeper when the force close // tx is confirmed and the CSV has reached. - ht.MineBlocksAndAssertNumTxes(1, 1) - - // Update the blocks left till Bob force closes Alice->Bob. - blocksTillIncomingSweep-- - - // Bob should have two pending sweeps, one for the anchor sweeping, the - // other for the outgoing HTLC. - ht.AssertNumPendingSweeps(bob, 2) - - // Mine one block to confirm Bob's anchor sweeping tx, which will - // trigger his sweeper to publish the HTLC sweeping tx. 
- ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxes(1, 2) // Update the blocks left till Bob force closes Alice->Bob. blocksTillIncomingSweep-- - // Bob should now have one sweep and one sweeping tx in the mempool. + // Bob should have one pending sweep for the outgoing HTLC. ht.AssertNumPendingSweeps(bob, 1) + + // Bob should have one sweeping tx in the mempool. outgoingSweep := ht.GetNumTxsFromMempool(1)[0] // Check the shape of the sweeping tx - we expect it to be @@ -945,8 +918,8 @@ func testSweepHTLCs(ht *lntest.HarnessTest) { // Assert the initial sweeping tx is using the start fee rate. outgoingStartFeeRate := ht.CalculateTxFeeRate(outgoingSweep) require.InEpsilonf(ht, uint64(startFeeRate1), - uint64(outgoingStartFeeRate), 0.01, "want %d, got %d", - startFeeRate1, outgoingStartFeeRate) + uint64(outgoingStartFeeRate), 0.01, "want %d, got %d in tx=%v", + startFeeRate1, outgoingStartFeeRate, outgoingSweep.TxHash()) // Now the start fee rate is checked, we can calculate the fee rate // delta. @@ -971,13 +944,12 @@ func testSweepHTLCs(ht *lntest.HarnessTest) { ) ht.Logf("Bob's %s HTLC (deadline=%v): txWeight=%v, want "+ - "feerate=%v, got feerate=%v, delta=%v", desc, + "feerate=%v, got feerate=%v, delta=%v in tx %v", desc, deadline-position, txSize, expectedFeeRate, - feeRate, delta) + feeRate, delta, sweepTx.TxHash()) require.InEpsilonf(ht, uint64(expectedFeeRate), uint64(feeRate), - 0.01, "want %v, got %v in tx=%v", expectedFeeRate, - feeRate, sweepTx.TxHash()) + 0.01, "want %v, got %v", expectedFeeRate, feeRate) } // We now mine enough blocks to trigger Bob to force close channel @@ -1019,22 +991,34 @@ func testSweepHTLCs(ht *lntest.HarnessTest) { // Update Bob's fee function position. outgoingFuncPosition++ - // Bob should now have three pending sweeps: + // Bob should now have two pending sweeps: // 1. the outgoing HTLC output. // 2. the anchor output from his local commitment. - // 3. the anchor output from his remote commitment. 
- ht.AssertNumPendingSweeps(bob, 3) + expectedNumSweeps = 2 - // We should see two txns in the mempool: + // For neutrino backend, we expect the anchor output from his remote + // commitment to be present. + if ht.IsNeutrinoBackend() { + expectedNumSweeps = 3 + } + + ht.AssertNumPendingSweeps(bob, expectedNumSweeps) + + // We should see three txns in the mempool: // 1. Bob's outgoing HTLC sweeping tx. // 2. Bob's force close tx for Alice->Bob. - txns := ht.GetNumTxsFromMempool(2) + // 3. Bob's anchor CPFP sweeping tx for Alice->Bob. + txns := ht.GetNumTxsFromMempool(3) // Find the force close tx - we expect it to have a single input. closeTx := txns[0] if len(closeTx.TxIn) != 1 { closeTx = txns[1] } + if len(closeTx.TxIn) != 1 { + closeTx = txns[2] + } + require.Len(ht, closeTx.TxIn, 1) // We don't care the behavior of the anchor sweep in this test, so we // mine the force close tx to trigger Bob's contractcourt to offer his @@ -1050,13 +1034,6 @@ func testSweepHTLCs(ht *lntest.HarnessTest) { // 3. the anchor sweeping on Alice-> Bob. ht.AssertNumPendingSweeps(bob, 3) - // Mine one block, which will trigger his sweeper to publish his - // incoming HTLC sweeping tx. - ht.MineEmptyBlocks(1) - - // Update the fee function's positions. - outgoingFuncPosition++ - // We should see three txns in the mempool: // 1. the outgoing HTLC sweeping tx. // 2. the incoming HTLC sweeping tx. @@ -1224,8 +1201,9 @@ func testSweepHTLCs(ht *lntest.HarnessTest) { // Test: // 1. Alice's anchor sweeping is not attempted, instead, it should be swept // together with her to_local output using the no deadline path. -// 2. Bob would also sweep his anchor and to_local outputs in a single -// sweeping tx using the no deadline path. +// 2. Bob would also sweep his anchor and to_local outputs separately due to +// they have different deadline heights, which means only the to_local +// sweeping tx will succeed as the anchor sweeping is not economical. // 3. 
Both Alice and Bob's RBF attempts are using the fee rates calculated // from the deadline and budget. // 4. Wallet UTXOs requirements are met - neither Alice nor Bob needs wallet @@ -1238,10 +1216,20 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) { // config. deadline := uint32(1000) - // The actual deadline used by the fee function will be one block off - // from the deadline configured as we require one block to be mined to - // trigger the sweep. - deadlineA, deadlineB := deadline-1, deadline-1 + // deadlineA is the deadline used for Alice, since her commit output is + // offered to the sweeper at CSV-1. With a deadline of 1000, her actual + // width of her fee func is CSV+1000-1. Given we are using a CSV of 2 + // here, her fee func deadline then becomes 1001. + deadlineA := deadline + 1 + + // deadlineB is the deadline used for Bob, the actual deadline used by + // the fee function will be one block off from the deadline configured + // as we require one block to be mined to trigger the sweep. In + // addition, when sweeping his to_local output from Alice's commit tx, + // because of CSV of 2, the starting height will be + // "force_close_height+2", which means when the sweep request is + // received by the sweeper, the actual deadline delta is "deadline+1". + deadlineB := deadline + 1 // startFeeRate is returned by the fee estimator in sat/kw. This // will be used as the starting fee rate for the linear fee func used @@ -1252,7 +1240,7 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) { // Set up the fee estimator to return the testing fee rate when the // conf target is the deadline. - ht.SetFeeEstimateWithConf(startFeeRate, deadlineA) + ht.SetFeeEstimateWithConf(startFeeRate, deadlineB) // toLocalCSV is the CSV delay for Alice's to_local output. We use a // small value to save us from mining blocks. 
@@ -1260,25 +1248,7 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) { // NOTE: once the force close tx is confirmed, we expect anchor // sweeping starts. Then two more block later the commit output // sweeping starts. - // - // NOTE: The CSV value is chosen to be 3 instead of 2, to reduce the - // possibility of flakes as there is a race between the two goroutines: - // G1 - Alice's sweeper receives the commit output. - // G2 - Alice's sweeper receives the new block mined. - // G1 is triggered by the same block being received by Alice's - // contractcourt, deciding the commit output is mature and offering it - // to her sweeper. Normally, we'd expect G2 to be finished before G1 - // because it's the same block processed by both contractcourt and - // sweeper. However, if G2 is delayed (maybe the sweeper is slow in - // finishing its previous round), G1 may finish before G2. This will - // cause the sweeper to add the commit output to its pending inputs, - // and once G2 fires, it will then start sweeping this output, - // resulting a valid sweep tx being created using her commit and anchor - // outputs. - // - // TODO(yy): fix the above issue by making sure subsystems share the - // same view on current block height. - toLocalCSV := 3 + toLocalCSV := 2 // htlcAmt is the amount of the HTLC in sats, this should be Alice's // to_remote amount that goes to Bob. @@ -1377,155 +1347,41 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) { // - commit sweeping from the to_remote on Alice's commit tx. ht.AssertNumPendingSweeps(bob, 2) + // Bob's sweeper should have broadcast the commit output sweeping tx. + // At the block which mined the force close tx, Bob's `chainWatcher` + // will process the blockbeat first, which sends a signal to his + // `ChainArbitrator` to launch the resolvers. Once launched, the sweep + // requests will be sent to the sweeper. 
Finally, when the sweeper + // receives this blockbeat, it will create the sweeping tx and publish + // it. + ht.AssertNumTxsInMempool(1) + // Mine one more empty block should trigger Bob's sweeping. Since we - // use a CSV of 3, this means Alice's to_local output is one block away - // from being mature. + // use a CSV of 2, this means Alice's to_local output is now mature. ht.MineEmptyBlocks(1) - // We expect to see one sweeping tx in the mempool: - // - Alice's anchor sweeping tx must have been failed due to the fee - // rate chosen in this test - the anchor sweep tx has no output. - // - Bob's sweeping tx, which sweeps both his anchor and commit outputs. - bobSweepTx := ht.GetNumTxsFromMempool(1)[0] - // We expect two pending sweeps for Bob - anchor and commit outputs. - pendingSweepBob := ht.AssertNumPendingSweeps(bob, 2)[0] - - // The sweeper may be one block behind contractcourt, so we double - // check the actual deadline. - // - // TODO(yy): assert they are equal once blocks are synced via - // `blockbeat`. - currentHeight := int32(ht.CurrentHeight()) - actualDeadline := int32(pendingSweepBob.DeadlineHeight) - currentHeight - if actualDeadline != int32(deadlineB) { - ht.Logf("!!! Found unsynced block between sweeper and "+ - "contractcourt, expected deadline=%v, got=%v", - deadlineB, actualDeadline) - - deadlineB = uint32(actualDeadline) - } - - // Alice should still have one pending sweep - the anchor output. - ht.AssertNumPendingSweeps(alice, 1) - - // We now check Bob's sweeping tx. - // - // Bob's sweeping tx should have 2 inputs, one from his commit output, - // the other from his anchor output. - require.Len(ht, bobSweepTx.TxIn, 2) - - // Because Bob is sweeping without deadline pressure, the starting fee - // rate should be the min relay fee rate. 
- bobStartFeeRate := ht.CalculateTxFeeRate(bobSweepTx) - require.InEpsilonf(ht, uint64(chainfee.FeePerKwFloor), - uint64(bobStartFeeRate), 0.01, "want %v, got %v", - chainfee.FeePerKwFloor, bobStartFeeRate) - - // With Bob's starting fee rate being validated, we now calculate his - // ending fee rate and fee rate delta. - // - // Bob sweeps two inputs - anchor and commit, so the starting budget - // should come from the sum of these two. - bobValue := btcutil.Amount(bobToLocal + 330) - bobBudget := bobValue.MulF64(contractcourt.DefaultBudgetRatio) - - // Calculate the ending fee rate and fee rate delta used in his fee - // function. - bobTxWeight := ht.CalculateTxWeight(bobSweepTx) - bobEndingFeeRate := chainfee.NewSatPerKWeight(bobBudget, bobTxWeight) - bobFeeRateDelta := (bobEndingFeeRate - bobStartFeeRate) / - chainfee.SatPerKWeight(deadlineB-1) - - // Mine an empty block, which should trigger Alice's contractcourt to - // offer her commit output to the sweeper. - ht.MineEmptyBlocks(1) - - // Alice should have both anchor and commit as the pending sweep - // requests. - aliceSweeps := ht.AssertNumPendingSweeps(alice, 2) - aliceAnchor, aliceCommit := aliceSweeps[0], aliceSweeps[1] - if aliceAnchor.AmountSat > aliceCommit.AmountSat { - aliceAnchor, aliceCommit = aliceCommit, aliceAnchor - } - - // The sweeper may be one block behind contractcourt, so we double - // check the actual deadline. - // - // TODO(yy): assert they are equal once blocks are synced via - // `blockbeat`. - currentHeight = int32(ht.CurrentHeight()) - actualDeadline = int32(aliceCommit.DeadlineHeight) - currentHeight - if actualDeadline != int32(deadlineA) { - ht.Logf("!!! 
Found unsynced block between Alice's sweeper and "+ - "contractcourt, expected deadline=%v, got=%v", - deadlineA, actualDeadline) - - deadlineA = uint32(actualDeadline) - } - - // We now wait for 30 seconds to overcome the flake - there's a block - // race between contractcourt and sweeper, causing the sweep to be - // broadcast earlier. - // - // TODO(yy): remove this once `blockbeat` is in place. - aliceStartPosition := 0 - var aliceFirstSweepTx *wire.MsgTx - err := wait.NoError(func() error { - mem := ht.GetRawMempool() - if len(mem) != 2 { - return fmt.Errorf("want 2, got %v in mempool: %v", - len(mem), mem) - } - - // If there are two txns, it means Alice's sweep tx has been - // created and published. - aliceStartPosition = 1 - - txns := ht.GetNumTxsFromMempool(2) - aliceFirstSweepTx = txns[0] - - // Reassign if the second tx is larger. - if txns[1].TxOut[0].Value > aliceFirstSweepTx.TxOut[0].Value { - aliceFirstSweepTx = txns[1] - } - - return nil - }, wait.DefaultTimeout) - ht.Logf("Checking mempool got: %v", err) - - // Mine an empty block, which should trigger Alice's sweeper to publish - // her commit sweep along with her anchor output. - ht.MineEmptyBlocks(1) + ht.AssertNumPendingSweeps(bob, 2) - // If Alice has already published her initial sweep tx, the above mined - // block would trigger an RBF. We now need to assert the mempool has - // removed the replaced tx. - if aliceFirstSweepTx != nil { - ht.AssertTxNotInMempool(aliceFirstSweepTx.TxHash()) - } + // We expect two pending sweeps for Alice - anchor and commit outputs. + ht.AssertNumPendingSweeps(alice, 2) // We also remember the positions of fee functions used by Alice and // Bob. They will be used to calculate the expected fee rates later. - // - // Alice's sweeping tx has just been created, so she is at the starting - // position. For Bob, due to the above mined blocks, his fee function - // is now at position 2. 
- alicePosition, bobPosition := uint32(aliceStartPosition), uint32(2) + alicePosition, bobPosition := uint32(0), uint32(1) // We should see two txns in the mempool: // - Alice's sweeping tx, which sweeps her commit output at the // starting fee rate - Alice's anchor output won't be swept with her // commit output together because they have different deadlines. - // - Bob's previous sweeping tx, which sweeps both his anchor and - // commit outputs, at the starting fee rate. + // - Bob's previous sweeping tx, which sweeps his and commit outputs, + // at the starting fee rate. txns := ht.GetNumTxsFromMempool(2) // Assume the first tx is Alice's sweeping tx, if the second tx has a // larger output value, then that's Alice's as her to_local value is // much gearter. - aliceSweepTx := txns[0] - bobSweepTx = txns[1] + aliceSweepTx, bobSweepTx := txns[0], txns[1] // Swap them if bobSweepTx is smaller. if bobSweepTx.TxOut[0].Value > aliceSweepTx.TxOut[0].Value { @@ -1539,20 +1395,6 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) { require.Len(ht, aliceSweepTx.TxIn, 1) require.Len(ht, aliceSweepTx.TxOut, 1) - // We now check Alice's sweeping tx to see if it's already published. - // - // TODO(yy): remove this check once we have better block control. - aliceSweeps = ht.AssertNumPendingSweeps(alice, 2) - aliceCommit = aliceSweeps[0] - if aliceCommit.AmountSat < aliceSweeps[1].AmountSat { - aliceCommit = aliceSweeps[1] - } - if aliceCommit.BroadcastAttempts > 1 { - ht.Logf("!!! Alice's commit sweep has already been broadcast, "+ - "broadcast_attempts=%v", aliceCommit.BroadcastAttempts) - alicePosition = aliceCommit.BroadcastAttempts - } - // Alice's sweeping tx should use the min relay fee rate as there's no // deadline pressure. 
aliceStartingFeeRate := chainfee.FeePerKwFloor @@ -1567,7 +1409,7 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) { aliceTxWeight := uint64(ht.CalculateTxWeight(aliceSweepTx)) aliceEndingFeeRate := sweep.DefaultMaxFeeRate.FeePerKWeight() aliceFeeRateDelta := (aliceEndingFeeRate - aliceStartingFeeRate) / - chainfee.SatPerKWeight(deadlineA-1) + chainfee.SatPerKWeight(deadlineA) aliceFeeRate := ht.CalculateTxFeeRate(aliceSweepTx) expectedFeeRateAlice := aliceStartingFeeRate + @@ -1576,119 +1418,41 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) { uint64(aliceFeeRate), 0.02, "want %v, got %v", expectedFeeRateAlice, aliceFeeRate) - // We now check Bob' sweeping tx. - // - // The above mined block will trigger Bob's sweeper to RBF his previous - // sweeping tx, which will fail due to RBF rule#4 - the additional fees - // paid are not sufficient. This happens as our default incremental - // relay fee rate is 1 sat/vb, with the tx size of 771 weight units, or - // 192 vbytes, we need to pay at least 192 sats more to be able to RBF. - // However, since Bob's budget delta is (100_000 + 330) * 0.5 / 1008 = - // 49.77 sats, it means Bob can only perform a successful RBF every 4 - // blocks. - // - // Assert Bob's sweeping tx is not RBFed. - bobFeeRate := ht.CalculateTxFeeRate(bobSweepTx) - expectedFeeRateBob := bobStartFeeRate - require.InEpsilonf(ht, uint64(expectedFeeRateBob), uint64(bobFeeRate), - 0.01, "want %d, got %d", expectedFeeRateBob, bobFeeRate) - - // reloclateAlicePosition is a temp hack to find the actual fee - // function position used for Alice. Due to block sync issue among the - // subsystems, we can end up having this situation: - // - sweeper is at block 2, starts sweeping an input with deadline 100. - // - fee bumper is at block 1, and thinks the conf target is 99. - // - new block 3 arrives, the func now is at position 2. + // We now check Bob's sweeping tx. // - // TODO(yy): fix it using `blockbeat`. 
- reloclateAlicePosition := func() { - // Mine an empty block to trigger the possible RBF attempts. - ht.MineEmptyBlocks(1) - - // Increase the positions for both fee functions. - alicePosition++ - bobPosition++ + // Bob's sweeping tx should have one input, which is his commit output. + // His anchor output won't be swept due to it being uneconomical. + require.Len(ht, bobSweepTx.TxIn, 1, "tx=%v", bobSweepTx.TxHash()) - // We expect two pending sweeps for both nodes as we are mining - // empty blocks. - ht.AssertNumPendingSweeps(alice, 2) - ht.AssertNumPendingSweeps(bob, 2) - - // We expect to see both Alice's and Bob's sweeping txns in the - // mempool. - ht.AssertNumTxsInMempool(2) - - // Make sure Alice's old sweeping tx has been removed from the - // mempool. - ht.AssertTxNotInMempool(aliceSweepTx.TxHash()) - - // We should see two txns in the mempool: - // - Alice's sweeping tx, which sweeps both her anchor and - // commit outputs, using the increased fee rate. - // - Bob's previous sweeping tx, which sweeps both his anchor - // and commit outputs, at the possible increased fee rate. - txns = ht.GetNumTxsFromMempool(2) - - // Assume the first tx is Alice's sweeping tx, if the second tx - // has a larger output value, then that's Alice's as her - // to_local value is much gearter. - aliceSweepTx = txns[0] - bobSweepTx = txns[1] - - // Swap them if bobSweepTx is smaller. - if bobSweepTx.TxOut[0].Value > aliceSweepTx.TxOut[0].Value { - aliceSweepTx, bobSweepTx = bobSweepTx, aliceSweepTx - } - - // Alice's sweeping tx should be increased. 
- aliceFeeRate := ht.CalculateTxFeeRate(aliceSweepTx) - expectedFeeRate := aliceStartingFeeRate + - aliceFeeRateDelta*chainfee.SatPerKWeight(alicePosition) - - ht.Logf("Alice(deadline=%v): txWeight=%v, want feerate=%v, "+ - "got feerate=%v, delta=%v", deadlineA-alicePosition, - aliceTxWeight, expectedFeeRate, aliceFeeRate, - aliceFeeRateDelta) - - nextPosition := alicePosition + 1 - nextFeeRate := aliceStartingFeeRate + - aliceFeeRateDelta*chainfee.SatPerKWeight(nextPosition) - - // Calculate the distances. - delta := math.Abs(float64(aliceFeeRate - expectedFeeRate)) - deltaNext := math.Abs(float64(aliceFeeRate - nextFeeRate)) - - // Exit early if the first distance is smaller - it means we - // are at the right fee func position. - if delta < deltaNext { - require.InEpsilonf(ht, uint64(expectedFeeRate), - uint64(aliceFeeRate), 0.02, "want %v, got %v "+ - "in tx=%v", expectedFeeRate, - aliceFeeRate, aliceSweepTx.TxHash()) - - return - } - - alicePosition++ - ht.Logf("Jump position for Alice(deadline=%v): txWeight=%v, "+ - "want feerate=%v, got feerate=%v, delta=%v", - deadlineA-alicePosition, aliceTxWeight, nextFeeRate, - aliceFeeRate, aliceFeeRateDelta) + // Because Bob is sweeping without deadline pressure, the starting fee + // rate should be the min relay fee rate. + bobStartFeeRate := ht.CalculateTxFeeRate(bobSweepTx) + require.InEpsilonf(ht, uint64(chainfee.FeePerKwFloor), + uint64(bobStartFeeRate), 0.01, "want %v, got %v", + chainfee.FeePerKwFloor, bobStartFeeRate) - require.InEpsilonf(ht, uint64(nextFeeRate), - uint64(aliceFeeRate), 0.02, "want %v, got %v in tx=%v", - nextFeeRate, aliceFeeRate, aliceSweepTx.TxHash()) - } + // With Bob's starting fee rate being validated, we now calculate his + // ending fee rate and fee rate delta. + // + // Bob sweeps one input - the commit output. 
+ bobValue := btcutil.Amount(bobToLocal) + bobBudget := bobValue.MulF64(contractcourt.DefaultBudgetRatio) - reloclateAlicePosition() + // Calculate the ending fee rate and fee rate delta used in his fee + // function. + bobTxWeight := ht.CalculateTxWeight(bobSweepTx) + bobEndingFeeRate := chainfee.NewSatPerKWeight(bobBudget, bobTxWeight) + bobFeeRateDelta := (bobEndingFeeRate - bobStartFeeRate) / + chainfee.SatPerKWeight(deadlineB-1) + expectedFeeRateBob := bobStartFeeRate - // We now mine 7 empty blocks. For each block mined, we'd see Alice's + // We now mine 8 empty blocks. For each block mined, we'd see Alice's // sweeping tx being RBFed. For Bob, he performs a fee bump every - // block, but will only publish a tx every 4 blocks mined as some of + // block, but will only publish a tx every 3 blocks mined as some of // the fee bumps is not sufficient to meet the fee requirements // enforced by RBF. Since his fee function is already at position 1, // mining 7 more blocks means he will RBF his sweeping tx twice. - for i := 1; i < 7; i++ { + for i := 1; i < 9; i++ { // Mine an empty block to trigger the possible RBF attempts. ht.MineEmptyBlocks(1) @@ -1711,9 +1475,9 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) { // Make sure Bob's old sweeping tx has been removed from the // mempool. Since Bob's sweeping tx will only be successfully - // RBFed every 4 blocks, his old sweeping tx only will be - // removed when there are 4 blocks increased. - if bobPosition%4 == 0 { + // RBFed every 3 blocks, his old sweeping tx only will be + // removed when there are 3 blocks increased. 
+ if bobPosition%3 == 0 { ht.AssertTxNotInMempool(bobSweepTx.TxHash()) } @@ -1749,9 +1513,10 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) { aliceFeeRateDelta*chainfee.SatPerKWeight(alicePosition) ht.Logf("Alice(deadline=%v): txWeight=%v, want feerate=%v, "+ - "got feerate=%v, delta=%v", deadlineA-alicePosition, - aliceTxWeight, expectedFeeRateAlice, aliceFeeRate, - aliceFeeRateDelta) + "got feerate=%v, delta=%v in tx %v", + deadlineA-alicePosition, aliceTxWeight, + expectedFeeRateAlice, aliceFeeRate, + aliceFeeRateDelta, aliceSweepTx.TxHash()) require.InEpsilonf(ht, uint64(expectedFeeRateAlice), uint64(aliceFeeRate), 0.02, "want %v, got %v in tx=%v", @@ -1767,16 +1532,17 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) { accumulatedDelta := bobFeeRateDelta * chainfee.SatPerKWeight(bobPosition) - // Bob's sweeping tx will only be successfully RBFed every 4 + // Bob's sweeping tx will only be successfully RBFed every 3 // blocks. - if bobPosition%4 == 0 { + if bobPosition%3 == 0 { expectedFeeRateBob = bobStartFeeRate + accumulatedDelta } ht.Logf("Bob(deadline=%v): txWeight=%v, want feerate=%v, "+ - "got feerate=%v, delta=%v", deadlineB-bobPosition, - bobTxWeight, expectedFeeRateBob, bobFeeRate, - bobFeeRateDelta) + "got feerate=%v, delta=%v in tx %v", + deadlineB-bobPosition, bobTxWeight, + expectedFeeRateBob, bobFeeRate, + bobFeeRateDelta, bobSweepTx.TxHash()) require.InEpsilonf(ht, uint64(expectedFeeRateBob), uint64(bobFeeRate), 0.02, "want %d, got %d in tx=%v", @@ -2115,6 +1881,7 @@ func testBumpForceCloseFee(ht *lntest.HarnessTest) { if ht.IsNeutrinoBackend() { ht.Skipf("skipping BumpForceCloseFee test for neutrino backend") } + // fundAmt is the funding amount. fundAmt := btcutil.Amount(1_000_000) @@ -2140,6 +1907,7 @@ func testBumpForceCloseFee(ht *lntest.HarnessTest) { // Unwrap the results. 
chanPoint := chanPoints[0] alice := nodes[0] + bob := nodes[1] // We need to fund alice with 2 wallet inputs so that we can test to // increase the fee rate of the anchor cpfp via two subsequent calls of @@ -2252,6 +2020,10 @@ func testBumpForceCloseFee(ht *lntest.HarnessTest) { txns = ht.GetNumTxsFromMempool(2) ht.FindSweepingTxns(txns, 1, closingTx.TxHash()) + // Shut down Bob, otherwise he will create a sweeping tx to collect the + // to_remote output once Alice's force closing tx is confirmed below. + ht.Shutdown(bob) + // Mine both transactions, the closing tx and the anchor cpfp tx. // This is needed to clean up the mempool. ht.MineBlocksAndAssertNumTxes(1, 2) diff --git a/itest/lnd_watchtower_test.go b/itest/lnd_watchtower_test.go index 0a36b077d2..a68df852bc 100644 --- a/itest/lnd_watchtower_test.go +++ b/itest/lnd_watchtower_test.go @@ -19,22 +19,21 @@ import ( "github.com/stretchr/testify/require" ) -// testWatchtower tests the behaviour of the watchtower client and server. -func testWatchtower(ht *lntest.HarnessTest) { - ht.Run("revocation", func(t *testing.T) { - tt := ht.Subtest(t) - testRevokedCloseRetributionAltruistWatchtower(tt) - }) - - ht.Run("session deletion", func(t *testing.T) { - tt := ht.Subtest(t) - testTowerClientSessionDeletion(tt) - }) - - ht.Run("tower and session activation", func(t *testing.T) { - tt := ht.Subtest(t) - testTowerClientTowerAndSessionManagement(tt) - }) +// watchtowerTestCases defines a set of tests to check the behaviour of the +// watchtower client and server. 
+var watchtowerTestCases = []*lntest.TestCase{ + { + Name: "watchtower revoked close retribution altruist", + TestFunc: testRevokedCloseRetributionAltruistWatchtower, + }, + { + Name: "watchtower client session deletion", + TestFunc: testTowerClientSessionDeletion, + }, + { + Name: "watchtower client tower and session management", + TestFunc: testTowerClientTowerAndSessionManagement, + }, } // testTowerClientTowerAndSessionManagement tests the various control commands @@ -565,6 +564,15 @@ func testRevokedCloseRetributionAltruistWatchtowerCase(ht *lntest.HarnessTest, // then been swept to his wallet by Willy. require.NoError(ht, restart(), "unable to restart dave") + // For neutrino backend, we may need to mine one more block to trigger + // the chain watcher to act. + // + // TODO(yy): remove it once the blockbeat remembers the last block + // processed. + if ht.IsNeutrinoBackend() { + ht.MineEmptyBlocks(1) + } + err = wait.NoError(func() error { daveBalResp := dave.RPC.ChannelBalance() if daveBalResp.LocalBalance.Sat != 0 { @@ -579,16 +587,6 @@ func testRevokedCloseRetributionAltruistWatchtowerCase(ht *lntest.HarnessTest, ht.AssertNumPendingForceClose(dave, 0) - // If this is an anchor channel, Dave would offer his sweeper the - // anchor. However, due to no time-sensitive outputs involved, the - // anchor sweeping won't happen as it's uneconomical. - if lntest.CommitTypeHasAnchors(commitType) { - ht.AssertNumPendingSweeps(dave, 1) - - // Mine a block to trigger the sweep. - ht.MineEmptyBlocks(1) - } - // Check that Dave's wallet balance is increased. 
err = wait.NoError(func() error { daveBalResp := dave.RPC.WalletBalance() @@ -657,17 +655,12 @@ func generateBackups(ht *lntest.HarnessTest, srcNode, ) send := func(node *node.HarnessNode, payReq string) { - stream := node.RPC.SendPayment( - &routerrpc.SendPaymentRequest{ - PaymentRequest: payReq, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - - ht.AssertPaymentStatusFromStream( - stream, lnrpc.Payment_SUCCEEDED, - ) + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: payReq, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + ht.SendPaymentAssertSettled(node, req) } // Pay each invoice. diff --git a/itest/lnd_wipe_fwdpkgs_test.go b/itest/lnd_wipe_fwdpkgs_test.go index a302f596a6..b6211d7f8e 100644 --- a/itest/lnd_wipe_fwdpkgs_test.go +++ b/itest/lnd_wipe_fwdpkgs_test.go @@ -117,9 +117,6 @@ func testWipeForwardingPackages(ht *lntest.HarnessTest) { // Alice should one pending sweep. ht.AssertNumPendingSweeps(alice, 1) - // Mine a block to trigger the sweep. - ht.MineBlocks(1) - // Mine 1 block to get Alice's sweeping tx confirmed. 
ht.MineBlocksAndAssertNumTxes(1, 1) diff --git a/lntest/harness.go b/lntest/harness.go index 8e8fcd3936..606b64351b 100644 --- a/lntest/harness.go +++ b/lntest/harness.go @@ -8,16 +8,19 @@ import ( "time" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/fn/v2" + "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/kvdb/etcd" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/lnrpc/signrpc" "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lntest/miner" "github.com/lightningnetwork/lnd/lntest/node" @@ -26,6 +29,7 @@ import ( "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing" "github.com/stretchr/testify/require" ) @@ -49,6 +53,9 @@ const ( // maxBlocksAllowed specifies the max allowed value to be used when // mining blocks. maxBlocksAllowed = 100 + + finalCltvDelta = routing.MinCLTVDelta // 18. + thawHeightDelta = finalCltvDelta * 2 // 36. ) // TestCase defines a test case that's been used in the integration test. @@ -1653,6 +1660,22 @@ func (h *HarnessTest) CleanupForceClose(hn *node.HarnessNode) { // Wait for the channel to be marked pending force close. h.AssertNumPendingForceClose(hn, 1) + // Mine enough blocks for the node to sweep its funds from the force + // closed channel. The commit sweep resolver is offers the input to the + // sweeper when it's force closed, and broadcast the sweep tx at + // defaulCSV-1. + // + // NOTE: we might empty blocks here as we don't know the exact number + // of blocks to mine. 
This may end up mining more blocks than needed. + h.MineEmptyBlocks(node.DefaultCSV - 1) + + // Assert there is one pending sweep. + h.AssertNumPendingSweeps(hn, 1) + + // The node should now sweep the funds, clean up by mining the sweeping + // tx. + h.MineBlocksAndAssertNumTxes(1, 1) + // Mine blocks to get any second level HTLC resolved. If there are no // HTLCs, this will behave like h.AssertNumPendingCloseChannels. h.mineTillForceCloseResolved(hn) @@ -1994,7 +2017,8 @@ func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode, return nil } - return fmt.Errorf("sweep tx %v not found", sweep) + return fmt.Errorf("sweep tx %v not found in resp %v", sweep, + sweepResp) }, wait.DefaultTimeout) require.NoError(h, err, "%s: timeout checking sweep tx", hn.Name()) } @@ -2308,12 +2332,40 @@ func (h *HarnessTest) openChannelsForNodes(nodes []*node.HarnessNode, // Sanity check the params. require.Greater(h, len(nodes), 1, "need at least 2 nodes") + // attachFundingShim is a helper closure that optionally attaches a + // funding shim to the open channel params and returns it. + attachFundingShim := func( + nodeA, nodeB *node.HarnessNode) OpenChannelParams { + + // If this channel is not a script enforced lease channel, + // we'll do nothing and return the params. + leasedType := lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE + if p.CommitmentType != leasedType { + return p + } + + // Otherwise derive the funding shim, attach it to the original + // open channel params and return it. + minerHeight := h.CurrentHeight() + thawHeight := minerHeight + thawHeightDelta + fundingShim, _ := h.DeriveFundingShim( + nodeA, nodeB, p.Amt, thawHeight, true, leasedType, + ) + + p.FundingShim = fundingShim + + return p + } + // Open channels in batch to save blocks mined. reqs := make([]*OpenChannelRequest, 0, len(nodes)-1) for i := 0; i < len(nodes)-1; i++ { nodeA := nodes[i] nodeB := nodes[i+1] + // Optionally attach a funding shim to the open channel params. 
+ p = attachFundingShim(nodeA, nodeB) + req := &OpenChannelRequest{ Local: nodeA, Remote: nodeB, @@ -2364,3 +2416,122 @@ func (h *HarnessTest) openZeroConfChannelsForNodes(nodes []*node.HarnessNode, return resp } + +// DeriveFundingShim creates a channel funding shim by deriving the necessary +// keys on both sides. +func (h *HarnessTest) DeriveFundingShim(alice, bob *node.HarnessNode, + chanSize btcutil.Amount, thawHeight uint32, publish bool, + commitType lnrpc.CommitmentType) (*lnrpc.FundingShim, + *lnrpc.ChannelPoint) { + + keyLoc := &signrpc.KeyLocator{KeyFamily: 9999} + carolFundingKey := alice.RPC.DeriveKey(keyLoc) + daveFundingKey := bob.RPC.DeriveKey(keyLoc) + + // Now that we have the multi-sig keys for each party, we can manually + // construct the funding transaction. We'll instruct the backend to + // immediately create and broadcast a transaction paying out an exact + // amount. Normally this would reside in the mempool, but we just + // confirm it now for simplicity. + var ( + fundingOutput *wire.TxOut + musig2 bool + err error + ) + + if commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT || + commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT_OVERLAY { + + var carolKey, daveKey *btcec.PublicKey + carolKey, err = btcec.ParsePubKey(carolFundingKey.RawKeyBytes) + require.NoError(h, err) + daveKey, err = btcec.ParsePubKey(daveFundingKey.RawKeyBytes) + require.NoError(h, err) + + _, fundingOutput, err = input.GenTaprootFundingScript( + carolKey, daveKey, int64(chanSize), + fn.None[chainhash.Hash](), + ) + require.NoError(h, err) + + musig2 = true + } else { + _, fundingOutput, err = input.GenFundingPkScript( + carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes, + int64(chanSize), + ) + require.NoError(h, err) + } + + var txid *chainhash.Hash + targetOutputs := []*wire.TxOut{fundingOutput} + if publish { + txid = h.SendOutputsWithoutChange(targetOutputs, 5) + } else { + tx := h.CreateTransaction(targetOutputs, 5) + + txHash := tx.TxHash() + txid = 
&txHash + } + + // At this point, we can being our external channel funding workflow. + // We'll start by generating a pending channel ID externally that will + // be used to track this new funding type. + pendingChanID := h.Random32Bytes() + + // Now that we have the pending channel ID, Dave (our responder) will + // register the intent to receive a new channel funding workflow using + // the pending channel ID. + chanPoint := &lnrpc.ChannelPoint{ + FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ + FundingTxidBytes: txid[:], + }, + } + chanPointShim := &lnrpc.ChanPointShim{ + Amt: int64(chanSize), + ChanPoint: chanPoint, + LocalKey: &lnrpc.KeyDescriptor{ + RawKeyBytes: daveFundingKey.RawKeyBytes, + KeyLoc: &lnrpc.KeyLocator{ + KeyFamily: daveFundingKey.KeyLoc.KeyFamily, + KeyIndex: daveFundingKey.KeyLoc.KeyIndex, + }, + }, + RemoteKey: carolFundingKey.RawKeyBytes, + PendingChanId: pendingChanID, + ThawHeight: thawHeight, + Musig2: musig2, + } + fundingShim := &lnrpc.FundingShim{ + Shim: &lnrpc.FundingShim_ChanPointShim{ + ChanPointShim: chanPointShim, + }, + } + bob.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{ + Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{ + ShimRegister: fundingShim, + }, + }) + + // If we attempt to register the same shim (has the same pending chan + // ID), then we should get an error. + bob.RPC.FundingStateStepAssertErr(&lnrpc.FundingTransitionMsg{ + Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{ + ShimRegister: fundingShim, + }, + }) + + // We'll take the chan point shim we just registered for Dave (the + // responder), and swap the local/remote keys before we feed it in as + // Carol's funding shim as the initiator. 
+ fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{ + RawKeyBytes: carolFundingKey.RawKeyBytes, + KeyLoc: &lnrpc.KeyLocator{ + KeyFamily: carolFundingKey.KeyLoc.KeyFamily, + KeyIndex: carolFundingKey.KeyLoc.KeyIndex, + }, + } + fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes + + return fundingShim, chanPoint +} diff --git a/lntest/harness_assertion.go b/lntest/harness_assertion.go index 11cbefdd5c..b99f461d18 100644 --- a/lntest/harness_assertion.go +++ b/lntest/harness_assertion.go @@ -29,6 +29,7 @@ import ( "github.com/lightningnetwork/lnd/lntest/rpc" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnutils" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" ) @@ -738,15 +739,19 @@ func (h *HarnessTest) AssertStreamChannelForceClosed(hn *node.HarnessNode, channeldb.ChanStatusLocalCloseInitiator.String(), "channel not coop broadcasted") + // Get the closing txid. + closeTxid, err := chainhash.NewHashFromStr(resp.ClosingTxid) + require.NoError(h, err) + // We'll now, generate a single block, wait for the final close status // update, then ensure that the closing transaction was included in the // block. - block := h.MineBlocksAndAssertNumTxes(1, 1)[0] + closeTx := h.AssertTxInMempool(*closeTxid) + h.MineBlockWithTx(closeTx) // Consume one close event and assert the closing txid can be found in // the block. closingTxid := h.WaitForChannelCloseEvent(stream) - h.AssertTxInBlock(block, closingTxid) // We should see zero waiting close channels and 1 pending force close // channels now. @@ -1309,58 +1314,6 @@ func (h *HarnessTest) AssertNumActiveHtlcs(hn *node.HarnessNode, num int) { hn.Name()) } -// AssertActiveHtlcs makes sure the node has the _exact_ HTLCs matching -// payHashes on _all_ their channels. 
-func (h *HarnessTest) AssertActiveHtlcs(hn *node.HarnessNode, - payHashes ...[]byte) { - - err := wait.NoError(func() error { - // We require the RPC call to be succeeded and won't wait for - // it as it's an unexpected behavior. - req := &lnrpc.ListChannelsRequest{} - nodeChans := hn.RPC.ListChannels(req) - - for _, ch := range nodeChans.Channels { - // Record all payment hashes active for this channel. - htlcHashes := make(map[string]struct{}) - - for _, htlc := range ch.PendingHtlcs { - h := hex.EncodeToString(htlc.HashLock) - _, ok := htlcHashes[h] - if ok { - return fmt.Errorf("duplicate HashLock "+ - "in PendingHtlcs: %v", - ch.PendingHtlcs) - } - htlcHashes[h] = struct{}{} - } - - // Channel should have exactly the payHashes active. - if len(payHashes) != len(htlcHashes) { - return fmt.Errorf("node [%s:%x] had %v "+ - "htlcs active, expected %v", - hn.Name(), hn.PubKey[:], - len(htlcHashes), len(payHashes)) - } - - // Make sure all the payHashes are active. - for _, payHash := range payHashes { - h := hex.EncodeToString(payHash) - if _, ok := htlcHashes[h]; ok { - continue - } - - return fmt.Errorf("node [%s:%x] didn't have: "+ - "the payHash %v active", hn.Name(), - hn.PubKey[:], h) - } - } - - return nil - }, DefaultTimeout) - require.NoError(h, err, "timeout checking active HTLCs") -} - // AssertIncomingHTLCActive asserts the node has a pending incoming HTLC in the // given channel. Returns the HTLC if found and active. 
func (h *HarnessTest) AssertIncomingHTLCActive(hn *node.HarnessNode, @@ -1609,8 +1562,9 @@ func (h *HarnessTest) AssertNumHTLCsAndStage(hn *node.HarnessNode, } if len(target.PendingHtlcs) != num { - return fmt.Errorf("got %d pending htlcs, want %d", - len(target.PendingHtlcs), num) + return fmt.Errorf("got %d pending htlcs, want %d, %s", + len(target.PendingHtlcs), num, + lnutils.SpewLogClosure(target.PendingHtlcs)()) } for i, htlc := range target.PendingHtlcs { @@ -2786,3 +2740,48 @@ func (h *HarnessTest) FindSweepingTxns(txns []*wire.MsgTx, return sweepTxns } + +// AssertForceCloseAndAnchorTxnsInMempool asserts that the force close and +// anchor sweep txns are found in the mempool and returns the force close tx +// and the anchor sweep tx. +func (h *HarnessTest) AssertForceCloseAndAnchorTxnsInMempool() (*wire.MsgTx, + *wire.MsgTx) { + + // Assert there are two txns in the mempool. + txns := h.GetNumTxsFromMempool(2) + + // isParentAndChild checks whether there is an input used in the + // assumed child tx by checking every input's previous outpoint against + // the assumed parentTxid. + isParentAndChild := func(parent, child *wire.MsgTx) bool { + parentTxid := parent.TxHash() + + for _, inp := range child.TxIn { + if inp.PreviousOutPoint.Hash == parentTxid { + // Found a match, this is indeed the anchor + // sweeping tx so we return it here. + return true + } + } + + return false + } + + switch { + // Assume the first one is the closing tx and the second one is the + // anchor sweeping tx. + case isParentAndChild(txns[0], txns[1]): + return txns[0], txns[1] + + // Assume the first one is the anchor sweeping tx and the second one is + // the closing tx. + case isParentAndChild(txns[1], txns[0]): + return txns[1], txns[0] + + // Unrelated txns found, fail the test. 
+ default: + h.Fatalf("the two txns are not related: %v", txns) + + return nil, nil + } +} diff --git a/lntest/harness_miner.go b/lntest/harness_miner.go index 22b2b95acc..17fd864ed7 100644 --- a/lntest/harness_miner.go +++ b/lntest/harness_miner.go @@ -196,7 +196,8 @@ func (h *HarnessTest) mineTillForceCloseResolved(hn *node.HarnessNode) { return nil }, DefaultTimeout) - require.NoErrorf(h, err, "assert force close resolved timeout") + require.NoErrorf(h, err, "%s: assert force close resolved timeout", + hn.Name()) } // AssertTxInMempool asserts a given transaction can be found in the mempool. diff --git a/lntest/node/config.go b/lntest/node/config.go index 32f24395b6..6cba60941c 100644 --- a/lntest/node/config.go +++ b/lntest/node/config.go @@ -41,6 +41,40 @@ var ( btcdExecutable = flag.String( "btcdexec", "", "full path to btcd binary", ) + + // CfgLegacy specifies the config used to create a node that uses the + // legacy channel format. + CfgLegacy = []string{"--protocol.legacy.committweak"} + + // CfgStaticRemoteKey specifies the config used to create a node that + // uses the static remote key feature. + CfgStaticRemoteKey = []string{} + + // CfgAnchor specifies the config used to create a node that uses the + // anchor output feature. + CfgAnchor = []string{"--protocol.anchors"} + + // CfgLeased specifies the config used to create a node that uses the + // leased channel feature. + CfgLeased = []string{ + "--protocol.anchors", + "--protocol.script-enforced-lease", + } + + // CfgSimpleTaproot specifies the config used to create a node that + // uses the simple taproot feature. + CfgSimpleTaproot = []string{ + "--protocol.anchors", + "--protocol.simple-taproot-chans", + } + + // CfgZeroConf specifies the config used to create a node that uses the + // zero-conf channel feature. 
+ CfgZeroConf = []string{ + "--protocol.anchors", + "--protocol.option-scid-alias", + "--protocol.zero-conf", + } ) type DatabaseBackend int diff --git a/lnutils/log.go b/lnutils/log.go index a32738bdf4..a588bf3ce3 100644 --- a/lnutils/log.go +++ b/lnutils/log.go @@ -1,6 +1,10 @@ package lnutils -import "github.com/davecgh/go-spew/spew" +import ( + "strings" + + "github.com/davecgh/go-spew/spew" +) // LogClosure is used to provide a closure over expensive logging operations so // don't have to be performed when the logging level doesn't warrant it. @@ -25,3 +29,10 @@ func SpewLogClosure(a any) LogClosure { return spew.Sdump(a) } } + +// NewSeparatorClosure returns a new closure that logs a separator line. +func NewSeparatorClosure() LogClosure { + return func() string { + return strings.Repeat("=", 80) + } +} diff --git a/sweep/fee_bumper.go b/sweep/fee_bumper.go index a43f875850..57f9f6e303 100644 --- a/sweep/fee_bumper.go +++ b/sweep/fee_bumper.go @@ -1588,7 +1588,10 @@ func prepareSweepTx(inputs []input.Input, changePkScript lnwallet.AddrWithKey, // Check if the lock time has reached if lt > uint32(currentHeight) { - return 0, noChange, noLocktime, ErrLocktimeImmature + return 0, noChange, noLocktime, + fmt.Errorf("%w: current height is %v, "+ + "locktime is %v", ErrLocktimeImmature, + currentHeight, lt) } // If another input commits to a different locktime, they diff --git a/sweep/sweeper.go b/sweep/sweeper.go index d49f104af0..88404e52a8 100644 --- a/sweep/sweeper.go +++ b/sweep/sweeper.go @@ -229,7 +229,7 @@ func (p *SweeperInput) isMature(currentHeight uint32) (bool, uint32) { locktime, _ := p.RequiredLockTime() if currentHeight < locktime { log.Debugf("Input %v has locktime=%v, current height is %v", - p.OutPoint(), locktime, currentHeight) + p, locktime, currentHeight) return false, locktime } @@ -243,8 +243,7 @@ func (p *SweeperInput) isMature(currentHeight uint32) (bool, uint32) { locktime = p.BlocksToMaturity() + p.HeightHint() if currentHeight+1 < 
locktime { log.Debugf("Input %v has CSV expiry=%v, current height is %v, "+ - "skipped sweeping", p.OutPoint(), locktime, - currentHeight) + "skipped sweeping", p, locktime, currentHeight) return false, locktime } @@ -255,6 +254,21 @@ func (p *SweeperInput) isMature(currentHeight uint32) (bool, uint32) { // InputsMap is a type alias for a set of pending inputs. type InputsMap = map[wire.OutPoint]*SweeperInput +// inputsMapToString returns a human readable interpretation of the pending +// inputs. +func inputsMapToString(inputs InputsMap) string { + if len(inputs) == 0 { + return "" + } + + inps := make([]input.Input, 0, len(inputs)) + for _, in := range inputs { + inps = append(inps, in) + } + + return "\n" + inputTypeSummary(inps) +} + // pendingSweepsReq is an internal message we'll use to represent an external // caller's intent to retrieve all of the pending inputs the UtxoSweeper is // attempting to sweep. @@ -700,17 +714,10 @@ func (s *UtxoSweeper) collector() { inputs := s.updateSweeperInputs() log.Debugf("Received new block: height=%v, attempt "+ - "sweeping %d inputs:\n%s", - s.currentHeight, len(inputs), + "sweeping %d inputs:%s", s.currentHeight, + len(inputs), lnutils.NewLogClosure(func() string { - inps := make( - []input.Input, 0, len(inputs), - ) - for _, in := range inputs { - inps = append(inps, in) - } - - return inputTypeSummary(inps) + return inputsMapToString(inputs) })) // Attempt to sweep any pending inputs. 
@@ -1244,10 +1251,10 @@ func (s *UtxoSweeper) handleNewInput(input *sweepInputMessage) error { log.Tracef("input %v, state=%v, added to inputs", outpoint, pi.state) log.Infof("Registered sweep request at block %d: out_point=%v, "+ - "witness_type=%v, amount=%v, deadline=%d, params=(%v)", - s.currentHeight, pi.OutPoint(), pi.WitnessType(), + "witness_type=%v, amount=%v, deadline=%d, state=%v, "+ + "params=(%v)", s.currentHeight, pi.OutPoint(), pi.WitnessType(), btcutil.Amount(pi.SignDesc().Output.Value), pi.DeadlineHeight, - pi.params) + pi.state, pi.params) // Start watching for spend of this input, either by us or the remote // party. @@ -1523,12 +1530,8 @@ func (s *UtxoSweeper) updateSweeperInputs() InputsMap { // If the input has a locktime that's not yet reached, we will // skip this input and wait for the locktime to be reached. - mature, locktime := input.isMature(uint32(s.currentHeight)) + mature, _ := input.isMature(uint32(s.currentHeight)) if !mature { - log.Debugf("Skipping input %v due to locktime=%v not "+ - "reached, current height is %v", op, locktime, - s.currentHeight) - continue } diff --git a/sweep/txgenerator.go b/sweep/txgenerator.go index 993ee9e59d..995949b15a 100644 --- a/sweep/txgenerator.go +++ b/sweep/txgenerator.go @@ -315,5 +315,6 @@ func inputTypeSummary(inputs []input.Input) string { part := fmt.Sprintf("%v (%v)", i.OutPoint(), i.WitnessType()) parts = append(parts, part) } - return strings.Join(parts, ", ") + + return strings.Join(parts, "\n") } diff --git a/sweep/txgenerator_test.go b/sweep/txgenerator_test.go index 71477bd6ec..3f2051646a 100644 --- a/sweep/txgenerator_test.go +++ b/sweep/txgenerator_test.go @@ -16,10 +16,12 @@ var ( input.HtlcOfferedRemoteTimeout, input.WitnessKeyHash, } - expectedWeight = int64(1460) - expectedSummary = "0000000000000000000000000000000000000000000000000000000000000000:10 (CommitmentTimeLock), " + - "0000000000000000000000000000000000000000000000000000000000000001:11 
(HtlcAcceptedSuccessSecondLevel), " + - "0000000000000000000000000000000000000000000000000000000000000002:12 (HtlcOfferedRemoteTimeout), " + + expectedWeight = int64(1460) + + //nolint:ll + expectedSummary = "0000000000000000000000000000000000000000000000000000000000000000:10 (CommitmentTimeLock)\n" + + "0000000000000000000000000000000000000000000000000000000000000001:11 (HtlcAcceptedSuccessSecondLevel)\n" + + "0000000000000000000000000000000000000000000000000000000000000002:12 (HtlcOfferedRemoteTimeout)\n" + "0000000000000000000000000000000000000000000000000000000000000003:13 (WitnessKeyHash)" )