
Commit b38ed00

Roasbeef authored and guggero committed
itest: add breach force close test
1 parent 8d8da0e commit b38ed00

File tree

4 files changed: +326 -4 lines changed

itest/litd_custom_channels_test.go
itest/litd_node.go
itest/litd_test_list_on_test.go
itest/network_harness.go

itest/litd_custom_channels_test.go

Lines changed: 225 additions & 4 deletions
@@ -941,6 +941,227 @@ func testCustomChannelsForceClose(_ context.Context, net *NetworkHarness,
 	t.Logf("Dave UTXOs: %v", toProtoJSON(t.t, daveUTXOs))
 }
 
+func testCustomChannelsBreach(_ context.Context, net *NetworkHarness,
+	t *harnessTest) {
+
+	lndArgs := slices.Clone(lndArgsTemplate)
+	litdArgs := slices.Clone(litdArgsTemplate)
+
+	// Zane will act as our Universe server for the duration of the test.
+	zane, err := net.NewNode(
+		t.t, "Zane", lndArgs, false, true, litdArgs...,
+	)
+	require.NoError(t.t, err)
+
+	// For our litd args, make sure that they all see Zane as the main
+	// Universe server.
+	litdArgs = append(litdArgs, fmt.Sprintf(
+		"--taproot-assets.proofcourieraddr=%s://%s",
+		proof.UniverseRpcCourierType, zane.Cfg.LitAddr(),
+	))
+
+	// Charlie will be the breached party. We set --nolisten to ensure Dave
+	// won't be able to connect to him and trigger the channel protection
+	// logic automatically. We also can't have Charlie automatically
+	// reconnect too early, otherwise DLP would be initiated instead of the
+	// breach we want to provoke.
+	charlieFlags := append(
+		slices.Clone(lndArgs), "--nolisten", "--minbackoff=1h",
+	)
+
+	// For this simple test, we'll just have Charlie -> Dave as an assets
+	// channel.
+	charlie, err := net.NewNode(
+		t.t, "Charlie", charlieFlags, false, true, litdArgs...,
+	)
+	require.NoError(t.t, err)
+
+	dave, err := net.NewNode(t.t, "Dave", lndArgs, false, true, litdArgs...)
+	require.NoError(t.t, err)
+
+	// Next we'll connect all the nodes and also fund them with some coins.
+	nodes := []*HarnessNode{charlie, dave}
+	connectAllNodes(t.t, net, nodes)
+	fundAllNodes(t.t, net, nodes)
+
+	charlieTap := newTapClient(t.t, charlie)
+	daveTap := newTapClient(t.t, dave)
+
+	ctxb := context.Background()
+
+	// Now we'll make an asset for Charlie that we'll use in the test to
+	// open a channel.
+	mintedAssets := itest.MintAssetsConfirmBatch(
+		t.t, t.lndHarness.Miner.Client, charlieTap,
+		[]*mintrpc.MintAssetRequest{
+			{
+				Asset: itestAsset,
+			},
+		},
+	)
+	cents := mintedAssets[0]
+	assetID := cents.AssetGenesis.AssetId
+
+	t.Logf("Minted %d lightning cents, syncing universes...", cents.Amount)
+	syncUniverses(t.t, charlieTap, dave)
+	t.Logf("Universes synced between all nodes, distributing assets...")
+
+	// TODO(roasbeef): consolidate w/ the other test
+
+	// Next we can open an asset channel from Charlie -> Dave, then kick
+	// off the main scenario.
+	t.Logf("Opening asset channels...")
+	assetFundResp, err := charlieTap.FundChannel(
+		ctxb, &tchrpc.FundChannelRequest{
+			AssetAmount:        fundingAmount,
+			AssetId:            assetID,
+			PeerPubkey:         dave.PubKey[:],
+			FeeRateSatPerVbyte: 5,
+		},
+	)
+	require.NoError(t.t, err)
+	t.Logf("Funded channel between Charlie and Dave: %v", assetFundResp)
+
+	// With the channel open, mine a block to confirm it.
+	mineBlocks(t, net, 6, 1)
+
+	time.Sleep(time.Second * 1)
+
+	// Next, we'll make keysend payments from Charlie to Dave. We'll use
+	// this to reach a state where both parties have funds in the channel.
+	const (
+		numPayments   = 5
+		keySendAmount = 100
+		btcAmt        = int64(5_000)
+	)
+	for i := 0; i < numPayments; i++ {
+		sendAssetKeySendPayment(
+			t.t, charlie, dave, keySendAmount, assetID,
+			fn.Some(btcAmt),
+		)
+	}
+
+	logBalance(t.t, nodes, assetID, "after keysend -- breach state")
+
+	// Now we'll create an on-disk snapshot that we'll use to restore back
+	// to as our breached state.
+	require.NoError(t.t, net.StopAndBackupDB(dave))
+	connectAllNodes(t.t, net, nodes)
+
+	// We'll send one more keysend payment now to revoke the state we were
+	// just at above.
+	sendAssetKeySendPayment(
+		t.t, charlie, dave, keySendAmount, assetID, fn.Some(btcAmt),
+	)
+	logBalance(t.t, nodes, assetID, "after keysend -- final state")
+
+	// With the final state achieved, we'll now restore Dave (who will be
+	// force closing) to that old state, the breach state.
+	require.NoError(t.t, net.StopAndRestoreDB(dave))
+
+	// With Dave restored, we'll now execute the force close.
+	t.Logf("Force close by Dave to breach...")
+	daveChanPoint := &lnrpc.ChannelPoint{
+		OutputIndex: uint32(assetFundResp.OutputIndex),
+		FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
+			FundingTxidStr: assetFundResp.Txid,
+		},
+	}
+	_, breachTxid, err := net.CloseChannel(dave, daveChanPoint, true)
+	require.NoError(t.t, err)
+
+	t.Logf("Channel closed! Mining blocks, close_txid=%v", breachTxid)
+
+	// Next, we'll mine a block to confirm the breach transaction.
+	mineBlocks(t, net, 1, 1)
+
+	// We should be able to find the transfer of the breach for both
+	// parties.
+	charlieBreachTransfer := locateAssetTransfers(
+		t.t, charlieTap, *breachTxid,
+	)
+	daveBreachTransfer := locateAssetTransfers(
+		t.t, daveTap, *breachTxid,
+	)
+
+	t.Logf("Charlie breach transfer: %v",
+		toProtoJSON(t.t, charlieBreachTransfer))
+	t.Logf("Dave breach transfer: %v",
+		toProtoJSON(t.t, daveBreachTransfer))
+
+	// With the breach transaction mined, Charlie should now have a
+	// transaction in the mempool sweeping *both* commitment outputs.
+	charlieJusticeTxid, err := waitForNTxsInMempool(
+		net.Miner.Client, 1, time.Second*5,
+	)
+	require.NoError(t.t, err)
+
+	t.Logf("Charlie justice txid: %v", charlieJusticeTxid)
+
+	// Next, we'll mine a block to confirm Charlie's justice transaction.
+	mineBlocks(t, net, 1, 1)
+
+	// Charlie should now have a transfer for his justice transaction.
+	charlieJusticeTransfer := locateAssetTransfers(
+		t.t, charlieTap, *charlieJusticeTxid[0],
+	)
+
+	t.Logf("Charlie justice transfer: %v",
+		toProtoJSON(t.t, charlieJusticeTransfer))
+
+	// Charlie's balance should now be the same as before the breach
+	// attempt: the amount he minted at the very start.
+	charlieBalance := itestAsset.Amount
+	assertAssetBalance(t.t, charlieTap, assetID, charlieBalance)
+
+	t.Logf("Charlie balance after breach: %d", charlieBalance)
+
+	// Charlie should now have 2 total UTXOs: the change from the funding
+	// output, and now the sweep output from the justice transaction.
+	charlieUTXOs := assertNumAssetUTXOs(t.t, charlieTap, 2)
+
+	t.Logf("Charlie UTXOs after breach: %v", toProtoJSON(t.t, charlieUTXOs))
+}
+
+func assertNumAssetUTXOs(t *testing.T, tapdClient *tapClient,
+	numUTXOs int) *taprpc.ListUtxosResponse {
+
+	ctxb := context.Background()
+
+	err := wait.NoError(func() error {
+		clientUTXOs, err := tapdClient.ListUtxos(
+			ctxb, &taprpc.ListUtxosRequest{},
+		)
+		if err != nil {
+			return err
+		}
+
+		if len(clientUTXOs.ManagedUtxos) != numUTXOs {
+			return fmt.Errorf("expected %v UTXOs, got %d", numUTXOs,
+				len(clientUTXOs.ManagedUtxos))
+		}
+
+		return nil
+	}, defaultTimeout)
+
+	clientUTXOs, err2 := tapdClient.ListUtxos(
+		ctxb, &taprpc.ListUtxosRequest{},
+	)
+	require.NoError(t, err2)
+
+	if err != nil {
+		t.Logf("wrong amount of UTXOs, got %d, expected %d: %v",
+			len(clientUTXOs.ManagedUtxos), numUTXOs,
+			toProtoJSON(t, clientUTXOs))
+
+		t.Fatalf("failed to assert UTXOs: %v", err)
+
+		return nil
+	}
+
+	return clientUTXOs
+}
+
 func locateAssetTransfers(t *testing.T, tapdClient *tapClient,
 	txid chainhash.Hash) *taprpc.AssetTransfer {
 

@@ -953,12 +1174,12 @@ func locateAssetTransfers(t *testing.T, tapdClient *tapClient,
 			},
 		)
 		if err != nil {
-			return fmt.Errorf("unable to list charlie "+
-				"transfers: %w", err)
+			return fmt.Errorf("unable to list %v transfers: %w",
+				tapdClient.node.Name(), err)
 		}
 		if len(forceCloseTransfer.Transfers) != 1 {
-			return fmt.Errorf("charlie is missing force close " +
-				"transfer")
+			return fmt.Errorf("%v is missing force close "+
+				"transfer", tapdClient.node.Name())
 		}
 
 		transfer = forceCloseTransfer.Transfers[0]
itest/litd_node.go

Lines changed: 38 additions & 0 deletions
@@ -89,6 +89,9 @@ type LitNodeConfig struct {
 
 	LitPort     int
 	LitRESTPort int
+
+	// backupDBDir is the path where a database backup is stored, if any.
+	backupDBDir string
 }
 
 func (cfg *LitNodeConfig) LitAddr() string {
@@ -2062,3 +2065,38 @@ func connectLitRPC(ctx context.Context, hostPort, tlsCertPath,
 
 	return grpc.DialContext(ctx, hostPort, opts...)
 }
+
+// copyAll copies all files and directories from srcDir to dstDir recursively.
+// Note that this function does not support links.
+func copyAll(dstDir, srcDir string) error {
+	entries, err := os.ReadDir(srcDir)
+	if err != nil {
+		return err
+	}
+
+	for _, entry := range entries {
+		srcPath := filepath.Join(srcDir, entry.Name())
+		dstPath := filepath.Join(dstDir, entry.Name())
+
+		info, err := os.Stat(srcPath)
+		if err != nil {
+			return err
+		}
+
+		if info.IsDir() {
+			err := os.Mkdir(dstPath, info.Mode())
+			if err != nil && !os.IsExist(err) {
+				return err
+			}
+
+			err = copyAll(dstPath, srcPath)
+			if err != nil {
+				return err
+			}
+		} else if err := CopyFile(dstPath, srcPath); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
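
The helper above simply mirrors a directory tree file by file. As a quick orientation, here is a minimal sketch of how it could be exercised on its own, assuming it sits in the same itest package (so copyAll, CopyFile and the os import are already in scope); snapshotAndRestore and the directory names are purely illustrative and not part of this commit:

// snapshotAndRestore is an illustrative sketch only: copy dbDir into a fresh
// temp directory, and later copy the snapshot back over dbDir.
func snapshotAndRestore(dbDir string) error {
	// Take the snapshot.
	tempDir, err := os.MkdirTemp("", "db-snapshot")
	if err != nil {
		return err
	}
	if err := copyAll(tempDir, dbDir); err != nil {
		return err
	}

	// ... mutate the contents of dbDir here ...

	// Copy the snapshot back over dbDir and drop the backup. Note that
	// files created after the snapshot are overwritten but not deleted.
	if err := copyAll(dbDir, tempDir); err != nil {
		return err
	}

	return os.RemoveAll(tempDir)
}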

itest/litd_test_list_on_test.go

Lines changed: 4 additions & 0 deletions
@@ -28,4 +28,8 @@ var allTestCases = []*testCase{
 		name: "test custom channels force close",
 		test: testCustomChannelsForceClose,
 	},
+	{
+		name: "test custom channels breach",
+		test: testCustomChannelsBreach,
+	},
 }
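
Registering the new entry here is what makes the scenario run at all: conceptually, the itest driver walks allTestCases and runs each entry as a subtest. The loop below is only a hypothetical sketch of that pattern to show how the name and test fields are consumed; runAllTestCases, newHarnessTest and the exact wiring are assumptions, not the project's actual runner:

// runAllTestCases is a hypothetical driver sketch: run every registered case
// as its own subtest and stop on the first failure.
func runAllTestCases(t *testing.T, net *NetworkHarness) {
	for _, tc := range allTestCases {
		tc := tc // Capture the range variable for the closure.

		ok := t.Run(tc.name, func(tt *testing.T) {
			ht := newHarnessTest(tt, net) // Assumed helper.
			tc.test(context.Background(), net, ht)
		})
		if !ok {
			break
		}
	}
}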

itest/network_harness.go

Lines changed: 59 additions & 0 deletions
@@ -375,6 +375,12 @@ tryconnect:
 				"finish syncing")
 		}
 	}
+
+	// Ignore "already connected to peer" errors.
+	if strings.Contains(err.Error(), "already connected to peer") {
+		return nil
+	}
+
 	return err
 }
 
@@ -707,6 +713,58 @@ func (n *NetworkHarness) StopNode(node *HarnessNode) error {
 	return node.Stop()
 }
 
+// StopAndBackupDB backs up the database of the target node.
+func (n *NetworkHarness) StopAndBackupDB(node *HarnessNode) error {
+	restart, err := n.SuspendNode(node)
+	if err != nil {
+		return err
+	}
+
+	// Backup files.
+	tempDir, err := os.MkdirTemp("", "past-state")
+	if err != nil {
+		return fmt.Errorf("unable to create temp db folder: %w",
+			err)
+	}
+
+	if err := copyAll(tempDir, node.Cfg.DBDir()); err != nil {
+		return fmt.Errorf("unable to copy database files: %w",
+			err)
+	}
+
+	node.Cfg.backupDBDir = tempDir
+
+	return restart()
+}
+
+// StopAndRestoreDB stops the target node, restores the database from a backup
+// and starts the node again.
+func (n *NetworkHarness) StopAndRestoreDB(node *HarnessNode) error {
+	restart, err := n.SuspendNode(node)
+	if err != nil {
+		return err
+	}
+
+	// Restore files.
+	if node.Cfg.backupDBDir == "" {
+		return fmt.Errorf("no database backup created")
+	}
+
+	err = copyAll(node.Cfg.DBDir(), node.Cfg.backupDBDir)
+	if err != nil {
+		return fmt.Errorf("unable to copy database files: %w",
+			err)
+	}
+
+	if err := os.RemoveAll(node.Cfg.backupDBDir); err != nil {
+		return fmt.Errorf("unable to remove backup dir: %w",
+			err)
+	}
+	node.Cfg.backupDBDir = ""
+
+	return restart()
+}
+
 // OpenChannel attempts to open a channel between srcNode and destNode with the
 // passed channel funding parameters. If the passed context has a timeout, then
 // if the timeout is reached before the channel pending notification is
@@ -997,6 +1055,7 @@ func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode,
 	if !force {
 		closeReq.SatPerVbyte = 5
 	}
+
 	closeRespStream, err = lnNode.CloseChannel(ctx, closeReq)
 	if err != nil {
 		return fmt.Errorf("unable to close channel: %v", err)