|
12 | 12 |
|
13 | 13 | use crate::chain::transaction::OutPoint;
|
14 | 14 | use crate::chain::ChannelMonitorUpdateStatus;
|
15 |
| -use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; |
| 15 | +use crate::events::{ClosureReason, Event, HTLCHandlingFailureReason, HTLCHandlingFailureType}; |
16 | 16 | use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState};
|
17 | 17 | use crate::ln::channelmanager::{self, PaymentId, RecipientOnionFields, Retry};
|
18 | 18 | use crate::ln::msgs;
|
@@ -1825,3 +1825,112 @@ fn test_force_closure_on_low_stale_fee() {
|
1825 | 1825 | };
|
1826 | 1826 | check_closed_events(&nodes[1], &[ExpectedCloseEvent::from_id_reason(chan_id, false, reason)]);
|
1827 | 1827 | }
|
| 1828 | + |
#[test]
fn test_pending_htlcs_arent_lost_on_mon_delay() {
	// Test that HTLCs which were queued to be sent to peers — but which never made it out due to a
	// pending, not-completed `ChannelMonitorUpdate` which got dropped with the `Channel` — aren't
	// lost. This is only possible when the `ChannelMonitorUpdate` is blocked, as otherwise it will
	// be queued in the `ChannelManager` and go out before any closure update.
	//
	// Topology: A <-> B <-> C. The HTLC of interest is A -> B -> C, stuck in B's holding cell for
	// the B <-> C channel when that channel is force-closed.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);

	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let node_a_id = nodes[0].node.get_our_node_id();
	let node_b_id = nodes[1].node.get_our_node_id();
	let node_c_id = nodes[2].node.get_our_node_id();

	create_announced_chan_between_nodes(&nodes, 0, 1);
	let (_, _, chan_id_bc, ..) = create_announced_chan_between_nodes(&nodes, 1, 2);

	// First route a payment from node B to C, which will allow us to block `ChannelMonitorUpdate`s
	// by not processing the `PaymentSent` event upon claim.
	let (preimage_a, payment_hash_a, ..) = route_payment(&nodes[1], &[&nodes[2]], 500_000);

	nodes[2].node.claim_funds(preimage_a);
	check_added_monitors(&nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash_a, 500_000);

	let mut claim = get_htlc_update_msgs(&nodes[2], &node_b_id);
	nodes[1].node.handle_update_fulfill_htlc(node_c_id, claim.update_fulfill_htlcs.pop().unwrap());

	// Now, while sitting on the `PaymentSent` event, move the B <-> C channel forward until B is
	// just waiting on C's last RAA.
	nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &claim.commitment_signed);
	check_added_monitors(&nodes[1], 1);

	let (raa, cs) = get_revoke_commit_msgs(&nodes[1], &node_c_id);

	nodes[2].node.handle_revoke_and_ack(node_b_id, &raa);
	check_added_monitors(&nodes[2], 1);
	nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &cs);
	check_added_monitors(&nodes[2], 1);

	// C's final RAA is held back; delivering it later is what releases B's holding cell.
	let cs_last_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id);

	// Now, while still sitting on the `PaymentSent` event, send an HTLC which will be relayed the
	// moment `cs_last_raa` is received by B.
	let (route_b, payment_hash_b, _preimage, payment_secret_b) =
		get_route_and_payment_hash!(&nodes[0], nodes[2], 900_000);
	let onion = RecipientOnionFields::secret_only(payment_secret_b);
	let id = PaymentId(payment_hash_b.0);
	nodes[0].node.send_payment_with_route(route_b, payment_hash_b, onion, id).unwrap();
	check_added_monitors(&nodes[0], 1);
	let as_send = get_htlc_update_msgs(&nodes[0], &node_b_id);

	nodes[1].node.handle_update_add_htlc(node_a_id, &as_send.update_add_htlcs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], as_send.commitment_signed, false);

	// Place the HTLC in the B <-> C channel holding cell for release upon RAA and finally deliver
	// `cs_last_raa`. Because we're still waiting to handle the `PaymentSent` event, the
	// `ChannelMonitorUpdate` and update messages will be held.
	nodes[1].node.process_pending_htlc_forwards();
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors(&nodes[1], 0);

	nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_last_raa);
	// No monitor update and no messages: the update is blocked behind the unprocessed
	// `PaymentSent` event, leaving the forwarded HTLC stuck in the holding cell.
	check_added_monitors(&nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Now force-close the B <-> C channel, making sure that we (finally) see the `PaymentSent`, as
	// well as the channel closure and, importantly, the HTLC fail-back to A.
	let message = "".to_string();
	nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &node_c_id, message).unwrap();
	check_closed_broadcast(&nodes[1], 1, true);
	check_added_monitors(&nodes[1], 1);

	// Exactly three events: the long-delayed `PaymentSent`, the `ChannelClosed`, and the
	// `HTLCHandlingFailed` for the holding-cell HTLC that never made it out.
	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 3, "{events:?}");
	assert!(events.iter().any(|ev| {
		if let Event::PaymentSent { payment_preimage: ev_preimage, .. } = ev {
			assert_eq!(*ev_preimage, preimage_a);
			true
		} else {
			false
		}
	}));
	assert!(events.iter().any(|ev| matches!(ev, Event::ChannelClosed { .. })));
	assert!(events.iter().any(|ev| {
		if let Event::HTLCHandlingFailed { failure_type, failure_reason, .. } = ev {
			assert!(matches!(failure_type, HTLCHandlingFailureType::Forward { .. }));
			// NOTE(review): `LocalHTLCFailureReason` is not in the import change visible in this
			// diff — presumably already imported elsewhere in the file; verify it compiles.
			if let Some(HTLCHandlingFailureReason::Local { reason }) = failure_reason {
				assert_eq!(*reason, LocalHTLCFailureReason::ChannelClosed);
			} else {
				panic!("Unexpected failure reason {failure_reason:?}");
			}
			true
		} else {
			false
		}
	}));

	// Processing forwards turns the failed-back HTLC into an actual `update_fail_htlc` to A.
	nodes[1].node.process_pending_htlc_forwards();
	check_added_monitors(&nodes[1], 1);

	let failures = get_htlc_update_msgs(&nodes[1], &node_a_id);
	nodes[0].node.handle_update_fail_htlc(node_b_id, &failures.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], failures.commitment_signed, false);
	expect_payment_failed!(nodes[0], payment_hash_b, false);
}
0 commit comments