From 78566963c2a198e94f7d9f2754a345259abb0e41 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 2 Mar 2026 15:47:37 +0200 Subject: [PATCH 1/9] ln/refactor: add previous_hop_data helper for HTLCSource --- lightning/src/ln/channelmanager.rs | 33 +++++++++++++----------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d042a69bf80..31fe59b6c1d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -857,6 +857,14 @@ mod fuzzy_channelmanager { }, } } + + pub(crate) fn previous_hop_data(&self) -> &[HTLCPreviousHopData] { + match self { + HTLCSource::PreviousHopData(prev_hop) => core::slice::from_ref(prev_hop), + HTLCSource::TrampolineForward { previous_hop_data, .. } => &previous_hop_data[..], + HTLCSource::OutboundRoute { .. } => &[], + } + } } /// Tracks the inbound corresponding to an outbound HTLC @@ -12532,15 +12540,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ chan.update_fulfill_htlc(&msg), chan_entry ); - let prev_hops = match &res.0 { - HTLCSource::PreviousHopData(prev_hop) => vec![prev_hop], - HTLCSource::TrampolineForward { previous_hop_data, .. } => { - previous_hop_data.iter().collect() - }, - _ => vec![], - }; let logger = WithChannelContext::from(&self.logger, &chan.context, None); - for prev_hop in prev_hops { + for prev_hop in res.0.previous_hop_data() { log_trace!(logger, "Holding the next revoke_and_ack until the preimage is durably persisted in the inbound edge's ChannelMonitor", ); @@ -19792,17 +19793,11 @@ impl< .into_iter() .filter_map(|(htlc_source, (htlc, preimage_opt))| { let payment_preimage = preimage_opt?; - let prev_htlcs = match &htlc_source { - HTLCSource::PreviousHopData(prev_hop) => vec![prev_hop], - HTLCSource::TrampolineForward { previous_hop_data, .. 
} => { - previous_hop_data.iter().collect() - }, - // If it was an outbound payment, we've handled it above - if a preimage - // came in and we persisted the `ChannelManager` we either handled it - // and are good to go or the channel force-closed - we don't have to - // handle the channel still live case here. - _ => vec![], - }; + // If it was an outbound payment, we've handled it above - if a preimage + // came in and we persisted the `ChannelManager` we either handled it + // and are good to go or the channel force-closed - we don't have to + // handle the channel still live case here. + let prev_htlcs = htlc_source.previous_hop_data(); let prev_htlcs_count = prev_htlcs.len(); if prev_htlcs_count == 0 { return None; From 5ee0ea79c71679f2849f53cebfe1b04961094b72 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 12 Mar 2026 08:25:58 -0400 Subject: [PATCH 2/9] ln/refactor: rename shared secret and populate in HTLCPreviousHopData --- lightning/src/ln/channelmanager.rs | 12 ++++++++---- lightning/src/ln/onion_payment.rs | 2 +- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 31fe59b6c1d..818941db648 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -231,11 +231,12 @@ pub enum PendingHTLCRouting { }, /// An HTLC which should be forwarded on to another Trampoline node. TrampolineForward { - /// The onion shared secret we build with the sender (or the preceding Trampoline node) used - /// to decrypt the onion. + /// The onion shared secret we build with the node that forwarded us this trampoline + /// forward (either the original sender, or a preceding Trampoline node), used to decrypt + /// the inner trampoline onion. /// /// This is later used to encrypt failure packets in the event that the HTLC is failed. 
- incoming_shared_secret: [u8; 32], + trampoline_shared_secret: [u8; 32], /// The onion which should be included in the forwarded HTLC, telling the next hop what to /// do with the HTLC. onion_packet: msgs::TrampolineOnionPacket, @@ -474,6 +475,9 @@ impl PendingAddHTLCInfo { PendingHTLCRouting::Receive { trampoline_shared_secret, .. } => { trampoline_shared_secret }, + PendingHTLCRouting::TrampolineForward { trampoline_shared_secret, .. } => { + Some(trampoline_shared_secret) + }, _ => None, }; @@ -17572,7 +17576,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (11, invoice_request, option), }, (3, TrampolineForward) => { - (0, incoming_shared_secret, required), + (0, trampoline_shared_secret, required), (2, onion_packet, required), (4, blinded, option), (6, node_id, required), diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 5111f6982fe..bb5b8f21a48 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -249,7 +249,7 @@ pub(super) fn create_fwd_pending_htlc_info( hmac: next_hop_hmac, }; PendingHTLCRouting::TrampolineForward { - incoming_shared_secret: shared_secret.secret_bytes(), + trampoline_shared_secret: shared_secret.secret_bytes(), onion_packet: outgoing_packet, node_id: next_trampoline, incoming_cltv_expiry: msg.cltv_expiry, From 6014afbbf6a74c7a46cb3d14e7da534eea475e9b Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 25 Feb 2026 10:55:08 +0200 Subject: [PATCH 3/9] ln/refactor: move mpp timeout check into helper function We'll re-use this logic to timeout tick incoming trampoline MPP. 
--- lightning/src/ln/channelmanager.rs | 66 +++++++++++++++++++----------- 1 file changed, 42 insertions(+), 24 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 818941db648..1e4459f57ed 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -548,6 +548,14 @@ struct ClaimableHTLC { counterparty_skimmed_fee_msat: Option, } +impl ClaimableHTLC { + // Increments timer ticks and returns a boolean indicating whether HTLC is timed out. + fn mpp_timer_tick(&mut self) -> bool { + self.timer_ticks += 1; + self.timer_ticks >= MPP_TIMEOUT_TICKS + } +} + impl From<&ClaimableHTLC> for events::ClaimedHTLC { fn from(val: &ClaimableHTLC) -> Self { events::ClaimedHTLC { @@ -1245,6 +1253,20 @@ impl ClaimablePayment { } } +/// Increments MPP timeout tick for all HTLCs and returns a boolean indicating whether the HTLC +/// set has hit its MPP timeout. Will return false if the set has reached the sender's intended +/// total, as the MPP has completed in this case. +fn check_mpp_timeout(payment: &mut ClaimablePayment) -> bool { + // This condition determining whether the MPP is complete here must match exactly the condition + // used in `process_pending_htlc_forwards`. + let total_intended_recvd_value = payment.htlcs.iter().map(|h| h.sender_intended_value).sum(); + let total_mpp_value = payment.onion_fields.total_mpp_amount_msat; + if total_mpp_value <= total_intended_recvd_value { + return false; + } + payment.htlcs.iter_mut().any(|htlc| htlc.mpp_timer_tick()) +} + /// Represent the channel funding transaction type. enum FundingType { /// This variant is useful when we want LDK to validate the funding transaction and @@ -8878,42 +8900,38 @@ impl< self.claimable_payments.lock().unwrap().claimable_payments.retain( |payment_hash, payment| { if payment.htlcs.is_empty() { - // This should be unreachable debug_assert!(false); return false; } if let OnionPayload::Invoice { .. 
} = payment.htlcs[0].onion_payload { - // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat). - // In this case we're not going to handle any timeouts of the parts here. - // This condition determining whether the MPP is complete here must match - // exactly the condition used in `process_pending_htlc_forwards`. - let total_intended_recvd_value = - payment.htlcs.iter().map(|h| h.sender_intended_value).sum(); - let total_mpp_value = payment.onion_fields.total_mpp_amount_msat; - if total_mpp_value <= total_intended_recvd_value { - return true; - } else if payment.htlcs.iter_mut().any(|htlc| { - htlc.timer_ticks += 1; - return htlc.timer_ticks >= MPP_TIMEOUT_TICKS; - }) { - let htlcs = payment - .htlcs - .drain(..) - .map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)); - timed_out_mpp_htlcs.extend(htlcs); - return false; + let mpp_timeout = check_mpp_timeout(payment); + if mpp_timeout { + timed_out_mpp_htlcs.extend(payment.htlcs.drain(..).map(|h| { + ( + HTLCSource::PreviousHopData(h.prev_hop), + *payment_hash, + HTLCHandlingFailureType::Receive { + payment_hash: *payment_hash, + }, + ) + })); } + return !mpp_timeout; } true }, ); - for htlc_source in timed_out_mpp_htlcs.drain(..) { - let source = HTLCSource::PreviousHopData(htlc_source.0.clone()); + for (htlc_source, payment_hash, failure_type) in timed_out_mpp_htlcs.drain(..) 
{ let failure_reason = LocalHTLCFailureReason::MPPTimeout; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingFailureType::Receive { payment_hash: htlc_source.1 }; - self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver, None); + self.fail_htlc_backwards_internal( + &htlc_source, + &payment_hash, + &reason, + failure_type, + None, + ); } for (err, counterparty_node_id) in handle_errors { From a9e5d0372334f2c3905ffa72dee98eea6c3553f8 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 22 Jan 2026 12:11:27 -0500 Subject: [PATCH 4/9] ln/refactor: move on chain timeout check into claimable htlc We'll re-use this to check trampoline MPP timeout in future commits. --- lightning/src/ln/channelmanager.rs | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1e4459f57ed..d45b30d0c29 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -554,6 +554,12 @@ impl ClaimableHTLC { self.timer_ticks += 1; self.timer_ticks >= MPP_TIMEOUT_TICKS } + + /// Returns a boolean indicating whether the HTLC has timed out on chain, accounting for a buffer + /// that gives us time to resolve it. + fn check_onchain_timeout(&self, height: u32, buffer: u32) -> bool { + height >= self.cltv_expiry - buffer + } } impl From<&ClaimableHTLC> for events::ClaimedHTLC { @@ -16225,14 +16231,16 @@ impl< } if let Some(height) = height_opt { + // If height is approaching the number of blocks we think it takes us to get our + // commitment transaction confirmed before the HTLC expires, plus the number of blocks + // we generally consider it to take to do a commitment update, just give up on it and + // fail the HTLC. 
self.claimable_payments.lock().unwrap().claimable_payments.retain( |payment_hash, payment| { payment.htlcs.retain(|htlc| { - // If height is approaching the number of blocks we think it takes us to get - // our commitment transaction confirmed before the HTLC expires, plus the - // number of blocks we generally consider it to take to do a commitment update, - // just give up on it and fail the HTLC. - if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { + let htlc_timed_out = + htlc.check_onchain_timeout(height, HTLC_FAIL_BACK_BUFFER); + if htlc_timed_out { let reason = LocalHTLCFailureReason::PaymentClaimBuffer; timed_out_htlcs.push(( HTLCSource::PreviousHopData(htlc.prev_hop.clone()), @@ -16245,10 +16253,8 @@ impl< payment_hash: payment_hash.clone(), }, )); - false - } else { - true } + !htlc_timed_out }); !payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. }, From 8383aac264282f5749faddb99e9a4eddaced9333 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 25 Feb 2026 11:29:31 +0200 Subject: [PATCH 5/9] ln/refactor: move checks on incoming mpp accumulation into method We're going to use the same logic for trampoline and for incoming MPP payments, so we pull this out into a separate function. --- lightning/src/ln/channelmanager.rs | 308 +++++++++++++++++------------ 1 file changed, 185 insertions(+), 123 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d45b30d0c29..f5bf6b3b50b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1257,6 +1257,11 @@ impl ClaimablePayment { .map(|htlc| (htlc.prev_hop.channel_id, htlc.prev_hop.user_channel_id)) .collect() } + + /// Returns the total counterparty skimmed fee across all HTLCs. 
+ fn total_counterparty_skimmed_msat(&self) -> u64 { + self.htlcs.iter().map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum() + } } /// Increments MPP timeout tick for all HTLCs and returns a boolean indicating whether the HTLC @@ -8232,6 +8237,136 @@ impl< } } + // Checks whether an incoming htlc can be added to our [`claimable_payments`], and handles + // MPP accumulation. On successful add, returns Ok() with a boolean indicating whether all + // MPP parts have arrived. Callers *MUST NOT* fail htlcs if Ok(..) is returned. + fn check_claimable_incoming_htlc( + &self, claimable_payment: &mut ClaimablePayment, claimable_htlc: ClaimableHTLC, + mut onion_fields: RecipientOnionFields, payment_hash: PaymentHash, + ) -> Result { + let onions_compatible = claimable_payment.onion_fields.check_merge(&mut onion_fields); + if onions_compatible.is_err() { + return Err(()); + } + let mut total_intended_recvd_value = claimable_htlc.sender_intended_value; + for htlc in claimable_payment.htlcs.iter() { + total_intended_recvd_value += htlc.sender_intended_value; + if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { + break; + } + } + let total_mpp_value = claimable_payment.onion_fields.total_mpp_amount_msat; + // The condition determining whether an MPP is complete must + // match exactly the condition used in `timer_tick_occurred` + if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { + return Err(()); + } else if total_intended_recvd_value - claimable_htlc.sender_intended_value + >= total_mpp_value + { + log_trace!( + self.logger, + "Failing HTLC with payment_hash {} as payment is already claimable", + &payment_hash + ); + return Err(()); + } else if total_intended_recvd_value >= total_mpp_value { + claimable_payment.htlcs.push(claimable_htlc); + let amount_msat = claimable_payment.htlcs.iter().map(|htlc| htlc.value).sum(); + claimable_payment + .htlcs + .iter_mut() + .for_each(|htlc| htlc.total_value_received = Some(amount_msat)); + let 
counterparty_skimmed_fee_msat = claimable_payment.total_counterparty_skimmed_msat(); + debug_assert!( + total_intended_recvd_value.saturating_sub(amount_msat) + <= counterparty_skimmed_fee_msat + ); + claimable_payment.htlcs.sort(); + Ok(true) + } else { + // Nothing to do - we haven't reached the total + // payment value yet, wait until we receive more + // MPP parts. + claimable_payment.htlcs.push(claimable_htlc); + Ok(false) + } + } + + // Handles the addition of a HTLC associated with a payment we're receiving. Err(bool) indicates + // whether we have failed after committing to the HTLC - callers should assert that this + // value is false. + fn handle_claimable_htlc( + &self, purpose: events::PaymentPurpose, claimable_htlc: ClaimableHTLC, + onion_fields: RecipientOnionFields, payment_hash: PaymentHash, receiver_node_id: PublicKey, + new_events: &mut VecDeque<(Event, Option)>, + ) -> Result<(), bool> { + let mut committed_to_claimable = false; + + let mut claimable_payments = self.claimable_payments.lock().unwrap(); + if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { + return Err(committed_to_claimable); + } + + let ref mut claimable_payment = claimable_payments + .claimable_payments + .entry(payment_hash) + // Note that if we insert here we MUST NOT fail_htlc!() + .or_insert_with(|| { + committed_to_claimable = true; + ClaimablePayment { + purpose: purpose.clone(), + htlcs: Vec::new(), + onion_fields: onion_fields.clone(), + } + }); + + let is_keysend = purpose.is_keysend(); + if purpose != claimable_payment.purpose { + let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" }; + log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend)); + return Err(committed_to_claimable); + } + + let htlc_expiry = claimable_htlc.cltv_expiry; + if self + .check_claimable_incoming_htlc( + 
claimable_payment, + claimable_htlc, + onion_fields, + payment_hash, + ) + .map_err(|_| committed_to_claimable)? + { + let claim_deadline = Some( + match claimable_payment.htlcs.iter().map(|h| h.cltv_expiry).min() { + Some(claim_deadline) => claim_deadline, + None => { + debug_assert!(false, "no htlcs in completed claimable_payment"); + htlc_expiry + }, + } - HTLC_FAIL_BACK_BUFFER, + ); + new_events.push_back(( + events::Event::PaymentClaimable { + receiver_node_id: Some(receiver_node_id), + payment_hash, + purpose, + amount_msat: claimable_payment.htlcs.iter().map(|htlc| htlc.value).sum(), + counterparty_skimmed_fee_msat: claimable_payment + .total_counterparty_skimmed_msat(), + receiving_channel_ids: claimable_payment.receiving_channel_ids(), + claim_deadline, + onion_fields: Some(claimable_payment.onion_fields.clone()), + payment_id: Some( + claimable_payment.inbound_payment_id(&self.inbound_payment_id_secret), + ), + }, + None, + )); + } + Ok(()) + } + fn process_receive_htlcs( &self, pending_forwards: &mut Vec, new_events: &mut VecDeque<(Event, Option)>, @@ -8262,7 +8397,7 @@ impl< payment_data, payment_context, phantom_shared_secret, - mut onion_fields, + onion_fields, has_recipient_created_payment_secret, invoice_request_opt, trampoline_shared_secret, @@ -8334,12 +8469,26 @@ impl< panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); }, }; + let htlc_value = incoming_amt_msat.unwrap_or(outgoing_amt_msat); + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + prev_outbound_scid_alias: prev_hop.prev_outbound_scid_alias, + user_channel_id: prev_hop.user_channel_id, + counterparty_node_id: prev_hop.counterparty_node_id, + channel_id: prev_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_hop.htlc_id, + incoming_packet_shared_secret: prev_hop.incoming_packet_shared_secret, + phantom_shared_secret, + trampoline_shared_secret, + blinded_failure, + cltv_expiry: Some(cltv_expiry), + }); let 
claimable_htlc = ClaimableHTLC { prev_hop, // We differentiate the received value from the sender intended value // if possible so that we don't prematurely mark MPP payments complete // if routing nodes overpay - value: incoming_amt_msat.unwrap_or(outgoing_amt_msat), + value: htlc_value, sender_intended_value: outgoing_amt_msat, timer_ticks: 0, total_value_received: None, @@ -8348,33 +8497,15 @@ impl< counterparty_skimmed_fee_msat: skimmed_fee_msat, }; - let mut committed_to_claimable = false; - macro_rules! fail_htlc { - ($htlc: expr, $payment_hash: expr) => { - debug_assert!(!committed_to_claimable); + ($htlc: expr, $payment_hash: expr, $committed_to_claimable: expr) => { + debug_assert!(!$committed_to_claimable); let err_data = invalid_payment_err_data( - $htlc.value, + htlc_value, self.best_block.read().unwrap().height, ); - let counterparty_node_id = $htlc.prev_hop.counterparty_node_id; - let incoming_packet_shared_secret = - $htlc.prev_hop.incoming_packet_shared_secret; - let prev_outbound_scid_alias = $htlc.prev_hop.prev_outbound_scid_alias; failed_forwards.push(( - HTLCSource::PreviousHopData(HTLCPreviousHopData { - prev_outbound_scid_alias, - user_channel_id: $htlc.prev_hop.user_channel_id, - counterparty_node_id, - channel_id: prev_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: $htlc.prev_hop.htlc_id, - incoming_packet_shared_secret, - phantom_shared_secret, - trampoline_shared_secret, - blinded_failure, - cltv_expiry: Some(cltv_expiry), - }), + htlc_source, payment_hash, HTLCFailReason::reason( LocalHTLCFailureReason::IncorrectPaymentDetails, @@ -8394,94 +8525,6 @@ impl< .expect("Failed to get node_id for phantom node recipient"); } - macro_rules! 
check_total_value { - ($purpose: expr) => {{ - let mut payment_claimable_generated = false; - let is_keysend = $purpose.is_keysend(); - let mut claimable_payments = self.claimable_payments.lock().unwrap(); - if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { - fail_htlc!(claimable_htlc, payment_hash); - } - let ref mut claimable_payment = claimable_payments.claimable_payments - .entry(payment_hash) - // Note that if we insert here we MUST NOT fail_htlc!() - .or_insert_with(|| { - committed_to_claimable = true; - ClaimablePayment { - purpose: $purpose.clone(), - htlcs: Vec::new(), - onion_fields: onion_fields.clone(), - } - }); - if $purpose != claimable_payment.purpose { - let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" }; - log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend)); - fail_htlc!(claimable_htlc, payment_hash); - } - let onions_compatible = - claimable_payment.onion_fields.check_merge(&mut onion_fields); - if onions_compatible.is_err() { - fail_htlc!(claimable_htlc, payment_hash); - } - let mut total_intended_recvd_value = - claimable_htlc.sender_intended_value; - let mut earliest_expiry = claimable_htlc.cltv_expiry; - for htlc in claimable_payment.htlcs.iter() { - total_intended_recvd_value += htlc.sender_intended_value; - earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry); - if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { break; } - } - let total_mpp_value = - claimable_payment.onion_fields.total_mpp_amount_msat; - // The condition determining whether an MPP is complete must - // match exactly the condition used in `timer_tick_occurred` - if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { - fail_htlc!(claimable_htlc, payment_hash); - } else if total_intended_recvd_value - claimable_htlc.sender_intended_value >= total_mpp_value { - 
log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable", - &payment_hash); - fail_htlc!(claimable_htlc, payment_hash); - } else if total_intended_recvd_value >= total_mpp_value { - #[allow(unused_assignments)] { - committed_to_claimable = true; - } - claimable_payment.htlcs.push(claimable_htlc); - let amount_msat = - claimable_payment.htlcs.iter().map(|htlc| htlc.value).sum(); - claimable_payment.htlcs.iter_mut() - .for_each(|htlc| htlc.total_value_received = Some(amount_msat)); - let counterparty_skimmed_fee_msat = claimable_payment.htlcs.iter() - .map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum(); - debug_assert!(total_intended_recvd_value.saturating_sub(amount_msat) - <= counterparty_skimmed_fee_msat); - claimable_payment.htlcs.sort(); - let payment_id = - claimable_payment.inbound_payment_id(&self.inbound_payment_id_secret); - new_events.push_back((events::Event::PaymentClaimable { - receiver_node_id: Some(receiver_node_id), - payment_hash, - purpose: $purpose, - amount_msat, - counterparty_skimmed_fee_msat, - receiving_channel_ids: claimable_payment.receiving_channel_ids(), - claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER), - onion_fields: Some(claimable_payment.onion_fields.clone()), - payment_id: Some(payment_id), - }, None)); - payment_claimable_generated = true; - } else { - // Nothing to do - we haven't reached the total - // payment value yet, wait until we receive more - // MPP parts. - claimable_payment.htlcs.push(claimable_htlc); - #[allow(unused_assignments)] { - committed_to_claimable = true; - } - } - payment_claimable_generated - }} - } - // Check that the payment hash and secret are known. 
Note that we // MUST take care to handle the "unknown payment hash" and // "incorrect payment secret" cases here identically or we'd expose @@ -8501,7 +8544,7 @@ impl< Ok(result) => result, Err(()) => { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash); - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(claimable_htlc, payment_hash, false); }, }; if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta { @@ -8511,12 +8554,12 @@ impl< if (cltv_expiry as u64) < expected_min_expiry_height { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})", &payment_hash, cltv_expiry, expected_min_expiry_height); - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(claimable_htlc, payment_hash, false); } } payment_preimage } else { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(claimable_htlc, payment_hash, false); } } else { None @@ -8532,10 +8575,20 @@ impl< let purpose = match from_parts_res { Ok(purpose) => purpose, Err(()) => { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(claimable_htlc, payment_hash, false); }, }; - check_total_value!(purpose); + + if let Err(committed_to_claimable) = self.handle_claimable_htlc( + purpose, + claimable_htlc, + onion_fields, + payment_hash, + receiver_node_id, + new_events, + ) { + fail_htlc!(claimable_htlc, payment_hash, committed_to_claimable); + } }, OnionPayload::Spontaneous(keysend_preimage) => { let purpose = if let Some(PaymentContext::AsyncBolt12Offer( @@ -8549,7 +8602,7 @@ impl< false, "We checked that payment_data is Some above" ); - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(claimable_htlc, payment_hash, false); }, }; @@ -8568,13 +8621,13 @@ impl< verified_invreq.amount_msats() { if payment_data.total_msat < invreq_amt_msat { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(claimable_htlc, payment_hash, false); } } verified_invreq }, 
None => { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(claimable_htlc, payment_hash, false); }, }; let payment_purpose_context = @@ -8590,16 +8643,25 @@ impl< match from_parts_res { Ok(purpose) => purpose, Err(()) => { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(claimable_htlc, payment_hash, false); }, } } else if payment_context.is_some() { log_trace!(self.logger, "Failing new HTLC with payment_hash {}: received a keysend payment to a non-async payments context {:#?}", payment_hash, payment_context); - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(claimable_htlc, payment_hash, false); } else { events::PaymentPurpose::SpontaneousPayment(keysend_preimage) }; - check_total_value!(purpose); + if let Err(committed_to_claimable) = self.handle_claimable_htlc( + purpose, + claimable_htlc, + onion_fields, + payment_hash, + receiver_node_id, + new_events, + ) { + fail_htlc!(claimable_htlc, payment_hash, committed_to_claimable); + } }, } }, From f1eb379bf484f12a21edb4353733f76f3f6cf430 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 12 Feb 2026 14:08:43 +0200 Subject: [PATCH 6/9] ln: move receive-specific failures into fail_htlc macro We'll only use this for non-trampoline incoming accumulated htlcs, because we want different source/failure for trampoline. 
--- lightning/src/ln/channelmanager.rs | 63 +++++++++++++++++------------- 1 file changed, 36 insertions(+), 27 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f5bf6b3b50b..675082e62f0 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -8470,19 +8470,15 @@ impl< }, }; let htlc_value = incoming_amt_msat.unwrap_or(outgoing_amt_msat); - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - prev_outbound_scid_alias: prev_hop.prev_outbound_scid_alias, - user_channel_id: prev_hop.user_channel_id, - counterparty_node_id: prev_hop.counterparty_node_id, - channel_id: prev_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: prev_hop.htlc_id, - incoming_packet_shared_secret: prev_hop.incoming_packet_shared_secret, - phantom_shared_secret, - trampoline_shared_secret, - blinded_failure, - cltv_expiry: Some(cltv_expiry), - }); + let HTLCPreviousHopData { + prev_outbound_scid_alias, + user_channel_id, + counterparty_node_id, + htlc_id, + incoming_packet_shared_secret, + .. + } = prev_hop; + let claimable_htlc = ClaimableHTLC { prev_hop, // We differentiate the received value from the sender intended value @@ -8497,8 +8493,21 @@ impl< counterparty_skimmed_fee_msat: skimmed_fee_msat, }; - macro_rules! fail_htlc { - ($htlc: expr, $payment_hash: expr, $committed_to_claimable: expr) => { + macro_rules! 
fail_receive_htlc { + ($committed_to_claimable: expr) => { + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + prev_outbound_scid_alias, + user_channel_id, + counterparty_node_id, + channel_id: prev_channel_id, + outpoint: prev_funding_outpoint, + htlc_id, + incoming_packet_shared_secret, + phantom_shared_secret, + trampoline_shared_secret, + blinded_failure, + cltv_expiry: Some(cltv_expiry), + }); debug_assert!(!$committed_to_claimable); let err_data = invalid_payment_err_data( htlc_value, @@ -8511,7 +8520,7 @@ impl< LocalHTLCFailureReason::IncorrectPaymentDetails, err_data, ), - HTLCHandlingFailureType::Receive { payment_hash: $payment_hash }, + HTLCHandlingFailureType::Receive { payment_hash }, )); continue 'next_forwardable_htlc; }; @@ -8544,7 +8553,7 @@ impl< Ok(result) => result, Err(()) => { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash); - fail_htlc!(claimable_htlc, payment_hash, false); + fail_receive_htlc!(false); }, }; if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta { @@ -8554,12 +8563,12 @@ impl< if (cltv_expiry as u64) < expected_min_expiry_height { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})", &payment_hash, cltv_expiry, expected_min_expiry_height); - fail_htlc!(claimable_htlc, payment_hash, false); + fail_receive_htlc!(false); } } payment_preimage } else { - fail_htlc!(claimable_htlc, payment_hash, false); + fail_receive_htlc!(false); } } else { None @@ -8575,7 +8584,7 @@ impl< let purpose = match from_parts_res { Ok(purpose) => purpose, Err(()) => { - fail_htlc!(claimable_htlc, payment_hash, false); + fail_receive_htlc!(false); }, }; @@ -8587,7 +8596,7 @@ impl< receiver_node_id, new_events, ) { - fail_htlc!(claimable_htlc, payment_hash, committed_to_claimable); + fail_receive_htlc!(committed_to_claimable); } }, OnionPayload::Spontaneous(keysend_preimage) => { @@ 
-8602,7 +8611,7 @@ impl< false, "We checked that payment_data is Some above" ); - fail_htlc!(claimable_htlc, payment_hash, false); + fail_receive_htlc!(false); }, }; @@ -8621,13 +8630,13 @@ impl< verified_invreq.amount_msats() { if payment_data.total_msat < invreq_amt_msat { - fail_htlc!(claimable_htlc, payment_hash, false); + fail_receive_htlc!(false); } } verified_invreq }, None => { - fail_htlc!(claimable_htlc, payment_hash, false); + fail_receive_htlc!(false); }, }; let payment_purpose_context = @@ -8643,12 +8652,12 @@ impl< match from_parts_res { Ok(purpose) => purpose, Err(()) => { - fail_htlc!(claimable_htlc, payment_hash, false); + fail_receive_htlc!(false); }, } } else if payment_context.is_some() { log_trace!(self.logger, "Failing new HTLC with payment_hash {}: received a keysend payment to a non-async payments context {:#?}", payment_hash, payment_context); - fail_htlc!(claimable_htlc, payment_hash, false); + fail_receive_htlc!(false); } else { events::PaymentPurpose::SpontaneousPayment(keysend_preimage) }; @@ -8660,7 +8669,7 @@ impl< receiver_node_id, new_events, ) { - fail_htlc!(claimable_htlc, payment_hash, committed_to_claimable); + fail_receive_htlc!(committed_to_claimable); } }, } From 6907deeedca01373f681f586fa1a85fc60501db3 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 17 Mar 2026 08:37:11 -0400 Subject: [PATCH 7/9] blinded_path/refactor: make construction generic over forwarding type To use helper functions for either trampoline or regular paths. 
--- lightning/src/blinded_path/payment.rs | 94 +++++++++++++++++++++------ 1 file changed, 73 insertions(+), 21 deletions(-) diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index 03b676adc92..60a3774f9f9 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -161,8 +161,12 @@ impl BlindedPaymentPath { ) } - fn new_inner( - intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, + fn new_inner< + F: ForwardTlvsInfo, + ES: EntropySource, + T: secp256k1::Signing + secp256k1::Verification, + >( + intermediate_nodes: &[ForwardNode], payee_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, dummy_tlvs: &[DummyTlvs], payee_tlvs: ReceiveTlvs, htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, secp_ctx: &Secp256k1, @@ -323,18 +327,36 @@ impl BlindedPaymentPath { } } -/// An intermediate node, its outbound channel, and relay parameters. +/// Common interface for forward TLV types used in blinded payment paths. +/// +/// Both [`ForwardTlvs`] (channel-based forwarding) and [`TrampolineForwardTlvs`] (trampoline +/// node-based forwarding) implement this trait, allowing blinded path construction to be generic +/// over the forwarding mechanism. +pub trait ForwardTlvsInfo: Writeable + Clone { + /// The payment relay parameters for this hop. + fn payment_relay(&self) -> &PaymentRelay; + /// The payment constraints for this hop. + fn payment_constraints(&self) -> &PaymentConstraints; + /// The features for this hop. + fn features(&self) -> &BlindedHopFeatures; +} + +/// An intermediate node, its forwarding parameters, and its [`ForwardTlvsInfo`] for use in a +/// [`BlindedPaymentPath`]. #[derive(Clone, Debug)] -pub struct PaymentForwardNode { +pub struct ForwardNode { /// The TLVs for this node's [`BlindedHop`], where the fee parameters contained within are also /// used for [`BlindedPayInfo`] construction. 
-	pub tlvs: ForwardTlvs,
+	pub tlvs: F,
 	/// This node's pubkey.
 	pub node_id: PublicKey,
 	/// The maximum value, in msat, that may be accepted by this node.
 	pub htlc_maximum_msat: u64,
 }
 
+/// An intermediate node for a regular (non-trampoline) [`BlindedPaymentPath`].
+pub type PaymentForwardNode = ForwardNode<ForwardTlvs>;
+
 /// Data to construct a [`BlindedHop`] for forwarding a payment.
 #[derive(Clone, Debug)]
 pub struct ForwardTlvs {
@@ -354,6 +376,18 @@ pub struct ForwardTlvs {
 	pub next_blinding_override: Option<PublicKey>,
 }
 
+impl ForwardTlvsInfo for ForwardTlvs {
+	fn payment_relay(&self) -> &PaymentRelay {
+		&self.payment_relay
+	}
+	fn payment_constraints(&self) -> &PaymentConstraints {
+		&self.payment_constraints
+	}
+	fn features(&self) -> &BlindedHopFeatures {
+		&self.features
+	}
+}
+
 /// Data to construct a [`BlindedHop`] for forwarding a Trampoline payment.
 #[derive(Clone, Debug)]
 pub struct TrampolineForwardTlvs {
@@ -373,6 +407,18 @@ pub struct TrampolineForwardTlvs {
 	pub next_blinding_override: Option<PublicKey>,
 }
 
+impl ForwardTlvsInfo for TrampolineForwardTlvs {
+	fn payment_relay(&self) -> &PaymentRelay {
+		&self.payment_relay
+	}
+	fn payment_constraints(&self) -> &PaymentConstraints {
+		&self.payment_constraints
+	}
+	fn features(&self) -> &BlindedHopFeatures {
+		&self.features
+	}
+}
+
 /// TLVs carried by a dummy hop within a blinded payment path.
 ///
 /// Dummy hops do not correspond to real forwarding decisions, but are processed
@@ -440,8 +486,8 @@ pub(crate) enum BlindedTrampolineTlvs {
 // Used to include forward and receive TLVs in the same iterator for encoding.
#[derive(Clone)] -enum BlindedPaymentTlvsRef<'a> { - Forward(&'a ForwardTlvs), +enum BlindedPaymentTlvsRef<'a, F: ForwardTlvsInfo = ForwardTlvs> { + Forward(&'a F), Dummy(&'a DummyTlvs), Receive(&'a ReceiveTlvs), } @@ -619,7 +665,7 @@ impl Writeable for ReceiveTlvs { } } -impl<'a> Writeable for BlindedPaymentTlvsRef<'a> { +impl<'a, F: ForwardTlvsInfo> Writeable for BlindedPaymentTlvsRef<'a, F> { fn write(&self, w: &mut W) -> Result<(), io::Error> { match self { Self::Forward(tlvs) => tlvs.write(w)?, @@ -723,8 +769,8 @@ impl Readable for BlindedTrampolineTlvs { pub(crate) const PAYMENT_PADDING_ROUND_OFF: usize = 30; /// Construct blinded payment hops for the given `intermediate_nodes` and payee info. -pub(super) fn blinded_hops( - secp_ctx: &Secp256k1, intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, +pub(super) fn blinded_hops( + secp_ctx: &Secp256k1, intermediate_nodes: &[ForwardNode], payee_node_id: PublicKey, dummy_tlvs: &[DummyTlvs], payee_tlvs: ReceiveTlvs, session_priv: &SecretKey, local_node_receive_key: ReceiveAuthKey, ) -> Vec { @@ -823,15 +869,15 @@ where Ok((curr_base_fee, curr_prop_mil)) } -pub(super) fn compute_payinfo( - intermediate_nodes: &[PaymentForwardNode], dummy_tlvs: &[DummyTlvs], payee_tlvs: &ReceiveTlvs, +pub(super) fn compute_payinfo( + intermediate_nodes: &[ForwardNode], dummy_tlvs: &[DummyTlvs], payee_tlvs: &ReceiveTlvs, payee_htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, ) -> Result { let routing_fees = intermediate_nodes .iter() .map(|node| RoutingFees { - base_msat: node.tlvs.payment_relay.fee_base_msat, - proportional_millionths: node.tlvs.payment_relay.fee_proportional_millionths, + base_msat: node.tlvs.payment_relay().fee_base_msat, + proportional_millionths: node.tlvs.payment_relay().fee_proportional_millionths, }) .chain(dummy_tlvs.iter().map(|tlvs| RoutingFees { base_msat: tlvs.payment_relay.fee_base_msat, @@ -847,24 +893,24 @@ pub(super) fn compute_payinfo( for node in intermediate_nodes.iter() { 
// In the future, we'll want to take the intersection of all supported features for the // `BlindedPayInfo`, but there are no features in that context right now. - if node.tlvs.features.requires_unknown_bits_from(&BlindedHopFeatures::empty()) { + if node.tlvs.features().requires_unknown_bits_from(&BlindedHopFeatures::empty()) { return Err(()); } cltv_expiry_delta = - cltv_expiry_delta.checked_add(node.tlvs.payment_relay.cltv_expiry_delta).ok_or(())?; + cltv_expiry_delta.checked_add(node.tlvs.payment_relay().cltv_expiry_delta).ok_or(())?; // The min htlc for an intermediate node is that node's min minus the fees charged by all of the // following hops for forwarding that min, since that fee amount will automatically be included // in the amount that this node receives and contribute towards reaching its min. htlc_minimum_msat = amt_to_forward_msat( - core::cmp::max(node.tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat), - &node.tlvs.payment_relay, + core::cmp::max(node.tlvs.payment_constraints().htlc_minimum_msat, htlc_minimum_msat), + node.tlvs.payment_relay(), ) .unwrap_or(1); // If underflow occurs, we definitely reached this node's min htlc_maximum_msat = amt_to_forward_msat( core::cmp::min(node.htlc_maximum_msat, htlc_maximum_msat), - &node.tlvs.payment_relay, + node.tlvs.payment_relay(), ) .ok_or(())?; // If underflow occurs, we cannot send to this hop without exceeding their max } @@ -1038,8 +1084,14 @@ mod tests { payment_constraints: PaymentConstraints { max_cltv_expiry: 0, htlc_minimum_msat: 1 }, payment_context: PaymentContext::Bolt12Refund(Bolt12RefundContext {}), }; - let blinded_payinfo = - super::compute_payinfo(&[], &[], &recv_tlvs, 4242, TEST_FINAL_CLTV as u16).unwrap(); + let blinded_payinfo = super::compute_payinfo::( + &[], + &[], + &recv_tlvs, + 4242, + TEST_FINAL_CLTV as u16, + ) + .unwrap(); assert_eq!(blinded_payinfo.fee_base_msat, 0); assert_eq!(blinded_payinfo.fee_proportional_millionths, 0); 
 	assert_eq!(blinded_payinfo.cltv_expiry_delta, TEST_FINAL_CLTV as u16);

From 3229acf10e3e792929e9141bceed24c5ce5e25d8 Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Wed, 18 Mar 2026 08:11:20 -0400
Subject: [PATCH 8/9] ln/refactor: add constructor for ClaimableHTLC

Added so that we can create ClaimableHTLC for trampoline tests in a
separate file, rather than needing to make several fields pub(crate)
just for the sake of tests.
---
 lightning/src/ln/channelmanager.rs | 28 +++++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 675082e62f0..21e457b7a51 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -549,6 +549,22 @@ struct ClaimableHTLC {
 }
 
 impl ClaimableHTLC {
+	pub(crate) fn new(
+		prev_hop: HTLCPreviousHopData, value: u64, sender_intended_value: u64, cltv_expiry: u32,
+		onion_payload: OnionPayload, counterparty_skimmed_fee_msat: Option<u64>,
+	) -> Self {
+		ClaimableHTLC {
+			prev_hop,
+			cltv_expiry,
+			value,
+			sender_intended_value,
+			onion_payload,
+			timer_ticks: 0,
+			total_value_received: None,
+			counterparty_skimmed_fee_msat,
+		}
+	}
+
 	// Increments timer ticks and returns a boolean indicating whether HTLC is timed out.
 	fn mpp_timer_tick(&mut self) -> bool {
 		self.timer_ticks += 1;
@@ -8479,19 +8495,17 @@ impl<
 			..
 		} = prev_hop;
 
-		let claimable_htlc = ClaimableHTLC {
+		let claimable_htlc = ClaimableHTLC::new(
 			prev_hop,
 			// We differentiate the received value from the sender intended value
 			// if possible so that we don't prematurely mark MPP payments complete
 			// if routing nodes overpay
-			value: htlc_value,
-			sender_intended_value: outgoing_amt_msat,
-			timer_ticks: 0,
-			total_value_received: None,
+			htlc_value,
+			outgoing_amt_msat,
 			cltv_expiry,
 			onion_payload,
-			counterparty_skimmed_fee_msat: skimmed_fee_msat,
-		};
+			skimmed_fee_msat,
+		);
 
 		macro_rules!
fail_receive_htlc { ($committed_to_claimable: expr) => { From f15271e8b4eeb51fcd476859226239f7972af768 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 12 Feb 2026 13:31:49 +0200 Subject: [PATCH 9/9] ln/refactor: pass minimum delta into check_incoming_htlc_cltv For trampoline payments, we don't want to enforce a minimum cltv delta between our incoming and outer onion outgoing CLTV because we'll calculate our delta from the inner trampoline onion's value. However, we still want to check that we get at least the CLTV that the sending node intended for us and we still want to validate our incoming value. Refactor to allow setting a zero delta, for use for trampoline payments. --- lightning/src/ln/channelmanager.rs | 8 ++++++-- lightning/src/ln/onion_payment.rs | 6 +++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 21e457b7a51..2a715271a65 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5219,8 +5219,12 @@ impl< }; let cur_height = self.best_block.read().unwrap().height + 1; - check_incoming_htlc_cltv(cur_height, next_hop.outgoing_cltv_value, msg.cltv_expiry)?; - + check_incoming_htlc_cltv( + cur_height, + next_hop.outgoing_cltv_value, + msg.cltv_expiry, + MIN_CLTV_EXPIRY_DELTA, + )?; Ok(intercept) } diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index bb5b8f21a48..615c357d11b 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -515,7 +515,7 @@ pub fn peel_payment_onion }; if let Err(reason) = check_incoming_htlc_cltv( - cur_height, outgoing_cltv_value, msg.cltv_expiry, + cur_height, outgoing_cltv_value, msg.cltv_expiry, MIN_CLTV_EXPIRY_DELTA, ) { return Err(InboundHTLCErr { msg: "incoming cltv check failed", @@ -719,9 +719,9 @@ pub(super) fn decode_incoming_update_add_htlc_onion Result<(), LocalHTLCFailureReason> { - if (cltv_expiry as u64) < 
(outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { + if (cltv_expiry as u64) < (outgoing_cltv_value) as u64 + min_cltv_expiry_delta as u64 { return Err(LocalHTLCFailureReason::IncorrectCLTVExpiry); } // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,