diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 869a431e757..c1b43a891c4 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -3317,8 +3317,11 @@ macro_rules! process_events_body {
 
 				// TODO: This behavior should be documented. It's unintuitive that we query
 				// ChannelMonitors when clearing other events.
-				if $self.process_pending_monitor_events() {
-					result = NotifyOption::DoPersist;
+				match $self.process_pending_monitor_events() {
+					NotifyOption::DoPersist => result = NotifyOption::DoPersist,
+					NotifyOption::SkipPersistHandleEvents if result == NotifyOption::SkipPersistNoEvents =>
+						result = NotifyOption::SkipPersistHandleEvents,
+					_ => {},
 				}
 			}
 
@@ -9930,6 +9933,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
 			let update_res =
 				self.chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]);
+			// Ensure the ChannelManager is persisted so that `in_flight_monitor_updates`
+			// is in sync with the updates applied to the chain monitor.
+			self.needs_persist_flag.store(true, Ordering::Release);
 			let logger =
 				WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
 			let update_completed = self.handle_monitor_update_res(update_res, logger);
@@ -12839,19 +12845,21 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		Ok(())
 	}
 
-	/// Process pending events from the [`chain::Watch`], returning whether any events were processed.
-	fn process_pending_monitor_events(&self) -> bool {
+	/// Process pending events from the [`chain::Watch`], returning a [`NotifyOption`] indicating
+	/// whether persistence is needed.
+	fn process_pending_monitor_events(&self) -> NotifyOption {
 		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
 
 		let mut failed_channels: Vec<(Result<Infallible, _>, _)> = Vec::new();
 		let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
-		let has_pending_monitor_events = !pending_monitor_events.is_empty();
+		let mut result = NotifyOption::SkipPersistNoEvents;
 
 		for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in
 			pending_monitor_events.drain(..)
 		{
 			for monitor_event in monitor_events.drain(..) {
 				match monitor_event {
 					MonitorEvent::HTLCEvent(htlc_update) => {
+						result = NotifyOption::DoPersist;
 						let logger = WithContext::from(
 							&self.logger,
 							Some(counterparty_node_id),
@@ -12904,6 +12912,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					},
 					MonitorEvent::HolderForceClosed(_)
 					| MonitorEvent::HolderForceClosedWithInfo { .. } => {
+						result = NotifyOption::DoPersist;
 						let per_peer_state = self.per_peer_state.read().unwrap();
 						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -12936,6 +12945,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 						}
 					},
 					MonitorEvent::CommitmentTxConfirmed(_) => {
+						result = NotifyOption::DoPersist;
 						let per_peer_state = self.per_peer_state.read().unwrap();
 						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -12957,6 +12967,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 						}
 					},
 					MonitorEvent::Completed { channel_id, monitor_update_id, .. } => {
+						// Completed events don't require ChannelManager persistence.
+						// `channel_monitor_updated` clears transient flags and may
+						// trigger actions like forwarding HTLCs or finalizing claims,
+						// but all of these are re-derived on restart: the serialized
+						// `in_flight_monitor_updates` will still contain the completed
+						// entries, and deserialization will detect that the monitor is
+						// ahead, synthesizing a `MonitorUpdatesComplete` background
+						// event that re-drives `channel_monitor_updated`.
+						if result == NotifyOption::SkipPersistNoEvents {
+							result = NotifyOption::SkipPersistHandleEvents;
+						}
 						self.channel_monitor_updated(
 							&channel_id,
 							Some(monitor_update_id),
@@ -12971,7 +12992,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			let _ = self.handle_error(err, counterparty_node_id);
 		}
 
-		has_pending_monitor_events
+		result
 	}
 
 	fn handle_holding_cell_free_result(&self, result: FreeHoldingCellsResult) {
@@ -15005,8 +15026,14 @@ impl<
 
 		// TODO: This behavior should be documented. It's unintuitive that we query
 		// ChannelMonitors when clearing other events.
-		if self.process_pending_monitor_events() {
-			result = NotifyOption::DoPersist;
+		match self.process_pending_monitor_events() {
+			NotifyOption::DoPersist => result = NotifyOption::DoPersist,
+			NotifyOption::SkipPersistHandleEvents
+				if result == NotifyOption::SkipPersistNoEvents =>
+			{
+				result = NotifyOption::SkipPersistHandleEvents
+			},
+			_ => {},
 		}
 
 		if self.maybe_generate_initial_closing_signed() {
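A minimal sketch of the merge discipline both call sites above implement, assuming only the three `NotifyOption` variants the patch uses (`DoPersist`, `SkipPersistHandleEvents`, `SkipPersistNoEvents`); the standalone enum and the `merge` helper below are hypothetical illustrations, not code from the patch or an LDK API:

// Hypothetical sketch: `NotifyOption` mirrors the variants referenced in the
// patch; `merge` simply restates the two `match` blocks at the call sites.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum NotifyOption {
	DoPersist,
	SkipPersistHandleEvents,
	SkipPersistNoEvents,
}

fn merge(result: NotifyOption, new: NotifyOption) -> NotifyOption {
	match new {
		// Monitor events that change channel state always force a persist.
		NotifyOption::DoPersist => NotifyOption::DoPersist,
		// `Completed` events only need event handling; they upgrade
		// "no events" but must never mask an earlier `DoPersist`.
		NotifyOption::SkipPersistHandleEvents
			if result == NotifyOption::SkipPersistNoEvents =>
			NotifyOption::SkipPersistHandleEvents,
		// Everything else leaves the accumulated result untouched.
		_ => result,
	}
}

fn main() {
	// Under the ordering DoPersist > SkipPersistHandleEvents >
	// SkipPersistNoEvents, merging only ever moves the result "up".
	assert_eq!(
		merge(NotifyOption::DoPersist, NotifyOption::SkipPersistHandleEvents),
		NotifyOption::DoPersist
	);
	assert_eq!(
		merge(NotifyOption::SkipPersistNoEvents, NotifyOption::SkipPersistHandleEvents),
		NotifyOption::SkipPersistHandleEvents
	);
}

Treating the three variants as a small ordered lattice and taking the maximum is what lets `Completed` events reach the event handler without forcing a ChannelManager write.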