Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Make event handling fallible
Previously, we would require our users to handle all events
successfully inline or panic while trying to do so. If they exited
the `EventHandler` any other way, we'd forget about the events and
wouldn't replay them after restart.

Here, we implement fallible event handling, allowing the user to return
`Err(())` which signals to our event providers they should abort event
processing and replay any unhandled events later (i.e., in the next
invocation).
  • Loading branch information
tnull committed Jul 18, 2024
commit 018908fe9e23f3994ea1a015cdb955708ac5fdc1
81 changes: 49 additions & 32 deletions lightning-background-processor/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ use lightning::chain::chainmonitor::{ChainMonitor, Persist};
use lightning::events::EventHandler;
#[cfg(feature = "std")]
use lightning::events::EventsProvider;
#[cfg(feature = "futures")]
use lightning::events::ReplayEvent;
use lightning::events::{Event, PathFailure};

use lightning::ln::channelmanager::AChannelManager;
Expand Down Expand Up @@ -583,6 +585,7 @@ use futures_util::{dummy_waker, Selector, SelectorOutput};
/// could setup `process_events_async` like this:
/// ```
/// # use lightning::io;
/// # use lightning::events::ReplayEvent;
/// # use std::sync::{Arc, RwLock};
/// # use std::sync::atomic::{AtomicBool, Ordering};
/// # use std::time::SystemTime;
Expand All @@ -600,7 +603,7 @@ use futures_util::{dummy_waker, Selector, SelectorOutput};
/// # }
/// # struct EventHandler {}
/// # impl EventHandler {
/// # async fn handle_event(&self, _: lightning::events::Event) {}
/// # async fn handle_event(&self, _: lightning::events::Event) -> Result<(), ReplayEvent> { Ok(()) }
/// # }
/// # #[derive(Eq, PartialEq, Clone, Hash)]
/// # struct SocketDescriptor {}
Expand Down Expand Up @@ -698,7 +701,7 @@ pub async fn process_events_async<
G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
L: 'static + Deref + Send + Sync,
P: 'static + Deref + Send + Sync,
EventHandlerFuture: core::future::Future<Output = ()>,
EventHandlerFuture: core::future::Future<Output = Result<(), ReplayEvent>>,
EventHandler: Fn(Event) -> EventHandlerFuture,
PS: 'static + Deref + Send,
M: 'static
Expand Down Expand Up @@ -751,12 +754,16 @@ where
if update_scorer(scorer, &event, duration_since_epoch) {
log_trace!(logger, "Persisting scorer after update");
if let Err(e) = persister.persist_scorer(&scorer) {
log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e);
// We opt not to abort early on persistence failure here as persisting
// the scorer is non-critical and we still hope that it will have
// resolved itself when it is potentially critical in event handling
// below.
}
}
}
}
event_handler(event).await;
event_handler(event).await
})
};
define_run_body!(
Expand Down Expand Up @@ -913,7 +920,7 @@ impl BackgroundProcessor {
}
}
}
event_handler.handle_event(event);
event_handler.handle_event(event)
};
define_run_body!(
persister,
Expand Down Expand Up @@ -1757,7 +1764,7 @@ mod tests {
// Initiate the background processors to watch each node.
let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
let event_handler = |_: _| {};
let event_handler = |_: _| Ok(());
let bg_processor = BackgroundProcessor::start(
persister,
event_handler,
Expand Down Expand Up @@ -1847,7 +1854,7 @@ mod tests {
let (_, nodes) = create_nodes(1, "test_timer_tick_called");
let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
let event_handler = |_: _| {};
let event_handler = |_: _| Ok(());
let bg_processor = BackgroundProcessor::start(
persister,
event_handler,
Expand Down Expand Up @@ -1889,7 +1896,7 @@ mod tests {
let persister = Arc::new(
Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"),
);
let event_handler = |_: _| {};
let event_handler = |_: _| Ok(());
let bg_processor = BackgroundProcessor::start(
persister,
event_handler,
Expand Down Expand Up @@ -1924,7 +1931,7 @@ mod tests {

let bp_future = super::process_events_async(
persister,
|_: _| async {},
|_: _| async { Ok(()) },
nodes[0].chain_monitor.clone(),
nodes[0].node.clone(),
Some(nodes[0].messenger.clone()),
Expand Down Expand Up @@ -1957,7 +1964,7 @@ mod tests {
let data_dir = nodes[0].kv_store.get_data_dir();
let persister =
Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
let event_handler = |_: _| {};
let event_handler = |_: _| Ok(());
let bg_processor = BackgroundProcessor::start(
persister,
event_handler,
Expand Down Expand Up @@ -1986,7 +1993,7 @@ mod tests {
let data_dir = nodes[0].kv_store.get_data_dir();
let persister =
Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
let event_handler = |_: _| {};
let event_handler = |_: _| Ok(());
let bg_processor = BackgroundProcessor::start(
persister,
event_handler,
Expand Down Expand Up @@ -2021,13 +2028,16 @@ mod tests {
// Set up a background event handler for FundingGenerationReady events.
let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
let event_handler = move |event: Event| match event {
Event::FundingGenerationReady { .. } => funding_generation_send
.send(handle_funding_generation_ready!(event, channel_value))
.unwrap(),
Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
Event::ChannelReady { .. } => {},
_ => panic!("Unexpected event: {:?}", event),
let event_handler = move |event: Event| {
match event {
Event::FundingGenerationReady { .. } => funding_generation_send
.send(handle_funding_generation_ready!(event, channel_value))
.unwrap(),
Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
Event::ChannelReady { .. } => {},
_ => panic!("Unexpected event: {:?}", event),
}
Ok(())
};

let bg_processor = BackgroundProcessor::start(
Expand Down Expand Up @@ -2082,11 +2092,14 @@ mod tests {

// Set up a background event handler for SpendableOutputs events.
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
let event_handler = move |event: Event| match event {
Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
Event::ChannelReady { .. } => {},
Event::ChannelClosed { .. } => {},
_ => panic!("Unexpected event: {:?}", event),
let event_handler = move |event: Event| {
match event {
Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
Event::ChannelReady { .. } => {},
Event::ChannelClosed { .. } => {},
_ => panic!("Unexpected event: {:?}", event),
}
Ok(())
};
let persister = Arc::new(Persister::new(data_dir));
let bg_processor = BackgroundProcessor::start(
Expand Down Expand Up @@ -2220,7 +2233,7 @@ mod tests {
let (_, nodes) = create_nodes(2, "test_scorer_persistence");
let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
let event_handler = |_: _| {};
let event_handler = |_: _| Ok(());
let bg_processor = BackgroundProcessor::start(
persister,
event_handler,
Expand Down Expand Up @@ -2315,7 +2328,7 @@ mod tests {
let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

let event_handler = |_: _| {};
let event_handler = |_: _| Ok(());
let background_processor = BackgroundProcessor::start(
persister,
event_handler,
Expand Down Expand Up @@ -2350,7 +2363,7 @@ mod tests {
let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
let bp_future = super::process_events_async(
persister,
|_: _| async {},
|_: _| async { Ok(()) },
nodes[0].chain_monitor.clone(),
nodes[0].node.clone(),
Some(nodes[0].messenger.clone()),
Expand Down Expand Up @@ -2492,12 +2505,15 @@ mod tests {
#[test]
fn test_payment_path_scoring() {
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
let event_handler = move |event: Event| match event {
Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
Event::ProbeFailed { .. } => sender.send(event).unwrap(),
_ => panic!("Unexpected event: {:?}", event),
let event_handler = move |event: Event| {
match event {
Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
Event::ProbeFailed { .. } => sender.send(event).unwrap(),
_ => panic!("Unexpected event: {:?}", event),
}
Ok(())
};

let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
Expand Down Expand Up @@ -2543,6 +2559,7 @@ mod tests {
Event::ProbeFailed { .. } => sender_ref.send(event).await.unwrap(),
_ => panic!("Unexpected event: {:?}", event),
}
Ok(())
}
};

Expand Down
1 change: 1 addition & 0 deletions lightning-invoice/src/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1391,6 +1391,7 @@ mod test {
} else {
other_events.borrow_mut().push(event);
}
Ok(())
};
nodes[fwd_idx].node.process_pending_events(&forward_event_handler);
nodes[fwd_idx].node.process_pending_events(&forward_event_handler);
Expand Down
23 changes: 16 additions & 7 deletions lightning/src/chain/chainmonitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,7 @@ use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::ln::types::ChannelId;
use crate::sign::ecdsa::EcdsaChannelSigner;
use crate::events;
use crate::events::{Event, EventHandler};
use crate::events::{self, Event, EventHandler, ReplayEvent};
use crate::util::logger::{Logger, WithContext};
use crate::util::errors::APIError;
use crate::util::wakers::{Future, Notifier};
Expand Down Expand Up @@ -533,7 +532,7 @@ where C::Target: chain::Filter,
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
use crate::events::EventsProvider;
let events = core::cell::RefCell::new(Vec::new());
let event_handler = |event: events::Event| events.borrow_mut().push(event);
let event_handler = |event: events::Event| Ok(events.borrow_mut().push(event));
self.process_pending_events(&event_handler);
events.into_inner()
}
Expand All @@ -544,16 +543,21 @@ where C::Target: chain::Filter,
/// See the trait-level documentation of [`EventsProvider`] for requirements.
///
/// [`EventsProvider`]: crate::events::EventsProvider
pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
pub async fn process_pending_events_async<Future: core::future::Future<Output = Result<(), ReplayEvent>>, H: Fn(Event) -> Future>(
&self, handler: H
) {
// Sadly we can't hold the monitors read lock through an async call. Thus we have to do a
// crazy dance to process a monitor's events then only remove them once we've done so.
let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::<Vec<_>>();
for funding_txo in mons_to_process {
let mut ev;
super::channelmonitor::process_events_body!(
self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), ev, handler(ev).await);
match super::channelmonitor::process_events_body!(
self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), ev, handler(ev).await) {
Ok(()) => {},
Err(ReplayEvent ()) => {
self.event_notifier.notify();
}
}
}
}

Expand Down Expand Up @@ -880,7 +884,12 @@ impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref,
/// [`BumpTransaction`]: events::Event::BumpTransaction
fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
for monitor_state in self.monitors.read().unwrap().values() {
monitor_state.monitor.process_pending_events(&handler);
match monitor_state.monitor.process_pending_events(&handler) {
Ok(()) => {},
Err(ReplayEvent ()) => {
self.event_notifier.notify();
}
}
}
}
}
Expand Down
51 changes: 36 additions & 15 deletions lightning/src/chain/channelmonitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ use crate::chain::Filter;
use crate::util::logger::{Logger, Record};
use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
use crate::util::byte_utils;
use crate::events::{ClosureReason, Event, EventHandler};
use crate::events::{ClosureReason, Event, EventHandler, ReplayEvent};
use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent};

#[allow(unused_imports)]
Expand Down Expand Up @@ -1159,34 +1159,53 @@ impl<Signer: EcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
macro_rules! _process_events_body {
($self_opt: expr, $event_to_handle: expr, $handle_event: expr) => {
loop {
let mut handling_res = Ok(());
let (pending_events, repeated_events);
if let Some(us) = $self_opt {
let mut inner = us.inner.lock().unwrap();
if inner.is_processing_pending_events {
break;
break handling_res;
}
inner.is_processing_pending_events = true;

pending_events = inner.pending_events.clone();
repeated_events = inner.get_repeated_events();
} else { break; }
let num_events = pending_events.len();
} else { break handling_res; }

for event in pending_events.into_iter().chain(repeated_events.into_iter()) {
let mut num_handled_events = 0;
for event in pending_events {
$event_to_handle = event;
$handle_event;
match $handle_event {
Ok(()) => num_handled_events += 1,
Err(e) => {
// If we encounter an error we stop handling events and make sure to replay
// any unhandled events on the next invocation.
handling_res = Err(e);
break;
}
}
}

if handling_res.is_ok() {
for event in repeated_events {
// For repeated events we ignore any errors as they will be replayed eventually
// anyways.
$event_to_handle = event;
let _ = $handle_event;
}
}

if let Some(us) = $self_opt {
let mut inner = us.inner.lock().unwrap();
inner.pending_events.drain(..num_events);
inner.pending_events.drain(..num_handled_events);
inner.is_processing_pending_events = false;
if !inner.pending_events.is_empty() {
// If there's more events to process, go ahead and do so.
if handling_res.is_ok() && !inner.pending_events.is_empty() {
// If there's more events to process and we didn't fail so far, go ahead and do
// so.
continue;
}
}
break;
break handling_res;
}
}
}
Expand Down Expand Up @@ -1498,21 +1517,23 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
/// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
/// order to handle these events.
///
/// Will return a [`ReplayEvent`] error if event handling failed and should eventually be retried.
///
/// [`SpendableOutputs`]: crate::events::Event::SpendableOutputs
/// [`BumpTransaction`]: crate::events::Event::BumpTransaction
pub fn process_pending_events<H: Deref>(&self, handler: &H) where H::Target: EventHandler {
pub fn process_pending_events<H: Deref>(&self, handler: &H) -> Result<(), ReplayEvent> where H::Target: EventHandler {
let mut ev;
process_events_body!(Some(self), ev, handler.handle_event(ev));
process_events_body!(Some(self), ev, handler.handle_event(ev))
}

/// Processes any events asynchronously.
///
/// See [`Self::process_pending_events`] for more information.
pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
pub async fn process_pending_events_async<Future: core::future::Future<Output = Result<(), ReplayEvent>>, H: Fn(Event) -> Future>(
&self, handler: &H
) {
) -> Result<(), ReplayEvent> {
let mut ev;
process_events_body!(Some(self), ev, { handler(ev).await });
process_events_body!(Some(self), ev, { handler(ev).await })
}

#[cfg(test)]
Expand Down
Loading