From e1ee175353ede02f279f68bf3dc49aac3c25d69d Mon Sep 17 00:00:00 2001
From: maciejnems
Date: Fri, 27 Jan 2023 12:56:01 +0100
Subject: [PATCH 1/5] make aleph node compatible with state pruning

---
 bin/node/src/service.rs                       |  14 +-
 finality-aleph/src/nodes/nonvalidator_node.rs |   3 +-
 finality-aleph/src/nodes/validator_node.rs    |   3 +-
 finality-aleph/src/session_map.rs             | 126 +++++++++---------
 .../src/sync/substrate/verification/cache.rs  |   2 +-
 5 files changed, 78 insertions(+), 70 deletions(-)

diff --git a/bin/node/src/service.rs b/bin/node/src/service.rs
index 1d789c8f0d..4ca83f71b6 100644
--- a/bin/node/src/service.rs
+++ b/bin/node/src/service.rs
@@ -28,7 +28,7 @@ use sp_blockchain::Backend as _;
 use sp_consensus_aura::{sr25519::AuthorityPair as AuraPair, Slot};
 use sp_runtime::{
     generic::BlockId,
-    traits::{Block as BlockT, Header as HeaderT, Zero},
+    traits::{Block as BlockT, Header as HeaderT},
 };
 
 use crate::{aleph_cli::AlephCli, chain_spec::DEFAULT_BACKUP_FOLDER, executor::AlephExecutor};
@@ -311,17 +311,19 @@ pub fn new_authority(
             .path(),
     );
 
+    let finalized = client.info().finalized_number;
+
     let session_period = SessionPeriod(
         client
             .runtime_api()
-            .session_period(&BlockId::Number(Zero::zero()))
+            .session_period(&BlockId::Number(finalized))
             .unwrap(),
     );
 
     let millisecs_per_block = MillisecsPerBlock(
         client
             .runtime_api()
-            .millisecs_per_block(&BlockId::Number(Zero::zero()))
+            .millisecs_per_block(&BlockId::Number(finalized))
             .unwrap(),
     );
 
@@ -452,17 +454,19 @@ pub fn new_full(
         justification_tx,
     )?;
 
+    let finalized = client.info().finalized_number;
+
     let session_period = SessionPeriod(
         client
             .runtime_api()
-            .session_period(&BlockId::Number(Zero::zero()))
+            .session_period(&BlockId::Number(finalized))
             .unwrap(),
    );
 
     let millisecs_per_block = MillisecsPerBlock(
         client
             .runtime_api()
-            .millisecs_per_block(&BlockId::Number(Zero::zero()))
+            .millisecs_per_block(&BlockId::Number(finalized))
             .unwrap(),
     );
 
diff --git a/finality-aleph/src/nodes/nonvalidator_node.rs b/finality-aleph/src/nodes/nonvalidator_node.rs
index a768f1cfd5..4a8c31e5c3 100644
--- a/finality-aleph/src/nodes/nonvalidator_node.rs
+++ b/finality-aleph/src/nodes/nonvalidator_node.rs
@@ -34,11 +34,12 @@ where
     let map_updater = SessionMapUpdater::<_, _, B>::new(
         AuthorityProviderImpl::new(client.clone()),
         FinalityNotificatorImpl::new(client.clone()),
+        session_period,
     );
     let session_authorities = map_updater.readonly_session_map();
     spawn_handle.spawn("aleph/updater", None, async move {
         debug!(target: "aleph-party", "SessionMapUpdater has started.");
-        map_updater.run(session_period).await
+        map_updater.run().await
     });
     let (_, handler_task) = setup_justification_handler(JustificationParams {
         justification_rx,
diff --git a/finality-aleph/src/nodes/validator_node.rs b/finality-aleph/src/nodes/validator_node.rs
index d59f0c3bc7..6224d2259c 100644
--- a/finality-aleph/src/nodes/validator_node.rs
+++ b/finality-aleph/src/nodes/validator_node.rs
@@ -103,11 +103,12 @@ where
     let map_updater = SessionMapUpdater::<_, _, B>::new(
         AuthorityProviderImpl::new(client.clone()),
         FinalityNotificatorImpl::new(client.clone()),
+        session_period,
     );
     let session_authorities = map_updater.readonly_session_map();
     spawn_handle.spawn("aleph/updater", None, async move {
         debug!(target: "aleph-party", "SessionMapUpdater has started.");
-        map_updater.run(session_period).await
+        map_updater.run().await
     });
 
     let (authority_justification_tx, handler_task) =
diff --git a/finality-aleph/src/session_map.rs b/finality-aleph/src/session_map.rs
index 59f1093b24..2828e3b6bc 100644
--- a/finality-aleph/src/session_map.rs
+++ b/finality-aleph/src/session_map.rs
@@ -8,7 +8,6 @@ use sc_utils::mpsc::TracingUnboundedReceiver;
 use sp_runtime::{
     generic::BlockId,
     traits::{Block, Header, NumberFor},
-    SaturatedConversion,
 };
 use tokio::sync::{
     oneshot::{Receiver as OneShotReceiver, Sender as OneShotSender},
@@ -31,6 +30,8 @@ pub trait AuthorityProvider<B> {
 }
 
 /// Default implementation of authority provider trait.
+/// If state pruning is on and set to `n`, it will no longer be able to
+/// answer for `num < finalized_number - n`.
 pub struct AuthorityProviderImpl<C, B, BE>
 where
     C: ClientForAleph<B, BE> + Send + Sync + 'static,
@@ -228,26 +229,6 @@ impl ReadOnlySessionMap {
     }
 }
 
-fn get_authority_data_for_session<AP, B>(
-    authority_provider: &AP,
-    session_id: SessionId,
-    first_block: NumberFor<B>,
-) -> SessionAuthorityData
-where
-    B: Block,
-    AP: AuthorityProvider<NumberFor<B>>,
-{
-    if session_id == SessionId(0) {
-        authority_provider
-            .authority_data(<NumberFor<B>>::saturated_from(0u32))
-            .expect("Authorities for the session 0 must be available from the beginning")
-    } else {
-        authority_provider.next_authority_data(first_block).unwrap_or_else(||
-            panic!("Authorities for next session {:?} must be available at first block #{:?} of current session", session_id.0, first_block)
-        )
-    }
-}
-
 /// Struct responsible for updating session map
 pub struct SessionMapUpdater<AP, FN, B>
 where
     AP: AuthorityProvider<NumberFor<B>>,
     FN: FinalityNotificator<FinalityNotification<B>, NumberFor<B>>,
     B: Block,
 {
     session_map: SharedSessionMap,
     authority_provider: AP,
     finality_notificator: FN,
+    period: SessionPeriod,
     _phantom: PhantomData<B>,
 }
 
@@ -267,11 +249,12 @@ where
     FN: FinalityNotificator<FinalityNotification<B>, NumberFor<B>>,
     B: Block,
 {
-    pub fn new(authority_provider: AP, finality_notificator: FN) -> Self {
+    pub fn new(authority_provider: AP, finality_notificator: FN, period: SessionPeriod) -> Self {
         Self {
             session_map: SharedSessionMap::new(),
             authority_provider,
             finality_notificator,
+            period,
             _phantom: PhantomData,
         }
     }
@@ -281,76 +264,95 @@ where
         self.session_map.read_only()
     }
 
-    /// puts authority data for the next session into the session map
-    async fn handle_first_block_of_session(&mut self, num: NumberFor<B>, session_id: SessionId) {
-        debug!(target: "aleph-session-updater", "Handling first block #{:?} of session {:?}", num, session_id.0);
-        let next_session = SessionId(session_id.0 + 1);
-        let authority_provider = &self.authority_provider;
-        self.session_map
-            .update(
-                next_session,
-                get_authority_data_for_session::<_, B>(authority_provider, next_session, num),
-            )
-            .await;
-
-        // if this is the first session we also need to include starting authority data into the map
-        if session_id.0 == 0 {
-            let authority_provider = &self.authority_provider;
+    /// Puts authority data for the next session into the session map
+    async fn handle_first_block_of_session(&mut self, session_id: SessionId) {
+        let first_block = first_block_of_session(session_id, self.period);
+        debug!(target: "aleph-session-updater",
+            "Handling first block #{:?} of session {:?}",
+            first_block, session_id.0
+        );
+
+        if let Some(authority_data) = self.authority_provider.next_authority_data(first_block) {
             self.session_map
-                .update(
-                    session_id,
-                    get_authority_data_for_session::<_, B>(authority_provider, session_id, num),
-                )
+                .update(SessionId(session_id.0 + 1), authority_data)
                 .await;
+        } else {
+            panic!("Authorities for next session {:?} must be available at first block #{:?} of current session", session_id.0 + 1, first_block);
         }
 
-        if session_id.0 >= PRUNING_THRESHOLD && session_id.0 % PRUNING_THRESHOLD == 0 {
-            debug!(target: "aleph-session-updater", "Pruning session map below session #{:?}", session_id.0 - PRUNING_THRESHOLD);
+        if session_id.0 > PRUNING_THRESHOLD && session_id.0 % PRUNING_THRESHOLD == 0 {
+            debug!(target: "aleph-session-updater",
+                "Pruning session map below session #{:?}",
+                session_id.0 - PRUNING_THRESHOLD
+            );
             self.session_map
                 .prune_below(SessionId(session_id.0 - PRUNING_THRESHOLD))
                 .await;
         }
     }
 
-    async fn update_session(&mut self, session_id: SessionId, period: SessionPeriod) {
-        let first_block = first_block_of_session(session_id, period);
-        self.handle_first_block_of_session(first_block, session_id)
-            .await;
+    fn authorities_for_session(&mut self, session_id: SessionId) -> Option<SessionAuthorityData> {
+        let first_block = first_block_of_session(session_id, self.period);
+        self.authority_provider.authority_data(first_block)
     }
 
-    fn catch_up_boundaries(&self, period: SessionPeriod) -> (SessionId, SessionId) {
+    /// Puts current and next session authorities in the session map.
+    /// If previous authorities are still available in `AuthorityProvider`, also puts them in the session map.
+    async fn catch_up(&mut self) -> SessionId {
         let last_finalized = self.finality_notificator.last_finalized();
-        let current_session = session_id_from_block_num(last_finalized, period);
-        let starting_session = SessionId(current_session.0.saturating_sub(PRUNING_THRESHOLD));
+        let current_session = session_id_from_block_num(last_finalized, self.period);
+        let starting_session = SessionId(current_session.0.saturating_sub(PRUNING_THRESHOLD - 1));
 
-        (starting_session, current_session)
-    }
+        debug!(target: "aleph-session-updater",
+            "Last finalized is {:?}; Catching up with authorities starting from session {:?} up to next session {:?}",
+            last_finalized, starting_session.0, current_session.0 + 1
+        );
 
-    pub async fn run(mut self, period: SessionPeriod) {
-        let mut notifications = self.finality_notificator.notification_stream();
+        // let's catch up with previous sessions
+        for session in starting_session.0..current_session.0 {
+            let id = SessionId(session);
+            if let Some(authority_data) = self.authorities_for_session(id) {
+                self.session_map.update(id, authority_data).await;
+            } else {
+                debug!(target: "aleph-session-updater", "No authorities for session {:?} during catch-up. Most likely already pruned.", id.0)
+            }
+        }
 
-        let (starting_session, current_session) = self.catch_up_boundaries(period);
+        // let's catch up with the current session
+        match self.authorities_for_session(current_session) {
+            Some(current_authority_data) => {
+                self.session_map
+                    .update(current_session, current_authority_data)
+                    .await
+            }
+            None => panic!(
+                "Authorities for current session {:?} must be available from the beginning",
+                current_session.0
+            ),
+        };
 
-        // lets catch up
-        for session in starting_session.0..=current_session.0 {
-            self.update_session(SessionId(session), period).await;
-        }
+        self.handle_first_block_of_session(current_session).await;
 
-        let mut last_updated = current_session;
+        current_session
+    }
+
+    pub async fn run(mut self) {
+        let mut notifications = self.finality_notificator.notification_stream();
+        let mut last_updated = self.catch_up().await;
 
         while let Some(FinalityNotification { header, .. }) = notifications.next().await {
             let last_finalized = header.number();
             trace!(target: "aleph-session-updater", "got FinalityNotification about #{:?}", last_finalized);
-            let session_id = session_id_from_block_num(*last_finalized, period);
+            let session_id = session_id_from_block_num(*last_finalized, self.period);
 
             if last_updated >= session_id {
                 continue;
             }
 
             for session in (last_updated.0 + 1)..=session_id.0 {
-                self.update_session(SessionId(session), period).await;
+                self.handle_first_block_of_session(SessionId(session)).await;
             }
 
             last_updated = session_id;
diff --git a/finality-aleph/src/sync/substrate/verification/cache.rs b/finality-aleph/src/sync/substrate/verification/cache.rs
index 545302b475..9efb9d56fa 100644
--- a/finality-aleph/src/sync/substrate/verification/cache.rs
+++ b/finality-aleph/src/sync/substrate/verification/cache.rs
@@ -38,7 +38,7 @@ impl Display for CacheError {
             UnknownAuthorities(session) => {
                 write!(
                     f,
-                    "authorities for session {:?} not present on chain even though they should be",
+                    "authorities for session {:?} not present on chain. Most likely state is already pruned",
                     session
                 )
             }
From f35d172183c87b43b3c6484c579631c41afa15e4 Mon Sep 17 00:00:00 2001
From: maciejnems
Date: Fri, 27 Jan 2023 13:18:55 +0100
Subject: [PATCH 2/5] add/fix session map tests

---
 finality-aleph/src/session_map.rs | 247 +++++++++++++++++++++---------
 1 file changed, 176 insertions(+), 71 deletions(-)

diff --git a/finality-aleph/src/session_map.rs b/finality-aleph/src/session_map.rs
index 2828e3b6bc..c6a8afc8ae 100644
--- a/finality-aleph/src/session_map.rs
+++ b/finality-aleph/src/session_map.rs
@@ -380,7 +380,6 @@ mod tests {
     struct MockProvider {
         pub session_map: HashMap<NumberFor<TBlock>, SessionAuthorityData>,
         pub next_session_map: HashMap<NumberFor<TBlock>, SessionAuthorityData>,
-        pub asked_for: Arc<Mutex<Vec<NumberFor<TBlock>>>>,
     }
 
     struct MockNotificator {
@@ -393,9 +392,15 @@ mod tests {
             Self {
                 session_map: HashMap::new(),
                 next_session_map: HashMap::new(),
-                asked_for: Arc::new(Mutex::new(Vec::new())),
             }
         }
+
+        fn add_session(&mut self, session_id: u64) {
+            self.session_map
+                .insert(session_id, authority_data_for_session(session_id));
+            self.next_session_map
+                .insert(session_id, authority_data_for_session(session_id + 1));
+        }
     }
 
     impl MockNotificator {
@@ -409,14 +414,10 @@ mod tests {
     impl AuthorityProvider<NumberFor<TBlock>> for MockProvider {
         fn authority_data(&self, b: NumberFor<TBlock>) -> Option<SessionAuthorityData> {
-            let mut asked = self.asked_for.lock().unwrap();
-            asked.push(b);
             self.session_map.get(&b).cloned()
         }
 
         fn next_authority_data(&self, b: NumberFor<TBlock>) -> Option<SessionAuthorityData> {
-            let mut asked = self.asked_for.lock().unwrap();
-            asked.push(b);
             self.next_session_map.get(&b).cloned()
         }
     }
@@ -450,6 +451,45 @@ mod tests {
             .collect()
     }
 
+    fn authority_data_for_session(session_id: u64) -> SessionAuthorityData {
+        authority_data(session_id * 4, (session_id + 1) * 4)
+    }
+
+    fn to_notification(block: TBlock) -> FinalityNotification<TBlock> {
+        FinalityNotification {
+            hash: block.header.hash(),
+            header: block.header,
+            tree_route: Arc::new([]),
+            stale_heads: Arc::new([]),
+        }
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn genesis_catch_up() {
+        let (_sender, receiver) = tracing_unbounded("test");
+        let mut mock_provider = MockProvider::new();
+        let mock_notificator = MockNotificator::new(receiver);
+
+        mock_provider.add_session(0);
+
+        let updater = SessionMapUpdater::new(mock_provider, mock_notificator, SessionPeriod(1));
+        let session_map = updater.readonly_session_map();
+
+        let _handle = tokio::spawn(updater.run());
+
+        // wait a bit
+        Delay::new(Duration::from_millis(50)).await;
+
+        assert_eq!(
+            session_map.get(SessionId(0)).await,
+            Some(authority_data(0, 4))
+        );
+        assert_eq!(
+            session_map.get(SessionId(1)).await,
+            Some(authority_data(4, 8))
+        );
+    }
+
     #[tokio::test(flavor = "multi_thread")]
     async fn updates_session_map_on_notifications() {
         let mut client = Arc::new(TestClientBuilder::new().build());
@@ -457,41 +497,18 @@ mod tests {
         let (sender, receiver) = tracing_unbounded("test");
         let mut mock_provider = MockProvider::new();
         let mock_notificator = MockNotificator::new(receiver);
-        mock_provider.session_map.insert(0, authority_data(0, 4));
-        mock_provider
-            .next_session_map
-            .insert(0, authority_data(4, 8));
-        mock_provider
-            .next_session_map
-            .insert(1, authority_data(8, 12));
-        mock_provider
-            .next_session_map
-            .insert(2, authority_data(12, 16));
-
-        let updater = SessionMapUpdater::new(mock_provider, mock_notificator);
+        mock_provider.add_session(0);
+        mock_provider.add_session(1);
+        mock_provider.add_session(2);
+
+        let updater = SessionMapUpdater::new(mock_provider, mock_notificator, SessionPeriod(1));
         let session_map = updater.readonly_session_map();
 
-        let blocks = n_new_blocks(&mut client, 2);
-        let block_1 = blocks.get(0).cloned().unwrap();
-        let block_2 = blocks.get(1).cloned().unwrap();
-        sender
-            .unbounded_send(FinalityNotification {
-                hash: block_1.header.hash(),
-                header: block_1.header,
-                tree_route: Arc::new([]),
-                stale_heads: Arc::new([]),
-            })
-            .unwrap();
-        sender
-            .unbounded_send(FinalityNotification {
-                hash: block_2.header.hash(),
-                header: block_2.header,
-                tree_route: Arc::new([]),
-                stale_heads: Arc::new([]),
-            })
-            .unwrap();
+        for block in n_new_blocks(&mut client, 2) {
+            sender.unbounded_send(to_notification(block)).unwrap();
+        }
 
-        let _handle = tokio::spawn(updater.run(SessionPeriod(1)));
+        let _handle = tokio::spawn(updater.run());
 
         // wait a bit
         Delay::new(Duration::from_millis(50)).await;
@@ -515,80 +532,64 @@ mod tests {
     }
 
     #[tokio::test(flavor = "multi_thread")]
-    async fn updates_session_map_on_catching_up() {
+    async fn catch_up() {
         let (_sender, receiver) = tracing_unbounded("test");
         let mut mock_provider = MockProvider::new();
         let mut mock_notificator = MockNotificator::new(receiver);
-        mock_provider.session_map.insert(0, authority_data(0, 4));
-        mock_provider
-            .next_session_map
-            .insert(0, authority_data(4, 8));
-        mock_provider
-            .next_session_map
-            .insert(1, authority_data(8, 12));
-        mock_provider
-            .next_session_map
-            .insert(2, authority_data(12, 16));
+        mock_provider.add_session(0);
+        mock_provider.add_session(1);
+        mock_provider.add_session(2);
         mock_notificator.last_finalized = 2;
 
-        let updater = SessionMapUpdater::new(mock_provider, mock_notificator);
+        let updater = SessionMapUpdater::new(mock_provider, mock_notificator, SessionPeriod(1));
         let session_map = updater.readonly_session_map();
 
-        let _handle = tokio::spawn(updater.run(SessionPeriod(1)));
+        let _handle = tokio::spawn(updater.run());
 
         // wait a bit
         Delay::new(Duration::from_millis(50)).await;
 
         assert_eq!(
             session_map.get(SessionId(0)).await,
-            Some(authority_data(0, 4))
+            Some(authority_data_for_session(0))
         );
         assert_eq!(
             session_map.get(SessionId(1)).await,
-            Some(authority_data(4, 8))
+            Some(authority_data_for_session(1))
         );
         assert_eq!(
             session_map.get(SessionId(2)).await,
-            Some(authority_data(8, 12))
+            Some(authority_data_for_session(2))
         );
         assert_eq!(
             session_map.get(SessionId(3)).await,
-            Some(authority_data(12, 16))
+            Some(authority_data_for_session(3))
         );
     }
 
     #[tokio::test(flavor = "multi_thread")]
-    async fn prunes_old_sessions() {
+    async fn catch_up_old_sessions() {
         let (_sender, receiver) = tracing_unbounded("test");
         let mut mock_provider = MockProvider::new();
         let mut mock_notificator = MockNotificator::new(receiver);
-        mock_provider.session_map.insert(0, authority_data(0, 4));
         for i in 0..=2 * PRUNING_THRESHOLD {
-            mock_provider.next_session_map.insert(
-                i as u64,
-                authority_data(4 * (i + 1) as u64, 4 * (i + 2) as u64),
-            );
+            mock_provider.add_session(i as u64);
         }
         mock_notificator.last_finalized = 20;
 
-        let asked = mock_provider.asked_for.clone();
-        let updater = SessionMapUpdater::new(mock_provider, mock_notificator);
+        let updater = SessionMapUpdater::new(mock_provider, mock_notificator, SessionPeriod(1));
         let session_map = updater.readonly_session_map();
 
-        let _handle = tokio::spawn(updater.run(SessionPeriod(1)));
+        let _handle = tokio::spawn(updater.run());
 
         // wait a bit
         Delay::new(Duration::from_millis(50)).await;
 
-        {
-            let asked = asked.lock().unwrap();
-            assert_eq!((10..=20).into_iter().collect::<Vec<_>>(), *asked);
-        }
-
-        for i in 0..=20 - PRUNING_THRESHOLD {
+        for i in 0..=PRUNING_THRESHOLD {
             assert_eq!(
                 session_map.get(SessionId(i)).await,
                 None,
                 "Session {:?} should be pruned",
                 i
             );
         }
-        for i in 21 - PRUNING_THRESHOLD..=20 {
+        for i in PRUNING_THRESHOLD + 1..=2 * PRUNING_THRESHOLD {
             assert_eq!(
                 session_map.get(SessionId(i)).await,
-                Some(authority_data(4 * i as u64, 4 * (i + 1) as u64)),
+                Some(authority_data_for_session(i as u64)),
                 "Session {:?} should not be pruned",
                 i
             );
         }
     }
 
+    #[tokio::test(flavor = "multi_thread")]
+    async fn deals_with_database_pruned_authorities() {
+        let (_sender, receiver) = tracing_unbounded("test");
+        let mut mock_provider = MockProvider::new();
+        let mut mock_notificator = MockNotificator::new(receiver);
+
+        mock_provider.add_session(5);
+        mock_notificator.last_finalized = 5;
+
+        let updater = SessionMapUpdater::new(mock_provider, mock_notificator, SessionPeriod(1));
+        let session_map = updater.readonly_session_map();
+
+        let _handle = tokio::spawn(updater.run());
+
+        // wait a bit
+        Delay::new(Duration::from_millis(50)).await;
+
+        for i in 0..5 {
+            assert_eq!(
+                session_map.get(SessionId(i)).await,
+                None,
+                "Session {:?} should not be available",
+                i
+            );
+        }
+
+        assert_eq!(
+            session_map.get(SessionId(5)).await,
+            Some(authority_data_for_session(5))
+        );
+        assert_eq!(
+            session_map.get(SessionId(6)).await,
+            Some(authority_data_for_session(6))
+        );
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn prunes_old_sessions() {
+        let mut client = Arc::new(TestClientBuilder::new().build());
+        let (sender, receiver) = tracing_unbounded("test");
+        let mut mock_provider = MockProvider::new();
+        let mock_notificator = MockNotificator::new(receiver);
+
+        for i in 0..=2 * PRUNING_THRESHOLD {
+            mock_provider.add_session(i as u64);
+        }
+
+        let updater = SessionMapUpdater::new(mock_provider, mock_notificator, SessionPeriod(1));
+        let session_map = updater.readonly_session_map();
+
+        let _handle = tokio::spawn(updater.run());
+
+        let mut blocks = n_new_blocks(&mut client, 2 * PRUNING_THRESHOLD as u64);
+
+        for block in blocks.drain(..PRUNING_THRESHOLD as usize) {
+            sender.unbounded_send(to_notification(block)).unwrap();
+        }
+
+        // wait a bit
+        Delay::new(Duration::from_millis(50)).await;
+
+        for i in 0..=PRUNING_THRESHOLD + 1 {
+            assert_eq!(
+                session_map.get(SessionId(i)).await,
+                Some(authority_data_for_session(i as u64)),
+                "Session {:?} should be available",
+                i
+            );
+        }
+
+        for i in PRUNING_THRESHOLD + 2..=21 {
+            assert_eq!(
+                session_map.get(SessionId(i)).await,
+                None,
+                "Session {:?} should not be available yet",
+                i
+            );
+        }
+
+        for block in blocks {
+            sender.unbounded_send(to_notification(block)).unwrap();
+        }
+
+        Delay::new(Duration::from_millis(50)).await;
+
+        for i in 0..PRUNING_THRESHOLD {
+            assert_eq!(
+                session_map.get(SessionId(i)).await,
+                None,
+                "Session {:?} should be pruned",
+                i
+            );
+        }
+
+        for i in PRUNING_THRESHOLD + 1..=21 {
+            assert_eq!(
+                session_map.get(SessionId(i)).await,
+                Some(authority_data_for_session(i as u64)),
+                "Session {:?} should be available",
+                i
+            );
+        }
+    }
+
     #[tokio::test(flavor = "multi_thread")]
     async fn subscription_with_already_defined_session_works() {
         let mut shared = SharedSessionMap::new();
From 235681ccb7e96b7727f98ce3a30c45fa67403098 Mon Sep 17 00:00:00 2001
From: maciejnems
Date: Fri, 27 Jan 2023 13:19:47 +0100
Subject: [PATCH 3/5] fix experimental pruning logs

---
 bin/node/src/main.rs | 69 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 54 insertions(+), 15 deletions(-)

diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs
index a8d550a40a..1b3c626f51 100644
--- a/bin/node/src/main.rs
+++ b/bin/node/src/main.rs
@@ -4,27 +4,29 @@ use aleph_node::{new_authority, new_full, new_partial, Cli, Subcommand};
 #[cfg(feature = "try-runtime")]
 use aleph_runtime::Block;
 use log::warn;
-use sc_cli::{clap::Parser, SubstrateCli};
+use sc_cli::{clap::Parser, PruningParams, SubstrateCli};
 use sc_network::config::Role;
 use sc_service::PartialComponents;
 
+const STATE_PRUNING: &str = "archive";
+const BLOCKS_PRUNING: &str = "archive-canonical";
+
+fn pruning_changed(params: &PruningParams) -> bool {
+    let state_pruning_changed =
+        params.state_pruning != Some(STATE_PRUNING.into()) && params.state_pruning.is_some();
+
+    let blocks_pruning_changed =
+        params.blocks_pruning != Some(BLOCKS_PRUNING.into()) && params.blocks_pruning.is_some();
+
+    state_pruning_changed || blocks_pruning_changed
+}
+
 fn main() -> sc_cli::Result<()> {
     let mut cli = Cli::parse();
+    let overwritten_pruning = pruning_changed(&cli.run.import_params.pruning_params);
     if !cli.aleph.experimental_pruning() {
-        if cli
-            .run
-            .import_params
-            .pruning_params
-            .blocks_pruning
-            .is_some()
-            || cli.run.import_params.pruning_params.state_pruning != Some("archive".into())
-        {
-            warn!("Pruning not supported. Switching to keeping all block bodies and states.");
-            cli.run.import_params.pruning_params.blocks_pruning = None;
-            cli.run.import_params.pruning_params.state_pruning = Some("archive".into());
-        }
-    } else {
-        warn!("Pruning not supported, but flag experimental_pruning was turned on. Usage of this flag can lead to misbehaviour, which can be punished.");
+        cli.run.import_params.pruning_params.state_pruning = Some(STATE_PRUNING.into());
+        cli.run.import_params.pruning_params.blocks_pruning = Some(BLOCKS_PRUNING.into());
     }
 
     match &cli.subcommand {
@@ -112,6 +114,15 @@
         .into()),
         None => {
             let runner = cli.create_runner(&cli.run)?;
+            if cli.aleph.experimental_pruning() {
+                warn!("Experimental_pruning was turned on. Usage of this flag can lead to misbehaviour, which can be punished. State pruning: {:?}; Blocks pruning: {:?};",
+                    cli.run.import_params.pruning_params.state_pruning(),
+                    cli.run.import_params.pruning_params.blocks_pruning(),
+                );
+            } else if overwritten_pruning {
+                warn!("Pruning not supported. Switching to keeping all block bodies and states.");
+            }
+
             let aleph_cli_config = cli.aleph;
             runner.run_node_until_exit(|config| async move {
                 match config.role {
@@ -126,3 +137,31 @@ fn main() -> sc_cli::Result<()> {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use sc_service::{BlocksPruning, PruningMode};
+
+    use super::{PruningParams, BLOCKS_PRUNING, STATE_PRUNING};
+
+    #[test]
+    fn pruning_sanity_check() {
+        let state_pruning = Some(String::from(STATE_PRUNING));
+        let blocks_pruning = Some(String::from(BLOCKS_PRUNING));
+
+        let pruning_params = PruningParams {
+            state_pruning,
+            blocks_pruning,
+        };
+
+        assert_eq!(
+            pruning_params.blocks_pruning().unwrap(),
+            BlocksPruning::KeepFinalized
+        );
+
+        assert_eq!(
+            pruning_params.state_pruning().unwrap().unwrap(),
+            PruningMode::ArchiveAll
+        );
+    }
+}

From 09d11c427fdd1491ad8d19907f3b4650e6a27343 Mon Sep 17 00:00:00 2001
From: maciejnems
Date: Mon, 30 Jan 2023 13:25:48 +0100
Subject: [PATCH 4/5] remove unnecessary wrapping

---
 bin/node/src/main.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs
index 1b3c626f51..5f9be712aa 100644
--- a/bin/node/src/main.rs
+++ b/bin/node/src/main.rs
@@ -4,7 +4,7 @@ use aleph_node::{new_authority, new_full, new_partial, Cli, Subcommand};
 #[cfg(feature = "try-runtime")]
 use aleph_runtime::Block;
 use log::warn;
-use sc_cli::{clap::Parser, PruningParams, SubstrateCli};
+use sc_cli::{clap::Parser, CliConfiguration, PruningParams, SubstrateCli};
 use sc_network::config::Role;
 use sc_service::PartialComponents;
 
@@ -116,8 +116,8 @@ fn main() -> sc_cli::Result<()> {
             let runner = cli.create_runner(&cli.run)?;
             if cli.aleph.experimental_pruning() {
                 warn!("Experimental_pruning was turned on. Usage of this flag can lead to misbehaviour, which can be punished. State pruning: {:?}; Blocks pruning: {:?};",
-                    cli.run.import_params.pruning_params.state_pruning(),
-                    cli.run.import_params.pruning_params.blocks_pruning(),
+                    cli.run.state_pruning()?.unwrap_or_default(),
+                    cli.run.blocks_pruning()?,
                 );
             } else if overwritten_pruning {
                 warn!("Pruning not supported. Switching to keeping all block bodies and states.");

From 1ccf14305bc8a4a760fa6b728fccd050b092b1ff Mon Sep 17 00:00:00 2001
From: maciejnems
Date: Mon, 30 Jan 2023 15:55:50 +0100
Subject: [PATCH 5/5] error message

---
 finality-aleph/src/sync/substrate/verification/cache.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/finality-aleph/src/sync/substrate/verification/cache.rs b/finality-aleph/src/sync/substrate/verification/cache.rs
index 9efb9d56fa..acd48a20a6 100644
--- a/finality-aleph/src/sync/substrate/verification/cache.rs
+++ b/finality-aleph/src/sync/substrate/verification/cache.rs
@@ -38,7 +38,7 @@ impl Display for CacheError {
             UnknownAuthorities(session) => {
                 write!(
                     f,
-                    "authorities for session {:?} not present on chain. Most likely state is already pruned",
+                    "authorities for session {:?} not known even though they should be",
                     session
                 )
             }
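
The override check from PATCH 3 is worth seeing in isolation: pruning only counts as user-changed when a value was passed explicitly and differs from the enforced defaults. The following is a minimal sketch under that reading; `Params` is a simplified stand-in for sc_cli's `PruningParams` (which stores both settings as `Option<String>`), so this is an illustration of the logic rather than the real CLI type.

// Sketch of the pruning override check; mirrors `pruning_changed` from PATCH 3.
const STATE_PRUNING: &str = "archive";
const BLOCKS_PRUNING: &str = "archive-canonical";

struct Params {
    state_pruning: Option<String>,
    blocks_pruning: Option<String>,
}

fn pruning_changed(params: &Params) -> bool {
    let state_pruning_changed =
        params.state_pruning.is_some() && params.state_pruning.as_deref() != Some(STATE_PRUNING);
    let blocks_pruning_changed =
        params.blocks_pruning.is_some() && params.blocks_pruning.as_deref() != Some(BLOCKS_PRUNING);
    state_pruning_changed || blocks_pruning_changed
}

fn main() {
    // Nothing set on the CLI: defaults are applied silently, no warning.
    assert!(!pruning_changed(&Params { state_pruning: None, blocks_pruning: None }));

    // Explicitly set to the enforced defaults: still no warning.
    assert!(!pruning_changed(&Params {
        state_pruning: Some(STATE_PRUNING.into()),
        blocks_pruning: Some(BLOCKS_PRUNING.into()),
    }));

    // A real override is detected, and main() later logs the warning
    // unless experimental pruning was requested.
    assert!(pruning_changed(&Params {
        state_pruning: Some("256".into()),
        blocks_pruning: None,
    }));
}
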