Fix view sync across restarts #3279

Merged: 11 commits, May 27, 2025

14 changes: 13 additions & 1 deletion crates/hotshot/example-types/src/storage_types.rs
@@ -65,6 +65,7 @@ pub struct TestStorageState<TYPES: NodeType> {
drb_results: BTreeMap<TYPES::Epoch, DrbResult>,
drb_inputs: BTreeMap<u64, DrbInput>,
epoch_roots: BTreeMap<TYPES::Epoch, TYPES::BlockHeader>,
restart_view: TYPES::View,
}

impl<TYPES: NodeType> Default for TestStorageState<TYPES> {
@@ -86,6 +87,7 @@ impl<TYPES: NodeType> Default for TestStorageState<TYPES> {
drb_results: BTreeMap::new(),
drb_inputs: BTreeMap::new(),
epoch_roots: BTreeMap::new(),
restart_view: TYPES::View::genesis(),
}
}
}
@@ -143,6 +145,10 @@ impl<TYPES: NodeType> TestStorage<TYPES> {
self.inner.read().await.action
}

pub async fn restart_view(&self) -> TYPES::View {
self.inner.read().await.restart_view
}

pub async fn last_actioned_epoch(&self) -> Option<TYPES::Epoch> {
self.inner.read().await.epoch
}
@@ -281,14 +287,20 @@ impl<TYPES: NodeType> Storage<TYPES> for TestStorage<TYPES> {
bail!("Failed to append Action to storage");
}
let mut inner = self.inner.write().await;
if matches!(action, HotShotAction::Vote | HotShotAction::Propose) {
if matches!(
action,
HotShotAction::Vote | HotShotAction::Propose | HotShotAction::TimeoutVote
) {
if view > inner.action {
inner.action = view;
}
if epoch > inner.epoch {
inner.epoch = epoch;
}
}
if matches!(action, HotShotAction::Vote) {
inner.restart_view = view + 1;
}
Self::run_delay_settings_from_config(&self.delay_config).await;
Ok(())
}
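
The bookkeeping above tracks two different views: the last-actioned view (and epoch) advances on quorum votes, proposals, and timeout votes, while the restart view advances only when a quorum vote is recorded and points one view past it. A minimal, self-contained Rust sketch of that logic (the ActionLog type and plain u64 views are illustrative simplifications, not the crate's API; the real storage also tracks epochs and runs its configured delays):

#[derive(Clone, Copy)]
enum Action {
    Vote,
    Propose,
    TimeoutVote,
}

#[derive(Default)]
struct ActionLog {
    last_actioned_view: u64,
    restart_view: u64,
}

impl ActionLog {
    fn record(&mut self, action: Action, view: u64) {
        // Quorum votes, proposals, and timeout votes all advance the last-actioned
        // view (the real HotShotAction enum has more variants; only these three count).
        if matches!(action, Action::Vote | Action::Propose | Action::TimeoutVote)
            && view > self.last_actioned_view
        {
            self.last_actioned_view = view;
        }
        // Only a quorum vote moves the restart view, to one past the voted view.
        // Taking the max keeps it monotone, matching what the sequencer persistence
        // tests later in this diff assert.
        if matches!(action, Action::Vote) {
            self.restart_view = self.restart_view.max(view + 1);
        }
    }
}

Timeout votes now count toward the last-actioned view (see the network.rs and event.rs changes below) but deliberately leave the restart view untouched.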
8 changes: 3 additions & 5 deletions crates/hotshot/hotshot/src/lib.rs
@@ -419,10 +419,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> SystemContext<T
async move {
sleep(Duration::from_millis(next_view_timeout)).await;
broadcast_event(
Arc::new(HotShotEvent::Timeout(
start_view + 1,
start_epoch.map(|x| x + 1),
)),
Arc::new(HotShotEvent::Timeout(start_view, start_epoch)),
&event_stream,
)
.await;
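
Since the restart view loaded from storage already names the first view the node has not voted in, the initial timeout can target start_view directly rather than start_view + 1. A rough sketch of the spawned task under that assumption, using toy types and a plain tokio channel instead of the crate's event stream (the real code broadcasts an Arc-wrapped HotShotEvent::Timeout):

use std::time::Duration;

fn spawn_startup_timeout(
    next_view_timeout_ms: u64,
    start_view: u64,
    start_epoch: Option<u64>,
    timeout_tx: tokio::sync::mpsc::Sender<(u64, Option<u64>)>,
) {
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(next_view_timeout_ms)).await;
        // Time out the start view itself: it is the view the node restarted into
        // and has not yet voted in, so no +1 offset is needed.
        let _ = timeout_tx.send((start_view, start_epoch)).await;
    });
}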
@@ -1158,6 +1155,7 @@ impl<TYPES: NodeType> HotShotInitializer<TYPES> {
QuorumCertificate2<TYPES>,
Option<NextEpochQuorumCertificate2<TYPES>>,
),
last_actioned_view: TYPES::View,
saved_proposals: BTreeMap<TYPES::View, Proposal<TYPES, QuorumProposalWrapper<TYPES>>>,
saved_vid_shares: VidShares<TYPES>,
decided_upgrade_certificate: Option<UpgradeCertificate<TYPES>>,
@@ -1178,7 +1176,7 @@ impl<TYPES: NodeType> HotShotInitializer<TYPES> {
high_qc,
start_view,
start_epoch,
last_actioned_view: start_view,
last_actioned_view,
saved_proposals,
saved_vid_shares,
next_epoch_high_qc,
2 changes: 1 addition & 1 deletion crates/hotshot/task-impls/src/network.rs
@@ -1061,7 +1061,7 @@ impl<
Some((sender, message, TransmitType::Broadcast))
},
HotShotEvent::TimeoutVoteSend(vote) => {
*maybe_action = Some(HotShotAction::Vote);
*maybe_action = Some(HotShotAction::TimeoutVote);
let view_number = vote.view_number() + 1;
let leader = match self
.membership_coordinator
26 changes: 22 additions & 4 deletions crates/hotshot/testing/src/spinning_task.rs
@@ -127,6 +127,8 @@ where
if proposal.data.justify_qc().view_number() > self.high_qc.view_number() {
self.high_qc = proposal.data.justify_qc().clone();
}
} else if let EventType::ViewTimeout { view_number } = event {
tracing::error!("View timeout for view {}", view_number);
}

let mut new_nodes = vec![];
@@ -154,7 +156,6 @@ where
// Node not initialized. Initialize it
// based on the received leaf.
LateNodeContext::UninitializedContext(late_context_params) => {
// We'll deconstruct the individual terms here.
let LateNodeContextParameters {
storage,
memberships,
@@ -172,6 +173,7 @@
genesis_epoch_from_version::<V, TYPES>(),
),
(self.high_qc.clone(), self.next_epoch_high_qc.clone()),
TYPES::View::genesis(),
BTreeMap::new(),
BTreeMap::new(),
None,
@@ -220,14 +222,22 @@
},
NodeAction::Down => {
if let Some(node) = self.handles.write().await.get_mut(idx) {
tracing::error!("Node {} shutting down", idx);
tracing::error!(
"Node {} shutting down in view {}",
idx,
view_number
);
node.handle.shut_down().await;
}
},
NodeAction::RestartDown(delay_views) => {
let node_id = idx.try_into().unwrap();
if let Some(node) = self.handles.write().await.get_mut(idx) {
tracing::error!("Node {} shutting down", idx);
tracing::error!(
"Node {} shutting down in view {}",
idx,
view_number
);
node.handle.shut_down().await;
// For restarted nodes generate the network on correct view
let generated_network = (self.channel_generator)(node_id).await;
@@ -245,7 +255,8 @@
let config = node.handle.hotshot.config.clone();

let next_epoch_high_qc = storage.next_epoch_high_qc_cloned().await;
let start_view = storage.last_actioned_view().await;
let start_view = storage.restart_view().await;
let last_actioned_view = storage.last_actioned_view().await;
let start_epoch = storage.last_actioned_epoch().await;
let high_qc = storage.high_qc_cloned().await.unwrap_or(
QuorumCertificate2::genesis::<V>(
@@ -281,6 +292,7 @@
self.last_decided_leaf.clone(),
(start_view, start_epoch),
(high_qc, next_epoch_high_qc),
last_actioned_view,
saved_proposals,
vid_shares,
decided_upgrade_certificate,
@@ -311,6 +323,12 @@
),
)
.await;
tracing::info!(
"Node {} restarting in view {} with start view {}",
idx,
view_number + delay_views,
start_view
);
if delay_views == 0 {
new_nodes.push((context, idx));
new_networks.push(generated_network.clone());
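
The restart path above now feeds two distinct views into HotShotInitializer: start_view comes from restart_view() (one past the last quorum vote), while last_actioned_view comes from last_actioned_view() (the highest view with any vote, proposal, or timeout vote), presumably so a restarted node neither re-votes in an old view nor repeats higher-view actions. A worked example with made-up view numbers:

// Suppose a node voted in view 10, then sent a timeout vote for view 12, then crashed.
let restart_view: u64 = 10 + 1;   // first view the node has not voted in
let last_actioned_view: u64 = 12; // highest view it took any action in
// It restarts into view 11, while its action log still remembers view 12,
// so it can avoid redoing actions it already performed (the assumed purpose
// of passing last_actioned_view separately).
assert!(last_actioned_view >= restart_view);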
2 changes: 2 additions & 0 deletions crates/hotshot/testing/src/test_task.rs
@@ -147,6 +147,8 @@ impl<S: TestTaskState + Send + 'static> TestTask<S> {
break self.state.check().await;
}

self.receivers.retain(|receiver| !receiver.is_closed());

let mut messages = Vec::new();

for receiver in &mut self.receivers {
65 changes: 65 additions & 0 deletions crates/hotshot/testing/tests/test_epochs_restart.rs
@@ -232,3 +232,68 @@ cross_tests!(
metadata
},
);

fn create_node_change(
idx: usize,
view: u64,
view_up: u64,
change: &mut Vec<(u64, Vec<ChangeNode>)>,
) {
assert!(view_up >= view);
let views_down = view_up - view;
change.push((
view,
vec![ChangeNode {
idx,
updown: NodeAction::RestartDown(views_down),
}],
));
}
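
Each call to this helper pushes a single ChangeNode entry whose RestartDown delay is the difference between the up view and the down view. Expanding the first of the three long restarts used below:

// create_node_change(7, 1, 15, &mut node_changes) pushes exactly this entry
// (15 - 1 = 14 views down before the restart):
node_changes.push((
    1,
    vec![ChangeNode {
        idx: 7,
        updown: NodeAction::RestartDown(14),
    }],
));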
cross_tests!(
TestName: test_staggered_restart_double_restart,
Impls: [CombinedImpl],
Types: [TestTypes],
Versions: [EpochsTestVersions],
Ignore: false,
Metadata: {
let mut metadata = TestDescription::default().set_num_nodes(10, 1);

let mut node_changes = vec![];
// idx, down view, up view
create_node_change(1, 6, 7, &mut node_changes);
create_node_change(2, 6, 7, &mut node_changes);
create_node_change(3, 7, 7, &mut node_changes);
create_node_change(4, 10, 10, &mut node_changes);
create_node_change(5, 7, 7, &mut node_changes);
create_node_change(6, 7, 7, &mut node_changes);
// KILL 3 NODES until well after view sync
create_node_change(7, 1, 15, &mut node_changes);
create_node_change(8, 1, 15, &mut node_changes);
create_node_change(9, 1, 15, &mut node_changes);


metadata.spinning_properties = SpinningTaskDescription {
node_changes,
};
metadata.view_sync_properties =
hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 50);

// Give the test some extra time because we are purposely timing out views
metadata.completion_task_description =
CompletionTaskDescription::TimeBasedCompletionTaskBuilder(
TimeBasedCompletionTaskDescription {
duration: Duration::from_secs(240),
},
);
metadata.overall_safety_properties = OverallSafetyPropertiesDescription {
// Make sure we keep committing rounds after the catchup, but not the full 50.
num_successful_views: 22,
expected_view_failures: vec![8, 9],
possible_view_failures: vec![5, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
decide_timeout: Duration::from_secs(60),
..Default::default()
};

metadata
},
);
2 changes: 2 additions & 0 deletions crates/hotshot/types/src/event.rs
@@ -397,6 +397,8 @@ pub enum LegacyEventType<TYPES: NodeType> {
pub enum HotShotAction {
/// A quorum vote was sent
Vote,
/// A timeout vote was sent
TimeoutVote,
/// View Sync Vote
ViewSyncVote,
/// A quorum proposal was sent
@@ -0,0 +1,7 @@
CREATE TABLE restart_view (
-- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or
-- update that there is only a single entry in this table: the view we should restart from.
id INT PRIMARY KEY,

view BIGINT
);
@@ -0,0 +1,6 @@
CREATE TABLE restart_view (
-- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or
-- update that there is only a single entry in this table: the view we should restart from.
id INT PRIMARY KEY,
view BIGINT
);
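
Both migrations create the same single-row table; fixing id at 0 turns every write into an upsert against one row. A hypothetical sketch of how a persistence layer might record the restart view against this schema, assuming a sqlx-style Postgres connection (the actual implementation lives in sequencer/src/persistence.rs and may differ; SQLite would use MAX rather than GREATEST):

// Hypothetical sketch only; the function name and connection type are illustrative.
async fn save_restart_view(
    conn: &mut sqlx::PgConnection,
    view: i64,
) -> Result<(), sqlx::Error> {
    // id = 0 keeps the table to a single row, and GREATEST makes the stored view
    // monotone, matching the behavior exercised by test_restart_view below.
    sqlx::query(
        "INSERT INTO restart_view (id, view) VALUES (0, $1)
         ON CONFLICT (id) DO UPDATE SET view = GREATEST(restart_view.view, excluded.view)",
    )
    .bind(view)
    .execute(conn)
    .await?;
    Ok(())
}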
65 changes: 62 additions & 3 deletions sequencer/src/persistence.rs
@@ -188,15 +188,75 @@ mod persistence_tests {
);
}

#[tokio::test(flavor = "multi_thread")]
pub async fn test_restart_view<P: TestablePersistence>() {
setup_test();

let tmp = P::tmp_storage().await;
let storage = P::connect(&tmp).await;

// Initially, there is no saved view.
assert_eq!(storage.load_restart_view().await.unwrap(), None);

// Store a view.
let view1 = ViewNumber::genesis();
storage
.record_action(view1, None, HotShotAction::Vote)
.await
.unwrap();
assert_eq!(
storage.load_restart_view().await.unwrap().unwrap(),
view1 + 1
);

// Store a newer view, make sure storage gets updated.
let view2 = view1 + 1;
storage
.record_action(view2, None, HotShotAction::Vote)
.await
.unwrap();
assert_eq!(
storage.load_restart_view().await.unwrap().unwrap(),
view2 + 1
);

// Store an old view, make sure storage is unchanged.
storage
.record_action(view1, None, HotShotAction::Vote)
.await
.unwrap();
assert_eq!(
storage.load_restart_view().await.unwrap().unwrap(),
view2 + 1
);

// Store a higher proposed view, make sure storage is unchanged.
storage
.record_action(view2 + 1, None, HotShotAction::Propose)
.await
.unwrap();
assert_eq!(
storage.load_restart_view().await.unwrap().unwrap(),
view2 + 1
);

// Store a higher timeout vote view, make sure storage is unchanged.
storage
.record_action(view2 + 1, None, HotShotAction::TimeoutVote)
.await
.unwrap();
assert_eq!(
storage.load_restart_view().await.unwrap().unwrap(),
view2 + 1
);
}
#[tokio::test(flavor = "multi_thread")]
pub async fn test_store_drb_input<P: TestablePersistence>() {
use hotshot_types::drb::DrbInput;

setup_test();

let tmp = P::tmp_storage().await;
let storage = P::connect(&tmp).await;

// Initially, there is no saved info.
if storage.load_drb_input(10).await.is_ok() {
panic!("unexpected nonempty drb_input");
@@ -238,7 +298,6 @@
#[tokio::test(flavor = "multi_thread")]
pub async fn test_epoch_info<P: TestablePersistence>() {
setup_test();

let tmp = P::tmp_storage().await;
let storage = P::connect(&tmp).await;
