
Commit 0869f2b

Merge branch 'unstable' of https://github.com/sigp/lighthouse into single-attestation-full-implementation

2 parents: 1f78f3f + 170cd0f

File tree: 71 files changed, +1063 −305 lines


.github/workflows/local-testnet.yml

Lines changed: 48 additions & 1 deletion
@@ -67,6 +67,7 @@ jobs:
         working-directory: scripts/local_testnet
 
       - name: Upload logs artifact
+        if: always()
         uses: actions/upload-artifact@v4
         with:
           name: logs-local-testnet
@@ -125,6 +126,7 @@ jobs:
         working-directory: scripts/tests
 
       - name: Upload logs artifact
+        if: always()
        uses: actions/upload-artifact@v4
         with:
           name: logs-doppelganger-protection-success
@@ -160,13 +162,56 @@ jobs:
         working-directory: scripts/tests
 
       - name: Upload logs artifact
+        if: always()
         uses: actions/upload-artifact@v4
         with:
           name: logs-doppelganger-protection-failure
           path: |
             scripts/local_testnet/logs
           retention-days: 3
 
+  # Tests checkpoint syncing to a live network (current fork) and a running devnet (usually next scheduled fork)
+  checkpoint-sync-test:
+    name: checkpoint-sync-test-${{ matrix.network }}
+    runs-on: ubuntu-latest
+    needs: dockerfile-ubuntu
+    if: contains(github.event.pull_request.labels.*.name, 'syncing')
+    continue-on-error: true
+    strategy:
+      matrix:
+        network: [sepolia, devnet]
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install Kurtosis
+        run: |
+          echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
+          sudo apt update
+          sudo apt install -y kurtosis-cli
+          kurtosis analytics disable
+
+      - name: Download Docker image artifact
+        uses: actions/download-artifact@v4
+        with:
+          name: lighthouse-docker
+          path: .
+
+      - name: Load Docker image
+        run: docker load -i lighthouse-docker.tar
+
+      - name: Run the checkpoint sync test script
+        run: |
+          ./checkpoint-sync.sh "sync-${{ matrix.network }}" "checkpoint-sync-config-${{ matrix.network }}.yaml"
+        working-directory: scripts/tests
+
+      - name: Upload logs artifact
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: logs-checkpoint-sync-${{ matrix.network }}
+          path: |
+            scripts/local_testnet/logs
+          retention-days: 3
 
   # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether
   # a PR is safe to merge. New jobs should be added here.
@@ -182,4 +227,6 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Check that success job is dependent on all others
-        run: ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success
+        run: |
+          exclude_jobs='checkpoint-sync-test'
+          ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success "$exclude_jobs"

Cargo.lock

Lines changed: 2 additions & 0 deletions
Some generated files are not rendered by default.

Makefile

Lines changed: 7 additions & 0 deletions
@@ -218,6 +218,9 @@ run-state-transition-tests:
 # Downloads and runs the EF test vectors.
 test-ef: make-ef-tests run-ef-tests
 
+# Downloads and runs the nightly EF test vectors.
+test-ef-nightly: make-ef-tests-nightly run-ef-tests
+
 # Downloads and runs the EF test vectors with nextest.
 nextest-ef: make-ef-tests nextest-run-ef-tests
 
@@ -278,6 +281,10 @@ lint-full:
 make-ef-tests:
 	make -C $(EF_TESTS)
 
+# Download/extract the nightly EF test vectors.
+make-ef-tests-nightly:
+	CONSENSUS_SPECS_TEST_VERSION=nightly make -C $(EF_TESTS)
+
 # Verifies that crates compile with fuzzing features enabled
 arbitrary-fuzz:
 	cargo check -p state_processing --features arbitrary-fuzz,$(TEST_FEATURES)

account_manager/src/validator/import.rs

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ pub fn cli_app() -> Command {
         .about(
             "Imports one or more EIP-2335 passwords into a Lighthouse VC directory, \
             requesting passwords interactively. The directory flag provides a convenient \
-            method for importing a directory of keys generated by the eth2-deposit-cli \
+            method for importing a directory of keys generated by the ethstaker-deposit-cli \
             Python utility.",
         )
         .arg(

beacon_node/beacon_chain/src/builder.rs

Lines changed: 30 additions & 18 deletions
@@ -486,24 +486,36 @@ where
 
         // Verify that blobs (if provided) match the block.
         if let Some(blobs) = &weak_subj_blobs {
-            let commitments = weak_subj_block
-                .message()
-                .body()
-                .blob_kzg_commitments()
-                .map_err(|e| format!("Blobs provided but block does not reference them: {e:?}"))?;
-            if blobs.len() != commitments.len() {
-                return Err(format!(
-                    "Wrong number of blobs, expected: {}, got: {}",
-                    commitments.len(),
-                    blobs.len()
-                ));
-            }
-            if commitments
-                .iter()
-                .zip(blobs.iter())
-                .any(|(commitment, blob)| *commitment != blob.kzg_commitment)
-            {
-                return Err("Checkpoint blob does not match block commitment".into());
+            let fulu_enabled = weak_subj_block.fork_name_unchecked().fulu_enabled();
+            if fulu_enabled && blobs.is_empty() {
+                // Blobs expected for this block, but the checkpoint server is not able to serve them.
+                // This is expected from Fulu, as only supernodes are able to serve blobs.
+                // We can consider using backfill to retrieve the data columns from the p2p network,
+                // but we can ignore this fow now until we have validator custody backfill
+                // implemented as we'll likely be able to reuse the logic.
+                // https://github.com/sigp/lighthouse/issues/6837
+            } else {
+                let commitments = weak_subj_block
+                    .message()
+                    .body()
+                    .blob_kzg_commitments()
+                    .map_err(|e| {
+                        format!("Blobs provided but block does not reference them: {e:?}")
+                    })?;
+                if blobs.len() != commitments.len() {
+                    return Err(format!(
+                        "Wrong number of blobs, expected: {}, got: {}",
+                        commitments.len(),
+                        blobs.len()
+                    ));
+                }
+                if commitments
+                    .iter()
+                    .zip(blobs.iter())
+                    .any(|(commitment, blob)| *commitment != blob.kzg_commitment)
+                {
+                    return Err("Checkpoint blob does not match block commitment".into());
+                }
             }
         }
 
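
This change relaxes the blob check at checkpoint start: from Fulu, a checkpoint server that is not a supernode may legitimately return no blobs, so an empty blob list is tolerated; otherwise the blob count and per-index KZG commitments must still match the block body. Below is a minimal sketch of that invariant, using hypothetical simplified types (`Commitment`, `Blob`, `check_checkpoint_blobs`) rather than Lighthouse's own:

// A minimal sketch of the invariant enforced above, with stand-in types.
#[derive(PartialEq, Clone, Copy, Debug)]
struct Commitment([u8; 48]);

struct Blob {
    kzg_commitment: Commitment,
}

/// `fulu_enabled` stands in for `weak_subj_block.fork_name_unchecked().fulu_enabled()`.
fn check_checkpoint_blobs(
    block_commitments: &[Commitment],
    blobs: &[Blob],
    fulu_enabled: bool,
) -> Result<(), String> {
    // From Fulu onwards the checkpoint server may return no blobs at all
    // (only supernodes can serve them), so an empty list is accepted as-is.
    if fulu_enabled && blobs.is_empty() {
        return Ok(());
    }
    // Otherwise the blob count must match the block's KZG commitments exactly.
    if blobs.len() != block_commitments.len() {
        return Err(format!(
            "Wrong number of blobs, expected: {}, got: {}",
            block_commitments.len(),
            blobs.len()
        ));
    }
    // And each blob must carry the commitment at the same index in the block body.
    if block_commitments
        .iter()
        .zip(blobs.iter())
        .any(|(commitment, blob)| *commitment != blob.kzg_commitment)
    {
        return Err("Checkpoint blob does not match block commitment".into());
    }
    Ok(())
}

fn main() {
    let c = Commitment([0u8; 48]);
    let blobs = vec![Blob { kzg_commitment: c }];
    assert!(check_checkpoint_blobs(&[c], &blobs, false).is_ok());
    // Post-Fulu, an empty blob list is accepted even though the block commits to one blob.
    assert!(check_checkpoint_blobs(&[c], &[], true).is_ok());
}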

beacon_node/network/src/sync/block_lookups/mod.rs

Lines changed: 42 additions & 20 deletions
@@ -106,7 +106,7 @@ pub type SingleLookupId = u32;
 enum Action {
     Retry,
     ParentUnknown { parent_root: Hash256 },
-    Drop,
+    Drop(/* reason: */ String),
     Continue,
 }
 
@@ -194,19 +194,22 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
 
     /// Creates a parent lookup for the block with the given `block_root` and immediately triggers it.
    /// If a parent lookup exists or is triggered, a current lookup will be created.
+    ///
+    /// Returns true if the lookup is created or already exists
     #[instrument(parent = None,
         level = "info",
         fields(service = "lookup_sync"),
         name = "lookup_sync",
         skip_all
     )]
+    #[must_use = "only reference the new lookup if returns true"]
     pub fn search_child_and_parent(
         &mut self,
         block_root: Hash256,
         block_component: BlockComponent<T::EthSpec>,
         peer_id: PeerId,
         cx: &mut SyncNetworkContext<T>,
-    ) {
+    ) -> bool {
         let parent_root = block_component.parent_root();
 
         let parent_lookup_exists =
@@ -223,25 +226,29 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
                 // the lookup with zero peers to house the block components.
                 &[],
                 cx,
-            );
+            )
+        } else {
+            false
         }
     }
 
     /// Seach a block whose parent root is unknown.
+    ///
     /// Returns true if the lookup is created or already exists
     #[instrument(parent = None,
         level = "info",
         fields(service = "lookup_sync"),
         name = "lookup_sync",
         skip_all
     )]
+    #[must_use = "only reference the new lookup if returns true"]
     pub fn search_unknown_block(
         &mut self,
         block_root: Hash256,
         peer_source: &[PeerId],
         cx: &mut SyncNetworkContext<T>,
-    ) {
-        self.new_current_lookup(block_root, None, None, peer_source, cx);
+    ) -> bool {
+        self.new_current_lookup(block_root, None, None, peer_source, cx)
     }
 
     /// A block or blob triggers the search of a parent.
@@ -256,6 +263,7 @@
         name = "lookup_sync",
         skip_all
     )]
+    #[must_use = "only reference the new lookup if returns true"]
     pub fn search_parent_of_child(
         &mut self,
         block_root_to_search: Hash256,
@@ -363,6 +371,7 @@
         name = "lookup_sync",
         skip_all
     )]
+    #[must_use = "only reference the new lookup if returns true"]
     fn new_current_lookup(
         &mut self,
         block_root: Hash256,
@@ -656,7 +665,7 @@
                 // This is unreachable because RPC blocks do not undergo gossip verification, and
                 // this error can *only* come from gossip verification.
                 error!(?block_root, "Single block lookup hit unreachable condition");
-                Action::Drop
+                Action::Drop("DuplicateImportStatusUnknown".to_owned())
             }
             BlockProcessingResult::Ignored => {
                 // Beacon processor signalled to ignore the block processing result.
@@ -665,14 +674,14 @@
                     component = ?R::response_type(),
                     "Lookup component processing ignored, cpu might be overloaded"
                 );
-                Action::Drop
+                Action::Drop("Block processing ignored".to_owned())
             }
             BlockProcessingResult::Err(e) => {
                 match e {
                     BlockError::BeaconChainError(e) => {
                         // Internal error
                         error!(%block_root, error = ?e, "Beacon chain error processing lookup component");
-                        Action::Drop
+                        Action::Drop(format!("{e:?}"))
                     }
                     BlockError::ParentUnknown { parent_root, .. } => {
                         // Reverts the status of this request to `AwaitingProcessing` holding the
@@ -691,7 +700,7 @@
                             error = ?e,
                             "Single block lookup failed. Execution layer is offline / unsynced / misconfigured"
                         );
-                        Action::Drop
+                        Action::Drop(format!("{e:?}"))
                     }
                     BlockError::AvailabilityCheck(e)
                         if e.category() == AvailabilityCheckErrorCategory::Internal =>
@@ -703,7 +712,7 @@
                         // lookup state transition. This error invalidates both blob and block requests, and we don't know the
                        // state of both requests. Blobs may have already successfullly processed for example.
                         // We opt to drop the lookup instead.
-                        Action::Drop
+                        Action::Drop(format!("{e:?}"))
                     }
                     other => {
                         debug!(
@@ -757,19 +766,32 @@
             }
             Action::ParentUnknown { parent_root } => {
                 let peers = lookup.all_peers();
+                // Mark lookup as awaiting **before** creating the parent lookup. At this point the
+                // lookup maybe inconsistent.
                 lookup.set_awaiting_parent(parent_root);
-                debug!(
-                    id = lookup.id,
-                    ?block_root,
-                    ?parent_root,
-                    "Marking lookup as awaiting parent"
-                );
-                self.search_parent_of_child(parent_root, block_root, &peers, cx);
-                Ok(LookupResult::Pending)
+                let parent_lookup_exists =
+                    self.search_parent_of_child(parent_root, block_root, &peers, cx);
+                if parent_lookup_exists {
+                    // The parent lookup exist or has been created. It's safe for `lookup` to
+                    // reference the parent as awaiting.
+                    debug!(
+                        id = lookup_id,
+                        ?block_root,
+                        ?parent_root,
+                        "Marking lookup as awaiting parent"
+                    );
+                    Ok(LookupResult::Pending)
+                } else {
+                    // The parent lookup is faulty and was not created, we must drop the `lookup` as
+                    // it's in an inconsistent state. We must drop all of its children too.
+                    Err(LookupRequestError::Failed(format!(
+                        "Parent lookup is faulty {parent_root:?}"
+                    )))
+                }
             }
-            Action::Drop => {
+            Action::Drop(reason) => {
                 // Drop with noop
-                Err(LookupRequestError::Failed)
+                Err(LookupRequestError::Failed(reason))
             }
             Action::Continue => {
                 // Drop this completed lookup only
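
Two related changes run through this file: `Action::Drop` now carries a human-readable reason that is forwarded into `LookupRequestError::Failed(reason)`, and the lookup-creating methods return a `#[must_use]` bool so callers must handle the case where no lookup was created. Below is a minimal sketch of the reason-carrying pattern, using simplified stand-in types rather than the real lookup machinery:

// Simplified stand-ins for the Action / LookupRequestError pairing in this file.
enum Action {
    Retry,
    Drop(/* reason: */ String),
}

#[derive(Debug)]
enum LookupRequestError {
    Failed(/* reason: */ String),
}

// The reason attached at the drop site travels with the error, so whichever caller
// finally logs or records the failure can report *why* the lookup was dropped.
fn on_processing_result(action: Action) -> Result<(), LookupRequestError> {
    match action {
        Action::Retry => Ok(()),
        Action::Drop(reason) => Err(LookupRequestError::Failed(reason)),
    }
}

fn main() {
    let err = on_processing_result(Action::Drop("Block processing ignored".to_owned()));
    // Prints the reason alongside the error variant, e.g. Err(Failed("Block processing ignored")).
    println!("{err:?}");
}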

beacon_node/network/src/sync/block_lookups/single_block_lookup.rs

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ pub enum LookupRequestError {
     /// Inconsistent lookup request state
     BadState(String),
     /// Lookup failed for some other reason and should be dropped
-    Failed,
+    Failed(/* reason: */ String),
     /// Received MissingComponents when all components have been processed. This should never
     /// happen, and indicates some internal bug
     MissingComponentsAfterAllProcessed,

beacon_node/network/src/sync/manager.rs

Lines changed: 19 additions & 4 deletions
@@ -929,12 +929,20 @@ impl<T: BeaconChainTypes> SyncManager<T> {
     ) {
         match self.should_search_for_block(Some(slot), &peer_id) {
             Ok(_) => {
-                self.block_lookups.search_child_and_parent(
+                if self.block_lookups.search_child_and_parent(
                     block_root,
                     block_component,
                     peer_id,
                     &mut self.network,
-                );
+                ) {
+                    // Lookup created. No need to log here it's logged in `new_current_lookup`
+                } else {
+                    debug!(
+                        ?block_root,
+                        ?parent_root,
+                        "No lookup created for child and parent"
+                    );
+                }
             }
             Err(reason) => {
                 debug!(%block_root, %parent_root, reason, "Ignoring unknown parent request");
@@ -945,8 +953,15 @@
     fn handle_unknown_block_root(&mut self, peer_id: PeerId, block_root: Hash256) {
         match self.should_search_for_block(None, &peer_id) {
             Ok(_) => {
-                self.block_lookups
-                    .search_unknown_block(block_root, &[peer_id], &mut self.network);
+                if self.block_lookups.search_unknown_block(
+                    block_root,
+                    &[peer_id],
+                    &mut self.network,
+                ) {
+                    // Lookup created. No need to log here it's logged in `new_current_lookup`
+                } else {
+                    debug!(?block_root, "No lookup created for unknown block");
+                }
             }
             Err(reason) => {
                 debug!(%block_root, reason, "Ignoring unknown block request");
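
These call sites now branch on the bool returned by the lookup-creating methods; together with the `#[must_use]` attribute added in `block_lookups/mod.rs`, the compiler warns if a caller silently ignores the "no lookup was created" case. A small self-contained sketch of that pattern, with hypothetical names (`LookupRegistry`, `search`) rather than the real sync API:

// Hypothetical registry illustrating the #[must_use] bool-returning creator pattern.
struct LookupRegistry {
    roots: Vec<u64>,
}

impl LookupRegistry {
    #[must_use = "only reference the new lookup if returns true"]
    fn search(&mut self, block_root: u64) -> bool {
        // Returns true if the lookup is created or already exists.
        if self.roots.contains(&block_root) {
            true
        } else if self.roots.len() < 4 {
            self.roots.push(block_root);
            true
        } else {
            // Creation refused (e.g. at capacity): the caller must not assume a lookup exists.
            false
        }
    }
}

fn main() {
    let mut lookups = LookupRegistry { roots: vec![] };
    // Ignoring the returned bool here would trigger an `unused_must_use` warning.
    if lookups.search(0xdead_beef) {
        // Lookup created or already present; safe to reference it.
    } else {
        eprintln!("No lookup created for unknown block");
    }
}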
