Skip to content

Commit 460d46d

Browse files
committed
Merge branch 'dev' into lc-electra
2 parents 9db6d3f + bb8f3ca commit 460d46d

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

41 files changed

+856
-769
lines changed

configs/mainnet.yaml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768
147147
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
148148
# `6`
149149
BLOB_SIDECAR_SUBNET_COUNT: 6
150+
## `uint64(6)`
151+
MAX_BLOBS_PER_BLOCK: 6
150152

151153
# Whisk
152154
# `Epoch(2**8)`

configs/minimal.yaml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -148,6 +148,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768
148148
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
149149
# `6`
150150
BLOB_SIDECAR_SUBNET_COUNT: 6
151+
## `uint64(6)`
152+
MAX_BLOBS_PER_BLOCK: 6
151153

152154
# Whisk
153155
WHISK_EPOCHS_PER_SHUFFLING_PHASE: 4

presets/mainnet/deneb.yaml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,5 @@
66
FIELD_ELEMENTS_PER_BLOB: 4096
77
# `uint64(2**12)` (= 4096)
88
MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096
9-
# `uint64(6)`
10-
MAX_BLOBS_PER_BLOCK: 6
119
# `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17
1210
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17

presets/minimal/deneb.yaml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,5 @@
66
FIELD_ELEMENTS_PER_BLOB: 4096
77
# [customized]
88
MAX_BLOB_COMMITMENTS_PER_BLOCK: 16
9-
# `uint64(6)`
10-
MAX_BLOBS_PER_BLOCK: 6
119
# [customized] `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9
1210
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 9

pysetup/spec_builders/deneb.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]:
7070
return {
7171
'BYTES_PER_FIELD_ELEMENT': spec_object.constant_vars['BYTES_PER_FIELD_ELEMENT'].value,
7272
'FIELD_ELEMENTS_PER_BLOB': spec_object.preset_vars['FIELD_ELEMENTS_PER_BLOB'].value,
73-
'MAX_BLOBS_PER_BLOCK': spec_object.preset_vars['MAX_BLOBS_PER_BLOCK'].value,
73+
'MAX_BLOBS_PER_BLOCK': spec_object.config_vars['MAX_BLOBS_PER_BLOCK'].value,
7474
'MAX_BLOB_COMMITMENTS_PER_BLOCK': spec_object.preset_vars['MAX_BLOB_COMMITMENTS_PER_BLOCK'].value,
7575
}
7676

specs/_features/eip7594/das-core.md

Lines changed: 90 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
- [`compute_extended_matrix`](#compute_extended_matrix)
2424
- [`recover_matrix`](#recover_matrix)
2525
- [`get_data_column_sidecars`](#get_data_column_sidecars)
26+
- [`get_extended_sample_count`](#get_extended_sample_count)
2627
- [Custody](#custody)
2728
- [Custody requirement](#custody-requirement)
2829
- [Public, deterministic selection](#public-deterministic-selection)
@@ -31,6 +32,8 @@
3132
- [Column gossip](#column-gossip)
3233
- [Parameters](#parameters)
3334
- [Peer sampling](#peer-sampling)
35+
- [Sample selection](#sample-selection)
36+
- [Sample queries](#sample-queries)
3437
- [Peer scoring](#peer-scoring)
3538
- [Reconstruction and cross-seeding](#reconstruction-and-cross-seeding)
3639
- [DAS providers](#das-providers)
@@ -65,8 +68,8 @@ The following values are (non-configurable) constants used throughout the specif
6568

6669
| Name | Value | Description |
6770
| - | - | - |
68-
| `NUMBER_OF_COLUMNS` | `uint64(CELLS_PER_EXT_BLOB)` (= 128) | Number of columns in the extended data matrix. |
69-
| `MAX_CELLS_IN_EXTENDED_MATRIX` | `uint64(MAX_BLOBS_PER_BLOCK * NUMBER_OF_COLUMNS)` (= 768) | The data size of `ExtendedMatrix`. |
71+
| `NUMBER_OF_COLUMNS` | `uint64(CELLS_PER_EXT_BLOB)` (= 128) | Number of columns in the extended data matrix |
72+
| `MAX_CELLS_IN_EXTENDED_MATRIX` | `uint64(MAX_BLOBS_PER_BLOCK * NUMBER_OF_COLUMNS)` (= 768) | The data size of `ExtendedMatrix` |
7073

7174
### Networking
7275

@@ -176,9 +179,8 @@ def recover_matrix(partial_matrix: Sequence[MatrixEntry],
176179
for blob_index in range(blob_count):
177180
cell_indices = [e.column_index for e in partial_matrix if e.row_index == blob_index]
178181
cells = [e.cell for e in partial_matrix if e.row_index == blob_index]
179-
proofs = [e.kzg_proof for e in partial_matrix if e.row_index == blob_index]
180182

181-
recovered_cells, recovered_proofs = recover_cells_and_kzg_proofs(cell_indices, cells, proofs)
183+
recovered_cells, recovered_proofs = recover_cells_and_kzg_proofs(cell_indices, cells)
182184
for cell_index, (cell, proof) in enumerate(zip(recovered_cells, recovered_proofs)):
183185
extended_matrix.append(MatrixEntry(
184186
cell=cell,
@@ -193,34 +195,80 @@ def recover_matrix(partial_matrix: Sequence[MatrixEntry],
193195

194196
```python
195197
def get_data_column_sidecars(signed_block: SignedBeaconBlock,
196-
blobs: Sequence[Blob]) -> Sequence[DataColumnSidecar]:
198+
cells_and_kzg_proofs: Sequence[Tuple[
199+
Vector[Cell, CELLS_PER_EXT_BLOB],
200+
Vector[KZGProof, CELLS_PER_EXT_BLOB]]]) -> Sequence[DataColumnSidecar]:
201+
"""
202+
Given a signed block and the cells/proofs associated with each blob in the
203+
block, assemble the sidecars which can be distributed to peers.
204+
"""
205+
blob_kzg_commitments = signed_block.message.body.blob_kzg_commitments
206+
assert len(cells_and_kzg_proofs) == len(blob_kzg_commitments)
197207
signed_block_header = compute_signed_block_header(signed_block)
198-
block = signed_block.message
199208
kzg_commitments_inclusion_proof = compute_merkle_proof(
200-
block.body,
209+
signed_block.message.body,
201210
get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments'),
202211
)
203-
cells_and_proofs = [compute_cells_and_kzg_proofs(blob) for blob in blobs]
204-
blob_count = len(blobs)
205-
cells = [cells_and_proofs[i][0] for i in range(blob_count)]
206-
proofs = [cells_and_proofs[i][1] for i in range(blob_count)]
212+
207213
sidecars = []
208214
for column_index in range(NUMBER_OF_COLUMNS):
209-
column_cells = [cells[row_index][column_index]
210-
for row_index in range(blob_count)]
211-
column_proofs = [proofs[row_index][column_index]
212-
for row_index in range(blob_count)]
215+
column_cells, column_proofs = [], []
216+
for cells, proofs in cells_and_kzg_proofs:
217+
column_cells.append(cells[column_index])
218+
column_proofs.append(proofs[column_index])
213219
sidecars.append(DataColumnSidecar(
214220
index=column_index,
215221
column=column_cells,
216-
kzg_commitments=block.body.blob_kzg_commitments,
222+
kzg_commitments=blob_kzg_commitments,
217223
kzg_proofs=column_proofs,
218224
signed_block_header=signed_block_header,
219225
kzg_commitments_inclusion_proof=kzg_commitments_inclusion_proof,
220226
))
221227
return sidecars
222228
```
223229

230+
#### `get_extended_sample_count`
231+
232+
```python
233+
def get_extended_sample_count(allowed_failures: uint64) -> uint64:
234+
assert 0 <= allowed_failures <= NUMBER_OF_COLUMNS // 2
235+
"""
236+
Return the sample count if allowing failures.
237+
238+
This helper demonstrates how to calculate the number of columns to query per slot when
239+
allowing a given number of failures, assuming uniform random selection without replacement.
240+
Nested functions are direct replacements of Python library functions math.comb and
241+
scipy.stats.hypergeom.cdf, with the same signatures.
242+
"""
243+
244+
def math_comb(n: int, k: int) -> int:
245+
if not 0 <= k <= n:
246+
return 0
247+
r = 1
248+
for i in range(min(k, n - k)):
249+
r = r * (n - i) // (i + 1)
250+
return r
251+
252+
def hypergeom_cdf(k: uint64, M: uint64, n: uint64, N: uint64) -> float:
253+
# NOTE: It contains floating-point computations.
254+
# Convert uint64 to Python integers before computations.
255+
k = int(k)
256+
M = int(M)
257+
n = int(n)
258+
N = int(N)
259+
return sum([math_comb(n, i) * math_comb(M - n, N - i) / math_comb(M, N)
260+
for i in range(k + 1)])
261+
262+
worst_case_missing = NUMBER_OF_COLUMNS // 2 + 1
263+
false_positive_threshold = hypergeom_cdf(0, NUMBER_OF_COLUMNS,
264+
worst_case_missing, SAMPLES_PER_SLOT)
265+
for sample_count in range(SAMPLES_PER_SLOT, NUMBER_OF_COLUMNS + 1):
266+
if hypergeom_cdf(allowed_failures, NUMBER_OF_COLUMNS,
267+
worst_case_missing, sample_count) <= false_positive_threshold:
268+
break
269+
return sample_count
270+
```
271+
224272
## Custody
225273

226274
### Custody requirement
@@ -259,23 +307,43 @@ In this construction, we extend the blobs using a one-dimensional erasure coding
259307

260308
For each column -- use `data_column_sidecar_{subnet_id}` subnets, where `subnet_id` can be computed with the `compute_subnet_for_data_column_sidecar(column_index: ColumnIndex)` helper. The sidecars can be computed with the `get_data_column_sidecars(signed_block: SignedBeaconBlock, blobs: Sequence[Blob])` helper.
261309

262-
To custody a particular column, a node joins the respective gossip subnet. Verifiable samples from their respective column are gossiped on the assigned subnet.
310+
Verifiable samples from their respective column are distributed on the assigned subnet. To custody a particular column, a node joins the respective gossipsub subnet. If a node fails to get a column on the column subnet, a node can also utilize the Req/Resp protocol to query the missing column from other peers.
263311

264312
## Peer sampling
265313

266-
A node SHOULD maintain a diverse set of peers for each column and each slot by verifying responsiveness to sample queries. At each slot, a node makes `SAMPLES_PER_SLOT` queries for samples from their peers via `DataColumnSidecarsByRoot` request. A node utilizes `get_custody_columns` helper to determine which peer(s) to request from. If a node has enough good/honest peers across all rows and columns, this has a high chance of success.
314+
### Sample selection
315+
316+
At each slot, a node SHOULD select at least `SAMPLES_PER_SLOT` column IDs for sampling. It is recommended to use uniform random selection without replacement based on local randomness. Sampling is considered successful if the node manages to retrieve all selected columns.
317+
318+
Alternatively, a node MAY use a method that selects more than `SAMPLES_PER_SLOT` columns while allowing some missing, respecting the same target false positive threshold (the probability of successful sampling of an unavailable block) as dictated by the `SAMPLES_PER_SLOT` parameter. If using uniform random selection without replacement, a node can use the `get_extended_sample_count(allowed_failures) -> sample_count` helper function to determine the sample count (number of unique column IDs) for any selected number of allowed failures. Sampling is then considered successful if any `sample_count - allowed_failures` columns are retrieved successfully.
319+
320+
For reference, the table below shows the number of samples and the number of allowed missing columns assuming `NUMBER_OF_COLUMNS = 128` and `SAMPLES_PER_SLOT = 16`.
321+
322+
| Allowed missing | 0| 1| 2| 3| 4| 5| 6| 7| 8|
323+
|-----------------|--|--|--|--|--|--|--|--|--|
324+
| Sample count |16|20|24|27|29|32|35|37|40|
325+
326+
### Sample queries
327+
328+
A node SHOULD maintain a diverse set of peers for each column and each slot by verifying responsiveness to sample queries.
329+
330+
A node SHOULD query for samples from selected peers via `DataColumnSidecarsByRoot` request. A node utilizes `get_custody_columns` helper to determine which peer(s) it could request from, identifying a list of candidate peers for each selected column.
331+
332+
If more than one candidate peer is found for a given column, a node SHOULD randomize its peer selection to distribute sample query load in the network. Nodes MAY use peer scoring to tune this selection (for example, by using weighted selection or by using a cut-off threshold). If possible, it is also recommended to avoid requesting many columns from the same peer in order to avoid relying on and exposing the sample selection to a single peer.
333+
334+
If a node already has a column because of custody, it is not required to send out queries for that column.
335+
336+
If a node has enough good/honest peers across all columns, and the data is being made available, the above procedure has a high chance of success.
267337

268338
## Peer scoring
269339

270340
Due to the deterministic custody functions, a node knows exactly what a peer should be able to respond to. In the event that a peer does not respond to samples of their custodied rows/columns, a node may downscore or disconnect from a peer.
271341

272342
## Reconstruction and cross-seeding
273343

274-
If the node obtains 50%+ of all the columns, they can reconstruct the full data matrix via `recover_matrix` helper.
275-
276-
If a node fails to sample a peer or fails to get a column on the column subnet, a node can utilize the Req/Resp message to query the missing column from other peers.
344+
If the node obtains 50%+ of all the columns, it SHOULD reconstruct the full data matrix via `recover_matrix` helper. Nodes MAY delay this reconstruction allowing time for other columns to arrive over the network. If delaying reconstruction, nodes may use a random delay in order to desynchronize reconstruction among nodes, thus reducing overall CPU load.
277345

278-
Once the node obtains the column, the node SHOULD send the missing columns to the column subnets.
346+
Once the node obtains a column through reconstruction, the node MUST expose the new column as if it had received it over the network. If the node is subscribed to the subnet corresponding to the column, it MUST send the reconstructed DataColumnSidecar to its topic mesh neighbors. If instead the node is not subscribed to the corresponding subnet, it SHOULD still expose the availability of the DataColumnSidecar as part of the gossip emission process.
279347

280348
*Note*: A node always maintains a matrix view of the rows and columns it is following, able to cross-reference and cross-seed in either direction.
281349

specs/_features/eip7594/p2p-interface.md

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -68,19 +68,18 @@ class DataColumnIdentifier(Container):
6868
```python
6969
def verify_data_column_sidecar_kzg_proofs(sidecar: DataColumnSidecar) -> bool:
7070
"""
71-
Verify if the proofs are correct
71+
Verify if the proofs are correct.
7272
"""
7373
assert sidecar.index < NUMBER_OF_COLUMNS
7474
assert len(sidecar.column) == len(sidecar.kzg_commitments) == len(sidecar.kzg_proofs)
7575

76-
row_indices = [RowIndex(i) for i in range(len(sidecar.column))]
77-
column_indices = [sidecar.index] * len(sidecar.column)
76+
# The column index also represents the cell index
77+
cell_indices = [CellIndex(sidecar.index)] * len(sidecar.column)
7878

79-
# KZG batch verifies that the cells match the corresponding commitments and proofs
79+
# Batch verify that the cells match the corresponding commitments and proofs
8080
return verify_cell_kzg_proof_batch(
81-
row_commitments_bytes=sidecar.kzg_commitments,
82-
row_indices=row_indices, # all rows
83-
column_indices=column_indices, # specific column
81+
commitments_bytes=sidecar.kzg_commitments,
82+
cell_indices=cell_indices,
8483
cells=sidecar.column,
8584
proofs_bytes=sidecar.kzg_proofs,
8685
)

0 commit comments

Comments
 (0)