Merged
Changes from 11 commits (54 commits in total)

Commits
c05347d
Draft periodic sig updates
litt3 Apr 24, 2025
bc85930
Clean up
litt3 Apr 24, 2025
0bd99d5
Use proper string builder
litt3 Apr 24, 2025
3e4ba55
Use routine
litt3 Apr 24, 2025
03b794e
Improve util method
litt3 Apr 24, 2025
5d3ea11
Try to fix test
litt3 Apr 24, 2025
2df5562
Make copies before sending attestation
litt3 Apr 25, 2025
07674e2
Merge branch 'master' into periodic-signature-updates
litt3 Apr 25, 2025
56f1c05
Clean up TODOs
litt3 Apr 25, 2025
a5d535c
Fix logs
litt3 Apr 25, 2025
aec015e
Remove unnecessary comment
litt3 Apr 25, 2025
b026783
Init chan with size
litt3 Apr 25, 2025
045310c
Make sig tick interval configurable
litt3 Apr 25, 2025
097ba51
Write tests and fix issues
litt3 Apr 28, 2025
1cd8155
Fix test
litt3 Apr 28, 2025
4c9d0a2
Clean up
litt3 Apr 28, 2025
2b64d53
Fix style
litt3 Apr 28, 2025
2a0b2a2
Improve test
litt3 Apr 28, 2025
ee6c2be
Merge branch 'master' into periodic-signature-updates
litt3 Apr 28, 2025
77b37d5
Revert error message change
litt3 Apr 29, 2025
6351dc2
Merge branch 'master' into periodic-signature-updates
litt3 Apr 29, 2025
67d8e3c
Revert changes that affect V1
litt3 Apr 29, 2025
3332ba2
Move signature receiver util
litt3 Apr 29, 2025
c2f6759
Add process signing message metric
litt3 Apr 29, 2025
838bcdb
Add channel latency metric
litt3 Apr 29, 2025
92e2a26
Track attestation update metric
litt3 Apr 29, 2025
4cea944
Implement 'significant threshold' metric
litt3 Apr 29, 2025
0f163eb
Fix unit test
litt3 Apr 29, 2025
626c2ce
Merge branch 'master' into periodic-signature-updates
litt3 Apr 29, 2025
6cfe7cc
Nil check metrics
litt3 Apr 29, 2025
29a7d6e
Improve payload disperser logs
litt3 Apr 29, 2025
4cb3883
Clean up new metrics
litt3 Apr 29, 2025
ddad1a1
Add attestation building metric
litt3 Apr 29, 2025
afa7304
Update db put verbiage, and improve logs
litt3 Apr 30, 2025
f3b5f28
Improve HandleSignatures doc
litt3 Apr 30, 2025
a164bf7
Choose faster attestation tick speed
litt3 Apr 30, 2025
780efba
Downgrade payload disperser failed status log
litt3 Apr 30, 2025
8ed2f53
Add attestation update count metric
litt3 Apr 30, 2025
70042aa
Don't submit empty initial attestation
litt3 Apr 30, 2025
235efc4
Fix unit test now that no empty attestation is written
litt3 Apr 30, 2025
7404ea9
Fix bug in dynamo metadata store
litt3 Apr 30, 2025
de6e086
Fix blob status unit test
litt3 Apr 30, 2025
4aa8441
Promote warn logs to error
litt3 May 1, 2025
7639906
Add length checks
litt3 May 1, 2025
84315df
Merge branch 'master' into periodic-signature-updates
litt3 May 1, 2025
46c76cb
Return empty attestation if none found
litt3 May 1, 2025
7b0103d
Remove TODOs
litt3 May 1, 2025
bb60720
Fix one test
litt3 May 1, 2025
8755300
Try fixing tests again
litt3 May 1, 2025
ac4c35c
Test fixes
litt3 May 1, 2025
58b1779
Admit defeat and put empty attestation
litt3 May 2, 2025
37e185d
Merge branch 'master' into periodic-signature-updates
litt3 May 2, 2025
4c14b86
Revert another change
litt3 May 2, 2025
318f860
Revert test change
litt3 May 2, 2025
3 changes: 2 additions & 1 deletion api/clients/v2/payloaddispersal/payload_disperser.go
@@ -203,7 +203,8 @@ func (pd *PayloadDisperser) pollBlobStatusUntilSigned(
// If this call fails to return in a timely fashion, the timeout configured for the poll loop will trigger
blobStatusReply, err := pd.disperserClient.GetBlobStatus(ctx, blobKey)
if err != nil {
pd.logger.Warn("get blob status", "err", err, "blobKey", blobKey.Hex())
// this is expected to fail multiple times before we get a valid response, so only do a Debug log
pd.logger.Debug("get blob status", "err", err, "blobKey", blobKey.Hex())
continue
}

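For context, the downgraded log sits inside a status-polling loop: GetBlobStatus is retried on a ticker, transient failures are expected while the blob is still being processed, and only the overall poll timeout surfaces as an error. A minimal sketch of that pattern, using hypothetical names (statusClient, pollUntilComplete) rather than the actual PayloadDisperser API:

```go
package sketch

import (
	"context"
	"fmt"
	"log/slog"
	"time"
)

// statusClient is a hypothetical stand-in for the disperser client used by the poll loop.
type statusClient interface {
	GetBlobStatus(ctx context.Context, blobKey string) (status string, err error)
}

// pollUntilComplete polls the blob status until it is complete or ctx expires.
// Transient errors are logged at Debug, since several failures are expected
// before a valid response is returned.
func pollUntilComplete(
	ctx context.Context,
	client statusClient,
	blobKey string,
	pollInterval time.Duration,
	logger *slog.Logger,
) error {
	ticker := time.NewTicker(pollInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			// only the overall poll timeout is reported as an error
			return fmt.Errorf("blob %s not complete before timeout: %w", blobKey, ctx.Err())
		case <-ticker.C:
			status, err := client.GetBlobStatus(ctx, blobKey)
			if err != nil {
				// transient failures are expected, so log at Debug and retry
				logger.Debug("get blob status", "err", err, "blobKey", blobKey)
				continue
			}
			if status == "COMPLETE" {
				return nil
			}
		}
	}
}
```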
6 changes: 5 additions & 1 deletion disperser/apiserver/server_v2_test.go
@@ -381,7 +381,11 @@ func TestV2GetBlobStatus(t *testing.T) {
require.NoError(t, err)
require.Equal(t, pbv2.BlobStatus_ENCODED, status.Status)

// Complete blob status
// First transition to GatheringSignatures state
err = c.BlobMetadataStore.UpdateBlobStatus(ctx, blobKey, dispv2.GatheringSignatures)
require.NoError(t, err)

// Then transition to Complete state
err = c.BlobMetadataStore.UpdateBlobStatus(ctx, blobKey, dispv2.Complete)
require.NoError(t, err)
batchHeader := &corev2.BatchHeader{
2 changes: 1 addition & 1 deletion disperser/cmd/controller/flags/flags.go
@@ -141,7 +141,7 @@ var (
Usage: "Interval at which new Attestations will be submitted as signature gathering progresses",
Required: false,
EnvVar: common.PrefixEnvVar(envVarPrefix, "SIGNATURE_TICK_INTERVAL"),
Value: 1 * time.Second,
Value: 100 * time.Millisecond,
}
FinalizationBlockDelayFlag = cli.Uint64Flag{
Name: common.PrefixFlag(FlagPrefix, "finalization-block-delay"),
36 changes: 23 additions & 13 deletions disperser/common/v2/blobstore/dynamo_metadata_store.go
@@ -66,8 +66,7 @@ var (
v2.Queued: {},
v2.Encoded: {v2.Queued},
v2.GatheringSignatures: {v2.Encoded},
// TODO: when GatheringSignatures is fully supported, remove v2.Encoded from below
v2.Complete: {v2.Encoded, v2.GatheringSignatures},
v2.Complete: {v2.GatheringSignatures},
v2.Failed: {v2.Queued, v2.Encoded, v2.GatheringSignatures},
}
ErrInvalidStateTransition = errors.New("invalid state transition")
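The map above lists, for each status, the statuses it may be entered from; with this change, Complete is reachable only from GatheringSignatures. As a rough, self-contained sketch of how a transition check against such a map can work (the type and helper names below are illustrative, not the store's actual code):

```go
package sketch

import (
	"errors"
	"fmt"
)

// BlobStatus and the constants below are local stand-ins for the v2 status enum.
type BlobStatus int

const (
	Queued BlobStatus = iota
	Encoded
	GatheringSignatures
	Complete
	Failed
)

var ErrInvalidStateTransition = errors.New("invalid state transition")

// validTransitions maps each status to the statuses it may be entered from,
// mirroring the map shown in the diff above.
var validTransitions = map[BlobStatus][]BlobStatus{
	Queued:              {},
	Encoded:             {Queued},
	GatheringSignatures: {Encoded},
	Complete:            {GatheringSignatures},
	Failed:              {Queued, Encoded, GatheringSignatures},
}

// validateTransition returns an error unless the `from -> to` transition is permitted.
func validateTransition(from, to BlobStatus) error {
	for _, allowed := range validTransitions[to] {
		if from == allowed {
			return nil
		}
	}
	return fmt.Errorf("%w: %d -> %d", ErrInvalidStateTransition, from, to)
}
```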
@@ -1197,15 +1196,20 @@ func (s *BlobMetadataStore) PutAttestation(ctx context.Context, attestation *cor
}

func (s *BlobMetadataStore) GetAttestation(ctx context.Context, batchHeaderHash [32]byte) (*corev2.Attestation, error) {
item, err := s.dynamoDBClient.GetItem(ctx, s.tableName, map[string]types.AttributeValue{
"PK": &types.AttributeValueMemberS{
Value: batchHeaderKeyPrefix + hex.EncodeToString(batchHeaderHash[:]),
},
"SK": &types.AttributeValueMemberS{
Value: attestationSK,
input := &dynamodb.GetItemInput{
TableName: aws.String(s.tableName),
Key: map[string]types.AttributeValue{
"PK": &types.AttributeValueMemberS{
Value: batchHeaderKeyPrefix + hex.EncodeToString(batchHeaderHash[:]),
},
"SK": &types.AttributeValueMemberS{
Value: attestationSK,
},
},
})
ConsistentRead: aws.Bool(true), // Use strongly consistent read to prevent race conditions
}

item, err := s.dynamoDBClient.GetItemWithInput(ctx, input)
if err != nil {
return nil, err
}
@@ -1362,12 +1366,18 @@ func (s *BlobMetadataStore) GetBlobInclusionInfos(ctx context.Context, blobKey c
}

func (s *BlobMetadataStore) GetSignedBatch(ctx context.Context, batchHeaderHash [32]byte) (*corev2.BatchHeader, *corev2.Attestation, error) {
items, err := s.dynamoDBClient.Query(ctx, s.tableName, "PK = :pk", commondynamodb.ExpressionValues{
":pk": &types.AttributeValueMemberS{
Value: batchHeaderKeyPrefix + hex.EncodeToString(batchHeaderHash[:]),
input := &dynamodb.QueryInput{
TableName: aws.String(s.tableName),
KeyConditionExpression: aws.String("PK = :pk"),
ExpressionAttributeValues: map[string]types.AttributeValue{
":pk": &types.AttributeValueMemberS{
Value: batchHeaderKeyPrefix + hex.EncodeToString(batchHeaderHash[:]),
},
},
})
ConsistentRead: aws.Bool(true), // Use strongly consistent read to prevent race conditions
}

items, err := s.dynamoDBClient.QueryWithInput(ctx, input)
if err != nil {
return nil, nil, err
}
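Both reads above now set ConsistentRead, so an attestation written moments earlier is guaranteed to be visible to the read. A standalone sketch of the same option against the AWS SDK for Go v2 (table name and key values are placeholders; the Query path takes the same flag on dynamodb.QueryInput):

```go
package sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// getItemConsistent reads a single item with ConsistentRead set, so a read issued
// immediately after a write observes that write.
func getItemConsistent(
	ctx context.Context,
	client *dynamodb.Client,
	table, pk, sk string,
) (map[string]types.AttributeValue, error) {
	out, err := client.GetItem(ctx, &dynamodb.GetItemInput{
		TableName: aws.String(table),
		Key: map[string]types.AttributeValue{
			"PK": &types.AttributeValueMemberS{Value: pk},
			"SK": &types.AttributeValueMemberS{Value: sk},
		},
		ConsistentRead: aws.Bool(true), // strongly consistent read
	})
	if err != nil {
		return nil, err
	}
	return out.Item, nil
}
```

Strongly consistent reads consume more read capacity and are slightly slower, which is why they are opted into per request here rather than enabled for every read.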
79 changes: 34 additions & 45 deletions disperser/controller/dispatcher.go
@@ -34,7 +34,7 @@ type DispatcherConfig struct {
AttestationTimeout time.Duration
// The maximum time permitted to wait for all nodes to provide signatures for a batch.
BatchAttestationTimeout time.Duration
// SignatureTickInterval is the interval at which new Attestations will be submitted to the blobMetadataStore,
// SignatureTickInterval is the interval at which Attestations will be updated in the blobMetadataStore,
// as signature gathering progresses.
SignatureTickInterval time.Duration
NumRequestRetries int
@@ -320,10 +320,12 @@ func (d *Dispatcher) HandleBatch(ctx context.Context) (chan core.SigningMessage,
return sigChan, batchData, nil
}

// HandleSignatures receives signatures from operators, validates, and aggregates them.
// HandleSignatures receives SigningMessages from operators for a given batch through the input sigChan. The signatures
// are validated, aggregated, and used to put an Attestation for the batch into the blobMetadataStore. The Attestation
// is periodically updated as additional signatures are gathered.
//
// This method submits Attestations to the blobMetadataStore, containing signing data from the SigningMessages received
// through the sigChan. It periodically submits Attestations, as signatures are gathered.
// This method will continue gathering signatures until a SigningMessage has been received from every operator, or until
// the global attestationCtx times out.
func (d *Dispatcher) HandleSignatures(
ctx context.Context,
attestationCtx context.Context,
@@ -350,26 +352,7 @@ func (d *Dispatcher) HandleSignatures(
}
}

// submit an empty attestation before starting to gather signatures.
// a new attestation will be periodically resubmitted as signatures are gathered.
attestation := &corev2.Attestation{
BatchHeader: batchData.Batch.BatchHeader,
AttestedAt: uint64(time.Now().UnixNano()),
NonSignerPubKeys: nil,
APKG2: nil,
QuorumAPKs: nil,
Sigma: nil,
QuorumNumbers: nil,
QuorumResults: nil,
}
err := d.blobMetadataStore.PutAttestation(ctx, attestation)
if err != nil {
// this error isn't fatal: a subsequent PutAttestation attempt might succeed
// TODO: this used to cause the HandleSignatures method to fail entirely. Is it ok to continue trying here?
d.logger.Error("error calling PutAttestation",
"err", err,
"batchHeaderHash", batchHeaderHash)
}
// TODO: I removed the initial empty attestation update. Is that ok?
Contributor:
reminder to resolve TODO

Contributor Author:
removed

Contributor:
The empty attestation is created here so that an attestation is available for query "immediately" after the blob is marked as GatheringSignatures, given that GetSignedBatch will return an attestation for the signed batch.
If we remove this empty attestation, we should update GetSignedBatch and the behavior in get_blob_status_v2.go to make sure an empty attestation is returned on a GetBlobStatus call.

Contributor Author:
I updated GetSignedBatch in dynamo_metadata_store.go to return an empty attestation if none is found (46c76cb).

Contributor Author:
After struggling with CI for a while, I decided to just keep the empty attestation put for now, to unblock this PR.
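For reference, the "return an empty attestation if none is found" fallback discussed above (46c76cb, ultimately superseded by keeping the empty attestation write, 58b1779) amounts to roughly the following. The types are local stand-ins that mirror only the fields visible in the removed code, not the real corev2 structs:

```go
package sketch

import "time"

// batchHeader and attestation are stand-ins for corev2.BatchHeader and corev2.Attestation.
type batchHeader struct{}

type attestation struct {
	BatchHeader *batchHeader
	AttestedAt  uint64
	// NonSignerPubKeys, APKG2, QuorumAPKs, Sigma, QuorumNumbers, and QuorumResults
	// stay nil/empty in an "empty" attestation.
}

// attestationOrEmpty returns the stored attestation when one exists, and otherwise an empty
// attestation for the batch, so a caller querying right after the blob enters
// GatheringSignatures still receives a well-formed (unsigned) result.
func attestationOrEmpty(stored *attestation, header *batchHeader) *attestation {
	if stored != nil {
		return stored
	}
	return &attestation{
		BatchHeader: header,
		AttestedAt:  uint64(time.Now().UnixNano()),
	}
}
```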


// This channel will remain open until the attestationTimeout triggers, or until signatures from all validators
// have been received and processed. It will periodically yield QuorumAttestations with the latest set of received
Contributor:
"received and processed"? I assume that's because they're part of the same function; if not, we could have quit just after receiving?

Contributor Author:
The SigningMessages must be processed in order for the attestationChan to be able to yield attestations, so it needs to stay open until all SigningMessages are processed.

@@ -400,9 +383,9 @@ func (d *Dispatcher) HandleSignatures(
finalAttestation := &core.QuorumAttestation{}
// continue receiving attestations from the channel until it's closed
for receivedQuorumAttestation := range attestationChan {
err := d.submitAttestation(ctx, batchData, receivedQuorumAttestation)
err := d.updateAttestation(ctx, batchData, receivedQuorumAttestation)
if err != nil {
d.logger.Warnf("error submitting attestation for batch %s: %v", batchHeaderHash, err)
d.logger.Warnf("error updating attestation for batch %s: %v", batchHeaderHash, err)
continue
}

@@ -412,7 +395,7 @@ func (d *Dispatcher) HandleSignatures(
d.metrics.reportReceiveSignaturesLatency(time.Since(handleSignaturesStart))

updateBatchStatusStartTime := time.Now()
_, quorumPercentages := d.parseAndLogQuorumPercentages(batchHeaderHash, finalAttestation.QuorumResults)
_, quorumPercentages := d.parseQuorumPercentages(finalAttestation.QuorumResults)
err = d.updateBatchStatus(ctx, batchData, quorumPercentages)
d.metrics.reportUpdateBatchStatusLatency(time.Since(updateBatchStatusStartTime))
if err != nil {
@@ -438,15 +421,13 @@ func (d *Dispatcher) HandleSignatures(
return nil
}

// submitAttestation submits a QuorumAttestation to the blobMetadataStore
func (d *Dispatcher) submitAttestation(
// updateAttestation updates the QuorumAttestation in the blobMetadataStore
func (d *Dispatcher) updateAttestation(
ctx context.Context,
batchData *batchData,
quorumAttestation *core.QuorumAttestation,
) error {
sortedNonZeroQuorums, quorumPercentages := d.parseAndLogQuorumPercentages(
hex.EncodeToString(batchData.BatchHeaderHash[:]),
quorumAttestation.QuorumResults)
sortedNonZeroQuorums, quorumPercentages := d.parseQuorumPercentages(quorumAttestation.QuorumResults)
if len(sortedNonZeroQuorums) == 0 {
return errors.New("all quorums received no attestation for batch")
}
@@ -481,39 +462,47 @@ func (d *Dispatcher) submitAttestation(
return fmt.Errorf("put attestation: %w", err)
}

d.logAttestationUpdate(hex.EncodeToString(batchData.BatchHeaderHash[:]), quorumAttestation.QuorumResults)

return nil
}

// parseAndLogQuorumPercentages iterates over the map of QuorumResults, and logs the signing percentages of each quorum.
//
// This method returns a sorted slice of nonZeroQuorums (quorums with >0 signing percentage), and a map from QuorumID to
// signing percentage.
func (d *Dispatcher) parseAndLogQuorumPercentages(
batchHeaderHash string,
// parseQuorumPercentages iterates over the map of QuorumResults, and returns a sorted slice of nonZeroQuorums
// (quorums with >0 signing percentage), and a map from QuorumID to signing percentage.
func (d *Dispatcher) parseQuorumPercentages(
quorumResults map[core.QuorumID]*core.QuorumResult,
) ([]core.QuorumID, map[core.QuorumID]uint8) {
nonZeroQuorums := make([]core.QuorumID, 0)
quorumPercentages := make(map[core.QuorumID]uint8)

messageBuilder := strings.Builder{}
messageBuilder.WriteString(fmt.Sprintf("batchHeaderHash: %s (quorumID, percentSigned)", batchHeaderHash))

for quorumID, quorumResult := range quorumResults {
messageBuilder.WriteString(fmt.Sprintf("\n%d, %d%%", quorumID, quorumResult.PercentSigned))

if quorumResult.PercentSigned > 0 {
nonZeroQuorums = append(nonZeroQuorums, quorumID)
quorumPercentages[quorumID] = quorumResult.PercentSigned
}
}

d.logger.Debug(messageBuilder.String())

slices.Sort(nonZeroQuorums)

return nonZeroQuorums, quorumPercentages
}

// logAttestationUpdate logs the attestation details, including batch header hash and quorum signing percentages
func (d *Dispatcher) logAttestationUpdate(batchHeaderHash string, quorumResults map[core.QuorumID]*core.QuorumResult) {
quorumPercentagesBuilder := strings.Builder{}
quorumPercentagesBuilder.WriteString("(")

for quorumID, quorumResult := range quorumResults {
quorumPercentagesBuilder.WriteString(
fmt.Sprintf("quorum_%d: %d%%, ", quorumID, quorumResult.PercentSigned))
}
quorumPercentagesBuilder.WriteString(")")

d.logger.Debug("attestation updated",
"batchHeaderHash", batchHeaderHash,
"quorumPercentages", quorumPercentagesBuilder.String())
}

func (d *Dispatcher) dedupBlobs(blobs []*v2.BlobMetadata) []*v2.BlobMetadata {
dedupedBlobs := make([]*v2.BlobMetadata, 0)
for _, blob := range blobs {
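Taken together, the dispatcher changes implement a gather-and-periodically-flush loop: SigningMessages are drained from sigChan, and on every SignatureTickInterval tick the current aggregate is yielded so an updated Attestation can be written. A condensed sketch of that shape, using simplified stand-in types rather than the dispatcher's actual core.SigningMessage/QuorumAttestation handling:

```go
package sketch

import (
	"context"
	"time"
)

// signingMessage and quorumAttestation are simplified stand-ins; real signature
// validation and aggregation are omitted.
type signingMessage struct{ OperatorID string }

type quorumAttestation struct{ Signers []string }

// gatherSignatures consumes signing messages until every expected operator has responded or
// ctx (the attestation timeout) expires. It yields the latest aggregate on every tick, so the
// caller can keep the stored Attestation up to date while gathering is still in progress.
func gatherSignatures(
	ctx context.Context,
	sigChan <-chan signingMessage,
	expectedOperators int,
	tickInterval time.Duration,
) <-chan quorumAttestation {
	out := make(chan quorumAttestation, 1)

	go func() {
		defer close(out)

		ticker := time.NewTicker(tickInterval)
		defer ticker.Stop()

		signers := make([]string, 0, expectedOperators)
		dirty := false // new signatures arrived since the last yield

		// copy before sending so later appends cannot mutate what the receiver sees
		snapshot := func() quorumAttestation {
			return quorumAttestation{Signers: append([]string(nil), signers...)}
		}

		for len(signers) < expectedOperators {
			select {
			case <-ctx.Done():
				// attestation timeout: yield whatever has been gathered, then stop
				if dirty {
					out <- snapshot()
				}
				return
			case msg := <-sigChan:
				signers = append(signers, msg.OperatorID)
				dirty = true
			case <-ticker.C:
				if dirty {
					out <- snapshot()
					dirty = false
				}
			}
		}

		// all operators responded: yield the final aggregate
		out <- snapshot()
	}()

	return out
}
```

A caller would range over the returned channel and write each yielded attestation to the store, much as HandleSignatures ranges over attestationChan above. As in the PR (commit 2df5562, "Make copies before sending attestation"), the sketch snapshots the aggregate before sending so later appends cannot mutate what the receiver already observed.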
69 changes: 51 additions & 18 deletions disperser/controller/dispatcher_metrics.go
@@ -34,14 +34,16 @@ type dispatcherMetrics struct {
processSigningMessageLatency *prometheus.SummaryVec
signingMessageChannelLatency *prometheus.SummaryVec
attestationUpdateLatency *prometheus.SummaryVec
attestationBuildingLatency *prometheus.SummaryVec
thresholdSignedToDoneLatency *prometheus.SummaryVec
receiveSignaturesLatency *prometheus.SummaryVec
aggregateSignaturesLatency *prometheus.SummaryVec
putAttestationLatency *prometheus.SummaryVec
attestationUpdateCount *prometheus.SummaryVec
updateBatchStatusLatency *prometheus.SummaryVec
blobE2EDispersalLatency *prometheus.SummaryVec
completedBlobs *prometheus.CounterVec
attestation *prometheus.GaugeVec
thresholdSignedToDoneLatency *prometheus.GaugeVec
blobSetSize *prometheus.GaugeVec
}

@@ -58,16 +60,6 @@ func newDispatcherMetrics(registry *prometheus.Registry) *dispatcherMetrics {
[]string{"type", "quorum"},
)

thresholdSignedToDoneLatency := promauto.With(registry).NewGaugeVec(
prometheus.GaugeOpts{
Namespace: dispatcherNamespace,
Name: "threshold_signed_to_done_latency_ms",
Help: "the time elapsed between the signing percentage reaching a configured threshold, and the end " +
"of signature gathering",
},
[]string{"quorum"},
)

handleBatchLatency := promauto.With(registry).NewSummaryVec(
prometheus.SummaryOpts{
Namespace: dispatcherNamespace,
@@ -251,12 +243,43 @@ func newDispatcherMetrics(registry *prometheus.Registry) *dispatcherMetrics {
prometheus.SummaryOpts{
Namespace: dispatcherNamespace,
Name: "attestation_update_latency_ms",
Help: "The time it takes for the signature receiver to yield a new attestation (part of HandleSignatures()).",
Help: "The time between the signature receiver yielding attestations (part of HandleSignatures()).",
Objectives: objectives,
},
[]string{},
)

attestationBuildingLatency := promauto.With(registry).NewSummaryVec(
prometheus.SummaryOpts{
Namespace: dispatcherNamespace,
Name: "attestation_building_latency_ms",
Help: "The time it takes for the signature receiver to build and send a single attestation (part of HandleSignatures()).",
Objectives: objectives,
},
[]string{},
)

attestationUpdateCount := promauto.With(registry).NewSummaryVec(
prometheus.SummaryOpts{
Namespace: dispatcherNamespace,
Name: "attestation_update_count",
Help: "The number of updates to the batch attestation throughout the signature gathering process.",
Objectives: objectives,
},
[]string{},
)

thresholdSignedToDoneLatency := promauto.With(registry).NewSummaryVec(
prometheus.SummaryOpts{
Namespace: dispatcherNamespace,
Name: "threshold_signed_to_done_latency_ms",
Help: "the time elapsed between the signing percentage reaching a configured threshold, and the end " +
"of signature gathering",
Objectives: objectives,
},
[]string{"quorum"},
)

receiveSignaturesLatency := promauto.With(registry).NewSummaryVec(
prometheus.SummaryOpts{
Namespace: dispatcherNamespace,
@@ -345,14 +368,16 @@ func newDispatcherMetrics(registry *prometheus.Registry) *dispatcherMetrics {
processSigningMessageLatency: processSigningMessageLatency,
signingMessageChannelLatency: signingMessageChannelLatency,
attestationUpdateLatency: attestationUpdateLatency,
attestationBuildingLatency: attestationBuildingLatency,
thresholdSignedToDoneLatency: thresholdSignedToDoneLatency,
receiveSignaturesLatency: receiveSignaturesLatency,
aggregateSignaturesLatency: aggregateSignaturesLatency,
putAttestationLatency: putAttestationLatency,
attestationUpdateCount: attestationUpdateCount,
updateBatchStatusLatency: updateBatchStatusLatency,
blobE2EDispersalLatency: blobE2EDispersalLatency,
completedBlobs: completedBlobs,
attestation: attestation,
thresholdSignedToDoneLatency: thresholdSignedToDoneLatency,
blobSetSize: blobSetSize,
}
}
Expand Down Expand Up @@ -433,6 +458,15 @@ func (m *dispatcherMetrics) reportAttestationUpdateLatency(duration time.Duratio
m.attestationUpdateLatency.WithLabelValues().Observe(common.ToMilliseconds(duration))
}

func (m *dispatcherMetrics) reportAttestationBuildingLatency(duration time.Duration) {
m.attestationBuildingLatency.WithLabelValues().Observe(common.ToMilliseconds(duration))
}

func (m *dispatcherMetrics) reportThresholdSignedToDoneLatency(quorumID core.QuorumID, duration time.Duration) {
m.thresholdSignedToDoneLatency.WithLabelValues(fmt.Sprintf("%d", quorumID)).Observe(
common.ToMilliseconds(duration))
}

func (m *dispatcherMetrics) reportReceiveSignaturesLatency(duration time.Duration) {
m.receiveSignaturesLatency.WithLabelValues().Observe(common.ToMilliseconds(duration))
}
@@ -445,6 +479,10 @@ func (m *dispatcherMetrics) reportPutAttestationLatency(duration time.Duration)
m.putAttestationLatency.WithLabelValues().Observe(common.ToMilliseconds(duration))
}

func (m *dispatcherMetrics) reportAttestationUpdateCount(attestationCount float64) {
m.attestationUpdateCount.WithLabelValues().Observe(attestationCount)
}

func (m *dispatcherMetrics) reportUpdateBatchStatusLatency(duration time.Duration) {
m.updateBatchStatusLatency.WithLabelValues().Observe(common.ToMilliseconds(duration))
}
@@ -491,8 +529,3 @@ func (m *dispatcherMetrics) reportAttestation(operatorCount map[core.QuorumID]in
m.attestation.WithLabelValues("percent_signed", quorumStr).Set(float64(quorumResult.PercentSigned))
}
}

func (m *dispatcherMetrics) reportThresholdSignedToDoneLatency(quorumID core.QuorumID, duration time.Duration) {
m.thresholdSignedToDoneLatency.WithLabelValues(fmt.Sprintf("%d", quorumID)).Set(
common.ToMilliseconds(duration))
}
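The new metrics above all follow the same pattern: a SummaryVec with quantile objectives whose observations are reported in milliseconds. A minimal, self-contained example of registering and reporting one such metric with prometheus/promauto (metric and type names here are illustrative, not the dispatcher's full set):

```go
package sketch

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type exampleMetrics struct {
	attestationUpdateLatency *prometheus.SummaryVec
}

func newExampleMetrics(registry *prometheus.Registry) *exampleMetrics {
	// quantiles tracked by the summary, with their allowed error
	objectives := map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}

	return &exampleMetrics{
		attestationUpdateLatency: promauto.With(registry).NewSummaryVec(
			prometheus.SummaryOpts{
				Namespace:  "dispatcher",
				Name:       "attestation_update_latency_ms",
				Help:       "Time between successive attestation updates during signature gathering.",
				Objectives: objectives,
			},
			[]string{},
		),
	}
}

func (m *exampleMetrics) reportAttestationUpdateLatency(d time.Duration) {
	// report in milliseconds (with sub-millisecond precision) to match the _ms metric name
	m.attestationUpdateLatency.WithLabelValues().Observe(float64(d) / float64(time.Millisecond))
}
```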