Skip to content

Commit 8989ef8

Browse files
authored
Enable arithmetic lint in rate-limiter (#7025)
#6875 — Enabled the `clippy::arithmetic_side_effects` lint in the rate-limiter and fixed the resulting errors. Changed the type of `Quota::max_tokens` from `u64` to `NonZeroU64`, because `max_tokens` can never be zero. Added a test to ensure that a large `tokens` value, which would otherwise cause an overflow, is handled properly.
1 parent 7c89b97 commit 8989ef8

File tree

3 files changed

+58
-28
lines changed

3 files changed

+58
-28
lines changed

beacon_node/lighthouse_network/src/rpc/config.rs

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
1+
use super::{rate_limiter::Quota, Protocol};
2+
use std::num::NonZeroU64;
13
use std::{
24
fmt::{Debug, Display},
35
str::FromStr,
46
time::Duration,
57
};
68

7-
use super::{rate_limiter::Quota, Protocol};
8-
99
use serde::{Deserialize, Serialize};
1010

1111
/// Auxiliary struct to aid on configuration parsing.
@@ -100,24 +100,30 @@ pub struct RateLimiterConfig {
100100
}
101101

102102
impl RateLimiterConfig {
103-
pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(2, 10);
104-
pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5);
105-
pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15);
103+
pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(NonZeroU64::new(2).unwrap(), 10);
104+
pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(NonZeroU64::new(2).unwrap(), 5);
105+
pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(NonZeroU64::new(5).unwrap(), 15);
106106
pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10);
107107
// The number is chosen to balance between upload bandwidth required to serve
108108
// blocks and a decent syncing rate for honest nodes. Malicious nodes would need to
109109
// spread out their requests over the time window to max out bandwidth on the server.
110-
pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(128, 10);
111-
pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10);
110+
pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota =
111+
Quota::n_every(NonZeroU64::new(128).unwrap(), 10);
112+
pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota =
113+
Quota::n_every(NonZeroU64::new(128).unwrap(), 10);
112114
// `DEFAULT_BLOCKS_BY_RANGE_QUOTA` * (target + 1) to account for high usage
113-
pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(896, 10);
114-
pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(896, 10);
115+
pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota =
116+
Quota::n_every(NonZeroU64::new(896).unwrap(), 10);
117+
pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota =
118+
Quota::n_every(NonZeroU64::new(896).unwrap(), 10);
115119
// 320 blocks worth of columns for regular node, or 40 blocks for supernode.
116120
// Range sync load balances when requesting blocks, and each batch is 32 blocks.
117-
pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10);
121+
pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota =
122+
Quota::n_every(NonZeroU64::new(5120).unwrap(), 10);
118123
// 512 columns per request from spec. This should be plenty as peers are unlikely to send all
119124
// sampling requests to a single peer.
120-
pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10);
125+
pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota =
126+
Quota::n_every(NonZeroU64::new(512).unwrap(), 10);
121127
pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10);
122128
pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10);
123129
pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10);
@@ -275,7 +281,7 @@ mod tests {
275281
protocol: Protocol::Goodbye,
276282
quota: Quota {
277283
replenish_all_every: Duration::from_secs(10),
278-
max_tokens: 8,
284+
max_tokens: NonZeroU64::new(8).unwrap(),
279285
},
280286
};
281287
assert_eq!(quota.to_string().parse(), Ok(quota))

beacon_node/lighthouse_network/src/rpc/rate_limiter.rs

Lines changed: 38 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,13 @@
1+
#![deny(clippy::arithmetic_side_effects)]
2+
13
use super::config::RateLimiterConfig;
24
use crate::rpc::Protocol;
35
use fnv::FnvHashMap;
46
use libp2p::PeerId;
57
use serde::{Deserialize, Serialize};
68
use std::future::Future;
79
use std::hash::Hash;
10+
use std::num::NonZeroU64;
811
use std::pin::Pin;
912
use std::sync::Arc;
1013
use std::task::{Context, Poll};
@@ -55,20 +58,20 @@ pub struct Quota {
5558
pub(super) replenish_all_every: Duration,
5659
/// Token limit. This translates on how large can an instantaneous batch of
5760
/// tokens be.
58-
pub(super) max_tokens: u64,
61+
pub(super) max_tokens: NonZeroU64,
5962
}
6063

6164
impl Quota {
6265
/// A hard limit of one token every `seconds`.
6366
pub const fn one_every(seconds: u64) -> Self {
6467
Quota {
6568
replenish_all_every: Duration::from_secs(seconds),
66-
max_tokens: 1,
69+
max_tokens: NonZeroU64::new(1).unwrap(),
6770
}
6871
}
6972

7073
/// Allow `n` tokens to be used every `seconds`.
71-
pub const fn n_every(n: u64, seconds: u64) -> Self {
74+
pub const fn n_every(n: NonZeroU64, seconds: u64) -> Self {
7275
Quota {
7376
replenish_all_every: Duration::from_secs(seconds),
7477
max_tokens: n,
@@ -236,7 +239,9 @@ impl RPCRateLimiterBuilder {
236239

237240
// check for peers to prune every 30 seconds, starting in 30 seconds
238241
let prune_every = tokio::time::Duration::from_secs(30);
239-
let prune_start = tokio::time::Instant::now() + prune_every;
242+
let prune_start = tokio::time::Instant::now()
243+
.checked_add(prune_every)
244+
.ok_or("prune time overflow")?;
240245
let prune_interval = tokio::time::interval_at(prune_start, prune_every);
241246
Ok(RPCRateLimiter {
242247
prune_interval,
@@ -412,14 +417,13 @@ pub struct Limiter<Key: Hash + Eq + Clone> {
412417

413418
impl<Key: Hash + Eq + Clone> Limiter<Key> {
414419
pub fn from_quota(quota: Quota) -> Result<Self, &'static str> {
415-
if quota.max_tokens == 0 {
416-
return Err("Max number of tokens should be positive");
417-
}
418420
let tau = quota.replenish_all_every.as_nanos();
419421
if tau == 0 {
420422
return Err("Replenish time must be positive");
421423
}
422-
let t = (tau / quota.max_tokens as u128)
424+
let t = tau
425+
.checked_div(quota.max_tokens.get() as u128)
426+
.expect("Division by zero never occurs, since Quota::max_tokens is of type NonZeroU64.")
423427
.try_into()
424428
.map_err(|_| "total replenish time is too long")?;
425429
let tau = tau
@@ -442,7 +446,7 @@ impl<Key: Hash + Eq + Clone> Limiter<Key> {
442446
let tau = self.tau;
443447
let t = self.t;
444448
// how long does it take to replenish these tokens
445-
let additional_time = t * tokens;
449+
let additional_time = t.saturating_mul(tokens);
446450
if additional_time > tau {
447451
// the time required to process this amount of tokens is longer than the time that
448452
// makes the bucket full. So, this batch can _never_ be processed
@@ -455,16 +459,16 @@ impl<Key: Hash + Eq + Clone> Limiter<Key> {
455459
.entry(key.clone())
456460
.or_insert(time_since_start);
457461
// check how soon could the request be made
458-
let earliest_time = (*tat + additional_time).saturating_sub(tau);
462+
let earliest_time = (*tat).saturating_add(additional_time).saturating_sub(tau);
459463
// earliest_time is in the future
460464
if time_since_start < earliest_time {
461465
Err(RateLimitedErr::TooSoon(Duration::from_nanos(
462466
/* time they need to wait, i.e. how soon were they */
463-
earliest_time - time_since_start,
467+
earliest_time.saturating_sub(time_since_start),
464468
)))
465469
} else {
466470
// calculate the new TAT
467-
*tat = time_since_start.max(*tat) + additional_time;
471+
*tat = time_since_start.max(*tat).saturating_add(additional_time);
468472
Ok(())
469473
}
470474
}
@@ -479,14 +483,15 @@ impl<Key: Hash + Eq + Clone> Limiter<Key> {
479483

480484
#[cfg(test)]
481485
mod tests {
482-
use crate::rpc::rate_limiter::{Limiter, Quota};
486+
use crate::rpc::rate_limiter::{Limiter, Quota, RateLimitedErr};
487+
use std::num::NonZeroU64;
483488
use std::time::Duration;
484489

485490
#[test]
486491
fn it_works_a() {
487492
let mut limiter = Limiter::from_quota(Quota {
488493
replenish_all_every: Duration::from_secs(2),
489-
max_tokens: 4,
494+
max_tokens: NonZeroU64::new(4).unwrap(),
490495
})
491496
.unwrap();
492497
let key = 10;
@@ -523,7 +528,7 @@ mod tests {
523528
fn it_works_b() {
524529
let mut limiter = Limiter::from_quota(Quota {
525530
replenish_all_every: Duration::from_secs(2),
526-
max_tokens: 4,
531+
max_tokens: NonZeroU64::new(4).unwrap(),
527532
})
528533
.unwrap();
529534
let key = 10;
@@ -547,4 +552,22 @@ mod tests {
547552
.allows(Duration::from_secs_f32(0.4), &key, 1)
548553
.is_err());
549554
}
555+
556+
#[test]
557+
fn large_tokens() {
558+
// These have been adjusted so that an overflow occurs when calculating `additional_time` in
559+
// `Limiter::allows`. If we don't handle overflow properly, `Limiter::allows` returns `Ok`
560+
// in this case.
561+
let replenish_all_every = 2;
562+
let tokens = u64::MAX / 2 + 1;
563+
564+
let mut limiter = Limiter::from_quota(Quota {
565+
replenish_all_every: Duration::from_nanos(replenish_all_every),
566+
max_tokens: NonZeroU64::new(1).unwrap(),
567+
})
568+
.unwrap();
569+
570+
let result = limiter.allows(Duration::from_secs_f32(0.0), &10, tokens);
571+
assert!(matches!(result, Err(RateLimitedErr::TooLarge)));
572+
}
550573
}

beacon_node/lighthouse_network/src/rpc/self_limiter.rs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -316,6 +316,7 @@ mod tests {
316316
use crate::service::api_types::{AppRequestId, SingleLookupReqId, SyncRequestId};
317317
use libp2p::PeerId;
318318
use logging::create_test_tracing_subscriber;
319+
use std::num::NonZeroU64;
319320
use std::time::Duration;
320321
use types::{EthSpec, ForkContext, Hash256, MainnetEthSpec, Slot};
321322

@@ -324,7 +325,7 @@ mod tests {
324325
async fn test_next_peer_request_ready() {
325326
create_test_tracing_subscriber();
326327
let config = OutboundRateLimiterConfig(RateLimiterConfig {
327-
ping_quota: Quota::n_every(1, 2),
328+
ping_quota: Quota::n_every(NonZeroU64::new(1).unwrap(), 2),
328329
..Default::default()
329330
});
330331
let fork_context = std::sync::Arc::new(ForkContext::new::<MainnetEthSpec>(

0 commit comments

Comments (0)