@@ -311,8 +311,8 @@ pub trait Rng: RngCore {
     ///
     /// [`fill_bytes`]: RngCore::fill_bytes
     #[track_caller]
-    fn fill<T: Fill + ?Sized>(&mut self, dest: &mut T) {
-        dest.fill(self)
+    fn fill<T: Fill>(&mut self, dest: &mut [T]) {
+        Fill::fill_slice(dest, self)
     }
 
     /// Alias for [`Rng::random`].
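With this change, `Rng::fill` takes a slice of any `Fill` element type, and fixed-size arrays coerce to slices at the call site. A minimal usage sketch under the new signature (the helper name and buffer size are illustrative, not part of the patch):

use rand::Rng;

// Fill a fixed-size buffer with random u32 values using any generator.
fn random_words<R: Rng>(rng: &mut R) -> [u32; 8] {
    let mut buf = [0u32; 8];
    // `&mut [u32; 8]` coerces to `&mut [u32]`, so no explicit `[..]` is needed.
    rng.fill(&mut buf);
    buf
}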
@@ -356,40 +356,24 @@ pub trait Rng: RngCore {
 
 impl<R: RngCore + ?Sized> Rng for R {}
 
-/// Types which may be filled with random data
+/// Support filling a slice with random data
 ///
-/// This trait allows arrays to be efficiently filled with random data.
+/// This trait allows slices of "plain data" types to be efficiently filled
+/// with random data.
 ///
 /// Implementations are expected to be portable across machines unless
 /// clearly documented otherwise (see the
 /// [Chapter on Portability](https://rust-random.github.io/book/portability.html)).
-pub trait Fill {
-    /// Fill self with random data
-    fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R);
+/// The implementations provided achieve this by byte-swapping on big-endian
+/// machines.
+pub trait Fill: Sized {
+    /// Fill this with random data
+    fn fill_slice<R: Rng + ?Sized>(this: &mut [Self], rng: &mut R);
 }
 
-macro_rules! impl_fill_each {
-    () => {};
-    ($t:ty) => {
-        impl Fill for [$t] {
-            fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
-                for elt in self.iter_mut() {
-                    *elt = rng.random();
-                }
-            }
-        }
-    };
-    ($t:ty, $($tt:ty,)*) => {
-        impl_fill_each!($t);
-        impl_fill_each!($($tt,)*);
-    };
-}
-
-impl_fill_each!(bool, char, f32, f64,);
-
-impl Fill for [u8] {
-    fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
-        rng.fill_bytes(self)
+impl Fill for u8 {
+    fn fill_slice<R: Rng + ?Sized>(this: &mut [Self], rng: &mut R) {
+        rng.fill_bytes(this)
     }
 }
 
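Since `Fill` is now implemented for element types rather than for slice types, a downstream "plain data" wrapper can opt in by providing `fill_slice` itself. A rough sketch under the new trait shape, assuming `Fill` remains re-exported at the crate root; the `Byte` newtype is hypothetical and not part of this patch:

use rand::{Fill, Rng};

/// Hypothetical single-byte newtype, used only for illustration.
#[derive(Clone, Copy, Default, Debug)]
pub struct Byte(pub u8);

impl Fill for Byte {
    fn fill_slice<R: Rng + ?Sized>(this: &mut [Self], rng: &mut R) {
        // Element-wise fill via `random()`: safe and portable, though it skips
        // the bulk `fill_bytes` fast path used by the built-in impls.
        for x in this {
            x.0 = rng.random();
        }
    }
}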
@@ -402,56 +386,45 @@ const unsafe fn __unsafe() {}
 /// All bit patterns of `[u8; size_of::<$t>()]` must represent values of `$t`.
 macro_rules! impl_fill {
     () => {};
+    (to_le! plain $x:ident) => {
+        $x.to_le()
+    };
+    (to_le! wrapping $x:ident) => {
+        Wrapping($x.0.to_le())
+    };
+    (fill_slice! $t:ty, $to_le:tt) => {
+        fn fill_slice<R: Rng + ?Sized>(this: &mut [Self], rng: &mut R) {
+            if this.len() > 0 {
+                let size = mem::size_of_val(this);
+                rng.fill_bytes(
+                    // SAFETY: `this` non-null and valid for reads and writes within its `size`
+                    // bytes. `this` meets the alignment requirements of `&mut [u8]`.
+                    // The contents of `this` are initialized. Both `[u8]` and `[$t]` are valid
+                    // for all bit-patterns of their contents (note that the SAFETY requirement
+                    // on callers of this macro). `this` is not borrowed.
+                    unsafe {
+                        slice::from_raw_parts_mut(this.as_mut_ptr()
+                            as *mut u8,
+                            size
+                        )
+                    }
+                );
+                for x in this {
+                    *x = impl_fill!(to_le! $to_le x);
+                }
+            }
+        }
+    };
     ($t:ty) => { {
         // Force caller to wrap with an `unsafe` block
         __unsafe();
 
-        impl Fill for [$t] {
-            fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
-                if self.len() > 0 {
-                    let size = mem::size_of_val(self);
-                    rng.fill_bytes(
-                        // SAFETY: `self` non-null and valid for reads and writes within its `size`
-                        // bytes. `self` meets the alignment requirements of `&mut [u8]`.
-                        // The contents of `self` are initialized. Both `[u8]` and `[$t]` are valid
-                        // for all bit-patterns of their contents (note that the SAFETY requirement
-                        // on callers of this macro). `self` is not borrowed.
-                        unsafe {
-                            slice::from_raw_parts_mut(self.as_mut_ptr()
-                                as *mut u8,
-                                size
-                            )
-                        }
-                    );
-                    for x in self {
-                        *x = x.to_le();
-                    }
-                }
-            }
+        impl Fill for $t {
+            impl_fill!(fill_slice! $t, plain);
         }
 
-        impl Fill for [Wrapping<$t>] {
-            fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
-                if self.len() > 0 {
-                    let size = self.len() * mem::size_of::<$t>();
-                    rng.fill_bytes(
-                        // SAFETY: `self` non-null and valid for reads and writes within its `size`
-                        // bytes. `self` meets the alignment requirements of `&mut [u8]`.
-                        // The contents of `self` are initialized. Both `[u8]` and `[$t]` are valid
-                        // for all bit-patterns of their contents (note that the SAFETY requirement
-                        // on callers of this macro). `self` is not borrowed.
-                        unsafe {
-                            slice::from_raw_parts_mut(self.as_mut_ptr()
-                                as *mut u8,
-                                size
-                            )
-                        }
-                    );
-                    for x in self {
-                        *x = Wrapping(x.0.to_le());
-                    }
-                }
-            }
+        impl Fill for Wrapping<$t> {
+            impl_fill!(fill_slice! $t, wrapping);
         }
     } }
     };
     ($t:ty, $($tt:ty,)*) => { {
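For orientation, hand-expanding `impl_fill!(u32)` under the rewritten macro yields roughly the following. This is a sketch, not literal expansion output: the real invocations sit inside the `const _: () = unsafe { ... }` wrappers shown in the next hunk, and `mem`/`slice` refer to the module's existing imports.

impl Fill for u32 {
    fn fill_slice<R: Rng + ?Sized>(this: &mut [Self], rng: &mut R) {
        if this.len() > 0 {
            let size = mem::size_of_val(this);
            // SAFETY: see the comment inside the macro above.
            rng.fill_bytes(unsafe {
                slice::from_raw_parts_mut(this.as_mut_ptr() as *mut u8, size)
            });
            // `to_le!` with the `plain` selector: byte-swap on big-endian targets,
            // a no-op on little-endian ones.
            for x in this {
                *x = x.to_le();
            }
        }
    }
}

The `Wrapping<u32>` impl is identical except that the loop body becomes `*x = Wrapping(x.0.to_le());`.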
@@ -467,15 +440,6 @@ const _: () = unsafe { impl_fill!(u16, u32, u64, u128,) };
 // SAFETY: All bit patterns of `[u8; size_of::<$t>()]` represent values of `i*`.
 const _: () = unsafe { impl_fill!(i8, i16, i32, i64, i128,) };
 
-impl<T, const N: usize> Fill for [T; N]
-where
-    [T]: Fill,
-{
-    fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
-        <[T] as Fill>::fill(self, rng)
-    }
-}
-
 #[cfg(test)]
 mod test {
     use super::*;
@@ -510,27 +474,21 @@ mod test {
 
         // Convert to byte sequence and back to u64; byte-swap twice if BE.
         let mut array = [0u64; 2];
-        rng.fill(&mut array[..]);
+        rng.fill(&mut array);
         assert_eq!(array, [x, x]);
         assert_eq!(rng.next_u64(), x);
 
         // Convert to bytes then u32 in LE order
         let mut array = [0u32; 2];
-        rng.fill(&mut array[..]);
+        rng.fill(&mut array);
         assert_eq!(array, [x as u32, (x >> 32) as u32]);
         assert_eq!(rng.next_u32(), x as u32);
 
         // Check equivalence using wrapped arrays
         let mut warray = [Wrapping(0u32); 2];
-        rng.fill(&mut warray[..]);
+        rng.fill(&mut warray);
         assert_eq!(array[0], warray[0].0);
         assert_eq!(array[1], warray[1].0);
-
-        // Check equivalence for generated floats
-        let mut array = [0f32; 2];
-        rng.fill(&mut array);
-        let arr2: [f32; 2] = rng.random();
-        assert_eq!(array, arr2);
     }
 
     #[test]