|
| 1 | +/* |
| 2 | + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. |
| 3 | + * SPDX-License-Identifier: Apache-2.0 |
| 4 | + */ |
| 5 | + |
| 6 | +use aws_sdk_s3::types::ByteStream; |
| 7 | +use aws_sdk_s3::Client; |
| 8 | +use clap::Parser; |
| 9 | +use concurrency::Runtime; |
| 10 | +use std::iter::repeat_with; |
| 11 | + |
// Default values for the CLI arguments declared on `Args` below.
const DEFAULT_CONCURRENCY_LIMIT: usize = 1_000;
const DEFAULT_KEY_PREFIX: &str = "concurrency_test/object";
// 100 KB per object unless overridden.
const DEFAULT_OBJECT_SIZE_IN_BYTES: usize = 100_000;
const DEFAULT_RUNTIME: Runtime = Runtime::MultiThreaded;
const DEFAULT_TASK_COUNT: usize = 10_000;

// CLI arguments for the concurrent S3 upload test. NOTE(review): clap surfaces
// the `///` doc comments on each field verbatim as `--help` text, so they are
// user-facing strings — do not reword them casually.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// The name of the S3 bucket that test objects will be uploaded to.
    #[arg(long)]
    bucket: String,

    /// The prefix used to create keys for objects to be uploaded. For example, with the default
    /// prefix 'concurrency_test/object', the key of the first uploaded object will have the key
    /// 'concurrency_test/object_00000.txt'.
    #[arg(long, default_value_t = DEFAULT_KEY_PREFIX.to_string())]
    key_prefix: String,

    /// The size of each uploaded object in bytes (100KB by default.) Each object will be a random
    /// alphanumeric string. The total amount of data uploaded will be equal to
    /// <task-count> * <object_size_in_bytes>. Larger sizes cause task creation to take more time.
    /// All of the strings are stored in memory while sending the requests so make sure you have
    /// enough RAM before setting this to a very large value.
    #[arg(long, default_value_t = DEFAULT_OBJECT_SIZE_IN_BYTES)]
    object_size_in_bytes: usize,

    /// The total number of objects to upload to the S3 bucket.
    #[arg(long, default_value_t = DEFAULT_TASK_COUNT)]
    task_count: usize,

    /// The maximum number of uploads running at a time.
    #[arg(long, default_value_t = DEFAULT_CONCURRENCY_LIMIT)]
    concurrency_limit: usize,

    /// The runtime to use when running the tasks.
    #[arg(long, default_value_t = DEFAULT_RUNTIME)]
    runtime: Runtime,
}
| 51 | + |
| 52 | +fn main() { |
| 53 | + let args = Args::parse(); |
| 54 | + |
| 55 | + let runtime = match args.runtime { |
| 56 | + Runtime::SingleThreaded => tokio::runtime::Builder::new_current_thread().build(), |
| 57 | + Runtime::MultiThreaded => tokio::runtime::Runtime::new(), |
| 58 | + } |
| 59 | + .unwrap(); |
| 60 | + |
| 61 | + runtime.block_on(async move { async_main(args).await }) |
| 62 | +} |
| 63 | + |
| 64 | +async fn async_main(args: Args) { |
| 65 | + let sdk_config = aws_config::load_from_env().await; |
| 66 | + let client = Client::new(&sdk_config); |
| 67 | + |
| 68 | + let send_message_futures = (0..args.task_count).map(|i| { |
| 69 | + let key = format!("{}_{i:05}.txt", args.key_prefix); |
| 70 | + let body: Vec<_> = repeat_with(fastrand::alphanumeric) |
| 71 | + .take(args.object_size_in_bytes) |
| 72 | + .map(|c| c as u8) |
| 73 | + .collect(); |
| 74 | + let fut = client |
| 75 | + .put_object() |
| 76 | + .bucket(&args.bucket) |
| 77 | + .key(key) |
| 78 | + .body(ByteStream::from(body)) |
| 79 | + .send(); |
| 80 | + |
| 81 | + async move { |
| 82 | + // We unwrap here in order to stop running futures as soon as one of them fails. |
| 83 | + fut.await.expect("request should succeed") |
| 84 | + } |
| 85 | + }); |
| 86 | + |
| 87 | + let _res = |
| 88 | + concurrency::run_futures_concurrently(send_message_futures, args.concurrency_limit).await; |
| 89 | +} |
0 commit comments