@@ -6,6 +6,7 @@
 
 #define TEST_VALUE 0x1234
 #define FILL_VALUE 0xdeadbeef
+#define PCPU_MIN_UNIT_SIZE 32768
 
 static int nr_cpus;
 static int duration;
@@ -118,6 +119,37 @@ static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
 
 	return 0;
 }
+
+/*
+ * percpu map value size is bounded by PCPU_MIN_UNIT_SIZE.
+ * Check that loading fails with E2BIG when the value size exceeds it.
+ */
+static void test_pcpu_map_value_size(void)
+{
+	struct test_map_init *skel;
+	int err;
+	int value_sz = PCPU_MIN_UNIT_SIZE + 1;
+	enum bpf_map_type map_types[] = { BPF_MAP_TYPE_PERCPU_ARRAY,
+					  BPF_MAP_TYPE_PERCPU_HASH,
+					  BPF_MAP_TYPE_LRU_PERCPU_HASH };
+
+	for (int i = 0; i < ARRAY_SIZE(map_types); i++) {
+		skel = test_map_init__open();
+		if (!ASSERT_OK_PTR(skel, "skel_open"))
+			return;
+		err = bpf_map__set_type(skel->maps.hashmap2, map_types[i]);
+		if (!ASSERT_OK(err, "bpf_map__set_type"))
+			goto error;
+		err = bpf_map__set_value_size(skel->maps.hashmap2, value_sz);
+		if (!ASSERT_OK(err, "bpf_map__set_value_size"))
+			goto error;
+
+		err = test_map_init__load(skel);
+		ASSERT_EQ(err, -E2BIG, "skel_load");
+error:
+		test_map_init__destroy(skel);
+	}
+}
 
 /* Add key=1 elem with values set for all CPUs
  * Delete elem key=1
@@ -211,4 +243,6 @@ void test_map_init(void)
 		test_pcpu_map_init();
 	if (test__start_subtest("pcpu_lru_map_init"))
 		test_pcpu_lru_map_init();
+	if (test__start_subtest("pcpu_map_value_size"))
+		test_pcpu_map_value_size();
 }
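
For context, the kernel enforces this cap at map creation time regardless of how the map is built, so the same failure can be reproduced outside the skeleton-based test. The sketch below is not part of the patch: it calls libbpf's bpf_map_create() directly, hardcodes 32768 on the assumption of a 4 KiB-page system (matching the test's PCPU_MIN_UNIT_SIZE define), and uses a hypothetical map name of its own.

/* Standalone sketch: request a per-CPU array whose value_size is one byte
 * over PCPU_MIN_UNIT_SIZE and confirm the kernel rejects it. On failure
 * bpf_map_create() returns a negative value and sets errno; for an
 * oversized per-CPU value the expected errno is E2BIG, which is what the
 * new subtest asserts via the skeleton load path.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	int value_sz = 32768 + 1;	/* PCPU_MIN_UNIT_SIZE + 1, assuming 4K pages */
	int fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, "value_too_big",
				sizeof(int), value_sz, 1, NULL);

	if (fd < 0)
		printf("map_create failed as expected: %s\n", strerror(errno));
	else
		close(fd);	/* creation succeeding would mean the limit was not enforced */
	return 0;
}

Creating the map needs CAP_BPF (or root). Once the patch is applied, the new case can typically be run on its own with the selftest runner, e.g. ./test_progs -t map_init/pcpu_map_value_size, assuming the subtest name matches the test__start_subtest() string above.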