Skip to content

Commit 90c6371

Browse files
anakryiko authored and Kernel Patches Daemon committed
libbpf: make RINGBUF map size adjustments more eagerly
Make libbpf adjust RINGBUF map size (rounding it up to closest power-of-2 of page_size) more eagerly: during open phase when initializing the map and on explicit calls to bpf_map__set_max_entries(). Such approach allows user to check actual size of BPF ringbuf even before it's created in the kernel, but also it prevents various edge case scenarios where BPF ringbuf size can get out of sync with what it would be in kernel. One of them (reported in [0]) is during an attempt to pin/reuse BPF ringbuf. Move adjust_ringbuf_sz() helper closer to its first actual use. The implementation of the helper is unchanged. [0] Closes: https://github.com/libbpf/libbpf/issues/530 Fixes: 0087a68 ("libbpf: Automatically fix up BPF_MAP_TYPE_RINGBUF size, if necessary") Signed-off-by: Andrii Nakryiko <[email protected]>
1 parent 4db7d2a commit 90c6371

File tree

1 file changed

+42
-35
lines changed

1 file changed

+42
-35
lines changed

tools/lib/bpf/libbpf.c

Lines changed: 42 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -2320,6 +2320,37 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
23202320
return 0;
23212321
}
23222322

2323+
static size_t adjust_ringbuf_sz(size_t sz)
2324+
{
2325+
__u32 page_sz = sysconf(_SC_PAGE_SIZE);
2326+
__u32 mul;
2327+
2328+
/* if user forgot to set any size, make sure they see error */
2329+
if (sz == 0)
2330+
return 0;
2331+
/* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
2332+
* a power-of-2 multiple of kernel's page size. If user diligently
2333+
* satisified these conditions, pass the size through.
2334+
*/
2335+
if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
2336+
return sz;
2337+
2338+
/* Otherwise find closest (page_sz * power_of_2) product bigger than
2339+
* user-set size to satisfy both user size request and kernel
2340+
* requirements and substitute correct max_entries for map creation.
2341+
*/
2342+
for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
2343+
if (mul * page_sz > sz)
2344+
return mul * page_sz;
2345+
}
2346+
2347+
/* if it's impossible to satisfy the conditions (i.e., user size is
2348+
* very close to UINT_MAX but is not a power-of-2 multiple of
2349+
* page_size) then just return original size and let kernel reject it
2350+
*/
2351+
return sz;
2352+
}
2353+
23232354
static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
23242355
{
23252356
map->def.type = def->map_type;
@@ -2333,6 +2364,10 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
23332364
map->btf_key_type_id = def->key_type_id;
23342365
map->btf_value_type_id = def->value_type_id;
23352366

2367+
/* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
2368+
if (map->def.type == BPF_MAP_TYPE_RINGBUF)
2369+
map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2370+
23362371
if (def->parts & MAP_DEF_MAP_TYPE)
23372372
pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
23382373

@@ -4306,9 +4341,15 @@ struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
43064341

43074342
/* Set the map's max_entries before the owning object is loaded.
 *
 * Returns -EBUSY (via libbpf_err()) once the object has been loaded into
 * the kernel, since the map size can no longer change at that point.
 *
 * For BPF_MAP_TYPE_RINGBUF the requested size is eagerly rounded up to the
 * closest power-of-2 multiple of the page size, so callers observe the
 * effective size even before load (keeps pin/reuse consistent with what
 * the kernel would create).
 */
int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
{
	if (map->obj->loaded)
		return libbpf_err(-EBUSY);

	map->def.max_entries = max_entries;

	/* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
	if (map->def.type == BPF_MAP_TYPE_RINGBUF)
		map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);

	return 0;
}
43144355

@@ -4859,37 +4900,6 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
48594900

48604901
static void bpf_map__destroy(struct bpf_map *map);
48614902

4862-
static size_t adjust_ringbuf_sz(size_t sz)
4863-
{
4864-
__u32 page_sz = sysconf(_SC_PAGE_SIZE);
4865-
__u32 mul;
4866-
4867-
/* if user forgot to set any size, make sure they see error */
4868-
if (sz == 0)
4869-
return 0;
4870-
/* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
4871-
* a power-of-2 multiple of kernel's page size. If user diligently
4872-
* satisified these conditions, pass the size through.
4873-
*/
4874-
if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
4875-
return sz;
4876-
4877-
/* Otherwise find closest (page_sz * power_of_2) product bigger than
4878-
* user-set size to satisfy both user size request and kernel
4879-
* requirements and substitute correct max_entries for map creation.
4880-
*/
4881-
for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
4882-
if (mul * page_sz > sz)
4883-
return mul * page_sz;
4884-
}
4885-
4886-
/* if it's impossible to satisfy the conditions (i.e., user size is
4887-
* very close to UINT_MAX but is not a power-of-2 multiple of
4888-
* page_size) then just return original size and let kernel reject it
4889-
*/
4890-
return sz;
4891-
}
4892-
48934903
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
48944904
{
48954905
LIBBPF_OPTS(bpf_map_create_opts, create_attr);
@@ -4928,9 +4938,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
49284938
}
49294939

49304940
switch (def->type) {
4931-
case BPF_MAP_TYPE_RINGBUF:
4932-
map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
4933-
/* fallthrough */
49344941
case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
49354942
case BPF_MAP_TYPE_CGROUP_ARRAY:
49364943
case BPF_MAP_TYPE_STACK_TRACE:

0 commit comments

Comments
 (0)