Skip to content

Commit 922c6f4

Browse files
liu-song-6 authored and Nobody committed
bpf: select proper size for bpf_prog_pack
Using HPAGE_PMD_SIZE as the size for bpf_prog_pack is not ideal in some cases. Specifically, for NUMA systems, __vmalloc_node_range requires PMD_SIZE * num_online_nodes() to allocate huge pages. Also, if the system does not support huge pages (i.e., with cmdline option nohugevmalloc), it is better to use PAGE_SIZE packs. Add logic to select proper size for bpf_prog_pack. This solution is not ideal, as it makes assumption about the behavior of module_alloc and __vmalloc_node_range. However, it appears to be the easiest solution as it doesn't require changes in module_alloc and vmalloc code. Signed-off-by: Song Liu <[email protected]>
1 parent 689a8b9 commit 922c6f4

File tree

1 file changed

+44
-22
lines changed

1 file changed

+44
-22
lines changed

kernel/bpf/core.c

Lines changed: 44 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -33,6 +33,7 @@
3333
#include <linux/extable.h>
3434
#include <linux/log2.h>
3535
#include <linux/bpf_verifier.h>
36+
#include <linux/nodemask.h>
3637

3738
#include <asm/barrier.h>
3839
#include <asm/unaligned.h>
@@ -814,46 +815,67 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
814815
* allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
815816
* to host BPF programs.
816817
*/
817-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
818-
#define BPF_PROG_PACK_SIZE HPAGE_PMD_SIZE
819-
#else
820-
#define BPF_PROG_PACK_SIZE PAGE_SIZE
821-
#endif
822818
#define BPF_PROG_CHUNK_SHIFT 6
823819
#define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
824820
#define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
825-
#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
826821

827822
struct bpf_prog_pack {
828823
struct list_head list;
829824
void *ptr;
830825
unsigned long bitmap[];
831826
};
832827

833-
#define BPF_PROG_MAX_PACK_PROG_SIZE BPF_PROG_PACK_SIZE
834828
#define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
835829

830+
static size_t bpf_prog_pack_size = -1;
831+
832+
static inline int bpf_prog_chunk_count(void)
833+
{
834+
WARN_ON_ONCE(bpf_prog_pack_size == -1);
835+
return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE;
836+
}
837+
836838
static DEFINE_MUTEX(pack_mutex);
837839
static LIST_HEAD(pack_list);
838840

839841
static struct bpf_prog_pack *alloc_new_pack(void)
840842
{
841843
struct bpf_prog_pack *pack;
844+
size_t size;
845+
void *ptr;
842846

843-
pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(BPF_PROG_CHUNK_COUNT), GFP_KERNEL);
844-
if (!pack)
847+
if (bpf_prog_pack_size == -1) {
848+
/* Test whether we can get huge pages. If not just use
849+
* PAGE_SIZE packs.
850+
*/
851+
size = PMD_SIZE * num_online_nodes();
852+
ptr = module_alloc(size);
853+
if (ptr && is_vm_area_hugepages(ptr)) {
854+
bpf_prog_pack_size = size;
855+
goto got_ptr;
856+
} else {
857+
bpf_prog_pack_size = PAGE_SIZE;
858+
vfree(ptr);
859+
}
860+
}
861+
862+
ptr = module_alloc(bpf_prog_pack_size);
863+
if (!ptr)
845864
return NULL;
846-
pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
847-
if (!pack->ptr) {
848-
kfree(pack);
865+
got_ptr:
866+
pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(bpf_prog_chunk_count())),
867+
GFP_KERNEL);
868+
if (!pack) {
869+
vfree(ptr);
849870
return NULL;
850871
}
851-
bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
872+
pack->ptr = ptr;
873+
bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
852874
list_add_tail(&pack->list, &pack_list);
853875

854876
set_vm_flush_reset_perms(pack->ptr);
855-
set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
856-
set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
877+
set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
878+
set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
857879
return pack;
858880
}
859881

@@ -864,7 +886,7 @@ static void *bpf_prog_pack_alloc(u32 size)
864886
unsigned long pos;
865887
void *ptr = NULL;
866888

867-
if (size > BPF_PROG_MAX_PACK_PROG_SIZE) {
889+
if (size > bpf_prog_pack_size) {
868890
size = round_up(size, PAGE_SIZE);
869891
ptr = module_alloc(size);
870892
if (ptr) {
@@ -876,9 +898,9 @@ static void *bpf_prog_pack_alloc(u32 size)
876898
}
877899
mutex_lock(&pack_mutex);
878900
list_for_each_entry(pack, &pack_list, list) {
879-
pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
901+
pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
880902
nbits, 0);
881-
if (pos < BPF_PROG_CHUNK_COUNT)
903+
if (pos < bpf_prog_chunk_count())
882904
goto found_free_area;
883905
}
884906

@@ -904,12 +926,12 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
904926
unsigned long pos;
905927
void *pack_ptr;
906928

907-
if (hdr->size > BPF_PROG_MAX_PACK_PROG_SIZE) {
929+
if (hdr->size > bpf_prog_pack_size) {
908930
module_memfree(hdr);
909931
return;
910932
}
911933

912-
pack_ptr = (void *)((unsigned long)hdr & ~(BPF_PROG_PACK_SIZE - 1));
934+
pack_ptr = (void *)((unsigned long)hdr & ~(bpf_prog_pack_size - 1));
913935
mutex_lock(&pack_mutex);
914936

915937
list_for_each_entry(tmp, &pack_list, list) {
@@ -926,8 +948,8 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
926948
pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
927949

928950
bitmap_clear(pack->bitmap, pos, nbits);
929-
if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
930-
BPF_PROG_CHUNK_COUNT, 0) == 0) {
951+
if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
952+
bpf_prog_chunk_count(), 0) == 0) {
931953
list_del(&pack->list);
932954
module_memfree(pack->ptr);
933955
kfree(pack);

0 commit comments

Comments
 (0)