
Commit bd27626

Author/Committer: Alexei Starovoitov

Merge branch 'bpf-arm64-support-for-timed-may_goto'
Puranjay Mohan says:

====================
bpf, arm64: support for timed may_goto

Changes in v2->v3:
v2: https://lore.kernel.org/all/[email protected]/
 - Rebased on bpf-next/master
 - Added Acked-by: tags from Xu and Kumar

Changes in v1->v2:
v1: https://lore.kernel.org/bpf/[email protected]/
 - Added comment in arch_bpf_timed_may_goto() about BPF_REG_FP setup (Xu Kuohai)

This set adds support for the timed may_goto instruction on arm64.

The timed may_goto instruction is implemented by the verifier by reserving
two 8-byte slots in the program stack and then calling
arch_bpf_timed_may_goto() in a loop with the stack offset of these two
slots in BPF_REG_AX. It expects the function to put a timestamp in the
first slot, and the count returned in BPF_REG_AX is put into the second
slot by a store instruction emitted by the verifier.

arch_bpf_timed_may_goto() is special: it receives its parameter in
BPF_REG_AX and is expected to return the result in BPF_REG_AX as well, and
it can't clobber any caller-saved registers because the verifier doesn't
save anything before emitting the call. So arch_bpf_timed_may_goto() is
implemented in assembly, so that the exact registers that are
stored/restored can be controlled (the BPF caller-saved registers here),
and it also takes care of moving the argument and return value between
BPF_REG_AX and arm64 R0. It therefore acts as a trampoline that calls
bpf_check_timed_may_goto(), which does the main logic of placing the
timestamp and returning the count.

All tests that use the may_goto instruction pass after updating some of
them in patch 2:

  #404     stream_errors:OK
  [...]
  #406/2   stream_success/stream_cond_break:OK
  [...]
  #494/23  verifier_bpf_fastcall/may_goto_interaction_x86_64:SKIP
  #494/24  verifier_bpf_fastcall/may_goto_interaction_arm64:OK
  [...]
  #539/1   verifier_may_goto_1/may_goto 0:OK
  #539/2   verifier_may_goto_1/batch 2 of may_goto 0:OK
  #539/3   verifier_may_goto_1/may_goto batch with offsets 2/1/0:OK
  #539/4   verifier_may_goto_1/may_goto batch with offsets 2/0:OK
  #539     verifier_may_goto_1:OK
  #540/1   verifier_may_goto_2/C code with may_goto 0:OK
  #540     verifier_may_goto_2:OK

  Summary: 7/16 PASSED, 25 SKIPPED, 0 FAILED
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
Parents: 4c229f3 + 22b22bf (merge commit bd27626)
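
For readers new to the mechanism, the contract described in the cover letter boils down to roughly the following C sketch. It is illustrative only: the slot struct, its field order, and the helper prototype are placeholders rather than the kernel's exact definitions, and the real check is emitted as BPF instructions by the verifier, not compiled from C.

/*
 * Illustrative sketch of the timed may_goto contract (not kernel code).
 * The verifier reserves two 8-byte stack slots per program: one holds the
 * remaining iteration count, the other the timestamp maintained by
 * bpf_check_timed_may_goto(). The struct below is a placeholder for that
 * pair of slots.
 */
#include <stdbool.h>

typedef unsigned long long u64;

struct timed_may_goto_slots {		/* placeholder name and layout */
	u64 count;
	u64 timestamp;
};

/* Reached through the per-arch trampoline (arch_bpf_timed_may_goto());
 * assumed behaviour: refresh the count while the time budget lasts,
 * return 0 once it is exhausted. */
extern u64 bpf_check_timed_may_goto(struct timed_may_goto_slots *p);

/* One timed may_goto check; returning false means "take the goto". */
static inline bool timed_may_goto_continue(struct timed_may_goto_slots *p)
{
	if (!p->count)
		return false;		/* budget already spent */
	if (--p->count == 0)
		p->count = bpf_check_timed_may_goto(p);
	return true;
}

The fast path is a plain decrement; the helper is only reached when the count hits zero, which keeps the per-iteration overhead close to the untimed may_goto.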

File tree: 6 files changed (+76, -42 lines)


arch/arm64/net/Makefile

Lines changed: 1 addition & 1 deletion
@@ -2,4 +2,4 @@
 #
 # ARM64 networking code
 #
-obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_timed_may_goto.o

arch/arm64/net/bpf_jit_comp.c

Lines changed: 12 additions & 1 deletion
@@ -1558,7 +1558,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		if (ret < 0)
 			return ret;
 		emit_call(func_addr, ctx);
-		emit(A64_MOV(1, r0, A64_R(0)), ctx);
+		/*
+		 * Call to arch_bpf_timed_may_goto() is emitted by the
+		 * verifier and called with custom calling convention with
+		 * first argument and return value in BPF_REG_AX (x9).
+		 */
+		if (func_addr != (u64)arch_bpf_timed_may_goto)
+			emit(A64_MOV(1, r0, A64_R(0)), ctx);
 		break;
 	}
 	/* tail call */
@@ -3038,6 +3044,11 @@ bool bpf_jit_bypass_spec_v4(void)
 	return true;
 }
 
+bool bpf_jit_supports_timed_may_goto(void)
+{
+	return true;
+}
+
 bool bpf_jit_inlines_helper_call(s32 imm)
 {
 	switch (imm) {
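
The bpf_jit_supports_timed_may_goto() override added above is the opt-in switch: the verifier only emits the timed expansion when the JIT reports support, and otherwise keeps the plain counter-based may_goto. A minimal sketch of the generic fallback this override shadows, assuming it sits next to the other __weak bpf_jit_supports_*() defaults in kernel/bpf/core.c:

/* Assumed generic default (kernel-context sketch): architectures that do
 * not provide an arch_bpf_timed_may_goto() trampoline keep returning
 * false here, so the verifier emits the untimed, counter-only may_goto
 * for them. */
#include <linux/types.h>
#include <linux/compiler.h>

bool __weak bpf_jit_supports_timed_may_goto(void)
{
	return false;
}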

arch/arm64/net/bpf_timed_may_goto.S

Lines changed: 40 additions & 0 deletions
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Puranjay Mohan <[email protected]> */
+
+#include <linux/linkage.h>
+
+SYM_FUNC_START(arch_bpf_timed_may_goto)
+	/* Allocate stack space and emit frame record */
+	stp	x29, x30, [sp, #-64]!
+	mov	x29, sp
+
+	/* Save BPF registers R0 - R5 (x7, x0-x4)*/
+	stp	x7, x0, [sp, #16]
+	stp	x1, x2, [sp, #32]
+	stp	x3, x4, [sp, #48]
+
+	/*
+	 * Stack depth was passed in BPF_REG_AX (x9), add it to the BPF_FP
+	 * (x25) to get the pointer to count and timestamp and pass it as the
+	 * first argument in x0.
+	 *
+	 * Before generating the call to arch_bpf_timed_may_goto, the verifier
+	 * generates a load instruction using FP, i.e. REG_AX = *(u64 *)(FP -
+	 * stack_off_cnt), so BPF_REG_FP (x25) is always set up by the arm64
+	 * jit in this case.
+	 */
+	add	x0, x9, x25
+	bl	bpf_check_timed_may_goto
+	/* BPF_REG_AX(x9) will be stored into count, so move return value to it. */
+	mov	x9, x0
+
+	/* Restore BPF registers R0 - R5 (x7, x0-x4) */
+	ldp	x7, x0, [sp, #16]
+	ldp	x1, x2, [sp, #32]
+	ldp	x3, x4, [sp, #48]
+
+	/* Restore FP and LR */
+	ldp	x29, x30, [sp], #64
+
+	ret
+SYM_FUNC_END(arch_bpf_timed_may_goto)
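
As a reading aid, the 64-byte frame built by the stp/ldp pairs above can be pictured as the following struct. This is descriptive only (the code never declares such a type); offsets are relative to sp after the pre-decrementing store, and the x-register notes follow the BPF register mapping quoted in the trampoline's comments.

/* Descriptive layout of the trampoline's 64-byte stack frame; offsets are
 * from sp after "stp x29, x30, [sp, #-64]!". Not real kernel code. */
struct arch_bpf_timed_may_goto_frame {	/* hypothetical name */
	unsigned long long fp;		/* x29, offset  0 */
	unsigned long long lr;		/* x30, offset  8 */
	unsigned long long bpf_r0;	/* x7,  offset 16 */
	unsigned long long bpf_r1;	/* x0,  offset 24 */
	unsigned long long bpf_r2;	/* x1,  offset 32 */
	unsigned long long bpf_r3;	/* x2,  offset 40 */
	unsigned long long bpf_r4;	/* x3,  offset 48 */
	unsigned long long bpf_r5;	/* x4,  offset 56 */
};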

tools/testing/selftests/bpf/prog_tests/stream.c

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ void test_stream_errors(void)
 		ASSERT_OK(ret, "ret");
 		ASSERT_OK(opts.retval, "retval");
 
-#if !defined(__x86_64__) && !defined(__s390x__)
+#if !defined(__x86_64__) && !defined(__s390x__) && !defined(__aarch64__)
 		ASSERT_TRUE(1, "Timed may_goto unsupported, skip.");
 		if (i == 0) {
 			ret = bpf_prog_stream_read(prog_fd, 2, buf, sizeof(buf), &ropts);

tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c

Lines changed: 16 additions & 11 deletions
@@ -660,19 +660,24 @@ __naked void may_goto_interaction_x86_64(void)
 
 SEC("raw_tp")
 __arch_arm64
-__log_level(4) __msg("stack depth 16")
-/* may_goto counter at -16 */
-__xlated("0: *(u64 *)(r10 -16) =")
-__xlated("1: r1 = 1")
-__xlated("2: call bpf_get_smp_processor_id")
+__log_level(4) __msg("stack depth 24")
+/* may_goto counter at -24 */
+__xlated("0: *(u64 *)(r10 -24) =")
+/* may_goto timestamp at -16 */
+__xlated("1: *(u64 *)(r10 -16) =")
+__xlated("2: r1 = 1")
+__xlated("3: call bpf_get_smp_processor_id")
 /* may_goto expansion starts */
-__xlated("3: r11 = *(u64 *)(r10 -16)")
-__xlated("4: if r11 == 0x0 goto pc+3")
-__xlated("5: r11 -= 1")
-__xlated("6: *(u64 *)(r10 -16) = r11")
+__xlated("4: r11 = *(u64 *)(r10 -24)")
+__xlated("5: if r11 == 0x0 goto pc+6")
+__xlated("6: r11 -= 1")
+__xlated("7: if r11 != 0x0 goto pc+2")
+__xlated("8: r11 = -24")
+__xlated("9: call unknown")
+__xlated("10: *(u64 *)(r10 -24) = r11")
 /* may_goto expansion ends */
-__xlated("7: *(u64 *)(r10 -8) = r1")
-__xlated("8: exit")
+__xlated("11: *(u64 *)(r10 -8) = r1")
+__xlated("12: exit")
 __success
 __naked void may_goto_interaction_arm64(void)
 {

tools/testing/selftests/bpf/progs/verifier_may_goto_1.c

Lines changed: 6 additions & 28 deletions
@@ -10,6 +10,7 @@ SEC("raw_tp")
 __description("may_goto 0")
 __arch_x86_64
 __arch_s390x
+__arch_arm64
 __xlated("0: r0 = 1")
 __xlated("1: exit")
 __success
@@ -29,6 +30,7 @@ SEC("raw_tp")
 __description("batch 2 of may_goto 0")
 __arch_x86_64
 __arch_s390x
+__arch_arm64
 __xlated("0: r0 = 1")
 __xlated("1: exit")
 __success
@@ -50,6 +52,7 @@ SEC("raw_tp")
 __description("may_goto batch with offsets 2/1/0")
 __arch_x86_64
 __arch_s390x
+__arch_arm64
 __xlated("0: r0 = 1")
 __xlated("1: exit")
 __success
@@ -72,9 +75,10 @@ __naked void may_goto_batch_1(void)
 }
 
 SEC("raw_tp")
-__description("may_goto batch with offsets 2/0 - x86_64 and s390x")
+__description("may_goto batch with offsets 2/0")
 __arch_x86_64
 __arch_s390x
+__arch_arm64
 __xlated("0: *(u64 *)(r10 -16) = 65535")
 __xlated("1: *(u64 *)(r10 -8) = 0")
 __xlated("2: r11 = *(u64 *)(r10 -16)")
@@ -88,33 +92,7 @@ __xlated("9: r0 = 1")
 __xlated("10: r0 = 2")
 __xlated("11: exit")
 __success
-__naked void may_goto_batch_2_x86_64_s390x(void)
-{
-	asm volatile (
-	".8byte %[may_goto1];"
-	".8byte %[may_goto3];"
-	"r0 = 1;"
-	"r0 = 2;"
-	"exit;"
-	:
-	: __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)),
-	  __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
-	: __clobber_all);
-}
-
-SEC("raw_tp")
-__description("may_goto batch with offsets 2/0 - arm64")
-__arch_arm64
-__xlated("0: *(u64 *)(r10 -8) = 8388608")
-__xlated("1: r11 = *(u64 *)(r10 -8)")
-__xlated("2: if r11 == 0x0 goto pc+3")
-__xlated("3: r11 -= 1")
-__xlated("4: *(u64 *)(r10 -8) = r11")
-__xlated("5: r0 = 1")
-__xlated("6: r0 = 2")
-__xlated("7: exit")
-__success
-__naked void may_goto_batch_2_arm64(void)
+__naked void may_goto_batch_2(void)
 {
 	asm volatile (
 	".8byte %[may_goto1];"
