Commit c643e0d

feat: add syscall helper macros
Add macros and functions that streamline syscall argument extraction in Tracee, covering multiple architectures and both wrapped and unwrapped syscall calling conventions. Also add TRACE_SYS_* macros to make system call tracing easier.
1 parent 0a32ea2 commit c643e0d

File tree

1 file changed: +179 −0 lines changed


pkg/ebpf/c/common/probes.h

Lines changed: 179 additions & 0 deletions
@@ -5,6 +5,7 @@
 
 #include <bpf/bpf_tracing.h>
 
+#include <common/arch.h>
 #include <common/arguments.h>
 #include <common/buffer.h>
 #include <common/context.h>
@@ -44,4 +45,182 @@
         return events_perf_submit(&p, PT_REGS_RC(ctx)); \
     }
 
+#define TRACE_FUNC(name, id) \
+    SEC("kprobe/" #name) \
+    TRACE_ENT_FUNC(name, id) \
+    SEC("kretprobe/" #name) \
+    TRACE_RET_FUNC(name, id)
+
+statfunc long long get_syscall_arg(struct task_struct *task,
+                                   struct pt_regs *sys_regs,
+                                   bool is_wrapped,
+                                   unsigned int arg_id)
+{
+    struct pt_regs *regs = sys_regs;
+    if (is_wrapped && get_kconfig(ARCH_HAS_SYSCALL_WRAPPER))
+        regs = (struct pt_regs *) PT_REGS_PARM1(sys_regs);
+
+    if (is_x86_compat(task)) {
+#if defined(bpf_target_x86)
+        switch (arg_id) {
+            case 1:
+                return BPF_CORE_READ(regs, bx);
+            case 2:
+                return BPF_CORE_READ(regs, cx);
+            case 3:
+                return BPF_CORE_READ(regs, dx);
+            case 4:
+                return BPF_CORE_READ(regs, si);
+            case 5:
+                return BPF_CORE_READ(regs, di);
+            case 6:
+                return BPF_CORE_READ(regs, bp);
+        }
+#endif // bpf_target_x86
+    } else {
+        switch (arg_id) {
+            case 1:
+                return PT_REGS_PARM1_CORE_SYSCALL(regs);
+            case 2:
+                return PT_REGS_PARM2_CORE_SYSCALL(regs);
+            case 3:
+                return PT_REGS_PARM3_CORE_SYSCALL(regs);
+            case 4:
+                return PT_REGS_PARM4_CORE_SYSCALL(regs);
+            case 5:
+                return PT_REGS_PARM5_CORE_SYSCALL(regs);
+            case 6:
+                return PT_REGS_PARM6_CORE_SYSCALL(regs);
+        }
+    }
+
+    return 0;
+}
+
+statfunc long long get_syscall_arg1(struct task_struct *task,
+                                    struct pt_regs *sys_regs,
+                                    bool is_wrapped)
+{
+    return get_syscall_arg(task, sys_regs, is_wrapped, 1);
+}
+
+statfunc long long get_syscall_arg2(struct task_struct *task,
+                                    struct pt_regs *sys_regs,
+                                    bool is_wrapped)
+{
+    return get_syscall_arg(task, sys_regs, is_wrapped, 2);
+}
+
+statfunc long long get_syscall_arg3(struct task_struct *task,
+                                    struct pt_regs *sys_regs,
+                                    bool is_wrapped)
+{
+    return get_syscall_arg(task, sys_regs, is_wrapped, 3);
+}
+
+statfunc long long get_syscall_arg4(struct task_struct *task,
+                                    struct pt_regs *sys_regs,
+                                    bool is_wrapped)
+{
+    return get_syscall_arg(task, sys_regs, is_wrapped, 4);
+}
+
+statfunc long long get_syscall_arg5(struct task_struct *task,
+                                    struct pt_regs *sys_regs,
+                                    bool is_wrapped)
+{
+    return get_syscall_arg(task, sys_regs, is_wrapped, 5);
+}
+
+statfunc long long get_syscall_arg6(struct task_struct *task,
+                                    struct pt_regs *sys_regs,
+                                    bool is_wrapped)
+{
+    return get_syscall_arg(task, sys_regs, is_wrapped, 6);
+}
+
+statfunc void get_syscall_args(struct task_struct *task,
+                               struct pt_regs *sys_regs,
+                               syscall_data_t *sys)
+{
+    struct pt_regs *regs = get_kconfig(ARCH_HAS_SYSCALL_WRAPPER)
+                               ? (struct pt_regs *) PT_REGS_PARM1(sys_regs)
+                               : sys_regs;
+
+    if (is_x86_compat(task)) {
+#if defined(bpf_target_x86)
+        sys->args.args[0] = BPF_CORE_READ(regs, bx);
+        sys->args.args[1] = BPF_CORE_READ(regs, cx);
+        sys->args.args[2] = BPF_CORE_READ(regs, dx);
+        sys->args.args[3] = BPF_CORE_READ(regs, si);
+        sys->args.args[4] = BPF_CORE_READ(regs, di);
+        sys->args.args[5] = BPF_CORE_READ(regs, bp);
+#endif // bpf_target_x86
+    } else {
+        sys->args.args[0] = PT_REGS_PARM1_CORE_SYSCALL(regs);
+        sys->args.args[1] = PT_REGS_PARM2_CORE_SYSCALL(regs);
+        sys->args.args[2] = PT_REGS_PARM3_CORE_SYSCALL(regs);
+        sys->args.args[3] = PT_REGS_PARM4_CORE_SYSCALL(regs);
+        sys->args.args[4] = PT_REGS_PARM5_CORE_SYSCALL(regs);
+        sys->args.args[5] = PT_REGS_PARM6_CORE_SYSCALL(regs);
+    }
+}
+
+#define TRACE_SYS_ENT_FUNC(name, _id) \
+    int trace_##name(struct pt_regs *ctx) \
+    { \
+        struct task_struct *task = (struct task_struct *) bpf_get_current_task(); \
+        \
+        u32 tid = bpf_get_current_pid_tgid(); \
+        task_info_t *task_info = bpf_map_lookup_elem(&task_info_map, &tid); \
+        if (unlikely(task_info == NULL)) { \
+            task_info = init_task_info(tid, 0); \
+            if (unlikely(task_info == NULL)) \
+                return 0; \
+            \
+            int zero = 0; \
+            config_entry_t *config = bpf_map_lookup_elem(&config_map, &zero); \
+            if (unlikely(config == NULL)) \
+                return 0; \
+            \
+            init_task_context(&task_info->context, task, config->options); \
+        } \
+        \
+        syscall_data_t *sys = &task_info->syscall_data; \
+        sys->id = _id; \
+        sys->ts = get_current_time_in_ns(); \
+        task_info->syscall_traced = true; \
+        \
+        get_syscall_args(task, ctx, sys); \
+        \
+        return 0; \
+    }
+
+#define TRACE_SYS_RET_FUNC(name, id) \
+    int trace_ret_##name(struct pt_regs *ctx) \
+    { \
+        program_data_t p = {}; \
+        if (!init_program_data(&p, ctx, id)) \
+            return 0; \
+        \
+        p.task_info->syscall_traced = false; \
+        \
+        if (!evaluate_scope_filters(&p)) \
+            return 0; \
+        \
+        syscall_data_t *sys = &p.task_info->syscall_data; \
+        \
+        save_args_to_submit_buf(p.event, &sys->args); \
+        p.event->context.ts = sys->ts; \
+        events_perf_submit(&p, PT_REGS_RC(ctx)); \
+        \
+        return 0; \
+    }
+
+#define TRACE_SYSCALL(name, id) \
+    SEC("kprobe/" #name) \
+    TRACE_SYS_ENT_FUNC(name, id) \
+    SEC("kretprobe/" #name) \
+    TRACE_SYS_RET_FUNC(name, id)
+
 #endif
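
Note: the commit only defines these helpers; no call sites are added here. As a rough sketch of how a probe source that includes probes.h would be expected to use them, something like the following applies. The syscall name, the SYSCALL_PTRACE_EVENT_ID constant, and the hand-written probe below are hypothetical placeholders, not identifiers introduced by this commit.

// Hypothetical usage sketch; "ptrace" and SYSCALL_PTRACE_EVENT_ID are
// illustrative placeholders, not part of this commit.

// Expands into trace_ptrace (kprobe) and trace_ret_ptrace (kretprobe):
// the entry program snapshots the syscall arguments into
// task_info->syscall_data via get_syscall_args(), and the return program
// submits them together with the syscall's return value.
TRACE_SYSCALL(ptrace, SYSCALL_PTRACE_EVENT_ID)

// Reading a single argument from a wrapped syscall in a hand-written probe:
SEC("kprobe/__x64_sys_ptrace")
int custom_sys_ptrace(struct pt_regs *ctx)
{
    struct task_struct *task = (struct task_struct *) bpf_get_current_task();

    // On kernels built with CONFIG_ARCH_HAS_SYSCALL_WRAPPER, the probed
    // function receives a pointer to the user pt_regs as its first
    // parameter, so is_wrapped is true and get_syscall_arg() dereferences
    // PT_REGS_PARM1(ctx) before reading the argument registers.
    long long request = get_syscall_arg1(task, ctx, true);

    bpf_printk("ptrace request: %lld", request);
    return 0;
}

The per-index wrappers (get_syscall_arg1 through get_syscall_arg6) exist so an individual probe can pull a single argument without spelling out the index, while get_syscall_args() fills all six slots at once for the TRACE_SYS_ENT_FUNC path.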
