-
Notifications
You must be signed in to change notification settings - Fork 4.1k
Expand file tree
/
Copy pathopensnoop.bpf.c
More file actions
187 lines (160 loc) · 4.51 KB
/
opensnoop.bpf.c
File metadata and controls
187 lines (160 loc) · 4.51 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
// Copyright (c) 2020 Netflix
#include <vmlinux.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include "compat.bpf.h"
#include "opensnoop.h"
#include "path_helpers.bpf.h"
/* Fallback definitions in case vmlinux.h does not expose these open(2) flags. */
#ifndef O_CREAT
#define O_CREAT 00000100
#endif
#ifndef O_TMPFILE
#define O_TMPFILE 020200000
#endif
/*
 * Filter knobs, written by userspace via the skeleton before load
 * (const volatile -> rodata, constant-folded by the verifier).
 */
const volatile pid_t targ_pid = 0;	/* trace only this thread id (0 = any) */
const volatile pid_t targ_tgid = 0;	/* trace only this process id (0 = any) */
/*
 * UID filter: valid_uid() treats INVALID_UID as "no filter".
 * NOTE(review): default here is 0, which would filter on root —
 * presumably the loader always overwrites this with INVALID_UID
 * when no -u option is given; verify against userspace.
 */
const volatile uid_t targ_uid = 0;
const volatile bool targ_failed = false;	/* emit failed opens only */
const volatile bool full_path = false;	/* resolve relative paths via bpf_getcwd() */
/*
 * In-flight syscall arguments, keyed by thread id: written by the
 * sys_enter_* probes, consumed and deleted by trace_exit().
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 10240);
	__type(key, u32);
	__type(value, struct args_t);
} start SEC(".maps");
/* A UID filter is active iff targ_uid holds a real UID (not INVALID_UID). */
static __always_inline bool valid_uid(uid_t uid) {
	return !(uid == INVALID_UID);
}
/*
 * Apply the user-configured tgid/pid/uid filters to the current task.
 * Returns true when the event should be traced.
 */
static __always_inline
bool trace_allowed(u32 tgid, u32 pid)
{
	/* process / thread filters (0 means "no filter") */
	if (targ_tgid && tgid != targ_tgid)
		return false;
	if (targ_pid && pid != targ_pid)
		return false;
	/* uid filter applies only when a real uid was configured */
	if (!valid_uid(targ_uid))
		return true;
	return targ_uid == (u32)bpf_get_current_uid_gid();
}
/*
 * open(2) entry: stash filename/flags/mode for the exit probe.
 * Keyed by tid so concurrent threads of one process don't collide.
 */
SEC("tracepoint/syscalls/sys_enter_open")
int tracepoint__syscalls__sys_enter_open(struct syscall_trace_enter* ctx)
{
	u64 id = bpf_get_current_pid_tgid();
	/* kernel terminology: tgid = userspace pid, pid = userspace tid */
	u32 tgid = id >> 32;
	u32 pid = id;
	struct args_t args = {};

	if (!trace_allowed(tgid, pid))
		return 0;

	/* open(filename, flags, mode) */
	args.fname = (const char *)ctx->args[0];
	args.flags = (int)ctx->args[1];
	args.mode = (__u32)ctx->args[2];
	bpf_map_update_elem(&start, &pid, &args, 0);
	return 0;
}
/*
 * openat(2) entry: same as open(2) but the filename/flags/mode are
 * shifted one slot right by the leading dirfd argument.
 */
SEC("tracepoint/syscalls/sys_enter_openat")
int tracepoint__syscalls__sys_enter_openat(struct syscall_trace_enter* ctx)
{
	u64 id = bpf_get_current_pid_tgid();
	/* kernel terminology: tgid = userspace pid, pid = userspace tid */
	u32 tgid = id >> 32;
	u32 pid = id;
	struct args_t args = {};

	if (!trace_allowed(tgid, pid))
		return 0;

	/* openat(dirfd, filename, flags, mode) */
	args.fname = (const char *)ctx->args[1];
	args.flags = (int)ctx->args[2];
	args.mode = (__u32)ctx->args[3];
	bpf_map_update_elem(&start, &pid, &args, 0);
	return 0;
}
/*
 * openat2(2) entry: unlike open/openat, flags and mode arrive packed in
 * a struct open_how that lives in user memory and must be copied in.
 *
 * Fix: the original ignored the return value of bpf_probe_read_user(),
 * so a failed copy (e.g. the page not mapped) silently recorded an
 * event with zeroed flags/mode.  Now a failed read drops the entry —
 * the exit probe then treats it as a missed entry — rather than
 * reporting bogus data.
 */
SEC("tracepoint/syscalls/sys_enter_openat2")
int tracepoint__syscalls__sys_enter_openat2(struct syscall_trace_enter* ctx)
{
	u64 id = bpf_get_current_pid_tgid();
	/* use kernel terminology here for tgid/pid: */
	u32 tgid = id >> 32;
	u32 pid = id;

	/* store arg info for later lookup */
	if (trace_allowed(tgid, pid)) {
		struct args_t args = {};
		struct open_how how = {};

		/* openat2(dirfd, filename, struct open_how *how, size) */
		args.fname = (const char *)ctx->args[1];
		if (bpf_probe_read_user(&how, sizeof(how), (void *)ctx->args[2]))
			return 0;	/* unreadable open_how: skip this call */
		args.flags = (int)how.flags;
		args.mode = (__u32)how.mode;
		bpf_map_update_elem(&start, &pid, &args, 0);
	}
	return 0;
}
/*
 * Common exit path for open/openat/openat2: look up the args stashed by
 * the matching enter probe, apply the failed-only filter, and emit one
 * struct event to userspace via the compat ring/perf buffer helpers.
 */
static __always_inline
int trace_exit(struct syscall_trace_exit* ctx)
{
	struct event *eventp;
	struct args_t *ap;
	uintptr_t stack[3];
	int ret;
	u32 pid = bpf_get_current_pid_tgid();	/* thread id, matches enter key */

	ap = bpf_map_lookup_elem(&start, &pid);
	if (!ap)
		return 0; /* missed entry */
	ret = ctx->ret;	/* new fd, or negative errno */
	if (targ_failed && ret >= 0)
		goto cleanup; /* want failed only */
	/* reserve_buf() is the project's compat wrapper; NULL when the buffer is full */
	eventp = reserve_buf(sizeof(*eventp));
	if (!eventp)
		goto cleanup;
	/* event data */
	eventp->pid = bpf_get_current_pid_tgid() >> 32;
	eventp->uid = bpf_get_current_uid_gid();	/* low 32 bits = uid */
	bpf_get_current_comm(&eventp->comm, sizeof(eventp->comm));
	/* copy the filename from user memory; may be empty if the read fails */
	bpf_probe_read_user_str(&eventp->fname.pathes, sizeof(eventp->fname.pathes),
				ap->fname);
	eventp->fname.depth = 0;
	eventp->flags = ap->flags;
	/* mode is only meaningful when O_CREAT or O_TMPFILE was requested */
	if (ap->flags & O_CREAT || (ap->flags & O_TMPFILE) == O_TMPFILE)
		eventp->mode = ap->mode;
	else
		eventp->mode = 0;
	eventp->ret = ret;
	bpf_get_stack(ctx, &stack, sizeof(stack),
		      BPF_F_USER_STACK);
	/* Skip the first address that is usually the syscall it-self */
	eventp->callers[0] = stack[1];
	eventp->callers[1] = stack[2];
	/*
	 * Relative path: fill in the cwd components after the first NAME_MAX
	 * bytes.  bpf_getcwd() is a project helper from path_helpers.bpf.h —
	 * presumably it walks the dentry chain; see that header for details.
	 */
	if (full_path && eventp->fname.pathes[0] != '/')
		bpf_getcwd(eventp->fname.pathes + NAME_MAX, NAME_MAX,
			   MAX_PATH_DEPTH - 1,
			   &eventp->fname.failed, &eventp->fname.depth);
	/* emit event */
	submit_buf(ctx, eventp, sizeof(*eventp));
cleanup:
	bpf_map_delete_elem(&start, &pid);
	return 0;
}
/* open(2) exit: delegate to the shared exit handler. */
SEC("tracepoint/syscalls/sys_exit_open")
int tracepoint__syscalls__sys_exit_open(struct syscall_trace_exit* ctx)
{
	return trace_exit(ctx);
}
/* openat(2) exit: delegate to the shared exit handler. */
SEC("tracepoint/syscalls/sys_exit_openat")
int tracepoint__syscalls__sys_exit_openat(struct syscall_trace_exit* ctx)
{
	return trace_exit(ctx);
}
/* openat2(2) exit: delegate to the shared exit handler. */
SEC("tracepoint/syscalls/sys_exit_openat2")
int tracepoint__syscalls__sys_exit_openat2(struct syscall_trace_exit* ctx)
{
	return trace_exit(ctx);
}
char LICENSE[] SEC("license") = "GPL";