Skip to content

Commit 49b73fa

Browse files
author
Alexei Starovoitov
committed
Merge branch 'bpf-arm64-add-support-for-bpf-arena'
Puranjay Mohan says:

====================
bpf,arm64: Add support for BPF Arena

Changes in V4
V3: https://lore.kernel.org/bpf/[email protected]/
- Use more descriptive variable names.
- Use insn_is_cast_user() helper.

Changes in V3
V2: https://lore.kernel.org/bpf/[email protected]/
- Optimize bpf_addr_space_cast as suggested by Xu Kuohai

Changes in V2
V1: https://lore.kernel.org/bpf/[email protected]/
- Fix build warnings by using 5 in place of 32 as DONT_CLEAR marker.
  R5 is not mapped to any BPF register so it can safely be used here.

This series adds the support for PROBE_MEM32 and bpf_addr_space_cast
instructions to the ARM64 BPF JIT. These two instructions allow the
enablement of BPF Arena.

All arena related selftests are passing.

[root@ip-172-31-6-62 bpf]# ./test_progs -a "*arena*"
#3/1    arena_htab/arena_htab_llvm:OK
#3/2    arena_htab/arena_htab_asm:OK
#3      arena_htab:OK
#4/1    arena_list/arena_list_1:OK
#4/2    arena_list/arena_list_1000:OK
#4      arena_list:OK
#434/1  verifier_arena/basic_alloc1:OK
#434/2  verifier_arena/basic_alloc2:OK
#434/3  verifier_arena/basic_alloc3:OK
#434/4  verifier_arena/iter_maps1:OK
#434/5  verifier_arena/iter_maps2:OK
#434/6  verifier_arena/iter_maps3:OK
#434    verifier_arena:OK
Summary: 3/10 PASSED, 0 SKIPPED, 0 FAILED

This will need the patch [1] that introduced the insn_is_cast_user()
helper to build.

The verifier_arena selftest could fail in the CI because the following
commit [2] is missing from bpf-next:

[1] https://lore.kernel.org/bpf/[email protected]/
[2] https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git/commit/?id=fa3550dca8f02ec312727653a94115ef3ab68445

Here is a CI run with all dependencies added: kernel-patches/bpf#6641
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
2 parents c07b4bc + 4dd3124 commit 49b73fa

File tree

2 files changed

+76
-12
lines changed

2 files changed

+76
-12
lines changed

arch/arm64/net/bpf_jit_comp.c

Lines changed: 76 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
3030
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
3131
#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
32+
#define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
3233

3334
#define check_imm(bits, imm) do { \
3435
if ((((imm) > 0) && ((imm) >> (bits))) || \
@@ -67,6 +68,8 @@ static const int bpf2a64[] = {
6768
/* temporary register for blinding constants */
6869
[BPF_REG_AX] = A64_R(9),
6970
[FP_BOTTOM] = A64_R(27),
71+
/* callee saved register for kern_vm_start address */
72+
[ARENA_VM_START] = A64_R(28),
7073
};
7174

7275
struct jit_ctx {
@@ -79,6 +82,7 @@ struct jit_ctx {
7982
__le32 *ro_image;
8083
u32 stack_size;
8184
int fpb_offset;
85+
u64 user_vm_start;
8286
};
8387

8488
struct bpf_plt {
@@ -295,7 +299,7 @@ static bool is_lsi_offset(int offset, int scale)
295299
#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
296300

297301
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
298-
bool is_exception_cb)
302+
bool is_exception_cb, u64 arena_vm_start)
299303
{
300304
const struct bpf_prog *prog = ctx->prog;
301305
const bool is_main_prog = !bpf_is_subprog(prog);
@@ -306,6 +310,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
306310
const u8 fp = bpf2a64[BPF_REG_FP];
307311
const u8 tcc = bpf2a64[TCALL_CNT];
308312
const u8 fpb = bpf2a64[FP_BOTTOM];
313+
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
309314
const int idx0 = ctx->idx;
310315
int cur_offset;
311316

@@ -411,6 +416,10 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
411416

412417
/* Set up function call stack */
413418
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
419+
420+
if (arena_vm_start)
421+
emit_a64_mov_i64(arena_vm_base, arena_vm_start, ctx);
422+
414423
return 0;
415424
}
416425

@@ -738,14 +747,16 @@ static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
738747

739748
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
740749
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
750+
#define DONT_CLEAR 5 /* Unused ARM64 register from BPF's POV */
741751

742752
bool ex_handler_bpf(const struct exception_table_entry *ex,
743753
struct pt_regs *regs)
744754
{
745755
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
746756
int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
747757

748-
regs->regs[dst_reg] = 0;
758+
if (dst_reg != DONT_CLEAR)
759+
regs->regs[dst_reg] = 0;
749760
regs->pc = (unsigned long)&ex->fixup - offset;
750761
return true;
751762
}
@@ -765,7 +776,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
765776
return 0;
766777

767778
if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
768-
BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
779+
BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
780+
BPF_MODE(insn->code) != BPF_PROBE_MEM32)
769781
return 0;
770782

771783
if (!ctx->prog->aux->extable ||
@@ -810,6 +822,9 @@ static int add_exception_handler(const struct bpf_insn *insn,
810822

811823
ex->insn = ins_offset;
812824

825+
if (BPF_CLASS(insn->code) != BPF_LDX)
826+
dst_reg = DONT_CLEAR;
827+
813828
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
814829
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
815830

@@ -829,12 +844,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
829844
bool extra_pass)
830845
{
831846
const u8 code = insn->code;
832-
const u8 dst = bpf2a64[insn->dst_reg];
833-
const u8 src = bpf2a64[insn->src_reg];
847+
u8 dst = bpf2a64[insn->dst_reg];
848+
u8 src = bpf2a64[insn->src_reg];
834849
const u8 tmp = bpf2a64[TMP_REG_1];
835850
const u8 tmp2 = bpf2a64[TMP_REG_2];
836851
const u8 fp = bpf2a64[BPF_REG_FP];
837852
const u8 fpb = bpf2a64[FP_BOTTOM];
853+
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
838854
const s16 off = insn->off;
839855
const s32 imm = insn->imm;
840856
const int i = insn - ctx->prog->insnsi;
@@ -853,6 +869,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
853869
/* dst = src */
854870
case BPF_ALU | BPF_MOV | BPF_X:
855871
case BPF_ALU64 | BPF_MOV | BPF_X:
872+
if (insn_is_cast_user(insn)) {
873+
emit(A64_MOV(0, tmp, src), ctx); // 32-bit mov clears the upper 32 bits
874+
emit_a64_mov_i(0, dst, ctx->user_vm_start >> 32, ctx);
875+
emit(A64_LSL(1, dst, dst, 32), ctx);
876+
emit(A64_CBZ(1, tmp, 2), ctx);
877+
emit(A64_ORR(1, tmp, dst, tmp), ctx);
878+
emit(A64_MOV(1, dst, tmp), ctx);
879+
break;
880+
}
856881
switch (insn->off) {
857882
case 0:
858883
emit(A64_MOV(is64, dst, src), ctx);
@@ -1237,7 +1262,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
12371262
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
12381263
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
12391264
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1240-
if (ctx->fpb_offset > 0 && src == fp) {
1265+
case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
1266+
case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
1267+
case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
1268+
case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
1269+
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
1270+
emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
1271+
src = tmp2;
1272+
}
1273+
if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
12411274
src_adj = fpb;
12421275
off_adj = off + ctx->fpb_offset;
12431276
} else {
@@ -1322,7 +1355,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
13221355
case BPF_ST | BPF_MEM | BPF_H:
13231356
case BPF_ST | BPF_MEM | BPF_B:
13241357
case BPF_ST | BPF_MEM | BPF_DW:
1325-
if (ctx->fpb_offset > 0 && dst == fp) {
1358+
case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
1359+
case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
1360+
case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
1361+
case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
1362+
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
1363+
emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
1364+
dst = tmp2;
1365+
}
1366+
if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
13261367
dst_adj = fpb;
13271368
off_adj = off + ctx->fpb_offset;
13281369
} else {
@@ -1365,14 +1406,26 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
13651406
}
13661407
break;
13671408
}
1409+
1410+
ret = add_exception_handler(insn, ctx, dst);
1411+
if (ret)
1412+
return ret;
13681413
break;
13691414

13701415
/* STX: *(size *)(dst + off) = src */
13711416
case BPF_STX | BPF_MEM | BPF_W:
13721417
case BPF_STX | BPF_MEM | BPF_H:
13731418
case BPF_STX | BPF_MEM | BPF_B:
13741419
case BPF_STX | BPF_MEM | BPF_DW:
1375-
if (ctx->fpb_offset > 0 && dst == fp) {
1420+
case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
1421+
case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
1422+
case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
1423+
case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
1424+
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
1425+
emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
1426+
dst = tmp2;
1427+
}
1428+
if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
13761429
dst_adj = fpb;
13771430
off_adj = off + ctx->fpb_offset;
13781431
} else {
@@ -1413,6 +1466,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
14131466
}
14141467
break;
14151468
}
1469+
1470+
ret = add_exception_handler(insn, ctx, dst);
1471+
if (ret)
1472+
return ret;
14161473
break;
14171474

14181475
case BPF_STX | BPF_ATOMIC | BPF_W:
@@ -1594,6 +1651,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
15941651
bool tmp_blinded = false;
15951652
bool extra_pass = false;
15961653
struct jit_ctx ctx;
1654+
u64 arena_vm_start;
15971655
u8 *image_ptr;
15981656
u8 *ro_image_ptr;
15991657

@@ -1611,6 +1669,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
16111669
prog = tmp;
16121670
}
16131671

1672+
arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
16141673
jit_data = prog->aux->jit_data;
16151674
if (!jit_data) {
16161675
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
@@ -1641,14 +1700,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
16411700
}
16421701

16431702
ctx.fpb_offset = find_fpb_offset(prog);
1703+
ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
16441704

16451705
/*
16461706
* 1. Initial fake pass to compute ctx->idx and ctx->offset.
16471707
*
16481708
* BPF line info needs ctx->offset[i] to be the offset of
16491709
* instruction[i] in jited image, so build prologue first.
16501710
*/
1651-
if (build_prologue(&ctx, was_classic, prog->aux->exception_cb)) {
1711+
if (build_prologue(&ctx, was_classic, prog->aux->exception_cb,
1712+
arena_vm_start)) {
16521713
prog = orig_prog;
16531714
goto out_off;
16541715
}
@@ -1696,7 +1757,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
16961757
ctx.idx = 0;
16971758
ctx.exentry_idx = 0;
16981759

1699-
build_prologue(&ctx, was_classic, prog->aux->exception_cb);
1760+
build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start);
17001761

17011762
if (build_body(&ctx, extra_pass)) {
17021763
prog = orig_prog;
@@ -2461,6 +2522,11 @@ bool bpf_jit_supports_exceptions(void)
24612522
return true;
24622523
}
24632524

2525+
bool bpf_jit_supports_arena(void)
2526+
{
2527+
return true;
2528+
}
2529+
24642530
void bpf_jit_free(struct bpf_prog *prog)
24652531
{
24662532
if (prog->jited) {

tools/testing/selftests/bpf/DENYLIST.aarch64

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,5 +10,3 @@ fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_mu
1010
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
1111
fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
1212
missed/kprobe_recursion # missed_kprobe_recursion__attach unexpected error: -95 (errno 95)
13-
verifier_arena # JIT does not support arena
14-
arena_htab # JIT does not support arena

0 commit comments

Comments
 (0)