 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
 #define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
 #define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
+#define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
 
 #define check_imm(bits, imm) do {                              \
         if ((((imm) > 0) && ((imm) >> (bits))) ||              \
@@ -67,6 +68,8 @@ static const int bpf2a64[] = {
         /* temporary register for blinding constants */
         [BPF_REG_AX] = A64_R(9),
         [FP_BOTTOM] = A64_R(27),
+        /* callee saved register for kern_vm_start address */
+        [ARENA_VM_START] = A64_R(28),
 };
 
 struct jit_ctx {
@@ -79,6 +82,7 @@ struct jit_ctx {
         __le32 *ro_image;
         u32 stack_size;
         int fpb_offset;
+        u64 user_vm_start;
 };
 
 struct bpf_plt {
@@ -295,7 +299,7 @@ static bool is_lsi_offset(int offset, int scale)
 #define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
 
 static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
-                          bool is_exception_cb)
+                          bool is_exception_cb, u64 arena_vm_start)
 {
         const struct bpf_prog *prog = ctx->prog;
         const bool is_main_prog = !bpf_is_subprog(prog);
@@ -306,6 +310,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
         const u8 fp = bpf2a64[BPF_REG_FP];
         const u8 tcc = bpf2a64[TCALL_CNT];
         const u8 fpb = bpf2a64[FP_BOTTOM];
+        const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
         const int idx0 = ctx->idx;
         int cur_offset;
 
@@ -411,6 +416,10 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
 
         /* Set up function call stack */
         emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+
+        if (arena_vm_start)
+                emit_a64_mov_i64(arena_vm_base, arena_vm_start, ctx);
+
         return 0;
 }
 
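The prologue loads arena_vm_start into the callee-saved x28 so the kernel-side arena base survives any calls the program makes. As a rough illustration of what emit_a64_mov_i64() conceptually emits for that (a standalone sketch, not the JIT code; the real encoder also skips all-zero chunks and the base address below is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t arena_vm_start = 0xffff800080000000ULL;        /* made-up example base */

        /* MOVZ writes the first 16-bit chunk, MOVK patches in the rest. */
        for (int shift = 0; shift < 64; shift += 16)
                printf("%s x28, #0x%04llx, lsl #%d\n",
                       shift ? "movk" : "movz",
                       (unsigned long long)((arena_vm_start >> shift) & 0xffff),
                       shift);
        return 0;
}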
@@ -738,14 +747,16 @@ static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
 
 #define BPF_FIXUP_OFFSET_MASK   GENMASK(26, 0)
 #define BPF_FIXUP_REG_MASK      GENMASK(31, 27)
+#define DONT_CLEAR 5 /* Unused ARM64 register from BPF's POV */
 
 bool ex_handler_bpf(const struct exception_table_entry *ex,
                     struct pt_regs *regs)
 {
         off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
         int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
 
-        regs->regs[dst_reg] = 0;
+        if (dst_reg != DONT_CLEAR)
+                regs->regs[dst_reg] = 0;
         regs->pc = (unsigned long)&ex->fixup - offset;
         return true;
 }
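The fixup word packs a 27-bit offset and a 5-bit destination register; DONT_CLEAR (x5, unused from BPF's point of view) marks entries whose faulting instruction is a store, so there is no destination to zero. A userspace sketch of that encoding and of the handler's decision, reimplemented with plain shifts and masks instead of the kernel's FIELD_PREP()/FIELD_GET(); the helper names are made up for illustration:

#include <stdint.h>

#define FIXUP_OFFSET_BITS       27
#define FIXUP_DONT_CLEAR        5       /* mirrors DONT_CLEAR: no register to zero */

static inline uint32_t pack_fixup(uint32_t offset, uint32_t dst_reg)
{
        return (offset & ((1u << FIXUP_OFFSET_BITS) - 1)) |
               (dst_reg << FIXUP_OFFSET_BITS);
}

static inline void fixup_regs(uint32_t fixup, uint64_t regs[32])
{
        uint32_t dst_reg = fixup >> FIXUP_OFFSET_BITS;

        if (dst_reg != FIXUP_DONT_CLEAR)        /* faulting load: reads back as 0 */
                regs[dst_reg] = 0;              /* faulting store: leave registers alone */
}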
@@ -765,7 +776,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
                 return 0;
 
         if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
-            BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
+            BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+            BPF_MODE(insn->code) != BPF_PROBE_MEM32)
                 return 0;
 
         if (!ctx->prog->aux->extable ||
@@ -810,6 +822,9 @@ static int add_exception_handler(const struct bpf_insn *insn,
 
         ex->insn = ins_offset;
 
+        if (BPF_CLASS(insn->code) != BPF_LDX)
+                dst_reg = DONT_CLEAR;
+
         ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
                     FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
 
@@ -829,12 +844,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                       bool extra_pass)
 {
         const u8 code = insn->code;
-        const u8 dst = bpf2a64[insn->dst_reg];
-        const u8 src = bpf2a64[insn->src_reg];
+        u8 dst = bpf2a64[insn->dst_reg];
+        u8 src = bpf2a64[insn->src_reg];
         const u8 tmp = bpf2a64[TMP_REG_1];
         const u8 tmp2 = bpf2a64[TMP_REG_2];
         const u8 fp = bpf2a64[BPF_REG_FP];
         const u8 fpb = bpf2a64[FP_BOTTOM];
+        const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
         const s16 off = insn->off;
         const s32 imm = insn->imm;
         const int i = insn - ctx->prog->insnsi;
@@ -853,6 +869,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
         /* dst = src */
         case BPF_ALU | BPF_MOV | BPF_X:
         case BPF_ALU64 | BPF_MOV | BPF_X:
+                if (insn_is_cast_user(insn)) {
+                        emit(A64_MOV(0, tmp, src), ctx); // 32-bit mov clears the upper 32 bits
+                        emit_a64_mov_i(0, dst, ctx->user_vm_start >> 32, ctx);
+                        emit(A64_LSL(1, dst, dst, 32), ctx);
+                        emit(A64_CBZ(1, tmp, 2), ctx);
+                        emit(A64_ORR(1, tmp, dst, tmp), ctx);
+                        emit(A64_MOV(1, dst, tmp), ctx);
+                        break;
+                }
                 switch (insn->off) {
                 case 0:
                         emit(A64_MOV(is64, dst, src), ctx);
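The six emitted instructions implement the arena address-space cast: keep the low 32 bits of the source (the arena offset), splice in the high 32 bits of user_vm_start, and leave NULL untouched. A C model of that sequence, assuming only what the instructions above do (cast_user_model is a hypothetical name, not a kernel function):

#include <stdint.h>

static inline uint64_t cast_user_model(uint64_t src, uint64_t user_vm_start)
{
        uint32_t lo = (uint32_t)src;            /* A64_MOV(0, tmp, src) zero-extends */

        if (!lo)                                /* A64_CBZ branches over the ORR */
                return 0;                       /* final MOV copies the zero into dst */
        return (user_vm_start & 0xffffffff00000000ULL) | lo;    /* mov_i + LSL + ORR */
}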
@@ -1237,7 +1262,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
         case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
         case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
         case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
-                if (ctx->fpb_offset > 0 && src == fp) {
+        case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+        case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+        case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+        case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
+                if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+                        emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
+                        src = tmp2;
+                }
+                if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
                         src_adj = fpb;
                         off_adj = off + ctx->fpb_offset;
                 } else {
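For BPF_PROBE_MEM32 the load is redirected through tmp2 = src + arena_vm_base, i.e. into the kernel-side arena mapping, and the exception-table entry added for the access turns a fault into a zeroed destination instead of a crash. A rough C model, assuming the source register carries the arena offset/pointer the verifier prepared (probe_mem32_ld64_model is hypothetical, not JIT output):

#include <stdint.h>

static inline uint64_t probe_mem32_ld64_model(uint64_t kern_vm_start, uint64_t src)
{
        uint64_t kaddr = kern_vm_start + src;   /* A64_ADD(1, tmp2, src, arena_vm_base) */

        return *(volatile uint64_t *)kaddr;     /* LDR via tmp2; a fault yields dst = 0 */
}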
@@ -1322,7 +1355,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
         case BPF_ST | BPF_MEM | BPF_H:
         case BPF_ST | BPF_MEM | BPF_B:
         case BPF_ST | BPF_MEM | BPF_DW:
-                if (ctx->fpb_offset > 0 && dst == fp) {
+        case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
+        case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
+        case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
+        case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
+                if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+                        emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
+                        dst = tmp2;
+                }
+                if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
                         dst_adj = fpb;
                         off_adj = off + ctx->fpb_offset;
                 } else {
@@ -1365,14 +1406,26 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                         }
                         break;
                 }
+
+                ret = add_exception_handler(insn, ctx, dst);
+                if (ret)
+                        return ret;
                 break;
 
         /* STX: *(size *)(dst + off) = src */
         case BPF_STX | BPF_MEM | BPF_W:
         case BPF_STX | BPF_MEM | BPF_H:
         case BPF_STX | BPF_MEM | BPF_B:
         case BPF_STX | BPF_MEM | BPF_DW:
-                if (ctx->fpb_offset > 0 && dst == fp) {
+        case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+        case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+        case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+        case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+                if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+                        emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
+                        dst = tmp2;
+                }
+                if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
                         dst_adj = fpb;
                         off_adj = off + ctx->fpb_offset;
                 } else {
@@ -1413,6 +1466,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                         }
                         break;
                 }
+
+                ret = add_exception_handler(insn, ctx, dst);
+                if (ret)
+                        return ret;
                 break;
 
         case BPF_STX | BPF_ATOMIC | BPF_W:
@@ -1594,6 +1651,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
         bool tmp_blinded = false;
         bool extra_pass = false;
         struct jit_ctx ctx;
+        u64 arena_vm_start;
         u8 *image_ptr;
         u8 *ro_image_ptr;
 
@@ -1611,6 +1669,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                 prog = tmp;
         }
 
+        arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
         jit_data = prog->aux->jit_data;
         if (!jit_data) {
                 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
@@ -1641,14 +1700,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
         }
 
         ctx.fpb_offset = find_fpb_offset(prog);
+        ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
 
         /*
          * 1. Initial fake pass to compute ctx->idx and ctx->offset.
          *
          * BPF line info needs ctx->offset[i] to be the offset of
          * instruction[i] in jited image, so build prologue first.
          */
-        if (build_prologue(&ctx, was_classic, prog->aux->exception_cb)) {
+        if (build_prologue(&ctx, was_classic, prog->aux->exception_cb,
+                           arena_vm_start)) {
                 prog = orig_prog;
                 goto out_off;
         }
@@ -1696,7 +1757,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
         ctx.idx = 0;
         ctx.exentry_idx = 0;
 
-        build_prologue(&ctx, was_classic, prog->aux->exception_cb);
+        build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start);
 
         if (build_body(&ctx, extra_pass)) {
                 prog = orig_prog;
@@ -2461,6 +2522,11 @@ bool bpf_jit_supports_exceptions(void)
         return true;
 }
 
+bool bpf_jit_supports_arena(void)
+{
+        return true;
+}
+
 void bpf_jit_free(struct bpf_prog *prog)
 {
         if (prog->jited) {