
Commit d5a2151

Chenghao Duan authored and AirFortressIlikara committed
LoongArch: BPF: Add dynamic code modification support
This commit adds support for BPF dynamic code modification on the
LoongArch architecture:

1. Add bpf_arch_text_copy() for instruction block copying.
2. Add bpf_arch_text_poke() for runtime instruction patching.
3. Add bpf_arch_text_invalidate() for code invalidation.

On LoongArch, symbol addresses in the direct mapping region cannot be
reached via relative jump instructions from the paged mapping region,
so we use the move_imm+jirl instruction pair as an absolute jump. This
pair requires 2-5 instructions, so we reserve 5 NOP instructions in the
program as placeholders for function jumps.

The larch_insn_text_copy() function is used solely for BPF, and it
requires PAGE_SIZE alignment. Currently, only the size of the BPF
trampoline is page-aligned.

Co-developed-by: George Guo <[email protected]>
Signed-off-by: George Guo <[email protected]>
Signed-off-by: Chenghao Duan <[email protected]>
Signed-off-by: Huacai Chen <[email protected]>
Signed-off-by: maoxiaochuan <[email protected]>
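For readers unfamiliar with LoongArch code patching, the sketch below shows the shape of one patched 5-slot window. It is illustrative only, not code from this commit: move_imm materializes the 64-bit target with the standard LoongArch immediate-building mnemonics, using 1-4 instructions depending on the target value, and jirl performs the jump, hence 2-5 instructions in total (hi20/lo12/mid20/top12 are shorthand for the relevant bit fields, not assembler syntax).

/*
 * Illustrative worst-case expansion of the reserved window; slot usage
 * varies with the target immediate, and unused slots remain NOPs.
 *
 *	lu12i.w	$t1, hi20(target)	bits 31..12
 *	ori	$t1, $t1, lo12(target)	bits 11..0
 *	lu32i.d	$t1, mid20(target)	bits 51..32
 *	lu52i.d	$t1, $t1, top12(target)	bits 63..52
 *	jirl	$t0, $t1, 0		jump: $t0 links for calls,
 *					$zero for plain jumps
 */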
1 parent 5ae2553 commit d5a2151

3 files changed, 151 insertions(+), 1 deletion(-)


arch/loongarch/include/asm/inst.h
1 addition, 0 deletions

@@ -502,6 +502,7 @@ void arch_simulate_insn(union loongarch_instruction insn, struct pt_regs *regs);
 int larch_insn_read(void *addr, u32 *insnp);
 int larch_insn_write(void *addr, u32 insn);
 int larch_insn_patch_text(void *addr, u32 insn);
+int larch_insn_text_copy(void *dst, void *src, size_t len);
 
 u32 larch_insn_gen_nop(void);
 u32 larch_insn_gen_b(unsigned long pc, unsigned long dest);

arch/loongarch/kernel/inst.c
46 additions, 0 deletions

@@ -4,6 +4,8 @@
  */
 #include <linux/sizes.h>
 #include <linux/uaccess.h>
+#include <linux/set_memory.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cacheflush.h>
 #include <asm/inst.h>
@@ -230,6 +232,50 @@ int larch_insn_patch_text(void *addr, u32 insn)
 	return ret;
 }
 
+struct insn_copy {
+	void *dst;
+	void *src;
+	size_t len;
+	unsigned int cpu;
+};
+
+static int text_copy_cb(void *data)
+{
+	int ret = 0;
+	struct insn_copy *copy = data;
+
+	if (smp_processor_id() == copy->cpu) {
+		ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len);
+		if (ret)
+			pr_err("%s: operation failed\n", __func__);
+	}
+
+	flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len);
+
+	return ret;
+}
+
+int larch_insn_text_copy(void *dst, void *src, size_t len)
+{
+	int ret = 0;
+	size_t start, end;
+	struct insn_copy copy = {
+		.dst = dst,
+		.src = src,
+		.len = len,
+		.cpu = smp_processor_id(),
+	};
+
+	start = round_down((size_t)dst, PAGE_SIZE);
+	end = round_up((size_t)dst + len, PAGE_SIZE);
+
+	set_memory_rw(start, (end - start) / PAGE_SIZE);
+	ret = stop_machine(text_copy_cb, &copy, cpu_online_mask);
+	set_memory_rox(start, (end - start) / PAGE_SIZE);
+
+	return ret;
+}
+
 u32 larch_insn_gen_nop(void)
 {
 	return INSN_NOP;
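A note on the mechanics above: larch_insn_text_copy() flips the affected pages to RW, runs the copy under stop_machine() so that every online CPU is quiesced in text_copy_cb() (only the initiating CPU writes, via copy_to_kernel_nofault(), and each CPU then flushes the patched icache range), and finally restores ROX. A minimal caller sketch follows; patch_code_region() is a hypothetical helper, not part of this commit, and it simply enforces the alignment contract stated in the commit message.

/* Sketch only: patch_code_region() is a hypothetical wrapper around
 * larch_insn_text_copy(), which requires PAGE_SIZE alignment (currently
 * only the size of the BPF trampoline is page-aligned). */
static int patch_code_region(void *dst, u32 *insns, size_t len)
{
	if (!PAGE_ALIGNED(dst) || !PAGE_ALIGNED(len))
		return -EINVAL;

	return larch_insn_text_copy(dst, insns, len);
}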

arch/loongarch/net/bpf_jit.c
104 additions, 1 deletion

@@ -4,8 +4,12 @@
  *
  * Copyright (C) 2022 Loongson Technology Corporation Limited
  */
+#include <linux/memory.h>
 #include "bpf_jit.h"
 
+#define LOONGARCH_LONG_JUMP_NINSNS 5
+#define LOONGARCH_LONG_JUMP_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4)
+
 #define REG_TCC LOONGARCH_GPR_A6
 #define TCC_SAVED LOONGARCH_GPR_S5
 
@@ -88,7 +92,7 @@ static u8 tail_call_reg(struct jit_ctx *ctx)
  */
 static void build_prologue(struct jit_ctx *ctx)
 {
-	int stack_adjust = 0, store_offset, bpf_stack_adjust;
+	int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
 
 	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
 
@@ -98,6 +102,10 @@
 	stack_adjust = round_up(stack_adjust, 16);
 	stack_adjust += bpf_stack_adjust;
 
+	/* Reserve space for the move_imm + jirl instruction */
+	for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
+		emit_insn(ctx, nop);
+
 	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
@@ -1184,6 +1192,101 @@ static int validate_ctx(struct jit_ctx *ctx)
 	return 0;
 }
 
+static int emit_jump_and_link(struct jit_ctx *ctx, u8 rd, u64 target)
+{
+	if (!target) {
+		pr_err("bpf_jit: jump target address is error\n");
+		return -EFAULT;
+	}
+
+	move_imm(ctx, LOONGARCH_GPR_T1, target, false);
+	emit_insn(ctx, jirl, rd, LOONGARCH_GPR_T1, 0);
+
+	return 0;
+}
+
+static int emit_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
+{
+	int i;
+	struct jit_ctx ctx;
+
+	ctx.idx = 0;
+	ctx.image = (union loongarch_instruction *)insns;
+
+	if (!target) {
+		for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++)
+			emit_insn((&ctx), nop);
+		return 0;
+	}
+
+	return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target);
+}
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+	int ret;
+
+	mutex_lock(&text_mutex);
+	ret = larch_insn_text_copy(dst, src, len);
+	mutex_unlock(&text_mutex);
+
+	return ret ? ERR_PTR(-EINVAL) : dst;
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
+		       void *old_addr, void *new_addr)
+{
+	int ret;
+	bool is_call = (poke_type == BPF_MOD_CALL);
+	u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
+	u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
+
+	if (!is_kernel_text((unsigned long)ip) &&
+	    !is_bpf_text_address((unsigned long)ip))
+		return -ENOTSUPP;
+
+	ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
+	if (ret)
+		return ret;
+
+	if (memcmp(ip, old_insns, LOONGARCH_LONG_JUMP_NBYTES))
+		return -EFAULT;
+
+	ret = emit_jump_or_nops(new_addr, ip, new_insns, is_call);
+	if (ret)
+		return ret;
+
+	mutex_lock(&text_mutex);
+	if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES))
+		ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES);
+	mutex_unlock(&text_mutex);
+
+	return ret;
+}
+
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+	int i;
+	int ret = 0;
+	u32 *inst;
+
+	inst = kvmalloc(len, GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	for (i = 0; i < (len / sizeof(u32)); i++)
+		inst[i] = INSN_BREAK;
+
+	mutex_lock(&text_mutex);
+	if (larch_insn_text_copy(dst, inst, len))
+		ret = -EINVAL;
+	mutex_unlock(&text_mutex);
+
+	kvfree(inst);
+
+	return ret;
+}
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	bool tmp_blinded = false, extra_pass = false;
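Taken together, the poke contract works in pairs: old_addr describes what the 5-slot window must currently contain (NULL meaning NOPs), new_addr describes what it should contain afterwards, and the memcmp() guards against patching a site in an unexpected state. Below is a hedged sketch of a round trip through this API; attach_site and tramp are hypothetical placeholders, not symbols from this commit.

/* Sketch: attach, then detach, a trampoline at a prologue NOP window.
 * old_addr == NULL means "the site currently holds NOPs". */
static int poke_round_trip(void *attach_site, void *tramp)
{
	int err;

	/* NOPs -> move_imm+jirl call into the trampoline */
	err = bpf_arch_text_poke(attach_site, BPF_MOD_CALL, NULL, tramp);
	if (err)
		return err;

	/* call -> back to NOPs */
	return bpf_arch_text_poke(attach_site, BPF_MOD_CALL, tramp, NULL);
}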
