Skip to content

Commit b7540d6

Browse files
rnavmpe
authored and committed
powerpc/bpf: Emit stf barrier instruction sequences for BPF_NOSPEC
Emit similar instruction sequences to commit a048a07 ("powerpc/64s: Add support for a store forwarding barrier at kernel entry/exit") when encountering BPF_NOSPEC. Mitigations are enabled depending on what the firmware advertises. In particular, we do not gate these mitigations based on current settings, just like in x86. Due to this, we don't need to take any action if mitigations are enabled or disabled at runtime. Signed-off-by: Naveen N. Rao <[email protected]> Signed-off-by: Michael Ellerman <[email protected]> Link: https://lore.kernel.org/r/956570cbc191cd41f8274bed48ee757a86dac62a.1633464148.git.naveen.n.rao@linux.vnet.ibm.com
1 parent 0309059 commit b7540d6

File tree

2 files changed

+55
-8
lines changed

2 files changed

+55
-8
lines changed

arch/powerpc/net/bpf_jit64.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,18 +16,18 @@
1616
* with our redzone usage.
1717
*
1818
* [ prev sp ] <-------------
19-
* [ nv gpr save area ] 6*8 |
19+
* [ nv gpr save area ] 5*8 |
2020
* [ tail_call_cnt ] 8 |
21-
* [ local_tmp_var ] 8 |
21+
* [ local_tmp_var ] 16 |
2222
* fp (r31) --> [ ebpf stack space ] upto 512 |
2323
* [ frame header ] 32/112 |
2424
* sp (r1) ---> [ stack pointer ] --------------
2525
*/
2626

2727
/* for gpr non volatile registers BPG_REG_6 to 10 */
28-
#define BPF_PPC_STACK_SAVE (6*8)
28+
#define BPF_PPC_STACK_SAVE (5*8)
2929
/* for bpf JIT code internal usage */
30-
#define BPF_PPC_STACK_LOCALS 16
30+
#define BPF_PPC_STACK_LOCALS 24
3131
/* stack frame excluding BPF stack, ensure this is quadword aligned */
3232
#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
3333
BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)

arch/powerpc/net/bpf_jit_comp64.c

Lines changed: 51 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#include <linux/if_vlan.h>
1616
#include <asm/kprobes.h>
1717
#include <linux/bpf.h>
18+
#include <asm/security_features.h>
1819

1920
#include "bpf_jit64.h"
2021

@@ -35,22 +36,22 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
3536
* [ prev sp ] <-------------
3637
* [ ... ] |
3738
* sp (r1) ---> [ stack pointer ] --------------
38-
* [ nv gpr save area ] 6*8
39+
* [ nv gpr save area ] 5*8
3940
* [ tail_call_cnt ] 8
40-
* [ local_tmp_var ] 8
41+
* [ local_tmp_var ] 16
4142
* [ unused red zone ] 208 bytes protected
4243
*/
4344
static int bpf_jit_stack_local(struct codegen_context *ctx)
4445
{
4546
if (bpf_has_stack_frame(ctx))
4647
return STACK_FRAME_MIN_SIZE + ctx->stack_size;
4748
else
48-
return -(BPF_PPC_STACK_SAVE + 16);
49+
return -(BPF_PPC_STACK_SAVE + 24);
4950
}
5051

5152
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
5253
{
53-
return bpf_jit_stack_local(ctx) + 8;
54+
return bpf_jit_stack_local(ctx) + 16;
5455
}
5556

5657
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
@@ -272,10 +273,33 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
272273
return 0;
273274
}
274275

276+
/*
277+
* We spill into the redzone always, even if the bpf program has its own stackframe.
278+
* Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
279+
*/
280+
void bpf_stf_barrier(void);
281+
282+
asm (
283+
" .global bpf_stf_barrier ;"
284+
" bpf_stf_barrier: ;"
285+
" std 21,-64(1) ;"
286+
" std 22,-56(1) ;"
287+
" sync ;"
288+
" ld 21,-64(1) ;"
289+
" ld 22,-56(1) ;"
290+
" ori 31,31,0 ;"
291+
" .rept 14 ;"
292+
" b 1f ;"
293+
" 1: ;"
294+
" .endr ;"
295+
" blr ;"
296+
);
297+
275298
/* Assemble the body code between the prologue & epilogue */
276299
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
277300
u32 *addrs, bool extra_pass)
278301
{
302+
enum stf_barrier_type stf_barrier = stf_barrier_type_get();
279303
const struct bpf_insn *insn = fp->insnsi;
280304
int flen = fp->len;
281305
int i, ret;
@@ -646,6 +670,29 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
646670
* BPF_ST NOSPEC (speculation barrier)
647671
*/
648672
case BPF_ST | BPF_NOSPEC:
673+
if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
674+
!security_ftr_enabled(SEC_FTR_STF_BARRIER))
675+
break;
676+
677+
switch (stf_barrier) {
678+
case STF_BARRIER_EIEIO:
679+
EMIT(PPC_RAW_EIEIO() | 0x02000000);
680+
break;
681+
case STF_BARRIER_SYNC_ORI:
682+
EMIT(PPC_RAW_SYNC());
683+
EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
684+
EMIT(PPC_RAW_ORI(_R31, _R31, 0));
685+
break;
686+
case STF_BARRIER_FALLBACK:
687+
EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
688+
PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
689+
EMIT(PPC_RAW_MTCTR(12));
690+
EMIT(PPC_RAW_BCTRL());
691+
EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
692+
break;
693+
case STF_BARRIER_NONE:
694+
break;
695+
}
649696
break;
650697

651698
/*

0 commit comments

Comments
 (0)