Skip to content

Commit 338d4f4

Browse files
James Morse authored and wildea01 committed
arm64: kernel: Add support for Privileged Access Never
'Privileged Access Never' is a new arm8.1 feature which prevents privileged code from accessing any virtual address where read or write access is also permitted at EL0.

This patch enables the PAN feature on all CPUs, and modifies {get,put}_user helpers temporarily to permit access. This will catch kernel bugs where user memory is accessed directly. 'Unprivileged loads and stores' using ldtrb et al are unaffected by PAN.

Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
[will: use ALTERNATIVE in asm and tidy up pan_enable check]
Signed-off-by: Will Deacon <[email protected]>
1 parent 9ded63a commit 338d4f4

File tree

14 files changed

+121
-2
lines changed

14 files changed

+121
-2
lines changed

arch/arm64/Kconfig

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -596,6 +596,20 @@ config FORCE_MAX_ZONEORDER
596596
default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
597597
default "11"
598598

599+
config ARM64_PAN
600+
bool "Enable support for Privileged Access Never (PAN)"
601+
default y
602+
help
603+
Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
604+
prevents the kernel or hypervisor from accessing user-space (EL0)
605+
memory directly.
606+
607+
Choosing this option will cause any unprotected (not using
608+
copy_to_user et al) memory access to fail with a permission fault.
609+
610+
The feature is detected at runtime, and will remain as a 'nop'
611+
instruction if the cpu does not implement the feature.
612+
599613
menuconfig ARMV8_DEPRECATED
600614
bool "Emulate deprecated/obsolete ARMv8 instructions"
601615
depends on COMPAT

arch/arm64/include/asm/cpufeature.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,9 @@
2525
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
2626
#define ARM64_WORKAROUND_845719 2
2727
#define ARM64_HAS_SYSREG_GIC_CPUIF 3
28+
#define ARM64_HAS_PAN 4
2829

29-
#define ARM64_NCAPS 4
30+
#define ARM64_NCAPS 5
3031

3132
#ifndef __ASSEMBLY__
3233

arch/arm64/include/asm/futex.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,10 +20,16 @@
2020

2121
#include <linux/futex.h>
2222
#include <linux/uaccess.h>
23+
24+
#include <asm/alternative.h>
25+
#include <asm/cpufeature.h>
2326
#include <asm/errno.h>
27+
#include <asm/sysreg.h>
2428

2529
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
2630
asm volatile( \
31+
ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
32+
CONFIG_ARM64_PAN) \
2733
"1: ldxr %w1, %2\n" \
2834
insn "\n" \
2935
"2: stlxr %w3, %w0, %2\n" \
@@ -39,6 +45,8 @@
3945
" .align 3\n" \
4046
" .quad 1b, 4b, 2b, 4b\n" \
4147
" .popsection\n" \
48+
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
49+
CONFIG_ARM64_PAN) \
4250
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
4351
: "r" (oparg), "Ir" (-EFAULT) \
4452
: "memory")

arch/arm64/include/asm/processor.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -186,4 +186,6 @@ static inline void spin_lock_prefetch(const void *x)
186186

187187
#endif
188188

189+
void cpu_enable_pan(void);
190+
189191
#endif /* __ASM_PROCESSOR_H */

arch/arm64/include/asm/sysreg.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,8 @@
2020
#ifndef __ASM_SYSREG_H
2121
#define __ASM_SYSREG_H
2222

23+
#include <asm/opcodes.h>
24+
2325
#define SCTLR_EL1_CP15BEN (0x1 << 5)
2426
#define SCTLR_EL1_SED (0x1 << 8)
2527

@@ -36,6 +38,12 @@
3638
#define sys_reg(op0, op1, crn, crm, op2) \
3739
((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
3840

41+
#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
42+
#define SCTLR_EL1_SPAN (1 << 23)
43+
44+
#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
45+
(!!x)<<8 | 0x1f)
46+
3947
#ifdef __ASSEMBLY__
4048

4149
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30

arch/arm64/include/asm/uaccess.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,10 @@
2424
#include <linux/string.h>
2525
#include <linux/thread_info.h>
2626

27+
#include <asm/alternative.h>
28+
#include <asm/cpufeature.h>
2729
#include <asm/ptrace.h>
30+
#include <asm/sysreg.h>
2831
#include <asm/errno.h>
2932
#include <asm/memory.h>
3033
#include <asm/compiler.h>
@@ -131,6 +134,8 @@ static inline void set_fs(mm_segment_t fs)
131134
do { \
132135
unsigned long __gu_val; \
133136
__chk_user_ptr(ptr); \
137+
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
138+
CONFIG_ARM64_PAN)); \
134139
switch (sizeof(*(ptr))) { \
135140
case 1: \
136141
__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err)); \
@@ -148,6 +153,8 @@ do { \
148153
BUILD_BUG(); \
149154
} \
150155
(x) = (__force __typeof__(*(ptr)))__gu_val; \
156+
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
157+
CONFIG_ARM64_PAN)); \
151158
} while (0)
152159

153160
#define __get_user(x, ptr) \
@@ -194,6 +201,8 @@ do { \
194201
do { \
195202
__typeof__(*(ptr)) __pu_val = (x); \
196203
__chk_user_ptr(ptr); \
204+
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
205+
CONFIG_ARM64_PAN)); \
197206
switch (sizeof(*(ptr))) { \
198207
case 1: \
199208
__put_user_asm("strb", "%w", __pu_val, (ptr), (err)); \
@@ -210,6 +219,8 @@ do { \
210219
default: \
211220
BUILD_BUG(); \
212221
} \
222+
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
223+
CONFIG_ARM64_PAN)); \
213224
} while (0)
214225

215226
#define __put_user(x, ptr) \

arch/arm64/include/uapi/asm/ptrace.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@
4444
#define PSR_I_BIT 0x00000080
4545
#define PSR_A_BIT 0x00000100
4646
#define PSR_D_BIT 0x00000200
47+
#define PSR_PAN_BIT 0x00400000
4748
#define PSR_Q_BIT 0x08000000
4849
#define PSR_V_BIT 0x10000000
4950
#define PSR_C_BIT 0x20000000

arch/arm64/kernel/armv8_deprecated.c

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@
1414
#include <linux/slab.h>
1515
#include <linux/sysctl.h>
1616

17+
#include <asm/alternative.h>
18+
#include <asm/cpufeature.h>
1719
#include <asm/insn.h>
1820
#include <asm/opcodes.h>
1921
#include <asm/sysreg.h>
@@ -280,6 +282,8 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
280282
*/
281283
#define __user_swpX_asm(data, addr, res, temp, B) \
282284
__asm__ __volatile__( \
285+
ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
286+
CONFIG_ARM64_PAN) \
283287
" mov %w2, %w1\n" \
284288
"0: ldxr"B" %w1, [%3]\n" \
285289
"1: stxr"B" %w0, %w2, [%3]\n" \
@@ -295,7 +299,9 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
295299
" .align 3\n" \
296300
" .quad 0b, 3b\n" \
297301
" .quad 1b, 3b\n" \
298-
" .popsection" \
302+
" .popsection\n" \
303+
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
304+
CONFIG_ARM64_PAN) \
299305
: "=&r" (res), "+r" (data), "=&r" (temp) \
300306
: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
301307
: "memory")

arch/arm64/kernel/cpufeature.c

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
#include <linux/types.h>
2222
#include <asm/cpu.h>
2323
#include <asm/cpufeature.h>
24+
#include <asm/processor.h>
2425

2526
static bool
2627
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
@@ -39,6 +40,15 @@ has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
3940
return feature_matches(val, entry);
4041
}
4142

43+
static bool __maybe_unused
44+
has_id_aa64mmfr1_feature(const struct arm64_cpu_capabilities *entry)
45+
{
46+
u64 val;
47+
48+
val = read_cpuid(id_aa64mmfr1_el1);
49+
return feature_matches(val, entry);
50+
}
51+
4252
static const struct arm64_cpu_capabilities arm64_features[] = {
4353
{
4454
.desc = "GIC system register CPU interface",
@@ -47,6 +57,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
4757
.field_pos = 24,
4858
.min_field_value = 1,
4959
},
60+
#ifdef CONFIG_ARM64_PAN
61+
{
62+
.desc = "Privileged Access Never",
63+
.capability = ARM64_HAS_PAN,
64+
.matches = has_id_aa64mmfr1_feature,
65+
.field_pos = 20,
66+
.min_field_value = 1,
67+
.enable = cpu_enable_pan,
68+
},
69+
#endif /* CONFIG_ARM64_PAN */
5070
{},
5171
};
5272

arch/arm64/lib/clear_user.S

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,11 @@
1616
* along with this program. If not, see <http://www.gnu.org/licenses/>.
1717
*/
1818
#include <linux/linkage.h>
19+
20+
#include <asm/alternative.h>
1921
#include <asm/assembler.h>
22+
#include <asm/cpufeature.h>
23+
#include <asm/sysreg.h>
2024

2125
.text
2226

@@ -29,6 +33,8 @@
2933
* Alignment fixed up by hardware.
3034
*/
3135
ENTRY(__clear_user)
36+
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
37+
CONFIG_ARM64_PAN)
3238
mov x2, x1 // save the size for fixup return
3339
subs x1, x1, #8
3440
b.mi 2f
@@ -48,6 +54,8 @@ USER(9f, strh wzr, [x0], #2 )
4854
b.mi 5f
4955
USER(9f, strb wzr, [x0] )
5056
5: mov x0, #0
57+
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
58+
CONFIG_ARM64_PAN)
5159
ret
5260
ENDPROC(__clear_user)
5361

arch/arm64/lib/copy_from_user.S

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,11 @@
1515
*/
1616

1717
#include <linux/linkage.h>
18+
19+
#include <asm/alternative.h>
1820
#include <asm/assembler.h>
21+
#include <asm/cpufeature.h>
22+
#include <asm/sysreg.h>
1923

2024
/*
2125
* Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -28,6 +32,8 @@
2832
* x0 - bytes not copied
2933
*/
3034
ENTRY(__copy_from_user)
35+
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
36+
CONFIG_ARM64_PAN)
3137
add x5, x1, x2 // upper user buffer boundary
3238
subs x2, x2, #16
3339
b.mi 1f
@@ -56,6 +62,8 @@ USER(9f, ldrh w3, [x1], #2 )
5662
USER(9f, ldrb w3, [x1] )
5763
strb w3, [x0]
5864
5: mov x0, #0
65+
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
66+
CONFIG_ARM64_PAN)
5967
ret
6068
ENDPROC(__copy_from_user)
6169

arch/arm64/lib/copy_in_user.S

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,11 @@
1717
*/
1818

1919
#include <linux/linkage.h>
20+
21+
#include <asm/alternative.h>
2022
#include <asm/assembler.h>
23+
#include <asm/cpufeature.h>
24+
#include <asm/sysreg.h>
2125

2226
/*
2327
* Copy from user space to user space (alignment handled by the hardware)
@@ -30,6 +34,8 @@
3034
* x0 - bytes not copied
3135
*/
3236
ENTRY(__copy_in_user)
37+
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
38+
CONFIG_ARM64_PAN)
3339
add x5, x0, x2 // upper user buffer boundary
3440
subs x2, x2, #16
3541
b.mi 1f
@@ -58,6 +64,8 @@ USER(9f, strh w3, [x0], #2 )
5864
USER(9f, ldrb w3, [x1] )
5965
USER(9f, strb w3, [x0] )
6066
5: mov x0, #0
67+
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
68+
CONFIG_ARM64_PAN)
6169
ret
6270
ENDPROC(__copy_in_user)
6371

arch/arm64/lib/copy_to_user.S

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,11 @@
1515
*/
1616

1717
#include <linux/linkage.h>
18+
19+
#include <asm/alternative.h>
1820
#include <asm/assembler.h>
21+
#include <asm/cpufeature.h>
22+
#include <asm/sysreg.h>
1923

2024
/*
2125
* Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -28,6 +32,8 @@
2832
* x0 - bytes not copied
2933
*/
3034
ENTRY(__copy_to_user)
35+
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
36+
CONFIG_ARM64_PAN)
3137
add x5, x0, x2 // upper user buffer boundary
3238
subs x2, x2, #16
3339
b.mi 1f
@@ -56,6 +62,8 @@ USER(9f, strh w3, [x0], #2 )
5662
ldrb w3, [x1]
5763
USER(9f, strb w3, [x0] )
5864
5: mov x0, #0
65+
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
66+
CONFIG_ARM64_PAN)
5967
ret
6068
ENDPROC(__copy_to_user)
6169

arch/arm64/mm/fault.c

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,9 +30,11 @@
3030
#include <linux/highmem.h>
3131
#include <linux/perf_event.h>
3232

33+
#include <asm/cpufeature.h>
3334
#include <asm/exception.h>
3435
#include <asm/debug-monitors.h>
3536
#include <asm/esr.h>
37+
#include <asm/sysreg.h>
3638
#include <asm/system_misc.h>
3739
#include <asm/pgtable.h>
3840
#include <asm/tlbflush.h>
@@ -223,6 +225,13 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
223225
mm_flags |= FAULT_FLAG_WRITE;
224226
}
225227

228+
/*
229+
* PAN bit set implies the fault happened in kernel space, but not
230+
* in the arch's user access functions.
231+
*/
232+
if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
233+
goto no_context;
234+
226235
/*
227236
* As per x86, we may deadlock here. However, since the kernel only
228237
* validly references user space from well defined areas of the code,
@@ -536,3 +545,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
536545

537546
return 0;
538547
}
548+
549+
#ifdef CONFIG_ARM64_PAN
550+
void cpu_enable_pan(void)
551+
{
552+
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
553+
}
554+
#endif /* CONFIG_ARM64_PAN */

0 commit comments

Comments (0)