
Commit d4647f0

arm64: Rewrite Spectre-v2 mitigation code
The Spectre-v2 mitigation code is pretty unwieldy and hard to maintain. This is largely due to it being written hastily, without much clue as to how things would pan out, and also because it ends up mixing policy and state in such a way that it is very difficult to figure out what's going on.

Rewrite the Spectre-v2 mitigation so that it clearly separates state from policy and follows a more structured approach to handling the mitigation.

Signed-off-by: Will Deacon <[email protected]>
1 parent 455697a commit d4647f0
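To make the "state vs. policy" split concrete: after this rewrite, consumers no longer derive the answer from a pair of global booleans; they read a single mitigation state. The snippet below is only an illustration of that interface, not code from this commit. The enum and arm64_get_spectre_v2_state() are declared in the new asm/spectre.h further down, while spectre_v2_report() is a hypothetical caller whose strings are taken from the cpu_show_spectre_v2() implementation removed in cpu_errata.c.

/*
 * Illustrative sketch only: a consumer of the new interface switches on
 * the reported state. spectre_v2_report() is hypothetical, not part of
 * this commit.
 */
#include <asm/spectre.h>

static const char *spectre_v2_report(void)
{
	switch (arm64_get_spectre_v2_state()) {
	case SPECTRE_UNAFFECTED:
		return "Not affected";
	case SPECTRE_MITIGATED:
		return "Mitigation: Branch predictor hardening";
	case SPECTRE_VULNERABLE:
	default:
		return "Vulnerable";
	}
}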

File tree

8 files changed: +327, -264 lines

arch/arm64/include/asm/cpufeature.h

Lines changed: 0 additions & 6 deletions
@@ -698,12 +698,6 @@ static inline bool system_supports_tlb_range(void)
 		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
 }
 
-#define ARM64_BP_HARDEN_UNKNOWN		-1
-#define ARM64_BP_HARDEN_WA_NEEDED	0
-#define ARM64_BP_HARDEN_NOT_REQUIRED	1
-
-int get_spectre_v2_workaround_state(void);
-
 #define ARM64_SSBD_UNKNOWN		-1
 #define ARM64_SSBD_FORCE_DISABLE	0
 #define ARM64_SSBD_KERNEL		1

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 17 deletions
@@ -631,23 +631,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
-#define KVM_BP_HARDEN_UNKNOWN		-1
-#define KVM_BP_HARDEN_WA_NEEDED		0
-#define KVM_BP_HARDEN_NOT_REQUIRED	1
-
-static inline int kvm_arm_harden_branch_predictor(void)
-{
-	switch (get_spectre_v2_workaround_state()) {
-	case ARM64_BP_HARDEN_WA_NEEDED:
-		return KVM_BP_HARDEN_WA_NEEDED;
-	case ARM64_BP_HARDEN_NOT_REQUIRED:
-		return KVM_BP_HARDEN_NOT_REQUIRED;
-	case ARM64_BP_HARDEN_UNKNOWN:
-	default:
-		return KVM_BP_HARDEN_UNKNOWN;
-	}
-}
-
 #define KVM_SSBD_UNKNOWN		-1
 #define KVM_SSBD_FORCE_DISABLE		0
 #define KVM_SSBD_KERNEL			1

arch/arm64/include/asm/processor.h

Lines changed: 1 addition & 0 deletions
@@ -38,6 +38,7 @@
 #include <asm/pgtable-hwdef.h>
 #include <asm/pointer_auth.h>
 #include <asm/ptrace.h>
+#include <asm/spectre.h>
 #include <asm/types.h>
 
 /*

arch/arm64/include/asm/spectre.h

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interface for managing mitigations for Spectre vulnerabilities.
+ *
+ * Copyright (C) 2020 Google LLC
+ * Author: Will Deacon <[email protected]>
+ */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+#include <asm/cpufeature.h>
+
+/* Watch out, ordering is important here. */
+enum mitigation_state {
+	SPECTRE_UNAFFECTED,
+	SPECTRE_MITIGATED,
+	SPECTRE_VULNERABLE,
+};
+
+struct task_struct;
+
+enum mitigation_state arm64_get_spectre_v2_state(void);
+bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
+void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+
+#endif	/* __ASM_SPECTRE_H */

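The "ordering is important" comment in the new header is what lets the states be compared numerically: SPECTRE_UNAFFECTED < SPECTRE_MITIGATED < SPECTRE_VULNERABLE, so a larger value is always a worse outcome. That makes the system-wide state simply the worst state seen across all CPUs, which is the same rule the removed code expressed with its "only mitigated if all cores are ok" booleans. A minimal sketch of that aggregation follows; only the enum and the accessor's declaration come from this commit, and the global variable and update helper here are hypothetical.

/*
 * Minimal sketch, assuming the enum ordering above. The global and the
 * update helper are hypothetical, not code added by this commit.
 */
#include <asm/spectre.h>

static enum mitigation_state spectre_v2_state = SPECTRE_UNAFFECTED;

static void hypothetical_update_spectre_v2_state(enum mitigation_state cpu_state)
{
	/* The machine is only as safe as its most vulnerable CPU. */
	if (cpu_state > spectre_v2_state)
		spectre_v2_state = cpu_state;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}
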
arch/arm64/kernel/cpu_errata.c

Lines changed: 3 additions & 233 deletions
@@ -106,145 +106,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
 	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
 
-atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-#ifdef CONFIG_RANDOMIZE_BASE
-static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
-				const char *hyp_vecs_end)
-{
-	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
-	int i;
-
-	for (i = 0; i < SZ_2K; i += 0x80)
-		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
-
-	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
-}
-
-static void install_bp_hardening_cb(bp_hardening_cb_t fn)
-{
-	static DEFINE_RAW_SPINLOCK(bp_lock);
-	int cpu, slot = -1;
-	const char *hyp_vecs_start = __smccc_workaround_1_smc;
-	const char *hyp_vecs_end = __smccc_workaround_1_smc +
-				   __SMCCC_WORKAROUND_1_SMC_SZ;
-
-	/*
-	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
-	 * we're a guest. Skip the hyp-vectors work.
-	 */
-	if (!is_hyp_mode_available()) {
-		__this_cpu_write(bp_hardening_data.fn, fn);
-		return;
-	}
-
-	raw_spin_lock(&bp_lock);
-	for_each_possible_cpu(cpu) {
-		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
-			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
-			break;
-		}
-	}
-
-	if (slot == -1) {
-		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
-		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
-	}
-
-	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
-	__this_cpu_write(bp_hardening_data.fn, fn);
-	raw_spin_unlock(&bp_lock);
-}
-#else
-static void install_bp_hardening_cb(bp_hardening_cb_t fn)
-{
-	__this_cpu_write(bp_hardening_data.fn, fn);
-}
-#endif	/* CONFIG_RANDOMIZE_BASE */
-
-#include <linux/arm-smccc.h>
-
-static void __maybe_unused call_smc_arch_workaround_1(void)
-{
-	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void call_hvc_arch_workaround_1(void)
-{
-	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void qcom_link_stack_sanitization(void)
-{
-	u64 tmp;
-
-	asm volatile("mov	%0, x30		\n"
-		     ".rept	16		\n"
-		     "bl	. + 4		\n"
-		     ".endr			\n"
-		     "mov	x30, %0		\n"
-		     : "=&r" (tmp));
-}
-
-static bool __nospectre_v2;
-static int __init parse_nospectre_v2(char *str)
-{
-	__nospectre_v2 = true;
-	return 0;
-}
-early_param("nospectre_v2", parse_nospectre_v2);
-
-/*
- * -1: No workaround
- * 0: No workaround required
- * 1: Workaround installed
- */
-static int detect_harden_bp_fw(void)
-{
-	bp_hardening_cb_t cb;
-	struct arm_smccc_res res;
-	u32 midr = read_cpuid_id();
-
-	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-
-	switch ((int)res.a0) {
-	case 1:
-		/* Firmware says we're just fine */
-		return 0;
-	case 0:
-		break;
-	default:
-		return -1;
-	}
-
-	switch (arm_smccc_1_1_get_conduit()) {
-	case SMCCC_CONDUIT_HVC:
-		cb = call_hvc_arch_workaround_1;
-		break;
-
-	case SMCCC_CONDUIT_SMC:
-		cb = call_smc_arch_workaround_1;
-		break;
-
-	default:
-		return -1;
-	}
-
-	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
-	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
-		cb = qcom_link_stack_sanitization;
-
-	install_bp_hardening_cb(cb);
-	return 1;
-}
-
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
@@ -508,83 +369,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
 	CAP_MIDR_RANGE_LIST(midr_list)
 
-/* Track overall mitigation state. We are only mitigated if all cores are ok */
-static bool __hardenbp_enab = true;
-static bool __spectrev2_safe = true;
-
-int get_spectre_v2_workaround_state(void)
-{
-	if (__spectrev2_safe)
-		return ARM64_BP_HARDEN_NOT_REQUIRED;
-
-	if (!__hardenbp_enab)
-		return ARM64_BP_HARDEN_UNKNOWN;
-
-	return ARM64_BP_HARDEN_WA_NEEDED;
-}
-
-/*
- * List of CPUs that do not need any Spectre-v2 mitigation at all.
- */
-static const struct midr_range spectre_v2_safe_list[] = {
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
-	MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
-	{ /* sentinel */ }
-};
-
-/*
- * Track overall bp hardening for all heterogeneous cores in the machine.
- * We are only considered "safe" if all booted cores are known safe.
- */
-static bool __maybe_unused
-check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
-{
-	int need_wa;
-
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-	/* If the CPU has CSV2 set, we're safe */
-	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
-						 ID_AA64PFR0_CSV2_SHIFT))
-		return false;
-
-	/* Alternatively, we have a list of unaffected CPUs */
-	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
-		return false;
-
-	/* Fallback to firmware detection */
-	need_wa = detect_harden_bp_fw();
-	if (!need_wa)
-		return false;
-
-	__spectrev2_safe = false;
-
-	/* forced off */
-	if (__nospectre_v2 || cpu_mitigations_off()) {
-		pr_info_once("spectrev2 mitigation disabled by command line option\n");
-		__hardenbp_enab = false;
-		return false;
-	}
-
-	if (need_wa < 0) {
-		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
-		__hardenbp_enab = false;
-	}
-
-	return (need_wa > 0);
-}
-
-static void
-cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap)
-{
-	cap->matches(cap, SCOPE_LOCAL_CPU);
-}
-
 static const __maybe_unused struct midr_range tx2_family_cpus[] = {
 	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
@@ -876,11 +660,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	},
 #endif
 	{
-		.desc = "Branch predictor hardening",
+		.desc = "Spectre-v2",
 		.capability = ARM64_SPECTRE_V2,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-		.matches = check_branch_predictor,
-		.cpu_enable = cpu_enable_branch_predictor_hardening,
+		.matches = has_spectre_v2,
+		.cpu_enable = spectre_v2_enable_mitigation,
 	},
 #ifdef CONFIG_RANDOMIZE_BASE
 	{
@@ -949,20 +733,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	}
 };
 
-ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	switch (get_spectre_v2_workaround_state()) {
-	case ARM64_BP_HARDEN_NOT_REQUIRED:
-		return sprintf(buf, "Not affected\n");
-	case ARM64_BP_HARDEN_WA_NEEDED:
-		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
-	case ARM64_BP_HARDEN_UNKNOWN:
-	default:
-		return sprintf(buf, "Vulnerable\n");
-	}
-}
-
 ssize_t cpu_show_spec_store_bypass(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {

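The errata table entry above now points at has_spectre_v2() and spectre_v2_enable_mitigation(), whose definitions live in a part of the commit not shown in this excerpt. Going by the detection order of the removed check_branch_predictor() (the CSV2 field in ID_AA64PFR0_EL1, then the MIDR safe list, then the firmware ARCH_WORKAROUND_1 probe), a rough sketch of what the matches hook plausibly looks like follows; the helper names are invented for illustration and this is not the commit's actual code.

/*
 * Hypothetical sketch of the new matches hook; the helpers below are
 * invented names, not code from this commit. The detection order mirrors
 * the removed check_branch_predictor().
 */
bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * CSV2 in hardware, the MIDR safe list, or firmware reporting that
	 * ARCH_WORKAROUND_1 is not required all mean this CPU is unaffected.
	 */
	if (this_cpu_has_csv2() ||
	    cpu_is_in_spectre_v2_safe_list() ||
	    firmware_says_wa1_not_required())
		return false;

	return true;
}

The key difference from the old code is that policy (nospectre_v2, mitigations=off) is no longer folded into the same booleans as the detection result: deciding whether a CPU is affected and deciding what to do about it are now separate steps, with spectre_v2_enable_mitigation() handling the latter.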