Skip to content

Commit e403e85

Browse files
dianders
authored and ctmarinas committed
arm64: errata: Assume that unknown CPUs _are_ vulnerable to Spectre BHB
The code for detecting CPUs that are vulnerable to Spectre BHB was based on a hardcoded list of CPU IDs that were known to be affected. Unfortunately, the list mostly only contained the IDs of standard ARM cores. The IDs for many cores that are minor variants of the standard ARM cores (like many Qualcomm Kyro CPUs) weren't listed. This led the code to assume that those variants were not affected. Flip the code on its head and instead assume that a core is vulnerable if it doesn't have CSV2_3 but is unrecognized as being safe. This involves creating a "Spectre BHB safe" list. As of right now, the only CPU IDs added to the "Spectre BHB safe" list are ARM Cortex A35, A53, A55, A510, and A520. This list was created by looking for cores that weren't listed in ARM's list [1] as per review feedback on v2 of this patch [2]. Additionally Brahma A53 is added as per mailing list feedback [3]. NOTE: this patch will not actually _mitigate_ anyone, it will simply cause them to report themselves as vulnerable. If any cores in the system are reported as vulnerable but not mitigated then the whole system will be reported as vulnerable though the system will attempt to mitigate with the information it has about the known cores. [1] https://developer.arm.com/Arm%20Security%20Center/Spectre-BHB [2] https://lore.kernel.org/r/20241219175128.GA25477@willie-the-truck [3] https://lore.kernel.org/r/[email protected] Fixes: 558c303 ("arm64: Mitigate spectre style branch history side channels") Cc: [email protected] Reviewed-by: Julius Werner <[email protected]> Signed-off-by: Douglas Anderson <[email protected]> Link: https://lore.kernel.org/r/20250107120555.v4.2.I2040fa004dafe196243f67ebcc647cbedbb516e6@changeid Signed-off-by: Catalin Marinas <[email protected]>
1 parent ed1ce84 commit e403e85

File tree

2 files changed

+102
-102
lines changed

2 files changed

+102
-102
lines changed

arch/arm64/include/asm/spectre.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -97,7 +97,6 @@ enum mitigation_state arm64_get_meltdown_state(void);
9797

9898
enum mitigation_state arm64_get_spectre_bhb_state(void);
9999
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
100-
u8 spectre_bhb_loop_affected(int scope);
101100
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
102101
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
103102

arch/arm64/kernel/proton-pack.c

Lines changed: 102 additions & 101 deletions
Original file line number | Diff line number | Diff line change
@@ -845,53 +845,70 @@ static unsigned long system_bhb_mitigations;
845845
* This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
846846
* SCOPE_SYSTEM call will give the right answer.
847847
*/
848-
u8 spectre_bhb_loop_affected(int scope)
848+
static bool is_spectre_bhb_safe(int scope)
849+
{
850+
static const struct midr_range spectre_bhb_safe_list[] = {
851+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
852+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
853+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
854+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
855+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
856+
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
857+
{},
858+
};
859+
static bool all_safe = true;
860+
861+
if (scope != SCOPE_LOCAL_CPU)
862+
return all_safe;
863+
864+
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
865+
return true;
866+
867+
all_safe = false;
868+
869+
return false;
870+
}
871+
872+
static u8 spectre_bhb_loop_affected(void)
849873
{
850874
u8 k = 0;
851-
static u8 max_bhb_k;
852-
853-
if (scope == SCOPE_LOCAL_CPU) {
854-
static const struct midr_range spectre_bhb_k32_list[] = {
855-
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
856-
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
857-
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
858-
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
859-
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
860-
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
861-
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
862-
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
863-
{},
864-
};
865-
static const struct midr_range spectre_bhb_k24_list[] = {
866-
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
867-
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
868-
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
869-
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
870-
{},
871-
};
872-
static const struct midr_range spectre_bhb_k11_list[] = {
873-
MIDR_ALL_VERSIONS(MIDR_AMPERE1),
874-
{},
875-
};
876-
static const struct midr_range spectre_bhb_k8_list[] = {
877-
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
878-
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
879-
{},
880-
};
881-
882-
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
883-
k = 32;
884-
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
885-
k = 24;
886-
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
887-
k = 11;
888-
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
889-
k = 8;
890-
891-
max_bhb_k = max(max_bhb_k, k);
892-
} else {
893-
k = max_bhb_k;
894-
}
875+
876+
static const struct midr_range spectre_bhb_k32_list[] = {
877+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
878+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
879+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
880+
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
881+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
882+
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
883+
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
884+
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
885+
{},
886+
};
887+
static const struct midr_range spectre_bhb_k24_list[] = {
888+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
889+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
890+
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
891+
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
892+
{},
893+
};
894+
static const struct midr_range spectre_bhb_k11_list[] = {
895+
MIDR_ALL_VERSIONS(MIDR_AMPERE1),
896+
{},
897+
};
898+
static const struct midr_range spectre_bhb_k8_list[] = {
899+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
900+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
901+
{},
902+
};
903+
904+
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
905+
k = 32;
906+
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
907+
k = 24;
908+
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
909+
k = 11;
910+
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
911+
k = 8;
895912

896913
return k;
897914
}
@@ -917,29 +934,13 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
917934
}
918935
}
919936

920-
static bool is_spectre_bhb_fw_affected(int scope)
937+
static bool has_spectre_bhb_fw_mitigation(void)
921938
{
922-
static bool system_affected;
923939
enum mitigation_state fw_state;
924940
bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
925-
static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
926-
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
927-
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
928-
{},
929-
};
930-
bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
931-
spectre_bhb_firmware_mitigated_list);
932-
933-
if (scope != SCOPE_LOCAL_CPU)
934-
return system_affected;
935941

936942
fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
937-
if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
938-
system_affected = true;
939-
return true;
940-
}
941-
942-
return false;
943+
return has_smccc && fw_state == SPECTRE_MITIGATED;
943944
}
944945

945946
static bool supports_ecbhb(int scope)
@@ -955,6 +956,8 @@ static bool supports_ecbhb(int scope)
955956
ID_AA64MMFR1_EL1_ECBHB_SHIFT);
956957
}
957958

959+
static u8 max_bhb_k;
960+
958961
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
959962
int scope)
960963
{
@@ -963,16 +966,18 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
963966
if (supports_csv2p3(scope))
964967
return false;
965968

966-
if (supports_clearbhb(scope))
967-
return true;
968-
969-
if (spectre_bhb_loop_affected(scope))
970-
return true;
969+
if (is_spectre_bhb_safe(scope))
970+
return false;
971971

972-
if (is_spectre_bhb_fw_affected(scope))
973-
return true;
972+
/*
973+
* At this point the core isn't known to be "safe" so we're going to
974+
* assume it's vulnerable. We still need to update `max_bhb_k` though,
975+
* but only if we aren't mitigating with clearbhb though.
976+
*/
977+
if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
978+
max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());
974979

975-
return false;
980+
return true;
976981
}
977982

978983
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
@@ -1003,7 +1008,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param);
10031008
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
10041009
{
10051010
bp_hardening_cb_t cpu_cb;
1006-
enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
1011+
enum mitigation_state state = SPECTRE_VULNERABLE;
10071012
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
10081013

10091014
if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
@@ -1029,7 +1034,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
10291034
this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
10301035
state = SPECTRE_MITIGATED;
10311036
set_bit(BHB_INSN, &system_bhb_mitigations);
1032-
} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
1037+
} else if (spectre_bhb_loop_affected()) {
10331038
/*
10341039
* Ensure KVM uses the indirect vector which will have the
10351040
* branchy-loop added. A57/A72-r0 will already have selected
@@ -1042,32 +1047,29 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
10421047
this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
10431048
state = SPECTRE_MITIGATED;
10441049
set_bit(BHB_LOOP, &system_bhb_mitigations);
1045-
} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
1046-
fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
1047-
if (fw_state == SPECTRE_MITIGATED) {
1048-
/*
1049-
* Ensure KVM uses one of the spectre bp_hardening
1050-
* vectors. The indirect vector doesn't include the EL3
1051-
* call, so needs upgrading to
1052-
* HYP_VECTOR_SPECTRE_INDIRECT.
1053-
*/
1054-
if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
1055-
data->slot += 1;
1056-
1057-
this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
1058-
1059-
/*
1060-
* The WA3 call in the vectors supersedes the WA1 call
1061-
* made during context-switch. Uninstall any firmware
1062-
* bp_hardening callback.
1063-
*/
1064-
cpu_cb = spectre_v2_get_sw_mitigation_cb();
1065-
if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
1066-
__this_cpu_write(bp_hardening_data.fn, NULL);
1067-
1068-
state = SPECTRE_MITIGATED;
1069-
set_bit(BHB_FW, &system_bhb_mitigations);
1070-
}
1050+
} else if (has_spectre_bhb_fw_mitigation()) {
1051+
/*
1052+
* Ensure KVM uses one of the spectre bp_hardening
1053+
* vectors. The indirect vector doesn't include the EL3
1054+
* call, so needs upgrading to
1055+
* HYP_VECTOR_SPECTRE_INDIRECT.
1056+
*/
1057+
if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
1058+
data->slot += 1;
1059+
1060+
this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
1061+
1062+
/*
1063+
* The WA3 call in the vectors supersedes the WA1 call
1064+
* made during context-switch. Uninstall any firmware
1065+
* bp_hardening callback.
1066+
*/
1067+
cpu_cb = spectre_v2_get_sw_mitigation_cb();
1068+
if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
1069+
__this_cpu_write(bp_hardening_data.fn, NULL);
1070+
1071+
state = SPECTRE_MITIGATED;
1072+
set_bit(BHB_FW, &system_bhb_mitigations);
10711073
}
10721074

10731075
update_mitigation_state(&spectre_bhb_state, state);
@@ -1101,7 +1103,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
11011103
{
11021104
u8 rd;
11031105
u32 insn;
1104-
u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
11051106

11061107
BUG_ON(nr_inst != 1); /* MOV -> MOV */
11071108

@@ -1110,7 +1111,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
11101111

11111112
insn = le32_to_cpu(*origptr);
11121113
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
1113-
insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
1114+
insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
11141115
AARCH64_INSN_VARIANT_64BIT,
11151116
AARCH64_INSN_MOVEWIDE_ZERO);
11161117
*updptr++ = cpu_to_le32(insn);

0 commit comments

Comments (0)