@@ -845,53 +845,70 @@ static unsigned long system_bhb_mitigations;
  * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
  * SCOPE_SYSTEM call will give the right answer.
  */
-u8 spectre_bhb_loop_affected(int scope)
+static bool is_spectre_bhb_safe(int scope)
+{
+	static const struct midr_range spectre_bhb_safe_list[] = {
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
+		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+		{},
+	};
+	static bool all_safe = true;
+
+	if (scope != SCOPE_LOCAL_CPU)
+		return all_safe;
+
+	if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
+		return true;
+
+	all_safe = false;
+
+	return false;
+}
+
+static u8 spectre_bhb_loop_affected(void)
 {
 	u8 k = 0;
-	static u8 max_bhb_k;
-
-	if (scope == SCOPE_LOCAL_CPU) {
-		static const struct midr_range spectre_bhb_k32_list[] = {
-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
-			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
-			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
-			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
-			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
-			{},
-		};
-		static const struct midr_range spectre_bhb_k24_list[] = {
-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
-			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
-			MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
-			{},
-		};
-		static const struct midr_range spectre_bhb_k11_list[] = {
-			MIDR_ALL_VERSIONS(MIDR_AMPERE1),
-			{},
-		};
-		static const struct midr_range spectre_bhb_k8_list[] = {
-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-			{},
-		};
-
-		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
-			k = 32;
-		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
-			k = 24;
-		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
-			k = 11;
-		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
-			k = 8;
-
-		max_bhb_k = max(max_bhb_k, k);
-	} else {
-		k = max_bhb_k;
-	}
+
+	static const struct midr_range spectre_bhb_k32_list[] = {
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+		{},
+	};
+	static const struct midr_range spectre_bhb_k24_list[] = {
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
+		{},
+	};
+	static const struct midr_range spectre_bhb_k11_list[] = {
+		MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+		{},
+	};
+	static const struct midr_range spectre_bhb_k8_list[] = {
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+		{},
+	};
+
+	if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+		k = 32;
+	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+		k = 24;
+	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+		k = 11;
+	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+		k = 8;
 
 	return k;
 }
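The refactor above splits the old dual-purpose lookup in two: is_spectre_bhb_safe() latches a system-wide all_safe flag as each CPU is probed, while spectre_bhb_loop_affected() now answers only for the calling CPU. The k value is the number of iterations of the "branchy loop" workaround needed to overwrite that core's branch history. Both helpers match MIDR_EL1 against a sentinel-terminated table; below is a minimal user-space model of that lookup (the mask, part numbers and helper names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIDR_IMPL_PART_MASK 0xff00fff0u	/* implementer + part number fields */

struct midr_model { uint32_t midr; };	/* models one MIDR_ALL_VERSIONS() entry */

static bool midr_in_list(uint32_t midr, const struct midr_model *list)
{
	/* walk until the zeroed sentinel, comparing the model fields only */
	for (; list->midr; list++)
		if ((midr & MIDR_IMPL_PART_MASK) == (list->midr & MIDR_IMPL_PART_MASK))
			return true;
	return false;
}

int main(void)
{
	static const struct midr_model safe_list[] = {
		{ 0x410fd030 },	/* Cortex-A53, any variant/revision */
		{ 0x410fd050 },	/* Cortex-A55 */
		{ 0 },		/* sentinel */
	};

	/* A53 r0p4 matches regardless of its revision nibble */
	printf("%d\n", midr_in_list(0x410fd034, safe_list));	/* prints 1 */
	return 0;
}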
@@ -917,29 +934,13 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
 	}
 }
 
-static bool is_spectre_bhb_fw_affected(int scope)
+static bool has_spectre_bhb_fw_mitigation(void)
 {
-	static bool system_affected;
 	enum mitigation_state fw_state;
 	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
-	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
-		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
-		{},
-	};
-	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
-					spectre_bhb_firmware_mitigated_list);
-
-	if (scope != SCOPE_LOCAL_CPU)
-		return system_affected;
 
 	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
-	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
-		system_affected = true;
-		return true;
-	}
-
-	return false;
+	return has_smccc && fw_state == SPECTRE_MITIGATED;
 }
 
 static bool supports_ecbhb(int scope)
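Here is_spectre_bhb_fw_affected() collapses into has_spectre_bhb_fw_mitigation(): the Cortex-A73/A75 special-case list disappears (under the new policy those cores are assumed vulnerable like any other unlisted core), the cached system_affected state goes away, and the helper now reports only whether firmware actually implements the workaround. For context, the probe it calls lives elsewhere in this file and is unchanged by this patch; recalled from the kernel source, it looks roughly like the sketch below, so treat the details as approximate:

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	struct arm_smccc_res res;

	/* ask EL3 whether ARM_SMCCC_ARCH_WORKAROUND_3 is implemented */
	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	switch ((int)res.a0) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		return SPECTRE_VULNERABLE;
	}
}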
@@ -955,6 +956,8 @@ static bool supports_ecbhb(int scope)
 				    ID_AA64MMFR1_EL1_ECBHB_SHIFT);
 }
 
+static u8 max_bhb_k;
+
 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
 			     int scope)
 {
@@ -963,16 +966,18 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
 	if (supports_csv2p3(scope))
 		return false;
 
-	if (supports_clearbhb(scope))
-		return true;
-
-	if (spectre_bhb_loop_affected(scope))
-		return true;
+	if (is_spectre_bhb_safe(scope))
+		return false;
 
-	if (is_spectre_bhb_fw_affected(scope))
-		return true;
+	/*
+	 * At this point the core isn't known to be "safe", so assume it's
+	 * vulnerable. We still need to update `max_bhb_k`, but only if we
+	 * aren't mitigating with clearbhb.
+	 */
+	if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
+		max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());
 
-	return false;
+	return true;
 }
 
 static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
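This hunk is the heart of the change. Previously the function defaulted to "not affected" and returned true only for cores on a known-bad list; now it defaults to "affected" and returns false only for cores that are provably safe (CSV2_3, or the explicit safe list). The file-scope max_bhb_k added two hunks up accumulates the largest loop count any local CPU needs, skipping CPUs that will use the single-instruction clearbhb mitigation instead. A standalone model of the new policy (hypothetical helper name):

#include <stdbool.h>

/* Old policy: vulnerable only if on a known-bad list (default: safe).
 * New policy: safe only if provably so (default: vulnerable). */
static bool bhb_affected(bool csv2p3, bool on_safe_list)
{
	if (csv2p3)		/* hardware declares branch history safe */
		return false;
	if (on_safe_list)	/* core explicitly vetted as safe */
		return false;
	return true;		/* unknown cores are assumed vulnerable */
}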
@@ -1003,7 +1008,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param);
 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
 {
 	bp_hardening_cb_t cpu_cb;
-	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
+	enum mitigation_state state = SPECTRE_VULNERABLE;
 	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
 
 	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
@@ -1029,7 +1034,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
 		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
 		state = SPECTRE_MITIGATED;
 		set_bit(BHB_INSN, &system_bhb_mitigations);
-	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
+	} else if (spectre_bhb_loop_affected()) {
 		/*
 		 * Ensure KVM uses the indirect vector which will have the
 		 * branchy-loop added. A57/A72-r0 will already have selected
@@ -1042,32 +1047,29 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
 		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
 		state = SPECTRE_MITIGATED;
 		set_bit(BHB_LOOP, &system_bhb_mitigations);
-	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
-		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
-		if (fw_state == SPECTRE_MITIGATED) {
-			/*
-			 * Ensure KVM uses one of the spectre bp_hardening
-			 * vectors. The indirect vector doesn't include the EL3
-			 * call, so needs upgrading to
-			 * HYP_VECTOR_SPECTRE_INDIRECT.
-			 */
-			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
-				data->slot += 1;
-
-			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
-
-			/*
-			 * The WA3 call in the vectors supersedes the WA1 call
-			 * made during context-switch. Uninstall any firmware
-			 * bp_hardening callback.
-			 */
-			cpu_cb = spectre_v2_get_sw_mitigation_cb();
-			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
-				__this_cpu_write(bp_hardening_data.fn, NULL);
-
-			state = SPECTRE_MITIGATED;
-			set_bit(BHB_FW, &system_bhb_mitigations);
-		}
+	} else if (has_spectre_bhb_fw_mitigation()) {
+		/*
+		 * Ensure KVM uses one of the spectre bp_hardening
+		 * vectors. The indirect vector doesn't include the EL3
+		 * call, so needs upgrading to
+		 * HYP_VECTOR_SPECTRE_INDIRECT.
+		 */
+		if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
+			data->slot += 1;
+
+		this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
+
+		/*
+		 * The WA3 call in the vectors supersedes the WA1 call
+		 * made during context-switch. Uninstall any firmware
+		 * bp_hardening callback.
+		 */
+		cpu_cb = spectre_v2_get_sw_mitigation_cb();
+		if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
+			__this_cpu_write(bp_hardening_data.fn, NULL);
+
+		state = SPECTRE_MITIGATED;
+		set_bit(BHB_FW, &system_bhb_mitigations);
 	}
 
 	update_mitigation_state(&spectre_bhb_state, state);
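With the fw_state test folded into has_spectre_bhb_fw_mitigation(), this branch loses one level of nesting; the body is otherwise a pure re-indentation. The mitigation selection order is unchanged, and condenses to the following model (names here are illustrative, not kernel identifiers):

#include <stdbool.h>
#include <stdint.h>

enum bhb_vec { VEC_CLEAR_INSN, VEC_LOOP, VEC_FW, VEC_NONE };

static enum bhb_vec pick_bhb_vector(bool has_clearbhb, uint8_t loop_k,
				    bool fw_wa3)
{
	if (has_clearbhb)
		return VEC_CLEAR_INSN;	/* single-instruction BHB clear */
	if (loop_k)
		return VEC_LOOP;	/* branchy loop, loop_k iterations */
	if (fw_wa3)
		return VEC_FW;		/* SMCCC ARCH_WORKAROUND_3 call */
	return VEC_NONE;		/* no mitigation: left vulnerable */
}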
@@ -1101,7 +1103,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
 {
 	u8 rd;
 	u32 insn;
-	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
 
 	BUG_ON(nr_inst != 1); /* MOV -> MOV */
@@ -1110,7 +1111,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
 
 	insn = le32_to_cpu(*origptr);
 	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
-	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
+	insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
 					 AARCH64_INSN_VARIANT_64BIT,
 					 AARCH64_INSN_MOVEWIDE_ZERO);
 	*updptr++ = cpu_to_le32(insn);