@@ -106,145 +106,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
 	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
 
-atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-#ifdef CONFIG_RANDOMIZE_BASE
-static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
-				const char *hyp_vecs_end)
-{
-	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
-	int i;
-
-	for (i = 0; i < SZ_2K; i += 0x80)
-		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
-
-	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
-}
-
-static void install_bp_hardening_cb(bp_hardening_cb_t fn)
-{
-	static DEFINE_RAW_SPINLOCK(bp_lock);
-	int cpu, slot = -1;
-	const char *hyp_vecs_start = __smccc_workaround_1_smc;
-	const char *hyp_vecs_end = __smccc_workaround_1_smc +
-				   __SMCCC_WORKAROUND_1_SMC_SZ;
-
-	/*
-	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
-	 * we're a guest. Skip the hyp-vectors work.
-	 */
-	if (!is_hyp_mode_available()) {
-		__this_cpu_write(bp_hardening_data.fn, fn);
-		return;
-	}
-
-	raw_spin_lock(&bp_lock);
-	for_each_possible_cpu(cpu) {
-		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
-			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
-			break;
-		}
-	}
-
-	if (slot == -1) {
-		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
-		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
-	}
-
-	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
-	__this_cpu_write(bp_hardening_data.fn, fn);
-	raw_spin_unlock(&bp_lock);
-}
-#else
-static void install_bp_hardening_cb(bp_hardening_cb_t fn)
-{
-	__this_cpu_write(bp_hardening_data.fn, fn);
-}
-#endif /* CONFIG_RANDOMIZE_BASE */
-
-#include <linux/arm-smccc.h>
-
-static void __maybe_unused call_smc_arch_workaround_1(void)
-{
-	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void call_hvc_arch_workaround_1(void)
-{
-	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void qcom_link_stack_sanitization(void)
-{
-	u64 tmp;
-
-	asm volatile("mov   %0, x30\n"
-		     ".rept 16\n"
-		     "bl    . + 4\n"
-		     ".endr\n"
-		     "mov   x30, %0\n"
-		     : "=&r" (tmp));
-}
-
-static bool __nospectre_v2;
-static int __init parse_nospectre_v2(char *str)
-{
-	__nospectre_v2 = true;
-	return 0;
-}
-early_param("nospectre_v2", parse_nospectre_v2);
-
-/*
- * -1: No workaround
- *  0: No workaround required
- *  1: Workaround installed
- */
-static int detect_harden_bp_fw(void)
-{
-	bp_hardening_cb_t cb;
-	struct arm_smccc_res res;
-	u32 midr = read_cpuid_id();
-
-	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-
-	switch ((int)res.a0) {
-	case 1:
-		/* Firmware says we're just fine */
-		return 0;
-	case 0:
-		break;
-	default:
-		return -1;
-	}
-
-	switch (arm_smccc_1_1_get_conduit()) {
-	case SMCCC_CONDUIT_HVC:
-		cb = call_hvc_arch_workaround_1;
-		break;
-
-	case SMCCC_CONDUIT_SMC:
-		cb = call_smc_arch_workaround_1;
-		break;
-
-	default:
-		return -1;
-	}
-
-	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
-	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
-		cb = qcom_link_stack_sanitization;
-
-	install_bp_hardening_cb(cb);
-	return 1;
-}
-
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
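For context on what the removed code wired up: install_bp_hardening_cb() writes a per-CPU callback into bp_hardening_data.fn, which is consumed on exception entry from user space. A minimal sketch of the consumer, based on the contemporaneous helper in arch/arm64/include/asm/mmu.h (the exact capability name and helper names vary across kernel versions, so treat this as illustrative rather than the literal code moved by this commit):

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	/* The per-CPU slot/callback pair written by install_bp_hardening_cb(). */
	return this_cpu_ptr(&bp_hardening_data);
}

static inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	/* Capability name assumed from the table entry later in this diff. */
	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
		return;

	d = arm64_get_bp_hardening_data();
	if (d->fn)
		d->fn();	/* e.g. call_hvc_arch_workaround_1() */
}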
@@ -508,83 +369,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
 	CAP_MIDR_RANGE_LIST(midr_list)
 
-/* Track overall mitigation state. We are only mitigated if all cores are ok */
-static bool __hardenbp_enab = true;
-static bool __spectrev2_safe = true;
-
-int get_spectre_v2_workaround_state(void)
-{
-	if (__spectrev2_safe)
-		return ARM64_BP_HARDEN_NOT_REQUIRED;
-
-	if (!__hardenbp_enab)
-		return ARM64_BP_HARDEN_UNKNOWN;
-
-	return ARM64_BP_HARDEN_WA_NEEDED;
-}
-
-/*
- * List of CPUs that do not need any Spectre-v2 mitigation at all.
- */
-static const struct midr_range spectre_v2_safe_list[] = {
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
-	MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
-	{ /* sentinel */ }
-};
-
-/*
- * Track overall bp hardening for all heterogeneous cores in the machine.
- * We are only considered "safe" if all booted cores are known safe.
- */
-static bool __maybe_unused
-check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
-{
-	int need_wa;
-
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-	/* If the CPU has CSV2 set, we're safe */
-	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
-						 ID_AA64PFR0_CSV2_SHIFT))
-		return false;
-
-	/* Alternatively, we have a list of unaffected CPUs */
-	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
-		return false;
-
-	/* Fallback to firmware detection */
-	need_wa = detect_harden_bp_fw();
-	if (!need_wa)
-		return false;
-
-	__spectrev2_safe = false;
-
-	/* forced off */
-	if (__nospectre_v2 || cpu_mitigations_off()) {
-		pr_info_once("spectrev2 mitigation disabled by command line option\n");
-		__hardenbp_enab = false;
-		return false;
-	}
-
-	if (need_wa < 0) {
-		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
-		__hardenbp_enab = false;
-	}
-
-	return (need_wa > 0);
-}
-
-static void
-cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap)
-{
-	cap->matches(cap, SCOPE_LOCAL_CPU);
-}
-
 static const __maybe_unused struct midr_range tx2_family_cpus[] = {
 	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
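One subtlety in the removed detection logic: check_branch_predictor() runs per CPU (note the SCOPE_LOCAL_CPU assertion), and its call to detect_harden_bp_fw() has the side effect of installing the hardening callback on the calling CPU. That is why cpu_enable_branch_predictor_hardening() simply re-invokes cap->matches(). A condensed, hypothetical sketch of how the cpufeature framework drives such a table entry (shape assumed; the real loop in arch/arm64/kernel/cpufeature.c additionally handles scopes, boot vs. late CPUs, and conflict detection):

static void apply_local_cpu_errata(const struct arm64_cpu_capabilities *caps)
{
	const struct arm64_cpu_capabilities *cap;

	/* The real arm64_errata[] table ends with an empty sentinel entry. */
	for (cap = caps; cap->matches; cap++) {
		/* matches() at SCOPE_LOCAL_CPU inspects only this CPU... */
		if (!cap->matches(cap, SCOPE_LOCAL_CPU))
			continue;
		/*
		 * ...and cpu_enable() applies the fix on this CPU; for the
		 * entry above it just re-runs matches() for its side effect
		 * of installing the per-CPU callback.
		 */
		if (cap->cpu_enable)
			cap->cpu_enable(cap);
	}
}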
@@ -876,11 +660,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	},
 #endif
 	{
-		.desc = "Branch predictor hardening",
+		.desc = "Spectre-v2",
 		.capability = ARM64_SPECTRE_V2,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-		.matches = check_branch_predictor,
-		.cpu_enable = cpu_enable_branch_predictor_hardening,
+		.matches = has_spectre_v2,
+		.cpu_enable = spectre_v2_enable_mitigation,
 	},
 #ifdef CONFIG_RANDOMIZE_BASE
 	{
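The replacement hooks are not defined in this file; the hunk only rewires the table, and has_spectre_v2()/spectre_v2_enable_mitigation() presumably live in the new Spectre mitigation code added elsewhere in this commit. Their shapes follow from the arm64_cpu_capabilities callback types (declarations below are inferred from the struct definition in <asm/cpufeature.h>, not copied from the commit):

/* .matches reports whether this CPU is affected at the given scope;
 * .cpu_enable applies the mitigation on each CPU that matched.
 */
bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *entry);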
@@ -949,20 +733,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	}
 };
 
-ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	switch (get_spectre_v2_workaround_state()) {
-	case ARM64_BP_HARDEN_NOT_REQUIRED:
-		return sprintf(buf, "Not affected\n");
-	case ARM64_BP_HARDEN_WA_NEEDED:
-		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
-	case ARM64_BP_HARDEN_UNKNOWN:
-	default:
-		return sprintf(buf, "Vulnerable\n");
-	}
-}
-
 ssize_t cpu_show_spec_store_bypass(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
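The deleted cpu_show_spectre_v2() backs /sys/devices/system/cpu/vulnerabilities/spectre_v2: the driver core declares it as a weak symbol that architectures override, so deleting it here means some other definition must take over the reporting. A sketch of the generic fallback, assuming the shape of the era's drivers/base/cpu.c:

/* Weak default (shape assumed); an arch-specific definition such as
 * the one deleted above takes precedence at link time.
 */
ssize_t __weak cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}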