/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/types.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>
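
/*
 * A CPU is affected by an erratum if its MIDR falls in the entry's
 * midr_range and, when the entry carries a fixed_revs list, its
 * MIDR/REVIDR pair does not identify one of the fixed implementations.
 */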
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}
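
/*
 * Qualcomm Kryo parts are matched on implementer, architecture and the
 * top nibble of the part number only, as the lower part number bits
 * vary across the Kryo family.
 */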
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}
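
/*
 * Trap EL0 access to CTR_EL0 so that the kernel can emulate a safe,
 * system-wide cache geometry for userspace when the CPUs disagree.
 */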
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
}
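
/*
 * Allocation cursor for the 2K EL2 vector slots; kept global so that
 * other users (e.g. KVM's EL2 vector mapping) can allocate from the
 * same pool.
 */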
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
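
/*
 * Replicate the hardening sequence into every 0x80-byte vector entry of
 * the given 2K hyp vector slot (16 entries per slot), then make the
 * copy visible to the instruction stream.
 */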
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
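
/*
 * Install @fn as this CPU's hardening callback. If another CPU already
 * installed the same callback, reuse its hyp vector slot; otherwise
 * allocate and populate a fresh slot.
 */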
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	/* CPUs advertising CSV2 are not affected; no mitigation needed. */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}
#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>
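
/*
 * Firmware-backed mitigation: invoke ARM_SMCCC_ARCH_WORKAROUND_1 through
 * the SMC or HVC conduit, matching the conduit PSCI uses.
 */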
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
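
/*
 * Select and install the branch predictor hardening callback for this
 * CPU: SMCCC v1.1 or later is required, ARM_SMCCC_ARCH_WORKAROUND_1
 * support is probed via ARM_SMCCC_ARCH_FEATURES_FUNC_ID, and Falkor
 * parts use the link stack sanitization sequence instead.
 */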
static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_hvc_arch_workaround_1;
		/* This is a guest, no need to patch KVM vectors */
		smccc_start = NULL;
		smccc_end = NULL;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list
/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
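
/*
 * For example, ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4) expands to:
 *
 *	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *	.matches = is_affected_midr_range,
 *	.midr_range = MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4)
 *
 * i.e. a local-CPU erratum matching Cortex-A53 r0p0 through r0p4.
 */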
/*
 * Generic helper for handling capabilities with multiple (match, enable)
 * pairs of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static bool __maybe_unused
multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}
/*
 * Take appropriate action for all matching entries in the shared capability
 * entry.
 */
static void __maybe_unused
multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
		    caps->cpu_enable)
			caps->cpu_enable(caps);
}
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a psci call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
	{},
};

#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif
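
/*
 * The table below is scanned by the cpufeature framework: for each entry
 * whose matches() callback returns true, the capability bit is set and
 * the cpu_enable() hook (if any) is run on the affected CPUs.
 */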
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = enable_smccc_arch_workaround_1,
		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
	},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
	{
	}
};