select ACPI_MCFG if (ACPI && PCI)
select ACPI_SPCR_TABLE if ACPI
select ACPI_PPTT if ACPI
- select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_PREP_COHERENT
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
+ select HAVE_ARCH_COMPILER_H
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y
+ config ARCH_ENABLE_MEMORY_HOTREMOVE
+ def_bool y
+
config SMP
def_bool y
# Common NUMA Features
config NUMA
- bool "Numa Memory Allocation and Scheduler Support"
+ bool "NUMA Memory Allocation and Scheduler Support"
select ACPI_NUMA if ACPI
select OF_NUMA
help
- Enable NUMA (Non Uniform Memory Access) support.
+ Enable NUMA (Non-Uniform Memory Access) support.
The kernel will try to allocate memory used by a CPU on the
local memory of the CPU and add some more
bool "Enable support for pointer authentication"
default y
depends on !KVM || ARM64_VHE
+ depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+ depends on CC_IS_GCC || (CC_IS_CLANG && AS_HAS_CFI_NEGATE_RA_STATE)
+ depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
Pointer authentication (part of the ARMv8.3 Extensions) provides
instructions for signing and authenticating pointers against secret
keys, which can be used to mitigate Return Oriented Programming (ROP)
and other attacks.
This option enables these instructions at EL0 (i.e. for userspace).
-
Choosing this option will cause the kernel to initialise secret keys
for each process at exec() time, with these keys being
context-switched along with the process.
+ If the compiler supports the -mbranch-protection or
+ -msign-return-address flag (e.g. GCC 7 or later), then this option
+ will also cause the kernel itself to be compiled with return address
+ protection. In this case, and if the target hardware is known to
+ support pointer authentication, then CONFIG_STACKPROTECTOR can be
+ disabled with minimal loss of protection.
+
The feature is detected at runtime. If the feature is not present in
hardware, it will not be advertised to userspace/KVM guests, nor will
it be enabled. However, KVM guests also require VHE mode, and hence
the CONFIG_ARM64_VHE=y option, to use this feature.
+ If the feature is present on the boot CPU but not on a late CPU, then
+ the late CPU will be parked. Also, if the boot CPU does not have
+ address auth and the late CPU has it, then the late CPU will still
+ boot, but with the feature disabled. On such a system, this option
+ should not be selected.
+
+ This feature works with the FUNCTION_GRAPH_TRACER option only if
+ DYNAMIC_FTRACE_WITH_REGS is enabled.
+
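To illustrate the runtime detection and userspace advertisement described in the help text above, a userspace check might look like the sketch below; this is not part of the patch, and the HWCAP_PACA fallback value is an assumption taken from the arm64 uapi headers:

/* Sketch only: HWCAP_PACA value assumed from the arm64 uapi headers. */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_PACA
#define HWCAP_PACA	(1UL << 30)
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	printf("address authentication: %s\n",
	       (hwcaps & HWCAP_PACA) ? "present" : "absent");
	return 0;
}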
+ config CC_HAS_BRANCH_PROT_PAC_RET
+ # GCC 9 or later, clang 8 or later
+ def_bool $(cc-option,-mbranch-protection=pac-ret+leaf)
+
+ config CC_HAS_SIGN_RETURN_ADDRESS
+ # GCC 7, 8
+ def_bool $(cc-option,-msign-return-address=all)
+
+ config AS_HAS_PAC
+ def_bool $(as-option,-Wa$(comma)-march=armv8.3-a)
+
+ config AS_HAS_CFI_NEGATE_RA_STATE
+ def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
+
+ endmenu
+
+ menu "ARMv8.4 architectural features"
+
+ config ARM64_AMU_EXTN
+ bool "Enable support for the Activity Monitors Unit CPU extension"
+ default y
+ help
+ The activity monitors extension is an optional extension introduced
+ by the ARMv8.4 CPU architecture. This enables support for version 1
+ of the activity monitors architecture, AMUv1.
+
+ To enable the use of this extension on CPUs that implement it, say Y.
+
+ Note that for architectural reasons, firmware _must_ implement AMU
+ support when running on CPUs that present the activity monitors
+ extension. The required support is present in:
+ * Version 1.5 and later of the ARM Trusted Firmware
+
+ For kernels that have this configuration enabled but boot with broken
+ firmware, you may need to say N here until the firmware is fixed.
+ Otherwise you may experience firmware panics or lockups when
+ accessing the counter registers. Even if you are not observing these
+ symptoms, the values returned by the register reads might not
+ correctly reflect reality. Most commonly, the value read will be 0,
+ indicating that the counter is not enabled.
+
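To make the per-CPU detection concrete, here is a minimal sketch (not part of this patch; report_amu_cpus() is a hypothetical initcall) that walks the possible CPUs and reports how many advertise AMUv1, using the cpu_has_amu_feat() helper declared later in this series:

/* Sketch only: report_amu_cpus() is a hypothetical initcall. */
static int __init report_amu_cpus(void)
{
	int cpu, count = 0;

	for_each_possible_cpu(cpu)
		if (cpu_has_amu_feat(cpu))
			count++;

	pr_info("AMUv1 present on %d CPU(s)\n", count);
	return 0;
}
late_initcall(report_amu_cpus);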
endmenu
menu "ARMv8.5 architectural features"
* In some non-typical cases either both (a) and (b), or neither,
* should be permitted. This can be described by including neither
* or both flags in the capability's type field.
+ *
+ * In case of a conflict, the CPU is prevented from booting. If the
+ * ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
+ * then a kernel panic is triggered.
*/
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when system has it */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
+ /* Panic when a conflict is detected */
+ #define ARM64_CPUCAP_PANIC_ON_CONFLICT ((u16)BIT(6))
/*
* CPU errata workarounds that need to be enabled at boot time if one or
/*
* CPU feature used early in the boot based on the boot CPU. All secondary
- * CPUs must match the state of the capability as detected by the boot CPU.
+ * CPUs must match the state of the capability as detected by the boot CPU. In
+ * case of a conflict, a kernel panic is triggered.
+ */
+ #define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE \
+ (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
+
+ /*
+ * CPU feature used early in the boot based on the boot CPU. It is safe for a
+ * late CPU to have this feature even though the boot CPU hasn't enabled it,
+ * although the feature will not be used by Linux in this case. If the boot CPU
+ * has enabled this feature already, then every late CPU must have it.
*/
- #define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
+ #define ARM64_CPUCAP_BOOT_CPU_FEATURE \
+ (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
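For illustration, a capability entry using the new composite type could look roughly like the sketch below, using the arm64_cpu_capabilities structure defined next; ARM64_HAS_EXAMPLE and example_matches() are hypothetical placeholders, not symbols from this series:

/* Sketch only: ARM64_HAS_EXAMPLE and example_matches() are hypothetical. */
static const struct arm64_cpu_capabilities example_boot_cpu_cap = {
	.desc		= "Example strict boot CPU feature",
	.capability	= ARM64_HAS_EXAMPLE,
	.type		= ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
	.matches	= example_matches,
};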
struct arm64_cpu_capabilities {
const char *desc;
return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}
- static inline bool
- cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
- {
- return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
- }
-
- static inline bool
- cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
- {
- return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
- }
-
/*
* Generic helper for handling capabilities with multiple (match,enable) pairs
* of callbacks, sharing the same capability bit.
#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
- /* System capability check for constant caps */
- static __always_inline bool __cpus_have_const_cap(int num)
+ static __always_inline bool system_capabilities_finalized(void)
{
- if (num >= ARM64_NCAPS)
- return false;
- return static_branch_unlikely(&cpu_hwcap_keys[num]);
+ return static_branch_likely(&arm64_const_caps_ready);
}
+ /*
+ * Test for a capability with a runtime check.
+ *
+ * Before the capability is detected, this returns false.
+ */
static inline bool cpus_have_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return test_bit(num, cpu_hwcaps);
}
+ /*
+ * Test for a capability without a runtime check.
+ *
+ * Before capabilities are finalized, this returns false.
+ * After capabilities are finalized, this is patched to avoid a runtime check.
+ *
+ * @num must be a compile-time constant.
+ */
+ static __always_inline bool __cpus_have_const_cap(int num)
+ {
+ if (num >= ARM64_NCAPS)
+ return false;
+ return static_branch_unlikely(&cpu_hwcap_keys[num]);
+ }
+
+ /*
+ * Test for a capability, possibly with a runtime check.
+ *
+ * Before capabilities are finalized, this behaves as cpus_have_cap().
+ * After capabilities are finalized, this is patched to avoid a runtime check.
+ *
+ * @num must be a compile-time constant.
+ */
static __always_inline bool cpus_have_const_cap(int num)
{
- if (static_branch_likely(&arm64_const_caps_ready))
+ if (system_capabilities_finalized())
return __cpus_have_const_cap(num);
else
return cpus_have_cap(num);
}
+ /*
+ * Test for a capability without a runtime check.
+ *
+ * Before capabilities are finalized, this will BUG().
+ * After capabilities are finalized, this is patched to avoid a runtime check.
+ *
+ * @num must be a compile-time constant.
+ */
+ static __always_inline bool cpus_have_final_cap(int num)
+ {
+ if (system_capabilities_finalized())
+ return __cpus_have_const_cap(num);
+ else
+ BUG();
+ }
+
static inline void cpus_set_cap(unsigned int num)
{
if (num >= ARM64_NCAPS) {
return cpuid_feature_extract_signed_field_width(features, field, 4);
}
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
return (u64)(features << (64 - width - field)) >> (64 - width);
}
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
+ /*
+ * Fields that identify the version of the Performance Monitors Extension do
+ * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
+ * "Alternative ID scheme used for the Performance Monitors Extension version".
+ */
+ static inline u64 __attribute_const__
+ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
+ {
+ u64 val = cpuid_feature_extract_unsigned_field(features, field);
+ u64 mask = GENMASK_ULL(field + 3, field);
+
+ /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
+ if (val == 0xf)
+ val = 0;
+
+ if (val > cap) {
+ features &= ~mask;
+ features |= (cap << field) & mask;
+ }
+
+ return features;
+ }
+
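As a quick worked example of the capping helper above (values purely illustrative, treating bit 8 as the start of a hypothetical 4-bit ID field):

/* Worked example with illustrative values only. */
static void example_cap_perfmon_field(void)
{
	u64 v;

	v = cpuid_feature_cap_perfmon_field(0x500, 8, 0x4);
	/* v == 0x400: the field value 0x5 exceeds the cap and is clamped. */

	v = cpuid_feature_cap_perfmon_field(0xf00, 8, 0x4);
	/* v == 0xf00: 0xf (IMP DEF) is treated as 0, which does not exceed
	 * the cap, so the register value is returned unchanged. */
	(void)v;
}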
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
return val == 0x1;
}
-static inline bool system_supports_fpsimd(void)
+static __always_inline bool system_supports_fpsimd(void)
{
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}
!cpus_have_const_cap(ARM64_HAS_PAN);
}
-static inline bool system_supports_sve(void)
+static __always_inline bool system_supports_sve(void)
{
return IS_ENABLED(CONFIG_ARM64_SVE) &&
cpus_have_const_cap(ARM64_SVE);
}
-static inline bool system_supports_cnp(void)
+static __always_inline bool system_supports_cnp(void)
{
return IS_ENABLED(CONFIG_ARM64_CNP) &&
cpus_have_const_cap(ARM64_HAS_CNP);
static inline bool system_supports_address_auth(void)
{
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
- (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
- cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
+ cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
}
static inline bool system_supports_generic_auth(void)
{
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
- (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
- cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
+ cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
}
static inline bool system_uses_irq_prio_masking(void)
system_uses_irq_prio_masking();
}
- static inline bool system_capabilities_finalized(void)
- {
- return static_branch_likely(&arm64_const_caps_ready);
- }
-
#define ARM64_BP_HARDEN_UNKNOWN -1
#define ARM64_BP_HARDEN_WA_NEEDED 0
#define ARM64_BP_HARDEN_NOT_REQUIRED 1
ID_AA64MMFR1_HADBS_SHIFT);
}
+ #ifdef CONFIG_ARM64_AMU_EXTN
+ /* Check whether the cpu supports the Activity Monitors Unit (AMU) */
+ extern bool cpu_has_amu_feat(int cpu);
+ #endif
+
#endif /* __ASSEMBLY__ */
#endif
__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
-static inline unsigned long __kern_hyp_va(unsigned long v)
+static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
"ror %0, %0, #1\n"
extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;
+/* This is only called on a VHE system */
static inline void *kvm_get_hyp_vector(void)
{
struct bp_hardening_data *data = arm64_get_bp_hardening_data();
int slot = -1;
if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
- vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
+ vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
slot = data->hyp_vectors_slot;
}
* HBP + HEL2 -> use hardened vectors and use exec mapping
*/
if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
- __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
+ __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
}
if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
- phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
- unsigned long size = (__bp_harden_hyp_vecs_end -
- __bp_harden_hyp_vecs_start);
+ phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+ unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
/*
* Always allocate a spare vector slot, as we don't
#define TTBR_ASID_MASK (UL(0xffff) << 48)
#define BP_HARDEN_EL2_SLOTS 4
+ #define __BP_HARDEN_HYP_VECS_SZ (BP_HARDEN_EL2_SLOTS * SZ_2K)
#ifndef __ASSEMBLY__
} mm_context_t;
/*
- * This macro is only used by the TLBI code, which cannot race with an
- * ASID change and therefore doesn't need to reload the counter using
- * atomic64_read.
+ * This macro is only used by the TLBI and low-level switch_mm() code,
+ * neither of which can race with an ASID change. We therefore don't
+ * need to reload the counter using atomic64_read().
*/
#define ASID(mm) ((mm)->context.id.counter & 0xffff)
-extern bool arm64_use_ng_mappings;
-
static inline bool arm64_kernel_unmapped_at_el0(void)
{
- return arm64_use_ng_mappings;
+ return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
typedef void (*bp_hardening_cb_t)(void);
#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
defined(CONFIG_HARDEN_EL2_VECTORS))
- extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+ extern char __bp_harden_hyp_vecs[];
extern atomic_t arm64_el2_vector_last_slot;
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
#include <linux/string.h>
#include <linux/thread_info.h>
+#include <vdso/processor.h>
+
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
unsigned long fault_code; /* ESR_EL1 value */
struct debug_info debug; /* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
- struct ptrauth_keys keys_user;
+ struct ptrauth_keys_user keys_user;
+ struct ptrauth_keys_kernel keys_kernel;
#endif
};
unsigned long get_wchan(struct task_struct *p);
-static inline void cpu_relax(void)
-{
- asm volatile("yield" ::: "memory");
-}
-
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
#include <linux/arch_topology.h>
+ #ifdef CONFIG_ARM64_AMU_EXTN
+ /*
+ * Replace task scheduler's default counter-based
+ * frequency-invariance scale factor setting.
+ */
+ void topology_scale_freq_tick(void);
+ #define arch_scale_freq_tick topology_scale_freq_tick
+ #endif /* CONFIG_ARM64_AMU_EXTN */
+
/* Replace task scheduler's default frequency-invariant accounting */
#define arch_scale_freq_capacity topology_get_freq_scale
/* Enable topology flag updates */
#define arch_update_cpu_topology topology_update_cpu_topology
+/* Replace task scheduler's default thermal pressure retrieve API */
+#define arch_scale_thermal_pressure topology_get_thermal_pressure
+
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
* to execute e.g. a RAM-based pin loop is not sufficient. This allows the
* kexec'd kernel to use any and all RAM as it sees fit, without having to
* avoid any code or data used by any SW CPU pin loop. The CPU hotplug
- * functionality embodied in disable_nonboot_cpus() to achieve this.
+ * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve this.
*/
void machine_shutdown(void)
{
- disable_nonboot_cpus();
+ smp_shutdown_nonboot_cpus(reboot_cpu);
}
/*
if (!user_mode(regs)) {
printk("pc : %pS\n", (void *)regs->pc);
- printk("lr : %pS\n", (void *)lr);
+ printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
} else {
printk("pc : %016llx\n", regs->pc);
printk("lr : %016llx\n", lr);
*/
fpsimd_flush_task_state(p);
+ ptrauth_thread_init_kernel(p);
+
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
contextidr_thread_switch(next);
entry_task_switch(next);
uao_thread_switch(next);
- ptrauth_thread_switch(next);
ssbs_thread_switch(next);
/*
*/
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- if (cpu_ops[cpu]->cpu_boot)
- return cpu_ops[cpu]->cpu_boot(cpu);
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ if (ops->cpu_boot)
+ return ops->cpu_boot(cpu);
return -EOPNOTSUPP;
}
*/
secondary_data.task = idle;
secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
+ #if defined(CONFIG_ARM64_PTR_AUTH)
+ secondary_data.ptrauth_key.apia.lo = idle->thread.keys_kernel.apia.lo;
+ secondary_data.ptrauth_key.apia.hi = idle->thread.keys_kernel.apia.hi;
+ #endif
update_cpu_boot_status(CPU_MMU_OFF);
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
- /*
- * Now bring the CPU into our world.
- */
+ /* Now bring the CPU into our world */
ret = boot_secondary(cpu, idle);
- if (ret == 0) {
- /*
- * CPU was successfully started, wait for it to come online or
- * time out.
- */
- wait_for_completion_timeout(&cpu_running,
- msecs_to_jiffies(5000));
-
- if (!cpu_online(cpu)) {
- pr_crit("CPU%u: failed to come online\n", cpu);
- ret = -EIO;
- }
- } else {
+ if (ret) {
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
return ret;
}
+ /*
+ * CPU was successfully started, wait for it to come online or
+ * time out.
+ */
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(5000));
+ if (cpu_online(cpu))
+ return 0;
+
+ pr_crit("CPU%u: failed to come online\n", cpu);
secondary_data.task = NULL;
secondary_data.stack = NULL;
+ #if defined(CONFIG_ARM64_PTR_AUTH)
+ secondary_data.ptrauth_key.apia.lo = 0;
+ secondary_data.ptrauth_key.apia.hi = 0;
+ #endif
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
status = READ_ONCE(secondary_data.status);
- if (ret && status) {
-
- if (status == CPU_MMU_OFF)
- status = READ_ONCE(__early_cpu_boot_status);
+ if (status == CPU_MMU_OFF)
+ status = READ_ONCE(__early_cpu_boot_status);
- switch (status & CPU_BOOT_STATUS_MASK) {
- default:
- pr_err("CPU%u: failed in unknown state : 0x%lx\n",
- cpu, status);
- cpus_stuck_in_kernel++;
- break;
- case CPU_KILL_ME:
- if (!op_cpu_kill(cpu)) {
- pr_crit("CPU%u: died during early boot\n", cpu);
- break;
- }
- pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
- /* Fall through */
- case CPU_STUCK_IN_KERNEL:
- pr_crit("CPU%u: is stuck in kernel\n", cpu);
- if (status & CPU_STUCK_REASON_52_BIT_VA)
- pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
- if (status & CPU_STUCK_REASON_NO_GRAN)
- pr_crit("CPU%u: does not support %luK granule \n", cpu, PAGE_SIZE / SZ_1K);
- cpus_stuck_in_kernel++;
+ switch (status & CPU_BOOT_STATUS_MASK) {
+ default:
+ pr_err("CPU%u: failed in unknown state : 0x%lx\n",
+ cpu, status);
+ cpus_stuck_in_kernel++;
+ break;
+ case CPU_KILL_ME:
+ if (!op_cpu_kill(cpu)) {
+ pr_crit("CPU%u: died during early boot\n", cpu);
break;
- case CPU_PANIC_KERNEL:
- panic("CPU%u detected unsupported configuration\n", cpu);
}
+ pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+ /* Fall through */
+ case CPU_STUCK_IN_KERNEL:
+ pr_crit("CPU%u: is stuck in kernel\n", cpu);
+ if (status & CPU_STUCK_REASON_52_BIT_VA)
+ pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
+ if (status & CPU_STUCK_REASON_NO_GRAN) {
+ pr_crit("CPU%u: does not support %luK granule\n",
+ cpu, PAGE_SIZE / SZ_1K);
+ }
+ cpus_stuck_in_kernel++;
+ break;
+ case CPU_PANIC_KERNEL:
+ panic("CPU%u detected unsupported configuration\n", cpu);
}
return ret;
{
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
+ const struct cpu_operations *ops;
unsigned int cpu;
cpu = task_cpu(current);
*/
check_local_cpu_capabilities();
- if (cpu_ops[cpu]->cpu_postboot)
- cpu_ops[cpu]->cpu_postboot();
+ ops = get_cpu_ops(cpu);
+ if (ops->cpu_postboot)
+ ops->cpu_postboot();
/*
* Log the CPU info before it is marked online and might get read.
#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
/*
* If we don't have a cpu_die method, abort before we reach the point
* of no return. CPU0 may not have a cpu_ops, so test for it.
*/
- if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+ if (!ops || !ops->cpu_die)
return -EOPNOTSUPP;
/*
* We may need to abort a hot unplug for some other mechanism-specific
* reason.
*/
- if (cpu_ops[cpu]->cpu_disable)
- return cpu_ops[cpu]->cpu_disable(cpu);
+ if (ops->cpu_disable)
+ return ops->cpu_disable(cpu);
return 0;
}
static int op_cpu_kill(unsigned int cpu)
{
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
/*
* If we have no means of synchronising with the dying CPU, then assume
* that it is really dead. We can only wait for an arbitrary length of
* time and hope that it's dead, so let's skip the wait and just hope.
*/
- if (!cpu_ops[cpu]->cpu_kill)
+ if (!ops->cpu_kill)
return 0;
- return cpu_ops[cpu]->cpu_kill(cpu);
+ return ops->cpu_kill(cpu);
}
/*
void cpu_die(void)
{
unsigned int cpu = smp_processor_id();
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
idle_task_exit();
* mechanism must perform all required cache maintenance to ensure that
* no dirty lines are lost in the process of shutting down the CPU.
*/
- cpu_ops[cpu]->cpu_die(cpu);
+ ops->cpu_die(cpu);
BUG();
}
#endif
+ static void __cpu_try_die(int cpu)
+ {
+ #ifdef CONFIG_HOTPLUG_CPU
+ const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+ if (ops && ops->cpu_die)
+ ops->cpu_die(cpu);
+ #endif
+ }
+
/*
* Kill the calling secondary CPU, early in bringup before it is turned
* online.
/* Mark this CPU absent */
set_cpu_present(cpu, 0);
- #ifdef CONFIG_HOTPLUG_CPU
- update_cpu_boot_status(CPU_KILL_ME);
- /* Check if we can park ourselves */
- if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
- cpu_ops[cpu]->cpu_die(cpu);
- #endif
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+ update_cpu_boot_status(CPU_KILL_ME);
+ __cpu_try_die(cpu);
+ }
+
update_cpu_boot_status(CPU_STUCK_IN_KERNEL);
cpu_park_loop();
*/
static int __init smp_cpu_setup(int cpu)
{
- if (cpu_read_ops(cpu))
+ const struct cpu_operations *ops;
+
+ if (init_cpu_ops(cpu))
return -ENODEV;
- if (cpu_ops[cpu]->cpu_init(cpu))
+ ops = get_cpu_ops(cpu);
+ if (ops->cpu_init(cpu))
return -ENODEV;
set_cpu_possible(cpu, true);
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ const struct cpu_operations *ops;
int err;
unsigned int cpu;
unsigned int this_cpu;
if (cpu == smp_processor_id())
continue;
- if (!cpu_ops[cpu])
+ ops = get_cpu_ops(cpu);
+ if (!ops)
continue;
- err = cpu_ops[cpu]->cpu_prepare(cpu);
+ err = ops->cpu_prepare(cpu);
if (err)
continue;
local_irq_disable();
sdei_mask_local_cpu();
- #ifdef CONFIG_HOTPLUG_CPU
- if (cpu_ops[cpu]->cpu_die)
- cpu_ops[cpu]->cpu_die(cpu);
- #endif
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+ __cpu_try_die(cpu);
/* just in case */
cpu_park_loop();
}
#endif
+/*
+ * The number of CPUs online, not counting this CPU (which may not be
+ * fully online and so not counted in num_online_cpus()).
+ */
+static inline unsigned int num_other_online_cpus(void)
+{
+ unsigned int this_cpu_online = cpu_online(smp_processor_id());
+
+ return num_online_cpus() - this_cpu_online;
+}
+
void smp_send_stop(void)
{
unsigned long timeout;
- if (num_online_cpus() > 1) {
+ if (num_other_online_cpus()) {
cpumask_t mask;
cpumask_copy(&mask, cpu_online_mask);
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
- while (num_online_cpus() > 1 && timeout--)
+ while (num_other_online_cpus() && timeout--)
udelay(1);
- if (num_online_cpus() > 1)
+ if (num_other_online_cpus())
pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(cpu_online_mask));
cpus_stopped = 1;
- if (num_online_cpus() == 1) {
+ /*
+ * If this cpu is the only one alive at this point in time, online or
+ * not, there are no stop messages to be sent around, so just back out.
+ */
+ if (num_other_online_cpus() == 0) {
sdei_mask_local_cpu();
return;
}
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
- atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+ atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
pr_crit("SMP: stopping secondary CPUs\n");
smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
{
#ifdef CONFIG_HOTPLUG_CPU
int any_cpu = raw_smp_processor_id();
+ const struct cpu_operations *ops = get_cpu_ops(any_cpu);
- if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
+ if (ops && ops->cpu_die)
return true;
#endif
return false;
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
val &= ~CPACR_EL1_ZEN;
+
+ /*
+ * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
+ * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
+ * except for some missing controls, such as TAM.
+ * In this case, CPTR_EL2.TAM has the same position with or without
+ * VHE (HCR.E2H == 1), which allows us to use the CPTR_EL2.TAM shift
+ * value here for trapping the AMU accesses.
+ */
+
+ val |= CPTR_EL2_TAM;
+
if (update_fp_enabled(vcpu)) {
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN;
__activate_traps_common(vcpu);
val = CPTR_EL2_DEFAULT;
- val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
+ val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
if (!update_fp_enabled(vcpu)) {
val |= CPTR_EL2_TFP;
__activate_traps_fpsimd32(vcpu);
write_sysreg(val, cptr_el2);
- if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
isb();
{
u64 hcr = vcpu->arch.hcr_el2;
- if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+ if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
hcr |= HCR_TVM;
write_sysreg(hcr, hcr_el2);
- if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
+ if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
if (has_vhe())
{
u64 mdcr_el2 = read_sysreg(mdcr_el2);
- if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
u64 val;
/*
* resolve the IPA using the AT instruction.
*/
if (!(esr & ESR_ELx_S1PTW) &&
- (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+ (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
(esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
if (!__translate_far_to_hpfar(far, &hpfar))
return false;
if (*exit_code != ARM_EXCEPTION_TRAP)
goto exit;
- if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+ if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
handle_tx2_tvm(vcpu))
return true;
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
- if (!cpus_have_const_cap(ARM64_SSBD))
+ if (!cpus_have_final_cap(ARM64_SSBD))
return false;
return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}
/* Switch to the guest for VHE systems running in EL2 */
-int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
return exit_code;
}
-NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
+NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
+
+int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ local_daif_mask();
+
+ /*
+ * Having IRQs masked via PMR when entering the guest means the GIC
+ * will not signal the CPU of interrupts of lower priority, and the
+ * only way to get out will be via guest exceptions.
+ * Naturally, we want to avoid this.
+ *
+ * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
+ * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
+ */
+ pmr_sync();
+
+ ret = __kvm_vcpu_run_vhe(vcpu);
+
+ /*
+ * local_daif_restore() takes care to properly restore PSTATE.DAIF
+ * and the GIC PMR if the host is using IRQ priorities.
+ */
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+ /*
+ * When we exit from the guest we change a number of CPU configuration
+ * parameters, such as traps. Make sure these changes take effect
+ * before running the host or additional guests.
+ */
+ isb();
+
+ return ret;
+}
/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
* Copyright (C) 2012 ARM Ltd.
*/
+ #include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
+ if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
+ return;
+
asm(ALTERNATIVE("nop; nop; nop",
"ic iallu; dsb nsh; isb",
- ARM64_WORKAROUND_CAVIUM_27456,
- CONFIG_CAVIUM_ERRATUM_27456));
+ ARM64_WORKAROUND_CAVIUM_27456));
+ }
+
+ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
+ {
+ unsigned long ttbr1 = read_sysreg(ttbr1_el1);
+ unsigned long asid = ASID(mm);
+ unsigned long ttbr0 = phys_to_ttbr(pgd_phys);
+
+ /* Skip CNP for the reserved ASID */
+ if (system_supports_cnp() && asid)
+ ttbr0 |= TTBR_CNP_BIT;
+
+ /* SW PAN needs a copy of the ASID in TTBR0 for entry */
+ if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
+ ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+
+ /* Set ASID in TTBR1 since TCR.A1 is set */
+ ttbr1 &= ~TTBR_ASID_MASK;
+ ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+
+ write_sysreg(ttbr1, ttbr1_el1);
+ isb();
+ write_sysreg(ttbr0, ttbr0_el1);
+ isb();
+ post_ttbr_update_workaround();
}
-static int asids_init(void)
+static int asids_update_limit(void)
{
- asid_bits = get_cpu_asid_bits();
+ unsigned long num_available_asids = NUM_USER_ASIDS;
+
+ if (arm64_kernel_unmapped_at_el0())
+ num_available_asids /= 2;
/*
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is reserved for init_mm.
*/
- WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
+ WARN_ON(num_available_asids - 1 <= num_possible_cpus());
+ pr_info("ASID allocator initialised with %lu entries\n",
+ num_available_asids);
+ return 0;
+}
+arch_initcall(asids_update_limit);
+
+static int asids_init(void)
+{
+ asid_bits = get_cpu_asid_bits();
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
GFP_KERNEL);
*/
if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
set_kpti_asid_bits();
-
- pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
return 0;
}
early_initcall(asids_init);
#include <linux/sched.h>
#include <linux/smp.h>
+ __weak bool arch_freq_counters_available(struct cpumask *cpus)
+ {
+ return false;
+ }
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
unsigned long scale;
int i;
+ /*
+ * If the use of counters for FIE is enabled, just return as we don't
+ * want to update the scale factor with information from CPUFREQ.
+ * Instead the scale factor will be updated from arch_scale_freq_tick.
+ */
+ if (arch_freq_counters_available(cpus))
+ return;
+
scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
for_each_cpu(i, cpus)
update_topology = 0;
}
-static u32 capacity_scale;
+static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;
static int free_raw_capacity(void)
void topology_normalize_cpu_scale(void)
{
u64 capacity;
+ u64 capacity_scale;
int cpu;
if (!raw_capacity)
return;
- pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+ capacity_scale = 1;
for_each_possible_cpu(cpu) {
- pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
- cpu, raw_capacity[cpu]);
- capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
- / capacity_scale;
+ capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity_scale = max(capacity, capacity_scale);
+ }
+
+ pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
+ for_each_possible_cpu(cpu) {
+ capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
+ capacity_scale);
topology_set_cpu_scale(cpu, capacity);
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
cpu, topology_get_cpu_scale(cpu));
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
+ struct clk *cpu_clk;
static bool cap_parsing_failed;
int ret;
u32 cpu_capacity;
return false;
}
}
- capacity_scale = max(cpu_capacity, capacity_scale);
raw_capacity[cpu] = cpu_capacity;
pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
cpu_node, raw_capacity[cpu]);
+
+ /*
+ * Update freq_factor for calculating early boot CPU capacities.
+ * For non-clk CPU DVFS mechanisms, there is no way to get the
+ * frequency value now, so assume the CPUs are running at the same
+ * frequency (by keeping the initial freq_factor value).
+ */
+ cpu_clk = of_clk_get(cpu_node, 0);
+ if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+ per_cpu(freq_factor, cpu) =
+ clk_get_rate(cpu_clk) / 1000;
+ clk_put(cpu_clk);
+ }
} else {
if (raw_capacity) {
pr_err("cpu_capacity: missing %pOF raw capacity\n",
cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
- for_each_cpu(cpu, policy->related_cpus) {
- raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
- policy->cpuinfo.max_freq / 1000UL;
- capacity_scale = max(raw_capacity[cpu], capacity_scale);
- }
+ for_each_cpu(cpu, policy->related_cpus)
+ per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
if (cpumask_empty(cpus_to_visit)) {
topology_normalize_cpu_scale();
static int __init parse_core(struct device_node *core, int package_id,
int core_id)
{
- char name[10];
+ char name[20];
bool leaf = true;
int i = 0;
int cpu;
static int __init parse_cluster(struct device_node *cluster, int depth)
{
- char name[10];
+ char name[20];
bool leaf = true;
bool has_cores = false;
struct device_node *c;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
-static bool vdso_default = true;
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
+#else
+static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
+#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
* change both the default value and the vdso itself.
*/
if (wa->read_cntvct_el0) {
- clocksource_counter.archdata.vdso_direct = false;
- vdso_default = false;
+ clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+ vdso_default = VDSO_CLOCKMODE_NONE;
}
}
return 0;
}
+ static int validate_timer_rate(void)
+ {
+ if (!arch_timer_rate)
+ return -EINVAL;
+
+ /* Arch timer frequency < 1MHz can cause trouble */
+ WARN_ON(arch_timer_rate < 1000000);
+
+ return 0;
+ }
+
/*
* For historical reasons, when probing with DT we use whichever (non-zero)
* rate was probed first, and don't verify that others match. If the first node
arch_timer_rate = rate;
/* Check the timer frequency. */
- if (arch_timer_rate == 0)
+ if (validate_timer_rate())
pr_warn("frequency not available\n");
}
}
arch_timer_read_counter = rd;
- clocksource_counter.archdata.vdso_direct = vdso_default;
+ clocksource_counter.vdso_clock_mode = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;
}
* CNTFRQ value. This *must* be correct.
*/
arch_timer_rate = arch_timer_get_cntfrq();
- if (!arch_timer_rate) {
+ ret = validate_timer_rate();
+ if (ret) {
pr_err(FW_BUG "frequency not available.\n");
- return -EINVAL;
+ return ret;
}
arch_timer_uses_ppi = arch_timer_select_ppi();
pol = policy->last_policy;
} else if (def_gov) {
pol = cpufreq_parse_policy(def_gov->name);
- } else {
- return -ENODATA;
+ /*
+ * In case the default governor is neither "performance"
+ * nor "powersave", fall back to the initial policy
+ * value set by the driver.
+ */
+ if (pol == CPUFREQ_POLICY_UNKNOWN)
+ pol = policy->policy;
}
+ if (pol != CPUFREQ_POLICY_PERFORMANCE &&
+ pol != CPUFREQ_POLICY_POWERSAVE)
+ return -ENODATA;
}
return cpufreq_set_policy(policy, gov, pol);
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
+ /**
+ * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
+ * @cpu: CPU number
+ *
+ * The default return value is the max_freq field of cpuinfo.
+ */
+ __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
+ {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
+
+ if (policy) {
+ ret_freq = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+ }
+ EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
+
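As a usage sketch for the new helper (example_report_max_freq() is a hypothetical caller, not from this patch): a return value of 0 indicates that no cpufreq policy exists for the CPU yet, so callers should handle that case:

/* Sketch only: example_report_max_freq() is a hypothetical caller. */
static void example_report_max_freq(unsigned int cpu)
{
	unsigned int max_khz = cpufreq_get_hw_max_freq(cpu);

	if (!max_khz)
		pr_warn("CPU%u: no cpufreq policy registered yet\n", cpu);
	else
		pr_info("CPU%u: max hardware frequency %u kHz\n", cpu, max_khz);
}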
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
if (unlikely(policy_is_inactive(policy)))
DECLARE_PER_CPU(unsigned long, cpu_scale);
-struct sched_domain;
-static inline
-unsigned long topology_get_cpu_scale(int cpu)
+static inline unsigned long topology_get_cpu_scale(int cpu)
{
return per_cpu(cpu_scale, cpu);
}
DECLARE_PER_CPU(unsigned long, freq_scale);
-static inline
-unsigned long topology_get_freq_scale(int cpu)
+static inline unsigned long topology_get_freq_scale(int cpu)
{
return per_cpu(freq_scale, cpu);
}
+ bool arch_freq_counters_available(struct cpumask *cpus);
+
+DECLARE_PER_CPU(unsigned long, thermal_pressure);
+
+static inline unsigned long topology_get_thermal_pressure(int cpu)
+{
+ return per_cpu(thermal_pressure, cpu);
+}
+
+void arch_set_thermal_pressure(struct cpumask *cpus,
+ unsigned long th_pressure);
+
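As a hedged sketch of how a thermal-capping driver might feed this hook (the function name, capped_freq and max_freq are assumptions, not part of this patch), the capacity lost to capping can be reported as thermal pressure:

/* Sketch only: capped_freq/max_freq come from a hypothetical driver. */
static void example_set_thermal_pressure(struct cpumask *cpus, int cpu,
					 unsigned long capped_freq,
					 unsigned long max_freq)
{
	unsigned long max_cap = topology_get_cpu_scale(cpu);
	unsigned long capped_cap = (capped_freq * max_cap) / max_freq;

	/* Report the capacity currently lost to thermal capping. */
	arch_set_thermal_pressure(cpus, max_cap - capped_cap);
}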
struct cpu_topology {
int thread_id;
int core_id;
# Return y if the linker supports <flag>, n otherwise
ld-option = $(success,$(LD) -v $(1))
+ # $(as-option,<flag>)
+ # /dev/zero is used as output instead of /dev/null because some assemblers
+ # complain when the input and the output are the same file. Both devices have
+ # the same write behaviour, so one can be substituted for the other.
+ as-option = $(success, $(CC) $(CLANG_FLAGS) $(1) -c -x assembler /dev/null -o /dev/zero)
+
# $(as-instr,<instr>)
# Return y if the assembler supports <instr>, n otherwise
as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler -o /dev/null -)
# gcc version including patch level
gcc-version := $(shell,$(srctree)/scripts/gcc-version.sh $(CC))
+
+# machine bit flags
+# $(m32-flag): -m32 if the compiler supports it, or an empty string otherwise.
+# $(m64-flag): -m64 if the compiler supports it, or an empty string otherwise.
+cc-option-bit = $(if-success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null,$(1))
+m32-flag := $(cc-option-bit,-m32)
+m64-flag := $(cc-option-bit,-m64)