int
default 3
+++++++++++++ +config ARM64_HAFT
+++++++++++++ + bool "Support for Hardware managed Access Flag for Table Descriptors"
+++++++++++++ + depends on ARM64_HW_AFDBM
+++++++++++++ + default y
+++++++++++++ + help
+++++++++++++ + ARMv8.9/ARMv9.4 introduces the Hardware managed Access Flag for
+++++++++++++ + Table descriptors feature. When enabled, an architecturally
+++++++++++++ + executed memory access will update the Access Flag in each Table
+++++++++++++ + descriptor which is accessed during the translation table walk
+++++++++++++ + and for which the Access Flag is 0. The Access Flag of a Table
+++++++++++++ + descriptor uses the same bit position as PTE_AF.
+++++++++++++ +
+++++++++++++ + The feature will only be enabled if all the CPUs in the system
+++++++++++++ + support it. If unsure, say Y.
+++++++++++++ +
endmenu # "ARMv8.9 architectural features"
+ +++++++++++++menu "ARMv9.4 architectural features"
+ +++++++++++++
+ +++++++++++++config ARM64_GCS
+ +++++++++++++ bool "Enable support for Guarded Control Stack (GCS)"
+ +++++++++++++ default y
+ +++++++++++++ select ARCH_HAS_USER_SHADOW_STACK
+ +++++++++++++ select ARCH_USES_HIGH_VMA_FLAGS
+ +++++++++++++ depends on !UPROBES
+ +++++++++++++ help
+ +++++++++++++ Guarded Control Stack (GCS) provides support for a separate
+ +++++++++++++ stack with restricted access which contains only return
+ +++++++++++++ addresses. This can be used to harden against some attacks
+ +++++++++++++ by comparing the return address used by the program with what
+ +++++++++++++ is stored in the GCS, and may also be used to efficiently
+ +++++++++++++ obtain the call stack for applications such as profilers.
+ +++++++++++++
+ +++++++++++++ The feature is detected at runtime, and will remain disabled
+ +++++++++++++ if the system does not implement it.
+ +++++++++++++
+ +++++++++++++endmenu # "ARMv9.4 architectural features"
+ +++++++++++++
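To make the hardening model in the GCS help text more tangible, the following standalone C sketch (purely conceptual; the struct, function names and addresses are invented, and the real checks are performed by hardware rather than software) simulates a guarded stack of return addresses that is pushed on call and verified on return:

/*
 * Conceptual sketch only: models the check GCS performs in hardware, where
 * the return address a function is about to use must match the one recorded
 * on a separate, restricted stack of return addresses.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GCS_DEPTH 64

struct gcs_sim {
	uint64_t entries[GCS_DEPTH];
	int top;
};

/* On a call, the return address is pushed to the guarded stack. */
static void gcs_push(struct gcs_sim *gcs, uint64_t ret_addr)
{
	gcs->entries[gcs->top++] = ret_addr;
}

/*
 * On a return, the guarded stack is popped and the return fails if the
 * address does not match; corrupting the normal stack's return address is
 * therefore not enough to redirect control flow.
 */
static bool gcs_check_return(struct gcs_sim *gcs, uint64_t ret_addr)
{
	return gcs->top > 0 && gcs->entries[--gcs->top] == ret_addr;
}

int main(void)
{
	struct gcs_sim gcs = { .top = 0 };

	gcs_push(&gcs, 0x400abc);                 /* call site records LR */
	printf("clean return ok:    %d\n", gcs_check_return(&gcs, 0x400abc));

	gcs_push(&gcs, 0x400abc);                 /* attacker overwrites LR */
	printf("hijacked return ok: %d\n", gcs_check_return(&gcs, 0x41414141));
	return 0;
}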
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
static inline bool system_supports_poe(void)
{
----- --------- return IS_ENABLED(CONFIG_ARM64_POE) &&
----- --------- alternative_has_cap_unlikely(ARM64_HAS_S1POE);
+++++ +++++++++ return alternative_has_cap_unlikely(ARM64_HAS_S1POE);
+ +++ +++++++++}
+ +++ +++++++++
+ +++++++++++++static inline bool system_supports_gcs(void)
+ +++++++++++++{
+ +++++++++++++ return IS_ENABLED(CONFIG_ARM64_GCS) &&
+ +++++++++++++ alternative_has_cap_unlikely(ARM64_HAS_GCS);
+ +++++++++++ +}
+ +++++++++++ +
+++++++++++++ +static inline bool system_supports_haft(void)
+++++++++++++ +{
+++++++++++++ + return IS_ENABLED(CONFIG_ARM64_HAFT) &&
+++++++++++++ + cpus_have_final_cap(ARM64_HAFT);
+ + }
+ +
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
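The new system_supports_gcs() and system_supports_haft() helpers follow the usual arm64 cpufeature pattern: a compile-time IS_ENABLED() gate short-circuits a runtime capability lookup, so the helper folds to a constant false when the feature is configured out. A minimal standalone sketch of that pattern, with illustrative names rather than the kernel's real APIs:

/*
 * Standalone sketch of the helper pattern (names are illustrative, not
 * kernel APIs): a compile-time configuration gate combined with a runtime
 * capability check, so a configured-out feature costs nothing at runtime.
 */
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_FEATURE_X_ENABLED 1	/* stand-in for IS_ENABLED(CONFIG_...) */

static bool runtime_caps_have_feature_x;	/* stand-in for the cpucap state */

static inline bool system_supports_feature_x(void)
{
	return CONFIG_FEATURE_X_ENABLED && runtime_caps_have_feature_x;
}

int main(void)
{
	/* Detection would normally happen during early CPU feature probing. */
	runtime_caps_have_feature_x = true;

	if (system_supports_feature_x())
		printf("feature present: take the hardware-assisted path\n");
	else
		printf("feature absent: fall back to the software path\n");
	return 0;
}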
.fpsimd_cpu = NR_CPUS, \
}
-------- ------static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
++++++++ ++++++static inline void start_thread_common(struct pt_regs *regs, unsigned long pc,
++++++++ ++++++ unsigned long pstate)
{
-------- ------ s32 previous_syscall = regs->syscallno;
-------- ------ memset(regs, 0, sizeof(*regs));
-------- ------ regs->syscallno = previous_syscall;
-------- ------ regs->pc = pc;
++++++++ ++++++ /*
++++++++ ++++++ * Ensure all GPRs are zeroed, and initialize PC + PSTATE.
++++++++ ++++++ * The SP (or compat SP) will be initialized later.
++++++++ ++++++ */
++++++++ ++++++ regs->user_regs = (struct user_pt_regs) {
++++++++ ++++++ .pc = pc,
++++++++ ++++++ .pstate = pstate,
++++++++ ++++++ };
+
++++++++ ++++++ /*
++++++++ ++++++ * To allow the syscalls:sys_exit_execve tracepoint we need to preserve
++++++++ ++++++ * syscallno, but do not need orig_x0 or the original GPRs.
++++++++ ++++++ */
++++++++ ++++++ regs->orig_x0 = 0;
+ ++++++ ++++++
++++++++ ++++++ /*
++++++++ ++++++ * An exec from a kernel thread won't have an existing PMR value.
++++++++ ++++++ */
if (system_uses_irq_prio_masking())
-------- ------ regs->pmr_save = GIC_PRIO_IRQON;
++++++++ ++++++ regs->pmr = GIC_PRIO_IRQON;
++++++++ ++++++
++++++++ ++++++ /*
++++++++ ++++++ * The pt_regs::stackframe field must remain valid throughout this
++++++++ ++++++ * function as a stacktrace can be taken at any time. Any user or
++++++++ ++++++ * kernel task should have a valid final frame.
++++++++ ++++++ */
++++++++ ++++++ WARN_ON_ONCE(regs->stackframe.record.fp != 0);
++++++++ ++++++ WARN_ON_ONCE(regs->stackframe.record.lr != 0);
++++++++ ++++++ WARN_ON_ONCE(regs->stackframe.type != FRAME_META_TYPE_FINAL);
}
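The rewritten start_thread_common() now takes the initial PSTATE as a parameter and resets the user registers by assigning a compound literal, which zero-fills every member not named in the initializer while leaving fields outside user_regs (such as syscallno) untouched. A simplified standalone C sketch of that reset pattern, with hypothetical types standing in for pt_regs:

/*
 * Standalone sketch with invented types: assigning a compound literal zeroes
 * all unnamed members, so the GPRs are cleared and PC/PSTATE set in one
 * statement, while syscallno (outside user_regs) simply survives instead of
 * being saved and restored around a memset().
 */
#include <stdint.h>
#include <stdio.h>

struct fake_user_regs {
	uint64_t regs[31];	/* general purpose registers */
	uint64_t sp;
	uint64_t pc;
	uint64_t pstate;
};

struct fake_pt_regs {
	struct fake_user_regs user_regs;
	int64_t syscallno;	/* lives outside user_regs, so it is preserved */
};

int main(void)
{
	struct fake_pt_regs regs = { .user_regs.regs[0] = 0xdead,
				     .syscallno = 221 /* arbitrary example */ };

	/* Equivalent of the new code: zero the GPRs, set PC and PSTATE. */
	regs.user_regs = (struct fake_user_regs) {
		.pc = 0x400000,
		.pstate = 0,	/* caller-supplied initial PSTATE */
	};

	printf("x0=%llu pc=%#llx syscallno=%lld\n",
	       (unsigned long long)regs.user_regs.regs[0],
	       (unsigned long long)regs.user_regs.pc,
	       (long long)regs.syscallno);
	return 0;
}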
static inline void start_thread(struct pt_regs *regs, unsigned long pc,