diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0540653fbf382bf8a0984a7e9f97cafbedc3251b..60bebb3e7d48f7b5e746ecd1d26d3463448c280f 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -49,6 +49,7 @@
 #include <asm/cacheflush.h>
 #include <asm/exec.h>
 #include <asm/fpsimd.h>
+#include <asm/gcs.h>
 #include <asm/mmu_context.h>
 #include <asm/mte.h>
 #include <asm/processor.h>
@@ -227,7 +228,7 @@ void __show_regs(struct pt_regs *regs)
        printk("sp : %016llx\n", sp);
 
        if (system_uses_irq_prio_masking())
-               printk("pmr_save: %08llx\n", regs->pmr_save);
+               printk("pmr: %08x\n", regs->pmr);
 
        i = top_reg;
 
@@ -280,6 +281,51 @@ static void flush_poe(void)
        write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
 }
 
+#ifdef CONFIG_ARM64_GCS
+
+static void flush_gcs(void)
+{
+       if (!system_supports_gcs())
+               return;
+
+       gcs_free(current);
+       current->thread.gcs_el0_mode = 0;
+       write_sysreg_s(GCSCRE0_EL1_nTR, SYS_GCSCRE0_EL1);
+       write_sysreg_s(0, SYS_GCSPR_EL0);
+}
+
+static int copy_thread_gcs(struct task_struct *p,
+                          const struct kernel_clone_args *args)
+{
+       unsigned long gcs;
+
+       if (!system_supports_gcs())
+               return 0;
+
+       p->thread.gcs_base = 0;
+       p->thread.gcs_size = 0;
+
+       gcs = gcs_alloc_thread_stack(p, args);
+       if (IS_ERR_VALUE(gcs))
+               return PTR_ERR((void *)gcs);
+
+       p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
+       p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;
+
+       return 0;
+}
+
+#else
+
+static void flush_gcs(void) { }
+static int copy_thread_gcs(struct task_struct *p,
+                          const struct kernel_clone_args *args)
+{
+       return 0;
+}
+
+#endif
+
 void flush_thread(void)
 {
        fpsimd_flush_thread();
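
The two helpers added above tie GCS state to the usual process life cycle:
flush_gcs() resets the shadow-stack mode and pointer on exec, while
copy_thread_gcs() allocates a shadow stack for a new thread when one is
needed and inherits the parent's EL0 mode and lock state. From userspace
this mode is driven by the arch-agnostic shadow-stack prctl()s introduced
alongside this series; a minimal sketch follows, with the prctl constants
defined locally in case libc headers predate them (treat the numeric values
as assumptions taken from that series):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SHADOW_STACK_STATUS
    #define PR_GET_SHADOW_STACK_STATUS      74
    #define PR_SET_SHADOW_STACK_STATUS      75
    #define PR_SHADOW_STACK_ENABLE          (1UL << 0)
    #endif

    int main(void)
    {
            unsigned long status;

            /* Enabling GCS here sets thread.gcs_el0_mode; children created
             * with fork()/pthread_create() inherit it via copy_thread_gcs(),
             * and execve() drops it again via flush_gcs(). */
            if (prctl(PR_SET_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE,
                      0, 0, 0))
                    perror("PR_SET_SHADOW_STACK_STATUS");

            if (!prctl(PR_GET_SHADOW_STACK_STATUS, &status, 0, 0, 0))
                    printf("shadow stack status: %#lx\n", status);

            return 0;
    }
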
@@ -287,11 +333,13 @@ void flush_thread(void)
        flush_ptrace_hw_breakpoint(current);
        flush_tagged_addr_state();
        flush_poe();
+       flush_gcs();
 }
 
 void arch_release_task_struct(struct task_struct *tsk)
 {
        fpsimd_release_task(tsk);
+       gcs_free(tsk);
 }
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -355,6 +403,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
        unsigned long stack_start = args->stack;
        unsigned long tls = args->tls;
        struct pt_regs *childregs = task_pt_regs(p);
+       int ret;
 
        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
@@ -399,6 +448,10 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
                        p->thread.uw.tp_value = tls;
                        p->thread.tpidr2_el0 = 0;
                }
+
+               ret = copy_thread_gcs(p, args);
+               if (ret != 0)
+                       return ret;
        } else {
                /*
                 * A kthread has no context to ERET to, so ensure any buggy
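
In copy_thread_gcs(), gcs_alloc_thread_stack() returns zero (no new shadow
stack needed), the base of a fresh GCS mapping, or a negative errno encoded
as an address, which is why the caller tests IS_ERR_VALUE() on an unsigned
long and unpacks it with PTR_ERR(). A reminder of that convention from
include/linux/err.h, sketched with a hypothetical gcs_alloc_stub() callee:

    #include <linux/err.h>

    /* Callee side: a negative errno encoded as an "address". */
    static unsigned long gcs_alloc_stub(void)
    {
            return (unsigned long)ERR_PTR(-ENOMEM);
    }

    /* Caller side, as in copy_thread_gcs() above: errors occupy the
     * top MAX_ERRNO values of the address range. */
    static int gcs_alloc_check(void)
    {
            unsigned long gcs = gcs_alloc_stub();

            if (IS_ERR_VALUE(gcs))
                    return PTR_ERR((void *)gcs);
            return 0;
    }
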
@@ -409,6 +462,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
                 */
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
+               childregs->stackframe.type = FRAME_META_TYPE_FINAL;
 
                p->thread.cpu_context.x19 = (unsigned long)args->fn;
                p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
@@ -419,7 +473,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
         * For the benefit of the unwinder, set up childregs->stackframe
         * as the final frame for the new task.
         */
-       p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
+       p->thread.cpu_context.fp = (unsigned long)&childregs->stackframe;
 
        ptrace_hw_copy_thread(p);
 
@@ -439,7 +493,7 @@ static void tls_thread_switch(struct task_struct *next)
 
        if (is_compat_thread(task_thread_info(next)))
                write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
-       else if (!arm64_kernel_unmapped_at_el0())
+       else
                write_sysreg(0, tpidrro_el0);
 
        write_sysreg(*task_user_tls(next), tpidr_el0);
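
The zeroing of TPIDRRO_EL0 for native tasks is now unconditional rather than
skipped when the kernel was unmapped at EL0 (the KPTI case, where the entry
trampoline was previously expected to take care of it), so a 64-bit task can
rely on reading zero from the register. A userspace-side sketch of that
observation, using plain inline asm with no kernel dependencies:

    /* EL0 may read (but not write) TPIDRRO_EL0; a native 64-bit task
     * should observe 0 here after this change, regardless of KPTI. */
    static inline unsigned long read_tpidrro_el0(void)
    {
            unsigned long v;

            asm volatile("mrs %0, tpidrro_el0" : "=r" (v));
            return v;
    }
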
@@ -484,6 +538,46 @@ static void entry_task_switch(struct task_struct *next)
        __this_cpu_write(__entry_task, next);
 }
 
+#ifdef CONFIG_ARM64_GCS
+
+void gcs_preserve_current_state(void)
+{
+       current->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
+}
+
+static void gcs_thread_switch(struct task_struct *next)
+{
+       if (!system_supports_gcs())
+               return;
+
+       /* GCSPR_EL0 is always readable */
+       gcs_preserve_current_state();
+       write_sysreg_s(next->thread.gcspr_el0, SYS_GCSPR_EL0);
+
+       if (current->thread.gcs_el0_mode != next->thread.gcs_el0_mode)
+               gcs_set_el0_mode(next);
+
+       /*
+        * Ensure that GCS memory effects of the 'prev' thread are
+        * ordered before other memory accesses with release semantics
+        * (or preceded by a DMB) on the current PE. In addition, any
+        * memory accesses with acquire semantics (or succeeded by a
+        * DMB) are ordered before GCS memory effects of the 'next'
+        * thread. This will ensure that the GCS memory effects are
+        * visible to other PEs in case of migration.
+        */
+       if (task_gcs_el0_enabled(current) || task_gcs_el0_enabled(next))
+               gcsb_dsync();
+}
+
+#else
+
+static void gcs_thread_switch(struct task_struct *next)
+{
+}
+
+#endif
+
 /*
  * Handle sysreg updates for ARM erratum 1418040 which affects the 32bit view of
  * CNTVCT, various other errata which require trapping all CNTVCT{,_EL0}
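
gcs_thread_switch() above saves the outgoing thread's GCSPR_EL0 (readable
even when GCS is disabled), installs the incoming thread's pointer and mode,
and issues a GCS barrier when either side has GCS enabled so that
shadow-stack writes are ordered for a possible migration to another PE. The
barrier is the GCSB DSYNC instruction; as a hint-space encoding it executes
as a NOP on cores without GCS, along the lines of the following sketch
(encoding per the GCS extension, shown here for illustration):

    static inline void gcsb_dsync(void)
    {
            /* GCSB DSYNC: hint #19, a NOP where GCS is not implemented */
            asm volatile(".inst 0xd503227f" : : : "memory");
    }
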
@@ -580,6 +674,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
        cntkctl_thread_switch(prev, next);
        ptrauth_thread_switch_user(next);
        permission_overlay_switch(next);
+       gcs_thread_switch(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case