// SPDX-License-Identifier: GPL-2.0-only

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/gcs.h>
#include <asm/page.h>

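/*
 * Map a region for use as a Guarded Control Stack: an anonymous
 * private mapping with VM_SHADOW_STACK set, readable by EL0 but
 * writable only via GCS operations.  A non-zero addr requests a fixed
 * placement and fails rather than replacing an existing mapping.
 */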
static unsigned long alloc_gcs(unsigned long addr, unsigned long size)
{
	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
	struct mm_struct *mm = current->mm;
	unsigned long mapped_addr, unused;

	if (addr)
		flags |= MAP_FIXED_NOREPLACE;

	mmap_write_lock(mm);
	mapped_addr = do_mmap(NULL, addr, size, PROT_READ, flags,
			      VM_SHADOW_STACK | VM_WRITE, 0, &unused, NULL);
	mmap_write_unlock(mm);

	return mapped_addr;
}

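/*
 * Choose a size for a new GCS: an explicitly requested size is just
 * page aligned, otherwise default to half of RLIMIT_STACK, clamped to
 * the range PAGE_SIZE..2G.
 */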
static unsigned long gcs_size(unsigned long size)
{
	if (size)
		return PAGE_ALIGN(size);

	/* Allocate RLIMIT_STACK/2 with limits of PAGE_SIZE..2G */
	size = PAGE_ALIGN(min_t(unsigned long long,
				rlimit(RLIMIT_STACK) / 2, SZ_2G));
	return max(PAGE_SIZE, size);
}

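/*
 * Allocate a GCS for a thread being created via clone().  Only threads
 * sharing the parent's address space (CLONE_VM without CLONE_VFORK)
 * get a new stack, sized from half of args->stack_size; everyone else
 * keeps using the existing GCS and just has GCSPR_EL0 carried over
 * from the hardware.
 */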
unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
				     const struct kernel_clone_args *args)
{
	unsigned long addr, size;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(tsk))
		return 0;

	if ((args->flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM) {
		tsk->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
		return 0;
	}

	size = args->stack_size / 2;

	size = gcs_size(size);
	addr = alloc_gcs(0, size);
	if (IS_ERR_VALUE(addr))
		return addr;

	tsk->thread.gcs_base = addr;
	tsk->thread.gcs_size = size;
	tsk->thread.gcspr_el0 = addr + size - sizeof(u64);

	return addr;
}

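/*
 * The map_shadow_stack() syscall: allocate a new GCS for userspace,
 * optionally terminated with an end of stack marker and/or a cap token
 * that allows the new stack to be switched to.
 */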
SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
{
	unsigned long alloc_size;
	unsigned long __user *cap_ptr;
	unsigned long cap_val;
	int ret = 0;
	int cap_offset;

	if (!system_supports_gcs())
		return -EOPNOTSUPP;

	if (flags & ~(SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr))
		return -EINVAL;

	/*
	 * Sizes must be multiples of the 8 byte GCS entry size, and a
	 * single-entry stack is too small to hold both a top of stack
	 * marker and a cap token.
	 */
	if (size == 8 || !IS_ALIGNED(size, 8))
		return -EINVAL;

	/*
	 * An overflow would result in attempting to write the restore token
	 * to the wrong location. Not catastrophic, but just return the right
	 * error code and block it.
	 */
	alloc_size = PAGE_ALIGN(size);
	if (alloc_size < size)
		return -EOVERFLOW;

	addr = alloc_gcs(addr, alloc_size);
	if (IS_ERR_VALUE(addr))
		return addr;

	/*
	 * Put a cap token at the end of the allocated region so it
	 * can be switched to.
	 */
	if (flags & SHADOW_STACK_SET_TOKEN) {
		/* Leave an extra empty frame as a top of stack marker? */
		if (flags & SHADOW_STACK_SET_MARKER)
			cap_offset = 2;
		else
			cap_offset = 1;

		cap_ptr = (unsigned long __user *)(addr + size -
						   (cap_offset * sizeof(unsigned long)));
		cap_val = GCS_CAP(cap_ptr);

		put_user_gcs(cap_val, cap_ptr, &ret);
		if (ret != 0) {
			vm_munmap(addr, size);
			return -EFAULT;
		}

		/*
		 * Ensure the new cap is ordered before standard
		 * memory accesses to the same location.
		 */
		gcsb_dsync();
	}

	return addr;
}

/*
 * Apply the GCS mode configured for the specified task to the
 * hardware.
 */
void gcs_set_el0_mode(struct task_struct *task)
{
	u64 gcscre0_el1 = GCSCRE0_EL1_nTR;

	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE)
		gcscre0_el1 |= GCSCRE0_EL1_RVCHKEN | GCSCRE0_EL1_PCRSEL;

	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_WRITE)
		gcscre0_el1 |= GCSCRE0_EL1_STREn;

	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_PUSH)
		gcscre0_el1 |= GCSCRE0_EL1_PUSHMEn;

	write_sysreg_s(gcscre0_el1, SYS_GCSCRE0_EL1);
}

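/*
 * Release the kernel-allocated GCS, if any, when a thread exits, and
 * reset the task's GCS state.
 */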
void gcs_free(struct task_struct *task)
{
	if (!system_supports_gcs())
		return;

	/*
	 * When fork() with CLONE_VM fails, the child (tsk) already
	 * has a GCS allocated, and exit_thread() calls this function
	 * to free it.  In this case the parent (current) and the
	 * child share the same mm struct.
	 */
	if (!task->mm || task->mm != current->mm)
		return;

	if (task->thread.gcs_base)
		vm_munmap(task->thread.gcs_base, task->thread.gcs_size);

	task->thread.gcspr_el0 = 0;
	task->thread.gcs_base = 0;
	task->thread.gcs_size = 0;
}

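/*
 * Back end for the PR_SET_SHADOW_STACK_STATUS prctl(): validate the
 * requested mode against locked bits, allocate a stack on first enable
 * (for the current task only) and, if the target is current, apply the
 * new mode to the hardware immediately.
 */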
int arch_set_shadow_stack_status(struct task_struct *task, unsigned long arg)
{
	unsigned long gcs, size;
	int ret;

	if (!system_supports_gcs())
		return -EINVAL;

	if (is_compat_thread(task_thread_info(task)))
		return -EINVAL;

	/* Reject unknown flags */
	if (arg & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	ret = gcs_check_locked(task, arg);
	if (ret != 0)
		return ret;

	/* If we are enabling GCS then make sure we have a stack */
	if (arg & PR_SHADOW_STACK_ENABLE &&
	    !task_gcs_el0_enabled(task)) {
		/* Do not allow GCS to be reenabled */
		if (task->thread.gcs_base || task->thread.gcspr_el0)
			return -EINVAL;

		if (task != current)
			return -EBUSY;

		size = gcs_size(0);
		gcs = alloc_gcs(0, size);
		/* alloc_gcs() reports failure as an errno value, never as 0 */
		if (IS_ERR_VALUE(gcs))
			return -ENOMEM;

		task->thread.gcspr_el0 = gcs + size - sizeof(u64);
		task->thread.gcs_base = gcs;
		task->thread.gcs_size = size;
		if (task == current)
			write_sysreg_s(task->thread.gcspr_el0,
				       SYS_GCSPR_EL0);
	}

	task->thread.gcs_el0_mode = arg;
	if (task == current)
		gcs_set_el0_mode(task);

	return 0;
}

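/* Back end for the PR_GET_SHADOW_STACK_STATUS prctl(): report the mode. */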
int arch_get_shadow_stack_status(struct task_struct *task,
				 unsigned long __user *arg)
{
	if (!system_supports_gcs())
		return -EINVAL;

	if (is_compat_thread(task_thread_info(task)))
		return -EINVAL;

	return put_user(task->thread.gcs_el0_mode, arg);
}

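/*
 * Back end for the PR_LOCK_SHADOW_STACK_STATUS prctl(): make the
 * selected mode bits immutable for the task.
 */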
int arch_lock_shadow_stack_status(struct task_struct *task,
				  unsigned long arg)
{
	if (!system_supports_gcs())
		return -EINVAL;

	if (is_compat_thread(task_thread_info(task)))
		return -EINVAL;

	/*
	 * We support locking unknown bits so applications can prevent
	 * any changes in a future-proof manner.
	 */
	task->thread.gcs_el0_locked |= arg;

	return 0;
}