/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005 Andrea Arcangeli <[email protected]>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <[email protected]>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 *        of Berkeley Packet Filters/Linux Socket Filters.
 */

#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/seccomp.h>

/* #define SECCOMP_DEBUG 1 */

#ifdef CONFIG_SECCOMP_FILTER
#include <asm/syscall.h>
#include <linux/filter.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section.  In general, this
 *         is only needed for handling filters shared across tasks.
 * @prev: points to a previously installed, or inherited, filter
 * @len: the number of instructions in the program
 * @insns: the BPF program instructions to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	atomic_t usage;
	struct seccomp_filter *prev;
	unsigned short len;  /* Instruction count */
	struct sock_filter insns[];
};
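
/*
 * Illustrative sketch (not part of the original source): if task A attaches
 * filter F1 and then forks tasks B and C, each of which attaches its own
 * filter, memory holds a small tree rooted at F1:
 *
 *	A->seccomp.filter -> F1
 *	B->seccomp.filter -> F2 -> F1	(F2->prev == F1)
 *	C->seccomp.filter -> F3 -> F1	(F3->prev == F1)
 *
 * Each task walks only its own singly-linked list via @prev; the shared F1
 * stays allocated until the last reference to it is dropped via @usage.
 */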

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
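/* With the 8-byte struct sock_filter this is (1 << 18) / 8 = 32768 insns. */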

/**
 * get_u32 - returns a u32 from an offset into data
 * @data: an unsigned 64-bit value
 * @index: 0 or 1 to return the first or second 32-bits
 *
 * This inline exists to hide the length of unsigned long.  If a 32-bit
 * unsigned long is passed in, it will be extended and the top 32-bits will be
 * 0.  If it is a 64-bit unsigned long, then whatever data is resident will be
 * properly returned.
 *
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static inline u32 get_u32(u64 data, int index)
{
	return ((u32 *)&data)[index];
}

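/*
 * Worked example (illustrative, architecture-dependent): on a little-endian
 * machine, get_u32(0x1122334455667788ULL, 0) returns 0x55667788 and
 * get_u32(0x1122334455667788ULL, 1) returns 0x11223344; a big-endian machine
 * swaps the two, which is exactly the endianness caveat noted above.
 */
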
/* Helper for bpf_load below. */
#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
/**
 * bpf_load: checks and returns the requested 32 bits of seccomp_data
 * @off: offset into struct seccomp_data to load from
 *
 * Returns the requested 32-bits of data.
 * seccomp_check_filter() should assure that @off is 32-bit aligned
 * and not out of bounds.  Failure to do so is a BUG.
 */
u32 seccomp_bpf_load(int off)
{
	struct pt_regs *regs = task_pt_regs(current);
	if (off == BPF_DATA(nr))
		return syscall_get_nr(current, regs);
	if (off == BPF_DATA(arch))
		return syscall_get_arch(current, regs);
	if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
		unsigned long value;
		int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
		int index = !!(off % sizeof(u64));
		syscall_get_arguments(current, regs, arg, 1, &value);
		return get_u32(value, index);
	}
	if (off == BPF_DATA(instruction_pointer))
		return get_u32(KSTK_EIP(current), 0);
	if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
		return get_u32(KSTK_EIP(current), 1);
	/* seccomp_check_filter should make this impossible. */
	BUG();
}

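/*
 * Worked example (illustrative): a load at BPF_DATA(args[1]) resolves to
 * arg = 1, index = 0, i.e. one half of the second syscall argument, while a
 * load at BPF_DATA(args[1]) + sizeof(u32) resolves to arg = 1, index = 1,
 * the other half.  Which half is "low" follows get_u32()'s endianness rule.
 */
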
/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by sk_chk_filter) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_S_LD_W_ABS:
			ftest->code = BPF_S_ANC_SECCOMP_LD_W;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_S_LD_W_LEN:
			ftest->code = BPF_S_LD_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_S_LDX_W_LEN:
			ftest->code = BPF_S_LDX_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_S_RET_K:
		case BPF_S_RET_A:
		case BPF_S_ALU_ADD_K:
		case BPF_S_ALU_ADD_X:
		case BPF_S_ALU_SUB_K:
		case BPF_S_ALU_SUB_X:
		case BPF_S_ALU_MUL_K:
		case BPF_S_ALU_MUL_X:
		case BPF_S_ALU_DIV_X:
		case BPF_S_ALU_AND_K:
		case BPF_S_ALU_AND_X:
		case BPF_S_ALU_OR_K:
		case BPF_S_ALU_OR_X:
		case BPF_S_ALU_LSH_K:
		case BPF_S_ALU_LSH_X:
		case BPF_S_ALU_RSH_K:
		case BPF_S_ALU_RSH_X:
		case BPF_S_ALU_NEG:
		case BPF_S_LD_IMM:
		case BPF_S_LDX_IMM:
		case BPF_S_MISC_TAX:
		case BPF_S_MISC_TXA:
		case BPF_S_ALU_DIV_K:
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
		case BPF_S_JMP_JA:
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

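/*
 * Worked example (illustrative): a classic BPF "ld [0]" instruction
 * (BPF_S_LD_W_ABS with k == 0) passes the alignment and bounds check and is
 * rewritten to BPF_S_ANC_SECCOMP_LD_W, so that sk_run_filter() fetches
 * seccomp_bpf_load(0), the current syscall number, instead of reading an skb.
 */
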
/**
 * seccomp_run_filters - evaluates all seccomp filters against @syscall
 * @syscall: number of the current system call
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(int syscall)
{
	struct seccomp_filter *f;
	u32 ret = SECCOMP_RET_ALLOW;

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (WARN_ON(current->seccomp.filter == NULL))
		return SECCOMP_RET_KILL;

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (f = current->seccomp.filter; f; f = f->prev) {
		u32 cur_ret = sk_run_filter(NULL, f->insns);
		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
			ret = cur_ret;
	}
	return ret;
}

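/*
 * Worked example (illustrative): with the standard action ordering
 * SECCOMP_RET_KILL < TRAP < ERRNO < TRACE < ALLOW, a task carrying one
 * filter that returns SECCOMP_RET_ALLOW and another that returns
 * SECCOMP_RET_ERRNO | 1 gets the ERRNO result (with its data intact),
 * because the numerically lower action wins regardless of filter order.
 */
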
/**
 * seccomp_attach_filter: Attaches a seccomp filter to current.
 * @fprog: BPF program to install
 *
 * Returns 0 on success or an errno on failure.
 */
static long seccomp_attach_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *filter;
	unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
	unsigned long total_insns = fprog->len;
	long ret;

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return -EINVAL;

	for (filter = current->seccomp.filter; filter; filter = filter->prev)
		total_insns += filter->len + 4;  /* include a 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/*
	 * Installing a seccomp filter requires that the task have
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!current->no_new_privs &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return -EACCES;

	/* Allocate a new seccomp_filter */
	filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
			 GFP_KERNEL|__GFP_NOWARN);
	if (!filter)
		return -ENOMEM;
	atomic_set(&filter->usage, 1);
	filter->len = fprog->len;

	/* Copy the instructions from fprog. */
	ret = -EFAULT;
	if (copy_from_user(filter->insns, fprog->filter, fp_size))
		goto fail;

	/* Check and rewrite the fprog via the skb checker */
	ret = sk_chk_filter(filter->insns, filter->len);
	if (ret)
		goto fail;

	/* Check and rewrite the fprog for seccomp use */
	ret = seccomp_check_filter(filter->insns, filter->len);
	if (ret)
		goto fail;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;
	return 0;
fail:
	kfree(filter);
	return ret;
}

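/*
 * Worked example (illustrative): a task that already has two 10-instruction
 * filters attached and asks for a third counts 10 + (10 + 4) + (10 + 4) = 38
 * instructions against MAX_INSNS_PER_PATH, since each previously installed
 * filter contributes its length plus the 4-instruction penalty above.
 */
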
/**
 * seccomp_attach_user_filter - attaches a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns 0 on success and non-zero otherwise.
 */
long seccomp_attach_user_filter(char __user *user_filter)
{
	struct sock_fprog fprog;
	long ret = -EFAULT;

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	ret = seccomp_attach_filter(&fprog);
out:
	return ret;
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	/* Reference count is bounded by the number of total processes. */
	atomic_inc(&orig->usage);
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	/* Clean up single-reference branches iteratively. */
	while (orig && atomic_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		kfree(freeme);
	}
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;
	memset(&info, 0, sizeof(info));
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current, task_pt_regs(current));
	info.si_syscall = syscall;
	force_sig_info(SIGSYS, &info, current);
}
#endif	/* CONFIG_SECCOMP_FILTER */

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};

#ifdef CONFIG_COMPAT
static int mode1_syscalls_32[] = {
	__NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
	0, /* null terminated */
};
#endif

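/*
 * Return convention of __secure_computing() below: 0 means the system call
 * may proceed; -1 (reached through the "skip" label) means the call must not
 * be executed because it has already been handled here (an errno was set, a
 * SIGSYS was delivered, or the call was skipped for lack of a tracer).
 */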
int __secure_computing(int this_syscall)
{
	int mode = current->seccomp.mode;
	int exit_sig = 0;
	int *syscall;
	u32 ret;

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		syscall = mode1_syscalls;
#ifdef CONFIG_COMPAT
		if (is_compat_task())
			syscall = mode1_syscalls_32;
#endif
		do {
			if (*syscall == this_syscall)
				return 0;
		} while (*++syscall);
		exit_sig = SIGKILL;
		ret = SECCOMP_RET_KILL;
		break;
#ifdef CONFIG_SECCOMP_FILTER
	case SECCOMP_MODE_FILTER: {
		int data;
		ret = seccomp_run_filters(this_syscall);
		data = ret & SECCOMP_RET_DATA;
		ret &= SECCOMP_RET_ACTION;
		switch (ret) {
		case SECCOMP_RET_ERRNO:
			/* Set the low-order 16-bits as an errno. */
			syscall_set_return_value(current, task_pt_regs(current),
						 -data, 0);
			goto skip;
		case SECCOMP_RET_TRAP:
			/* Show the handler the original registers. */
			syscall_rollback(current, task_pt_regs(current));
			/* Let the filter pass back 16 bits of data. */
			seccomp_send_sigsys(this_syscall, data);
			goto skip;
		case SECCOMP_RET_TRACE:
			/* Skip these calls if there is no tracer. */
			if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP))
				goto skip;
			/* Allow the BPF to provide the event message */
			ptrace_event(PTRACE_EVENT_SECCOMP, data);
			/*
			 * The delivery of a fatal signal during event
			 * notification may silently skip tracer notification.
			 * Terminating the task now avoids executing a system
			 * call that may not be intended.
			 */
			if (fatal_signal_pending(current))
				break;
			return 0;
		case SECCOMP_RET_ALLOW:
			return 0;
		case SECCOMP_RET_KILL:
		default:
			break;
		}
		exit_sig = SIGSYS;
		break;
	}
#endif
	default:
		BUG();
	}

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	audit_seccomp(this_syscall, exit_sig, ret);
	do_exit(exit_sig);
#ifdef CONFIG_SECCOMP_FILTER
skip:
	audit_seccomp(this_syscall, exit_sig, ret);
#endif
	return -1;
}

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * This function may be called repeatedly with a @seccomp_mode of
 * SECCOMP_MODE_FILTER to install additional filters.  Every filter
 * successfully installed will be evaluated (in reverse order) for each system
 * call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or an errno (such as -EINVAL) on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	long ret = -EINVAL;

	if (current->seccomp.mode &&
	    current->seccomp.mode != seccomp_mode)
		goto out;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		ret = 0;
#ifdef TIF_NOTSC
		disable_TSC();
#endif
		break;
#ifdef CONFIG_SECCOMP_FILTER
	case SECCOMP_MODE_FILTER:
		ret = seccomp_attach_user_filter(filter);
		if (ret)
			goto out;
		break;
#endif
	default:
		goto out;
	}

	current->seccomp.mode = seccomp_mode;
	set_thread_flag(TIF_SECCOMP);
out:
	return ret;
}
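
/*
 * Illustrative userspace sketch (not part of this file): one way a process
 * can reach prctl_set_seccomp() above with SECCOMP_MODE_FILTER.  The filter
 * fails getpid() with EPERM and allows everything else; __NR_getpid and the
 * headers are the usual userspace ones and are assumptions of the example.
 *
 *	#include <errno.h>
 *	#include <linux/filter.h>
 *	#include <linux/seccomp.h>
 *	#include <stddef.h>
 *	#include <sys/prctl.h>
 *	#include <sys/syscall.h>
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K,
 *			 SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 *
 * The PR_SET_NO_NEW_PRIVS call satisfies the no_new_privs check in
 * seccomp_attach_filter(); a task holding CAP_SYS_ADMIN could skip it.
 * A production filter would also check seccomp_data.arch before trusting
 * the syscall number.
 */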