/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
                     void *buf, int len, unsigned int gup_flags)
{
        struct mm_struct *mm;
        int ret;

        mm = get_task_mm(tsk);
        if (!mm)
                return 0;

        if (!tsk->ptrace ||
            (current != tsk->parent) ||
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
             !ptracer_capable(tsk, mm->user_ns))) {
                mmput(mm);
                return 0;
        }

        ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
        mmput(mm);

        return ret;
}

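/*
 * Put @child on @new_parent's ->ptraced list, repoint child->parent, and
 * pin @ptracer_cred.  Called with tasklist_lock write-held; ptrace_link()
 * below passes the ptracer's own credentials.
 */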
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
                   const struct cred *ptracer_cred)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
        child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        rcu_read_lock();
        __ptrace_link(child, new_parent, __task_cred(new_parent));
        rcu_read_unlock();
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
        const struct cred *old_cred;
        BUG_ON(!child->ptrace);

        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);
        old_cred = child->ptracer_cred;
        child->ptracer_cred = NULL;
        put_cred(old_cred);

        spin_lock(&child->sighand->siglock);
        child->ptrace = 0;
        /*
         * Clear all pending traps and TRAPPING.  TRAPPING should be
         * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
         */
        task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
        task_clear_jobctl_trapping(child);

        /*
         * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
         * @child isn't dead.
         */
        if (!(child->flags & PF_EXITING) &&
            (child->signal->flags & SIGNAL_STOP_STOPPED ||
             child->signal->group_stop_count)) {
                child->jobctl |= JOBCTL_STOP_PENDING;

                /*
                 * This is only possible if this thread was cloned by the
                 * traced task running in the stopped group; set the signal
                 * for the future reports.
                 * FIXME: we should change ptrace_init_task() to handle this
                 * case.
                 */
                if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
                        child->jobctl |= SIGSTOP;
        }

        /*
         * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
         * @child in the butt.  Note that @resume should be used iff @child
         * is in TASK_TRACED; otherwise, we might unduly disrupt
         * TASK_KILLABLE sleeps.
         */
        if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
                ptrace_signal_wake_up(child, true);

        spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
        bool ret = false;

        /* Lockless, nobody but us can set this flag */
        if (task->jobctl & JOBCTL_LISTENING)
                return ret;

        spin_lock_irq(&task->sighand->siglock);
        if (task_is_traced(task) && !__fatal_signal_pending(task)) {
                task->state = __TASK_TRACED;
                ret = true;
        }
        spin_unlock_irq(&task->sighand->siglock);

        return ret;
}

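/*
 * Undo ptrace_freeze_traced(): put a frozen (__TASK_TRACED) tracee back
 * into ordinary TASK_TRACED, or wake it up so it can exit if a fatal
 * signal arrived while it was frozen.
 */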
static void ptrace_unfreeze_traced(struct task_struct *task)
{
        if (task->state != __TASK_TRACED)
                return;

        WARN_ON(!task->ptrace || task->parent != current);

        /*
         * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
         * Recheck state under the lock to close this race.
         */
        spin_lock_irq(&task->sighand->siglock);
        if (task->state == __TASK_TRACED) {
                if (__fatal_signal_pending(task))
                        wake_up_state(task, __TASK_TRACED);
                else
                        task->state = TASK_TRACED;
        }
        spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if @child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks.  After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if (child->ptrace && child->parent == current) {
                WARN_ON(child->state == __TASK_TRACED);
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                if (ignore_state || ptrace_freeze_traced(child))
                        ret = 0;
        }
        read_unlock(&tasklist_lock);

        if (!ret && !ignore_state) {
                if (!wait_task_inactive(child, __TASK_TRACED)) {
                        /*
                         * This can only happen if may_ptrace_stop() fails and
                         * ptrace_stop() changes ->state back to TASK_RUNNING,
                         * so we should not worry about leaking __TASK_TRACED.
                         */
                        WARN_ON(child->state == __TASK_TRACED);
                        ret = -ESRCH;
                }
        }

        return ret;
}

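/*
 * Check for CAP_SYS_PTRACE in @ns.  PTRACE_MODE_NOAUDIT callers (such as
 * read-only /proc access checks) skip generating an audit record.
 */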
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
        if (mode & PTRACE_MODE_NOAUDIT)
                return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
        else
                return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        const struct cred *cred = current_cred(), *tcred;
        struct mm_struct *mm;
        kuid_t caller_uid;
        kgid_t caller_gid;

        if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
                WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
                return -EPERM;
        }

        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */

        /* Don't let security modules deny introspection */
        if (same_thread_group(task, current))
                return 0;
        rcu_read_lock();
        if (mode & PTRACE_MODE_FSCREDS) {
                caller_uid = cred->fsuid;
                caller_gid = cred->fsgid;
        } else {
                /*
                 * Using the euid would make more sense here, but something
                 * in userland might rely on the old behavior, and this
                 * shouldn't be a security problem since
                 * PTRACE_MODE_REALCREDS implies that the caller explicitly
                 * used a syscall that requests access to another process
                 * (and not a filesystem syscall to procfs).
                 */
                caller_uid = cred->uid;
                caller_gid = cred->gid;
        }
        tcred = __task_cred(task);
        if (uid_eq(caller_uid, tcred->euid) &&
            uid_eq(caller_uid, tcred->suid) &&
            uid_eq(caller_uid, tcred->uid)  &&
            gid_eq(caller_gid, tcred->egid) &&
            gid_eq(caller_gid, tcred->sgid) &&
            gid_eq(caller_gid, tcred->gid))
                goto ok;
        if (ptrace_has_cap(tcred->user_ns, mode))
                goto ok;
        rcu_read_unlock();
        return -EPERM;
ok:
        rcu_read_unlock();
        mm = task->mm;
        if (mm &&
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
             !ptrace_has_cap(mm->user_ns, mode)))
                return -EPERM;

        return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;
        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return !err;
}

static int ptrace_attach(struct task_struct *task, long request,
                         unsigned long addr,
                         unsigned long flags)
{
        bool seize = (request == PTRACE_SEIZE);
        int retval;

        retval = -EIO;
        if (seize) {
                if (addr != 0)
                        goto out;
                if (flags & ~(unsigned long)PTRACE_O_MASK)
                        goto out;
                flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
        } else {
                flags = PT_PTRACED;
        }

        audit_ptrace(task);

        retval = -EPERM;
        if (unlikely(task->flags & PF_KTHREAD))
                goto out;
        if (same_thread_group(task, current))
                goto out;

        /*
         * Protect exec's credential calculations against our interference;
         * SUID, SGID and LSM creds get determined differently
         * under ptrace.
         */
        retval = -ERESTARTNOINTR;
        if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
                goto out;

        task_lock(task);
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
        task_unlock(task);
        if (retval)
                goto unlock_creds;

        write_lock_irq(&tasklist_lock);
        retval = -EPERM;
        if (unlikely(task->exit_state))
                goto unlock_tasklist;
        if (task->ptrace)
                goto unlock_tasklist;

        if (seize)
                flags |= PT_SEIZED;
        task->ptrace = flags;

        ptrace_link(task, current);

        /* SEIZE doesn't trap tracee on attach */
        if (!seize)
                send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);

        spin_lock(&task->sighand->siglock);

        /*
         * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
         * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
         * will be cleared if the child completes the transition or any
         * event which clears the group stop states happens.  We'll wait
         * for the transition to complete before returning from this
         * function.
         *
         * This hides STOPPED -> RUNNING -> TRACED transition from the
         * attaching thread but a different thread in the same group can
         * still observe the transient RUNNING state.  IOW, if another
         * thread's WNOHANG wait(2) on the stopped tracee races against
         * ATTACH, the wait(2) may fail due to the transient RUNNING.
         *
         * The following task_is_stopped() test is safe as both transitions
         * in and out of STOPPED are protected by siglock.
         */
        if (task_is_stopped(task) &&
            task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
                signal_wake_up_state(task, __TASK_STOPPED);

        spin_unlock(&task->sighand->siglock);

        retval = 0;
unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
unlock_creds:
        mutex_unlock(&task->signal->cred_guard_mutex);
out:
        if (!retval) {
                /*
                 * We do not bother to change retval or clear JOBCTL_TRAPPING
                 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
                 * not return to user-mode, it will exit and clear this bit in
                 * __ptrace_unlink() if it wasn't already cleared by the tracee;
                 * and until then nobody can ptrace this task.
                 */
                wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
                proc_ptrace_connector(task, PTRACE_ATTACH);
        }

        return retval;
}

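/*
 * For orientation, the userspace side of a classic attach looks roughly
 * like this (an illustrative sketch, not part of this file):
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);		(wait for the SIGSTOP trap)
 *	... PTRACE_PEEKDATA / PTRACE_GETREGSET / etc ...
 *	ptrace(PTRACE_DETACH, pid, NULL, 0);
 *
 * PTRACE_SEIZE attaches without stopping the tracee; a stop must then be
 * requested explicitly with PTRACE_INTERRUPT.
 */
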
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
        int ret = -EPERM;

        write_lock_irq(&tasklist_lock);
        /* Are we already being traced? */
        if (!current->ptrace) {
                ret = security_ptrace_traceme(current->parent);
                /*
                 * Check PF_EXITING to ensure ->real_parent has not passed
                 * exit_ptrace(). Otherwise we don't report the error but
                 * pretend ->real_parent untraces us right after return.
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace = PT_PTRACED;
                        ptrace_link(current, current->real_parent);
                }
        }
        write_unlock_irq(&tasklist_lock);

        return ret;
}

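/*
 * The usual userspace pattern (illustrative sketch): the child asks to be
 * traced by its parent before exec'ing the program to be debugged:
 *
 *	if (fork() == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execv(path, argv);
 *	}
 */
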
/*
 * Called with irqs disabled, returns true if children should reap
 * themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
        int ret;
        spin_lock(&sigh->siglock);
        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
        spin_unlock(&sigh->siglock);
        return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do.  But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now; in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
        bool dead;

        __ptrace_unlink(p);

        if (p->exit_state != EXIT_ZOMBIE)
                return false;

        dead = !thread_group_leader(p);

        if (!dead && thread_group_empty(p)) {
                if (!same_thread_group(p->real_parent, tracer))
                        dead = do_notify_parent(p, p->exit_signal);
                else if (ignoring_children(tracer->sighand)) {
                        __wake_up_parent(p, tracer);
                        dead = true;
                }
        }
        /* Mark it as in the process of being reaped. */
        if (dead)
                p->exit_state = EXIT_DEAD;
        return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);

        write_lock_irq(&tasklist_lock);
        /*
         * We rely on ptrace_freeze_traced(). It can't be killed and
         * untraced by another thread, it can't be a zombie.
         */
        WARN_ON(!child->ptrace || child->exit_state);
        /*
         * tasklist_lock avoids the race with wait_task_stopped(), see
         * the comment in ptrace_resume().
         */
        child->exit_code = data;
        __ptrace_detach(current, child);
        write_unlock_irq(&tasklist_lock);

        proc_ptrace_connector(child, PTRACE_DETACH);

        return 0;
}

/*
 * Detach all tasks we were using ptrace on.  Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
        struct task_struct *p, *n;

        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                if (unlikely(p->ptrace & PT_EXITKILL))
                        send_sig_info(SIGKILL, SEND_SIG_PRIV, p);

                if (__ptrace_detach(tracer, p))
                        list_add(&p->ptrace_entry, dead);
        }
}

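/*
 * Copy @len bytes of the tracee's memory at @src into the user buffer
 * @dst, in 128-byte chunks.  Returns the number of bytes copied, -EIO if
 * nothing at all could be read, or -EFAULT on a bad user buffer.
 */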
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = ptrace_access_vm(tsk, dst, buf, this_len,
                                FOLL_FORCE | FOLL_WRITE);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

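/*
 * Handle PTRACE_SETOPTIONS (and PTRACE_OLDSETOPTIONS): validate @data
 * against PTRACE_O_MASK, then install the new option bits with a single
 * store so the tracee never sees a transient options-cleared state.
 */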
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
        unsigned flags;

        if (data & ~(unsigned long)PTRACE_O_MASK)
                return -EINVAL;

        if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
                if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
                    !IS_ENABLED(CONFIG_SECCOMP))
                        return -EINVAL;

                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;

                if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
                    current->ptrace & PT_SUSPEND_SECCOMP)
                        return -EPERM;
        }

        /* Avoid intermediate state when all opts are cleared */
        flags = child->ptrace;
        flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
        flags |= (data << PT_OPT_FLAG_SHIFT);
        child->ptrace = flags;

        return 0;
}

static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        copy_siginfo(info, child->last_siginfo);
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        copy_siginfo(child->last_siginfo, info);
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}

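/*
 * Handle PTRACE_PEEKSIGINFO: copy up to 'nr' queued siginfo entries,
 * starting at queue offset 'off' (both read from the
 * ptrace_peeksiginfo_args struct at @addr), to the user buffer at @data.
 * PTRACE_PEEKSIGINFO_SHARED selects the process-wide queue instead of
 * the per-thread one.  Returns the number of entries copied (possibly
 * zero) or a negative error code.
 */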
static int ptrace_peek_siginfo(struct task_struct *child,
                                unsigned long addr,
                                unsigned long data)
{
        struct ptrace_peeksiginfo_args arg;
        struct sigpending *pending;
        struct sigqueue *q;
        int ret, i;

        ret = copy_from_user(&arg, (void __user *) addr,
                                sizeof(struct ptrace_peeksiginfo_args));
        if (ret)
                return -EFAULT;

        if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
                return -EINVAL; /* unknown flags */

        if (arg.nr < 0)
                return -EINVAL;

        if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
                pending = &child->signal->shared_pending;
        else
                pending = &child->pending;

        for (i = 0; i < arg.nr; ) {
                kernel_siginfo_t info;
                s32 off = arg.off + i;

                spin_lock_irq(&child->sighand->siglock);
                list_for_each_entry(q, &pending->list, list) {
                        if (!off--) {
                                copy_siginfo(&info, &q->info);
                                break;
                        }
                }
                spin_unlock_irq(&child->sighand->siglock);

                if (off >= 0) /* beyond the end of the list */
                        break;

#ifdef CONFIG_COMPAT
                if (unlikely(in_compat_syscall())) {
                        compat_siginfo_t __user *uinfo = compat_ptr(data);

                        if (copy_siginfo_to_user32(uinfo, &info)) {
                                ret = -EFAULT;
                                break;
                        }

                } else
#endif
                {
                        siginfo_t __user *uinfo = (siginfo_t __user *) data;

                        if (copy_siginfo_to_user(uinfo, &info)) {
                                ret = -EFAULT;
                                break;
                        }
                }

                data += sizeof(siginfo_t);
                i++;

                if (signal_pending(current))
                        break;

                cond_resched();
        }

        if (i > 0)
                return i;

        return ret;
}

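/*
 * Not every architecture implements all of the single-step requests;
 * map the missing ones to 0 so the resume path below compiles everywhere.
 */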
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
                         unsigned long data)
{
        bool need_siglock;

        if (!valid_signal(data))
                return -EIO;

        if (request == PTRACE_SYSCALL)
                set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
                set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

        if (is_singleblock(request)) {
                if (unlikely(!arch_has_block_step()))
                        return -EIO;
                user_enable_block_step(child);
        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
                if (unlikely(!arch_has_single_step()))
                        return -EIO;
                user_enable_single_step(child);
        } else {
                user_disable_single_step(child);
        }

        /*
         * Change ->exit_code and ->state under siglock to avoid the race
         * with wait_task_stopped() in between; a non-zero ->exit_code will
         * wrongly look like another report from tracee.
         *
         * Note that we need siglock even if ->exit_code == data and/or this
         * status was not reported yet, the new status must not be cleared by
         * wait_task_stopped() after resume.
         *
         * If data == 0 we do not care if wait_task_stopped() reports the old
         * status and clears the code too; this can't race with the tracee, it
         * takes siglock after resume.
         */
        need_siglock = data && !thread_group_empty(current);
        if (need_siglock)
                spin_lock_irq(&child->sighand->siglock);
        child->exit_code = data;
        wake_up_state(child, __TASK_TRACED);
        if (need_siglock)
                spin_unlock_irq(&child->sighand->siglock);

        return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
        const struct user_regset *regset;
        int n;

        for (n = 0; n < view->n; ++n) {
                regset = view->regsets + n;
                if (regset->core_note_type == type)
                        return regset;
        }

        return NULL;
}

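/*
 * Back end for PTRACE_GETREGSET/PTRACE_SETREGSET: look up the regset
 * whose NT_* core note type is @type, clamp the iovec to the regset
 * size, and copy registers to or from the user buffer it describes.
 */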
static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
                         struct iovec *kiov)
{
        const struct user_regset_view *view = task_user_regset_view(task);
        const struct user_regset *regset = find_regset(view, type);
        int regset_no;

        if (!regset || (kiov->iov_len % regset->size) != 0)
                return -EINVAL;

        regset_no = regset - view->regsets;
        kiov->iov_len = min(kiov->iov_len,
                            (__kernel_size_t) (regset->n * regset->size));

        if (req == PTRACE_GETREGSET)
                return copy_regset_to_user(task, view, regset_no, 0,
                                           kiov->iov_len, kiov->iov_base);
        else
                return copy_regset_from_user(task, view, regset_no, 0,
                                             kiov->iov_len, kiov->iov_base);
}

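/*
 * Userspace view (illustrative sketch; user_regs_struct is the x86
 * layout, other architectures differ): general-purpose registers are
 * typically fetched with the NT_PRSTATUS note type:
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *
 * On return, iov.iov_len is trimmed to the amount the kernel wrote.
 */
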
/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif

int ptrace_request(struct task_struct *child, long request,
                   unsigned long addr, unsigned long data)
{
        bool seized = child->ptrace & PT_SEIZED;
        int ret = -EIO;
        kernel_siginfo_t siginfo, *si;
        void __user *datavp = (void __user *) data;
        unsigned long __user *datalp = datavp;
        unsigned long flags;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, datalp);
                break;

        case PTRACE_PEEKSIGINFO:
                ret = ptrace_peek_siginfo(child, addr, data);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user(datavp, &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                ret = copy_siginfo_from_user(&siginfo, datavp);
                if (!ret)
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_GETSIGMASK:
                if (addr != sizeof(sigset_t)) {
                        ret = -EINVAL;
                        break;
                }

                if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
                        ret = -EFAULT;
                else
                        ret = 0;

                break;

        case PTRACE_SETSIGMASK: {
                sigset_t new_set;

                if (addr != sizeof(sigset_t)) {
                        ret = -EINVAL;
                        break;
                }

                if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
                        ret = -EFAULT;
                        break;
                }

                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

                /*
                 * Every thread does recalc_sigpending() after resume, so
                 * retarget_shared_pending() and recalc_sigpending() are not
                 * called here.
                 */
                spin_lock_irq(&child->sighand->siglock);
                child->blocked = new_set;
                spin_unlock_irq(&child->sighand->siglock);

                ret = 0;
                break;
        }

        case PTRACE_INTERRUPT:
                /*
                 * Stop tracee without any side-effect on signal or job
                 * control.  At least one trap is guaranteed to happen
                 * after this request.  If @child is already trapped, the
                 * current trap is not disturbed and another trap will
                 * happen after the current trap is ended with PTRACE_CONT.
                 *
                 * The actual trap might not be PTRACE_EVENT_STOP trap but
                 * the pending condition is cleared regardless.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                /*
                 * INTERRUPT doesn't disturb existing trap sans one
                 * exception.  If ptracer issued LISTEN for the current
                 * STOP, this INTERRUPT should clear LISTEN and re-trap
                 * tracee into STOP.
                 */
                if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
                        ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

                unlock_task_sighand(child, &flags);
                ret = 0;
                break;

        case PTRACE_LISTEN:
                /*
                 * Listen for events.  Tracee must be in STOP.  It's not
                 * resumed per-se but is not considered to be in TRACED by
                 * wait(2) or ptrace(2).  If an async event (e.g. group
                 * stop state change) happens, tracee will enter STOP trap
                 * again.  Alternatively, ptracer can issue INTERRUPT to
                 * finish listening and re-trap tracee into STOP.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                si = child->last_siginfo;
                if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
                        child->jobctl |= JOBCTL_LISTENING;
                        /*
                         * If NOTIFY is set, it means event happened between
                         * start of this trap and now.  Trigger re-trap.
                         */
                        if (child->jobctl & JOBCTL_TRAP_NOTIFY)
                                ptrace_signal_wake_up(child, true);
                        ret = 0;
                }
                unlock_task_sighand(child, &flags);
                break;

        case PTRACE_DETACH:      /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
        case PTRACE_GETFDPIC: {
                struct mm_struct *mm = get_task_mm(child);
                unsigned long tmp = 0;

                ret = -ESRCH;
                if (!mm)
                        break;

                switch (addr) {
                case PTRACE_GETFDPIC_EXEC:
                        tmp = mm->context.exec_fdpic_loadmap;
                        break;
                case PTRACE_GETFDPIC_INTERP:
                        tmp = mm->context.interp_fdpic_loadmap;
                        break;
                default:
                        break;
                }
                mmput(mm);

                ret = put_user(tmp, datalp);
                break;
        }
#endif

#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state) /* already dead */
                        return 0;
                return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET: {
                struct iovec kiov;
                struct iovec __user *uiov = datavp;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(kiov.iov_base, &uiov->iov_base) ||
                    __get_user(kiov.iov_len, &uiov->iov_len))
                        return -EFAULT;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif

        case PTRACE_SECCOMP_GET_FILTER:
                ret = seccomp_get_filter(child, addr, datavp);
                break;

        case PTRACE_SECCOMP_GET_METADATA:
                ret = seccomp_get_metadata(child, addr, datavp);
                break;

        default:
                break;
        }

        return ret;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

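/*
 * The ptrace(2) system call proper: resolve @pid, handle TRACEME and the
 * attach-style requests directly, and hand everything else to the
 * architecture code once ptrace_check_attach() has verified (and, where
 * required, frozen) the tracee.
 */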
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                unsigned long, data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = find_get_task_by_vpid(pid);
        if (!child) {
                ret = -ESRCH;
                goto out;
        }

        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);
        if (ret || request != PTRACE_DETACH)
                ptrace_unfreeze_traced(child);

out_put_task_struct:
        put_task_struct(child);
out:
        return ret;
}

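/*
 * Default PTRACE_PEEKTEXT/PEEKDATA implementation: read one word of the
 * tracee's memory at @addr and store it at the user address in @data.
 */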
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        unsigned long tmp;
        int copied;

        copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        int copied;

        copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
                        FOLL_FORCE | FOLL_WRITE);
        return (copied == sizeof(data)) ? 0 : -EIO;
}

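/*
 * Compat counterparts: the same request handling as above, but with
 * 32-bit addr/data values and the compat layouts of siginfo and iovec.
 */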
#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        kernel_siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = ptrace_access_vm(child, addr, &word, sizeof(word),
                                FOLL_FORCE);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = ptrace_access_vm(child, addr, &data, sizeof(data),
                                FOLL_FORCE | FOLL_WRITE);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                ret = copy_siginfo_from_user32(
                        &siginfo, (struct compat_siginfo __user *) datap);
                if (!ret)
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct compat_iovec __user *uiov =
                        (struct compat_iovec __user *) datap;
                compat_uptr_t ptr;
                compat_size_t len;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(ptr, &uiov->iov_base) ||
                    __get_user(len, &uiov->iov_len))
                        return -EFAULT;

                kiov.iov_base = compat_ptr(ptr);
                kiov.iov_len = len;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
                       compat_long_t, addr, compat_long_t, data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = find_get_task_by_vpid(pid);
        if (!child) {
                ret = -ESRCH;
                goto out;
        }

        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
        if (!ret) {
                ret = compat_arch_ptrace(child, request, addr, data);
                if (ret || request != PTRACE_DETACH)
                        ptrace_unfreeze_traced(child);
        }

out_put_task_struct:
        put_task_struct(child);
out:
        return ret;
}
#endif	/* CONFIG_COMPAT */