Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/kernel/ptrace.c | |
3 | * | |
4 | * (C) Copyright 1999 Linus Torvalds | |
5 | * | |
6 | * Common interfaces for "ptrace()" which we do not want | |
7 | * to continually duplicate across every architecture. | |
8 | */ | |
9 | ||
c59ede7b | 10 | #include <linux/capability.h> |
9984de1a | 11 | #include <linux/export.h> |
1da177e4 LT |
12 | #include <linux/sched.h> |
13 | #include <linux/errno.h> | |
14 | #include <linux/mm.h> | |
15 | #include <linux/highmem.h> | |
16 | #include <linux/pagemap.h> | |
1da177e4 LT |
17 | #include <linux/ptrace.h> |
18 | #include <linux/security.h> | |
7ed20e1a | 19 | #include <linux/signal.h> |
a5cb013d | 20 | #include <linux/audit.h> |
b488893a | 21 | #include <linux/pid_namespace.h> |
f17d30a8 | 22 | #include <linux/syscalls.h> |
3a709703 | 23 | #include <linux/uaccess.h> |
2225a122 | 24 | #include <linux/regset.h> |
bf26c018 | 25 | #include <linux/hw_breakpoint.h> |
f701e5b7 | 26 | #include <linux/cn_proc.h> |
1da177e4 | 27 | |
bf53de90 | 28 | |
62c124ff TH |
29 | static int ptrace_trapping_sleep_fn(void *flags) |
30 | { | |
31 | schedule(); | |
32 | return 0; | |
33 | } | |
34 | ||
1da177e4 LT |
35 | /* |
36 | * ptrace a task: make the debugger its new parent and | |
37 | * move it to the ptrace list. | |
38 | * | |
39 | * Must be called with the tasklist lock write-held. | |
40 | */ | |
36c8b586 | 41 | void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) |
1da177e4 | 42 | { |
f470021a RM |
43 | BUG_ON(!list_empty(&child->ptrace_entry)); |
44 | list_add(&child->ptrace_entry, &new_parent->ptraced); | |
1da177e4 | 45 | child->parent = new_parent; |
1da177e4 | 46 | } |
3a709703 | 47 | |
e3bd058f TH |
48 | /** |
49 | * __ptrace_unlink - unlink ptracee and restore its execution state | |
50 | * @child: ptracee to be unlinked | |
1da177e4 | 51 | * |
0e9f0a4a TH |
52 | * Remove @child from the ptrace list, move it back to the original parent, |
53 | * and restore the execution state so that it conforms to the group stop | |
54 | * state. | |
55 | * | |
56 | * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer | |
57 | * exiting. For PTRACE_DETACH, unless the ptracee has been killed between | |
58 | * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED. | |
59 | * If the ptracer is exiting, the ptracee can be in any state. | |
60 | * | |
61 | * After detach, the ptracee should be in a state which conforms to the | |
62 | * group stop. If the group is stopped or in the process of stopping, the | |
63 | * ptracee should be put into TASK_STOPPED; otherwise, it should be woken | |
64 | * up from TASK_TRACED. | |
65 | * | |
66 | * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED, | |
67 | * it goes through TRACED -> RUNNING -> STOPPED transition which is similar | |
68 | * to but in the opposite direction of what happens while attaching to a | |
69 | * stopped task. However, in this direction, the intermediate RUNNING | |
70 | * state is not hidden even from the current ptracer and if it immediately | |
71 | * re-attaches and performs a WNOHANG wait(2), it may fail. | |
e3bd058f TH |
72 | * |
73 | * CONTEXT: | |
74 | * write_lock_irq(tasklist_lock) | |
1da177e4 | 75 | */ |
36c8b586 | 76 | void __ptrace_unlink(struct task_struct *child) |
1da177e4 | 77 | { |
5ecfbae0 ON |
78 | BUG_ON(!child->ptrace); |
79 | ||
1da177e4 | 80 | child->ptrace = 0; |
f470021a RM |
81 | child->parent = child->real_parent; |
82 | list_del_init(&child->ptrace_entry); | |
1da177e4 | 83 | |
1da177e4 | 84 | spin_lock(&child->sighand->siglock); |
0e9f0a4a | 85 | |
73ddff2b TH |
86 | /* |
87 | * Clear all pending traps and TRAPPING. TRAPPING should be | |
88 | * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly. | |
89 | */ | |
90 | task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK); | |
91 | task_clear_jobctl_trapping(child); | |
92 | ||
0e9f0a4a | 93 | /* |
a8f072c1 | 94 | * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and |
0e9f0a4a TH |
95 | * @child isn't dead. |
96 | */ | |
97 | if (!(child->flags & PF_EXITING) && | |
98 | (child->signal->flags & SIGNAL_STOP_STOPPED || | |
8a88951b | 99 | child->signal->group_stop_count)) { |
a8f072c1 | 100 | child->jobctl |= JOBCTL_STOP_PENDING; |
0e9f0a4a | 101 | |
8a88951b ON |
102 | /* |
103 | * This is only possible if this thread was cloned by the | |
104 | * traced task running in the stopped group; set the signal | |
105 | * for the future reports. | |
106 | * FIXME: we should change ptrace_init_task() to handle this | |
107 | * case. | |
108 | */ | |
109 | if (!(child->jobctl & JOBCTL_STOP_SIGMASK)) | |
110 | child->jobctl |= SIGSTOP; | |
111 | } | |
112 | ||
0e9f0a4a TH |
113 | /* |
114 | * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick | |
115 | * @child in the butt. Note that @resume should be used iff @child | |
116 | * is in TASK_TRACED; otherwise, we might unduly disrupt | |
117 | * TASK_KILLABLE sleeps. | |
118 | */ | |
a8f072c1 | 119 | if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) |
0e9f0a4a TH |
120 | signal_wake_up(child, task_is_traced(child)); |
121 | ||
1da177e4 | 122 | spin_unlock(&child->sighand->siglock); |
1da177e4 LT |
123 | } |
124 | ||
755e276b TH |
125 | /** |
126 | * ptrace_check_attach - check whether ptracee is ready for ptrace operation | |
127 | * @child: ptracee to check for | |
128 | * @ignore_state: don't check whether @child is currently %TASK_TRACED | |
129 | * | |
130 | * Check whether @child is being ptraced by %current and ready for further | |
131 | * ptrace operations. If @ignore_state is %false, @child also should be in | |
132 | * %TASK_TRACED state and on return the child is guaranteed to be traced | |
133 | * and not executing. If @ignore_state is %true, @child can be in any | |
134 | * state. | |
135 | * | |
136 | * CONTEXT: | |
137 | * Grabs and releases tasklist_lock and @child->sighand->siglock. | |
138 | * | |
139 | * RETURNS: | |
140 | * 0 on success, -ESRCH if %child is not ready. | |
1da177e4 | 141 | */ |
755e276b | 142 | int ptrace_check_attach(struct task_struct *child, bool ignore_state) |
1da177e4 LT |
143 | { |
144 | int ret = -ESRCH; | |
145 | ||
146 | /* | |
147 | * We take the read lock around doing both checks to close a | |
148 | * possible race where someone else was tracing our child and | |
149 | * detached between these two checks. After this locked check, | |
150 | * we are sure that this is our traced child and that can only | |
151 | * be changed by us so it's not changing right after this. | |
152 | */ | |
153 | read_lock(&tasklist_lock); | |
c0c0b649 | 154 | if ((child->ptrace & PT_PTRACED) && child->parent == current) { |
c0c0b649 ON |
155 | /* |
156 | * child->sighand can't be NULL, release_task() | |
157 | * does ptrace_unlink() before __exit_signal(). | |
158 | */ | |
1da177e4 | 159 | spin_lock_irq(&child->sighand->siglock); |
321fb561 | 160 | WARN_ON_ONCE(task_is_stopped(child)); |
544b2c91 TH |
161 | if (ignore_state || (task_is_traced(child) && |
162 | !(child->jobctl & JOBCTL_LISTENING))) | |
321fb561 | 163 | ret = 0; |
1da177e4 LT |
164 | spin_unlock_irq(&child->sighand->siglock); |
165 | } | |
166 | read_unlock(&tasklist_lock); | |
167 | ||
755e276b | 168 | if (!ret && !ignore_state) |
85ba2d86 | 169 | ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; |
1da177e4 LT |
170 | |
171 | /* All systems go.. */ | |
172 | return ret; | |
173 | } | |
174 | ||
69f594a3 EP |
175 | static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode) |
176 | { | |
177 | if (mode & PTRACE_MODE_NOAUDIT) | |
178 | return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE); | |
179 | else | |
180 | return has_ns_capability(current, ns, CAP_SYS_PTRACE); | |
181 | } | |
182 | ||
006ebb40 | 183 | int __ptrace_may_access(struct task_struct *task, unsigned int mode) |
ab8d11be | 184 | { |
c69e8d9c | 185 | const struct cred *cred = current_cred(), *tcred; |
b6dff3ec | 186 | |
df26c40e EB |
187 | /* May we inspect the given task? |
188 | * This check is used both for attaching with ptrace | |
189 | * and for allowing access to sensitive information in /proc. | |
190 | * | |
191 | * ptrace_attach denies several cases that /proc allows | |
192 | * because setting up the necessary parent/child relationship | |
193 | * or halting the specified task is impossible. | |
194 | */ | |
195 | int dumpable = 0; | |
196 | /* Don't let security modules deny introspection */ | |
197 | if (task == current) | |
198 | return 0; | |
c69e8d9c DH |
199 | rcu_read_lock(); |
200 | tcred = __task_cred(task); | |
5af66203 EB |
201 | if (uid_eq(cred->uid, tcred->euid) && |
202 | uid_eq(cred->uid, tcred->suid) && | |
203 | uid_eq(cred->uid, tcred->uid) && | |
204 | gid_eq(cred->gid, tcred->egid) && | |
205 | gid_eq(cred->gid, tcred->sgid) && | |
206 | gid_eq(cred->gid, tcred->gid)) | |
8409cca7 | 207 | goto ok; |
c4a4d603 | 208 | if (ptrace_has_cap(tcred->user_ns, mode)) |
8409cca7 SH |
209 | goto ok; |
210 | rcu_read_unlock(); | |
211 | return -EPERM; | |
212 | ok: | |
c69e8d9c | 213 | rcu_read_unlock(); |
ab8d11be | 214 | smp_rmb(); |
df26c40e | 215 | if (task->mm) |
6c5d5238 | 216 | dumpable = get_dumpable(task->mm); |
69f594a3 | 217 | if (!dumpable && !ptrace_has_cap(task_user_ns(task), mode)) |
ab8d11be MS |
218 | return -EPERM; |
219 | ||
9e48858f | 220 | return security_ptrace_access_check(task, mode); |
ab8d11be MS |
221 | } |
222 | ||
006ebb40 | 223 | bool ptrace_may_access(struct task_struct *task, unsigned int mode) |
ab8d11be MS |
224 | { |
225 | int err; | |
226 | task_lock(task); | |
006ebb40 | 227 | err = __ptrace_may_access(task, mode); |
ab8d11be | 228 | task_unlock(task); |
3a709703 | 229 | return !err; |
ab8d11be MS |
230 | } |
231 | ||
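As a rough illustration (not part of this file), a caller elsewhere in the kernel, such as a /proc handler, would gate access to a task's state through the locked ptrace_may_access() wrapper rather than __ptrace_may_access(); the helper name below is hypothetical:

```c
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/errno.h>

/* Hypothetical caller outside kernel/ptrace.c: refuse to expose a task's
 * sensitive state unless current could also ptrace-read that task. */
static int example_can_inspect(struct task_struct *task)
{
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		return -EACCES;
	return 0;
}
```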
3544d72a | 232 | static int ptrace_attach(struct task_struct *task, long request, |
aa9147c9 | 233 | unsigned long addr, |
3544d72a | 234 | unsigned long flags) |
1da177e4 | 235 | { |
3544d72a | 236 | bool seize = (request == PTRACE_SEIZE); |
1da177e4 | 237 | int retval; |
f5b40e36 | 238 | |
3544d72a | 239 | retval = -EIO; |
aa9147c9 DV |
240 | if (seize) { |
241 | if (addr != 0) | |
242 | goto out; | |
aa9147c9 DV |
243 | if (flags & ~(unsigned long)PTRACE_O_MASK) |
244 | goto out; | |
245 | flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT); | |
246 | } else { | |
247 | flags = PT_PTRACED; | |
248 | } | |
3544d72a | 249 | |
a5cb013d AV |
250 | audit_ptrace(task); |
251 | ||
1da177e4 | 252 | retval = -EPERM; |
b79b7ba9 ON |
253 | if (unlikely(task->flags & PF_KTHREAD)) |
254 | goto out; | |
bac0abd6 | 255 | if (same_thread_group(task, current)) |
f5b40e36 LT |
256 | goto out; |
257 | ||
f2f0b00a ON |
258 | /* |
259 | * Protect exec's credential calculations against our interference; | |
86b6c1f3 | 260 | * SUID, SGID and LSM creds get determined differently |
5e751e99 | 261 | * under ptrace. |
d84f4f99 | 262 | */ |
793285fc | 263 | retval = -ERESTARTNOINTR; |
9b1bf12d | 264 | if (mutex_lock_interruptible(&task->signal->cred_guard_mutex)) |
d84f4f99 | 265 | goto out; |
f5b40e36 | 266 | |
4b105cbb | 267 | task_lock(task); |
006ebb40 | 268 | retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); |
4b105cbb | 269 | task_unlock(task); |
1da177e4 | 270 | if (retval) |
4b105cbb | 271 | goto unlock_creds; |
1da177e4 | 272 | |
4b105cbb | 273 | write_lock_irq(&tasklist_lock); |
b79b7ba9 ON |
274 | retval = -EPERM; |
275 | if (unlikely(task->exit_state)) | |
4b105cbb | 276 | goto unlock_tasklist; |
f2f0b00a | 277 | if (task->ptrace) |
4b105cbb | 278 | goto unlock_tasklist; |
b79b7ba9 | 279 | |
3544d72a | 280 | if (seize) |
aa9147c9 | 281 | flags |= PT_SEIZED; |
f1c84dae | 282 | if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE)) |
aa9147c9 DV |
283 | flags |= PT_PTRACE_CAP; |
284 | task->ptrace = flags; | |
1da177e4 | 285 | |
1da177e4 | 286 | __ptrace_link(task, current); |
3544d72a TH |
287 | |
288 | /* SEIZE doesn't trap tracee on attach */ | |
289 | if (!seize) | |
290 | send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); | |
b79b7ba9 | 291 | |
d79fdd6d TH |
292 | spin_lock(&task->sighand->siglock); |
293 | ||
294 | /* | |
73ddff2b | 295 | * If the task is already STOPPED, set JOBCTL_TRAP_STOP and |
d79fdd6d TH |
296 | * TRAPPING, and kick it so that it transits to TRACED. TRAPPING |
297 | * will be cleared if the child completes the transition or any | |
298 | * event which clears the group stop states happens. We'll wait | |
299 | * for the transition to complete before returning from this | |
300 | * function. | |
301 | * | |
302 | * This hides STOPPED -> RUNNING -> TRACED transition from the | |
303 | * attaching thread but a different thread in the same group can | |
304 | * still observe the transient RUNNING state. IOW, if another | |
305 | * thread's WNOHANG wait(2) on the stopped tracee races against | |
306 | * ATTACH, the wait(2) may fail due to the transient RUNNING. | |
307 | * | |
308 | * The following task_is_stopped() test is safe as both transitions | |
309 | * in and out of STOPPED are protected by siglock. | |
310 | */ | |
7dd3db54 | 311 | if (task_is_stopped(task) && |
73ddff2b | 312 | task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) |
d79fdd6d | 313 | signal_wake_up(task, 1); |
d79fdd6d TH |
314 | |
315 | spin_unlock(&task->sighand->siglock); | |
316 | ||
b79b7ba9 | 317 | retval = 0; |
4b105cbb ON |
318 | unlock_tasklist: |
319 | write_unlock_irq(&tasklist_lock); | |
320 | unlock_creds: | |
9b1bf12d | 321 | mutex_unlock(&task->signal->cred_guard_mutex); |
f5b40e36 | 322 | out: |
f701e5b7 | 323 | if (!retval) { |
62c124ff TH |
324 | wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, |
325 | ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE); | |
f701e5b7 VZ |
326 | proc_ptrace_connector(task, PTRACE_ATTACH); |
327 | } | |
328 | ||
1da177e4 LT |
329 | return retval; |
330 | } | |
331 | ||
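For orientation, a minimal userspace sketch of the two attach paths handled above: classic PTRACE_ATTACH, which force-sends SIGSTOP and so traps the tracee, versus PTRACE_SEIZE, which leaves the tracee running and takes its option flags in the data argument (addr must be 0, matching the check above). This assumes a libc that already exposes PTRACE_SEIZE; the option used is only an example.

```c
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Classic attach: the SIGSTOP sent by ptrace_attach() stops the tracee,
 * so wait for that stop before issuing further requests. */
static int attach_classic(pid_t pid)
{
	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
		return -1;
	return waitpid(pid, NULL, 0) == pid ? 0 : -1;
}

/* Seize: no signal is sent and the tracee keeps running; ptrace options
 * are passed directly in data instead of a later PTRACE_SETOPTIONS. */
static int attach_seize(pid_t pid)
{
	return ptrace(PTRACE_SEIZE, pid, NULL,
		      (void *)(long)PTRACE_O_TRACEEXIT);
}
```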
f2f0b00a ON |
332 | /** |
333 | * ptrace_traceme -- helper for PTRACE_TRACEME | |
334 | * | |
335 | * Performs checks and sets PT_PTRACED. | |
336 | * Should be used by all ptrace implementations for PTRACE_TRACEME. | |
337 | */ | |
e3e89cc5 | 338 | static int ptrace_traceme(void) |
f2f0b00a ON |
339 | { |
340 | int ret = -EPERM; | |
341 | ||
4b105cbb ON |
342 | write_lock_irq(&tasklist_lock); |
343 | /* Are we already being traced? */ | |
f2f0b00a | 344 | if (!current->ptrace) { |
f2f0b00a | 345 | ret = security_ptrace_traceme(current->parent); |
f2f0b00a ON |
346 | /* |
347 | * Check PF_EXITING to ensure ->real_parent has not passed | |
348 | * exit_ptrace(). Otherwise we don't report the error but | |
349 | * pretend ->real_parent untraces us right after return. | |
350 | */ | |
351 | if (!ret && !(current->real_parent->flags & PF_EXITING)) { | |
352 | current->ptrace = PT_PTRACED; | |
353 | __ptrace_link(current, current->real_parent); | |
354 | } | |
f2f0b00a | 355 | } |
4b105cbb ON |
356 | write_unlock_irq(&tasklist_lock); |
357 | ||
f2f0b00a ON |
358 | return ret; |
359 | } | |
360 | ||
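The usual consumer of this path is a debugger launching its own child: the child volunteers via PTRACE_TRACEME and the subsequent execve() stops it with SIGTRAP for the parent to pick up. A hedged userspace sketch (argv is supplied by the caller):

```c
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Start a program under tracing by its parent. */
static pid_t spawn_traced(char *const argv[])
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execvp(argv[0], argv);
		_exit(127);                     /* exec failed */
	}
	if (pid > 0)
		waitpid(pid, NULL, 0);          /* wait for the exec stop */
	return pid;
}
```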
39c626ae ON |
361 | /* |
362 | * Called with irqs disabled, returns true if children should reap themselves. | |
363 | */ | |
364 | static int ignoring_children(struct sighand_struct *sigh) | |
365 | { | |
366 | int ret; | |
367 | spin_lock(&sigh->siglock); | |
368 | ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) || | |
369 | (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT); | |
370 | spin_unlock(&sigh->siglock); | |
371 | return ret; | |
372 | } | |
373 | ||
374 | /* | |
375 | * Called with tasklist_lock held for writing. | |
376 | * Unlink a traced task, and clean it up if it was a traced zombie. | |
377 | * Return true if it needs to be reaped with release_task(). | |
378 | * (We can't call release_task() here because we already hold tasklist_lock.) | |
379 | * | |
380 | * If it's a zombie, our attachedness prevented normal parent notification | |
381 | * or self-reaping. Do notification now if it would have happened earlier. | |
382 | * If it should reap itself, return true. | |
383 | * | |
a7f0765e ON |
384 | * If it's our own child, there is no notification to do. But if our normal |
385 | * children self-reap, then this child was prevented by ptrace and we must | |
386 | * reap it now, in that case we must also wake up sub-threads sleeping in | |
387 | * do_wait(). | |
39c626ae ON |
388 | */ |
389 | static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) | |
390 | { | |
9843a1e9 ON |
391 | bool dead; |
392 | ||
39c626ae ON |
393 | __ptrace_unlink(p); |
394 | ||
9843a1e9 ON |
395 | if (p->exit_state != EXIT_ZOMBIE) |
396 | return false; | |
397 | ||
398 | dead = !thread_group_leader(p); | |
399 | ||
400 | if (!dead && thread_group_empty(p)) { | |
401 | if (!same_thread_group(p->real_parent, tracer)) | |
402 | dead = do_notify_parent(p, p->exit_signal); | |
403 | else if (ignoring_children(tracer->sighand)) { | |
404 | __wake_up_parent(p, tracer); | |
9843a1e9 | 405 | dead = true; |
39c626ae ON |
406 | } |
407 | } | |
9843a1e9 ON |
408 | /* Mark it as in the process of being reaped. */ |
409 | if (dead) | |
410 | p->exit_state = EXIT_DEAD; | |
411 | return dead; | |
39c626ae ON |
412 | } |
413 | ||
e3e89cc5 | 414 | static int ptrace_detach(struct task_struct *child, unsigned int data) |
1da177e4 | 415 | { |
39c626ae | 416 | bool dead = false; |
4576145c | 417 | |
7ed20e1a | 418 | if (!valid_signal(data)) |
5ecfbae0 | 419 | return -EIO; |
1da177e4 LT |
420 | |
421 | /* Architecture-specific hardware disable .. */ | |
422 | ptrace_disable(child); | |
7d941432 | 423 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
1da177e4 | 424 | |
95c3eb76 | 425 | write_lock_irq(&tasklist_lock); |
39c626ae ON |
426 | /* |
427 | * This child can be already killed. Make sure de_thread() or | |
428 | * our sub-thread doing do_wait() didn't do release_task() yet. | |
429 | */ | |
95c3eb76 ON |
430 | if (child->ptrace) { |
431 | child->exit_code = data; | |
4576145c | 432 | dead = __ptrace_detach(current, child); |
95c3eb76 | 433 | } |
1da177e4 LT |
434 | write_unlock_irq(&tasklist_lock); |
435 | ||
f701e5b7 | 436 | proc_ptrace_connector(child, PTRACE_DETACH); |
4576145c ON |
437 | if (unlikely(dead)) |
438 | release_task(child); | |
439 | ||
1da177e4 LT |
440 | return 0; |
441 | } | |
442 | ||
39c626ae | 443 | /* |
c7e49c14 ON |
444 | * Detach all tasks we were using ptrace on. Called with tasklist held |
445 | * for writing, and returns with it held too. But note it can release | |
446 | * and reacquire the lock. | |
39c626ae ON |
447 | */ |
448 | void exit_ptrace(struct task_struct *tracer) | |
c4b5ed25 NK |
449 | __releases(&tasklist_lock) |
450 | __acquires(&tasklist_lock) | |
39c626ae ON |
451 | { |
452 | struct task_struct *p, *n; | |
453 | LIST_HEAD(ptrace_dead); | |
454 | ||
c7e49c14 ON |
455 | if (likely(list_empty(&tracer->ptraced))) |
456 | return; | |
457 | ||
39c626ae ON |
458 | list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) { |
459 | if (__ptrace_detach(tracer, p)) | |
460 | list_add(&p->ptrace_entry, &ptrace_dead); | |
461 | } | |
39c626ae | 462 | |
c7e49c14 | 463 | write_unlock_irq(&tasklist_lock); |
39c626ae ON |
464 | BUG_ON(!list_empty(&tracer->ptraced)); |
465 | ||
466 | list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) { | |
467 | list_del_init(&p->ptrace_entry); | |
468 | release_task(p); | |
469 | } | |
c7e49c14 ON |
470 | |
471 | write_lock_irq(&tasklist_lock); | |
39c626ae ON |
472 | } |
473 | ||
1da177e4 LT |
474 | int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) |
475 | { | |
476 | int copied = 0; | |
477 | ||
478 | while (len > 0) { | |
479 | char buf[128]; | |
480 | int this_len, retval; | |
481 | ||
482 | this_len = (len > sizeof(buf)) ? sizeof(buf) : len; | |
483 | retval = access_process_vm(tsk, src, buf, this_len, 0); | |
484 | if (!retval) { | |
485 | if (copied) | |
486 | break; | |
487 | return -EIO; | |
488 | } | |
489 | if (copy_to_user(dst, buf, retval)) | |
490 | return -EFAULT; | |
491 | copied += retval; | |
492 | src += retval; | |
493 | dst += retval; | |
3a709703 | 494 | len -= retval; |
1da177e4 LT |
495 | } |
496 | return copied; | |
497 | } | |
498 | ||
499 | int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len) | |
500 | { | |
501 | int copied = 0; | |
502 | ||
503 | while (len > 0) { | |
504 | char buf[128]; | |
505 | int this_len, retval; | |
506 | ||
507 | this_len = (len > sizeof(buf)) ? sizeof(buf) : len; | |
508 | if (copy_from_user(buf, src, this_len)) | |
509 | return -EFAULT; | |
510 | retval = access_process_vm(tsk, dst, buf, this_len, 1); | |
511 | if (!retval) { | |
512 | if (copied) | |
513 | break; | |
514 | return -EIO; | |
515 | } | |
516 | copied += retval; | |
517 | src += retval; | |
518 | dst += retval; | |
3a709703 | 519 | len -= retval; |
1da177e4 LT |
520 | } |
521 | return copied; | |
522 | } | |
523 | ||
4abf9869 | 524 | static int ptrace_setoptions(struct task_struct *child, unsigned long data) |
1da177e4 | 525 | { |
86b6c1f3 DV |
526 | unsigned flags; |
527 | ||
8c5cf9e5 DV |
528 | if (data & ~(unsigned long)PTRACE_O_MASK) |
529 | return -EINVAL; | |
530 | ||
86b6c1f3 DV |
531 | /* Avoid intermediate state when all opts are cleared */ |
532 | flags = child->ptrace; | |
533 | flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT); | |
534 | flags |= (data << PT_OPT_FLAG_SHIFT); | |
535 | child->ptrace = flags; | |
1da177e4 | 536 | |
8c5cf9e5 | 537 | return 0; |
1da177e4 LT |
538 | } |
539 | ||
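From userspace, these option bits are installed on an attached, stopped tracee with PTRACE_SETOPTIONS; the kernel then shifts them into the PT_* range as above. A small sketch with an example option combination (the helper name is illustrative):

```c
#include <sys/ptrace.h>
#include <sys/types.h>

/* Request marked syscall stops and fork-following on a stopped tracee. */
static long set_trace_options(pid_t pid)
{
	return ptrace(PTRACE_SETOPTIONS, pid, NULL,
		      (void *)(long)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK));
}
```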
e16b2781 | 540 | static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info) |
1da177e4 | 541 | { |
e4961254 | 542 | unsigned long flags; |
1da177e4 LT |
543 | int error = -ESRCH; |
544 | ||
e4961254 | 545 | if (lock_task_sighand(child, &flags)) { |
1da177e4 | 546 | error = -EINVAL; |
1da177e4 | 547 | if (likely(child->last_siginfo != NULL)) { |
e16b2781 | 548 | *info = *child->last_siginfo; |
1da177e4 LT |
549 | error = 0; |
550 | } | |
e4961254 | 551 | unlock_task_sighand(child, &flags); |
1da177e4 | 552 | } |
1da177e4 LT |
553 | return error; |
554 | } | |
555 | ||
e16b2781 | 556 | static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info) |
1da177e4 | 557 | { |
e4961254 | 558 | unsigned long flags; |
1da177e4 LT |
559 | int error = -ESRCH; |
560 | ||
e4961254 | 561 | if (lock_task_sighand(child, &flags)) { |
1da177e4 | 562 | error = -EINVAL; |
1da177e4 | 563 | if (likely(child->last_siginfo != NULL)) { |
e16b2781 | 564 | *child->last_siginfo = *info; |
1da177e4 LT |
565 | error = 0; |
566 | } | |
e4961254 | 567 | unlock_task_sighand(child, &flags); |
1da177e4 | 568 | } |
1da177e4 LT |
569 | return error; |
570 | } | |
571 | ||
36df29d7 RM |
572 | |
573 | #ifdef PTRACE_SINGLESTEP | |
574 | #define is_singlestep(request) ((request) == PTRACE_SINGLESTEP) | |
575 | #else | |
576 | #define is_singlestep(request) 0 | |
577 | #endif | |
578 | ||
5b88abbf RM |
579 | #ifdef PTRACE_SINGLEBLOCK |
580 | #define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK) | |
581 | #else | |
582 | #define is_singleblock(request) 0 | |
583 | #endif | |
584 | ||
36df29d7 RM |
585 | #ifdef PTRACE_SYSEMU |
586 | #define is_sysemu_singlestep(request) ((request) == PTRACE_SYSEMU_SINGLESTEP) | |
587 | #else | |
588 | #define is_sysemu_singlestep(request) 0 | |
589 | #endif | |
590 | ||
4abf9869 NK |
591 | static int ptrace_resume(struct task_struct *child, long request, |
592 | unsigned long data) | |
36df29d7 RM |
593 | { |
594 | if (!valid_signal(data)) | |
595 | return -EIO; | |
596 | ||
597 | if (request == PTRACE_SYSCALL) | |
598 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | |
599 | else | |
600 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | |
601 | ||
602 | #ifdef TIF_SYSCALL_EMU | |
603 | if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP) | |
604 | set_tsk_thread_flag(child, TIF_SYSCALL_EMU); | |
605 | else | |
606 | clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); | |
607 | #endif | |
608 | ||
5b88abbf RM |
609 | if (is_singleblock(request)) { |
610 | if (unlikely(!arch_has_block_step())) | |
611 | return -EIO; | |
612 | user_enable_block_step(child); | |
613 | } else if (is_singlestep(request) || is_sysemu_singlestep(request)) { | |
36df29d7 RM |
614 | if (unlikely(!arch_has_single_step())) |
615 | return -EIO; | |
616 | user_enable_single_step(child); | |
3a709703 | 617 | } else { |
36df29d7 | 618 | user_disable_single_step(child); |
3a709703 | 619 | } |
36df29d7 RM |
620 | |
621 | child->exit_code = data; | |
0666fb51 | 622 | wake_up_state(child, __TASK_TRACED); |
36df29d7 RM |
623 | |
624 | return 0; | |
625 | } | |
626 | ||
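A tracer drives these resume requests from userspace in a stop/resume loop; a minimal sketch using PTRACE_SYSCALL, with no signal forwarding (which a real tracer would add) and assuming the tracee is already stopped when the loop starts:

```c
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Resume the tracee to the next syscall entry/exit stop until it exits. */
static void syscall_loop(pid_t pid)
{
	int status;

	for (;;) {
		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
			break;
		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
			break;
		/* tracee is stopped at a syscall boundary here */
	}
}
```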
2225a122 SS |
627 | #ifdef CONFIG_HAVE_ARCH_TRACEHOOK |
628 | ||
629 | static const struct user_regset * | |
630 | find_regset(const struct user_regset_view *view, unsigned int type) | |
631 | { | |
632 | const struct user_regset *regset; | |
633 | int n; | |
634 | ||
635 | for (n = 0; n < view->n; ++n) { | |
636 | regset = view->regsets + n; | |
637 | if (regset->core_note_type == type) | |
638 | return regset; | |
639 | } | |
640 | ||
641 | return NULL; | |
642 | } | |
643 | ||
644 | static int ptrace_regset(struct task_struct *task, int req, unsigned int type, | |
645 | struct iovec *kiov) | |
646 | { | |
647 | const struct user_regset_view *view = task_user_regset_view(task); | |
648 | const struct user_regset *regset = find_regset(view, type); | |
649 | int regset_no; | |
650 | ||
651 | if (!regset || (kiov->iov_len % regset->size) != 0) | |
c6a0dd7e | 652 | return -EINVAL; |
2225a122 SS |
653 | |
654 | regset_no = regset - view->regsets; | |
655 | kiov->iov_len = min(kiov->iov_len, | |
656 | (__kernel_size_t) (regset->n * regset->size)); | |
657 | ||
658 | if (req == PTRACE_GETREGSET) | |
659 | return copy_regset_to_user(task, view, regset_no, 0, | |
660 | kiov->iov_len, kiov->iov_base); | |
661 | else | |
662 | return copy_regset_from_user(task, view, regset_no, 0, | |
663 | kiov->iov_len, kiov->iov_base); | |
664 | } | |
665 | ||
666 | #endif | |
667 | ||
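The regset interface is driven from userspace through an iovec whose length the kernel trims to what it actually copied, as implemented above. A sketch for reading the general-purpose registers, assuming an x86-style user_regs_struct and a libc that defines PTRACE_GETREGSET:

```c
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

/* Fetch the GP register set of a stopped tracee; NT_PRSTATUS selects it. */
static long read_gp_regs(pid_t pid, struct user_regs_struct *regs)
{
	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(*regs) };

	return ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PRSTATUS, &iov);
}
```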
1da177e4 | 668 | int ptrace_request(struct task_struct *child, long request, |
4abf9869 | 669 | unsigned long addr, unsigned long data) |
1da177e4 | 670 | { |
fca26f26 | 671 | bool seized = child->ptrace & PT_SEIZED; |
1da177e4 | 672 | int ret = -EIO; |
544b2c91 | 673 | siginfo_t siginfo, *si; |
9fed81dc NK |
674 | void __user *datavp = (void __user *) data; |
675 | unsigned long __user *datalp = datavp; | |
fca26f26 | 676 | unsigned long flags; |
1da177e4 LT |
677 | |
678 | switch (request) { | |
16c3e389 RM |
679 | case PTRACE_PEEKTEXT: |
680 | case PTRACE_PEEKDATA: | |
681 | return generic_ptrace_peekdata(child, addr, data); | |
682 | case PTRACE_POKETEXT: | |
683 | case PTRACE_POKEDATA: | |
684 | return generic_ptrace_pokedata(child, addr, data); | |
685 | ||
1da177e4 LT |
686 | #ifdef PTRACE_OLDSETOPTIONS |
687 | case PTRACE_OLDSETOPTIONS: | |
688 | #endif | |
689 | case PTRACE_SETOPTIONS: | |
690 | ret = ptrace_setoptions(child, data); | |
691 | break; | |
692 | case PTRACE_GETEVENTMSG: | |
9fed81dc | 693 | ret = put_user(child->ptrace_message, datalp); |
1da177e4 | 694 | break; |
e16b2781 | 695 | |
1da177e4 | 696 | case PTRACE_GETSIGINFO: |
e16b2781 RM |
697 | ret = ptrace_getsiginfo(child, &siginfo); |
698 | if (!ret) | |
9fed81dc | 699 | ret = copy_siginfo_to_user(datavp, &siginfo); |
1da177e4 | 700 | break; |
e16b2781 | 701 | |
1da177e4 | 702 | case PTRACE_SETSIGINFO: |
9fed81dc | 703 | if (copy_from_user(&siginfo, datavp, sizeof siginfo)) |
e16b2781 RM |
704 | ret = -EFAULT; |
705 | else | |
706 | ret = ptrace_setsiginfo(child, &siginfo); | |
1da177e4 | 707 | break; |
e16b2781 | 708 | |
fca26f26 TH |
709 | case PTRACE_INTERRUPT: |
710 | /* | |
711 | * Stop tracee without any side-effect on signal or job | |
712 | * control. At least one trap is guaranteed to happen | |
713 | * after this request. If @child is already trapped, the | |
714 | * current trap is not disturbed and another trap will | |
715 | * happen after the current trap is ended with PTRACE_CONT. | |
716 | * | |
717 | * The actual trap might not be PTRACE_EVENT_STOP trap but | |
718 | * the pending condition is cleared regardless. | |
719 | */ | |
720 | if (unlikely(!seized || !lock_task_sighand(child, &flags))) | |
721 | break; | |
722 | ||
544b2c91 TH |
723 | /* |
724 | * INTERRUPT doesn't disturb existing trap sans one | |
725 | * exception. If ptracer issued LISTEN for the current | |
726 | * STOP, this INTERRUPT should clear LISTEN and re-trap | |
727 | * tracee into STOP. | |
728 | */ | |
fca26f26 | 729 | if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) |
544b2c91 TH |
730 | signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); |
731 | ||
732 | unlock_task_sighand(child, &flags); | |
733 | ret = 0; | |
734 | break; | |
735 | ||
736 | case PTRACE_LISTEN: | |
737 | /* | |
738 | * Listen for events. Tracee must be in STOP. It's not | |
739 | * resumed per-se but is not considered to be in TRACED by | |
740 | * wait(2) or ptrace(2). If an async event (e.g. group | |
741 | * stop state change) happens, tracee will enter STOP trap | |
742 | * again. Alternatively, ptracer can issue INTERRUPT to | |
743 | * finish listening and re-trap tracee into STOP. | |
744 | */ | |
745 | if (unlikely(!seized || !lock_task_sighand(child, &flags))) | |
746 | break; | |
747 | ||
748 | si = child->last_siginfo; | |
f9d81f61 ON |
749 | if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) { |
750 | child->jobctl |= JOBCTL_LISTENING; | |
751 | /* | |
752 | * If NOTIFY is set, it means event happened between | |
753 | * start of this trap and now. Trigger re-trap. | |
754 | */ | |
755 | if (child->jobctl & JOBCTL_TRAP_NOTIFY) | |
756 | signal_wake_up(child, true); | |
757 | ret = 0; | |
758 | } | |
fca26f26 | 759 | unlock_task_sighand(child, &flags); |
fca26f26 TH |
760 | break; |
761 | ||
1bcf5482 AD |
762 | case PTRACE_DETACH: /* detach a process that was attached. */ |
763 | ret = ptrace_detach(child, data); | |
764 | break; | |
36df29d7 | 765 | |
9c1a1259 MF |
766 | #ifdef CONFIG_BINFMT_ELF_FDPIC |
767 | case PTRACE_GETFDPIC: { | |
e0129ef9 | 768 | struct mm_struct *mm = get_task_mm(child); |
9c1a1259 MF |
769 | unsigned long tmp = 0; |
770 | ||
e0129ef9 ON |
771 | ret = -ESRCH; |
772 | if (!mm) | |
773 | break; | |
774 | ||
9c1a1259 MF |
775 | switch (addr) { |
776 | case PTRACE_GETFDPIC_EXEC: | |
e0129ef9 | 777 | tmp = mm->context.exec_fdpic_loadmap; |
9c1a1259 MF |
778 | break; |
779 | case PTRACE_GETFDPIC_INTERP: | |
e0129ef9 | 780 | tmp = mm->context.interp_fdpic_loadmap; |
9c1a1259 MF |
781 | break; |
782 | default: | |
783 | break; | |
784 | } | |
e0129ef9 | 785 | mmput(mm); |
9c1a1259 | 786 | |
9fed81dc | 787 | ret = put_user(tmp, datalp); |
9c1a1259 MF |
788 | break; |
789 | } | |
790 | #endif | |
791 | ||
36df29d7 RM |
792 | #ifdef PTRACE_SINGLESTEP |
793 | case PTRACE_SINGLESTEP: | |
794 | #endif | |
5b88abbf RM |
795 | #ifdef PTRACE_SINGLEBLOCK |
796 | case PTRACE_SINGLEBLOCK: | |
797 | #endif | |
36df29d7 RM |
798 | #ifdef PTRACE_SYSEMU |
799 | case PTRACE_SYSEMU: | |
800 | case PTRACE_SYSEMU_SINGLESTEP: | |
801 | #endif | |
802 | case PTRACE_SYSCALL: | |
803 | case PTRACE_CONT: | |
804 | return ptrace_resume(child, request, data); | |
805 | ||
806 | case PTRACE_KILL: | |
807 | if (child->exit_state) /* already dead */ | |
808 | return 0; | |
809 | return ptrace_resume(child, request, SIGKILL); | |
810 | ||
2225a122 SS |
811 | #ifdef CONFIG_HAVE_ARCH_TRACEHOOK |
812 | case PTRACE_GETREGSET: | |
813 | case PTRACE_SETREGSET: | |
814 | { | |
815 | struct iovec kiov; | |
9fed81dc | 816 | struct iovec __user *uiov = datavp; |
2225a122 SS |
817 | |
818 | if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) | |
819 | return -EFAULT; | |
820 | ||
821 | if (__get_user(kiov.iov_base, &uiov->iov_base) || | |
822 | __get_user(kiov.iov_len, &uiov->iov_len)) | |
823 | return -EFAULT; | |
824 | ||
825 | ret = ptrace_regset(child, request, addr, &kiov); | |
826 | if (!ret) | |
827 | ret = __put_user(kiov.iov_len, &uiov->iov_len); | |
828 | break; | |
829 | } | |
830 | #endif | |
1da177e4 LT |
831 | default: |
832 | break; | |
833 | } | |
834 | ||
835 | return ret; | |
836 | } | |
481bed45 | 837 | |
8053bdd5 | 838 | static struct task_struct *ptrace_get_task_struct(pid_t pid) |
6b9c7ed8 CH |
839 | { |
840 | struct task_struct *child; | |
481bed45 | 841 | |
8053bdd5 | 842 | rcu_read_lock(); |
228ebcbe | 843 | child = find_task_by_vpid(pid); |
481bed45 CH |
844 | if (child) |
845 | get_task_struct(child); | |
8053bdd5 | 846 | rcu_read_unlock(); |
f400e198 | 847 | |
481bed45 | 848 | if (!child) |
6b9c7ed8 CH |
849 | return ERR_PTR(-ESRCH); |
850 | return child; | |
481bed45 CH |
851 | } |
852 | ||
0ac15559 CH |
853 | #ifndef arch_ptrace_attach |
854 | #define arch_ptrace_attach(child) do { } while (0) | |
855 | #endif | |
856 | ||
4abf9869 NK |
857 | SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, |
858 | unsigned long, data) | |
481bed45 CH |
859 | { |
860 | struct task_struct *child; | |
861 | long ret; | |
862 | ||
6b9c7ed8 CH |
863 | if (request == PTRACE_TRACEME) { |
864 | ret = ptrace_traceme(); | |
6ea6dd93 HS |
865 | if (!ret) |
866 | arch_ptrace_attach(current); | |
481bed45 | 867 | goto out; |
6b9c7ed8 CH |
868 | } |
869 | ||
870 | child = ptrace_get_task_struct(pid); | |
871 | if (IS_ERR(child)) { | |
872 | ret = PTR_ERR(child); | |
873 | goto out; | |
874 | } | |
481bed45 | 875 | |
3544d72a | 876 | if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { |
aa9147c9 | 877 | ret = ptrace_attach(child, request, addr, data); |
0ac15559 CH |
878 | /* |
879 | * Some architectures need to do book-keeping after | |
880 | * a ptrace attach. | |
881 | */ | |
882 | if (!ret) | |
883 | arch_ptrace_attach(child); | |
005f18df | 884 | goto out_put_task_struct; |
481bed45 CH |
885 | } |
886 | ||
fca26f26 TH |
887 | ret = ptrace_check_attach(child, request == PTRACE_KILL || |
888 | request == PTRACE_INTERRUPT); | |
481bed45 CH |
889 | if (ret < 0) |
890 | goto out_put_task_struct; | |
891 | ||
892 | ret = arch_ptrace(child, request, addr, data); | |
481bed45 CH |
893 | |
894 | out_put_task_struct: | |
895 | put_task_struct(child); | |
896 | out: | |
481bed45 CH |
897 | return ret; |
898 | } | |
76647323 | 899 | |
4abf9869 NK |
900 | int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, |
901 | unsigned long data) | |
76647323 AD |
902 | { |
903 | unsigned long tmp; | |
904 | int copied; | |
905 | ||
906 | copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); | |
907 | if (copied != sizeof(tmp)) | |
908 | return -EIO; | |
909 | return put_user(tmp, (unsigned long __user *)data); | |
910 | } | |
f284ce72 | 911 | |
4abf9869 NK |
912 | int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, |
913 | unsigned long data) | |
f284ce72 AD |
914 | { |
915 | int copied; | |
916 | ||
917 | copied = access_process_vm(tsk, addr, &data, sizeof(data), 1); | |
918 | return (copied == sizeof(data)) ? 0 : -EIO; | |
919 | } | |
032d82d9 | 920 | |
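These helpers back the word-sized PTRACE_PEEKDATA/POKEDATA requests. On the userspace side, PEEKDATA returns the word itself, so errno must be cleared beforehand to distinguish a legitimate -1 value from an error; a short sketch (the helper name is illustrative):

```c
#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>

/* Read one word from the tracee at addr, then write a replacement back. */
static int patch_word(pid_t pid, unsigned long addr, long new_word, long *old_word)
{
	errno = 0;
	*old_word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
	if (*old_word == -1 && errno)
		return -1;
	return ptrace(PTRACE_POKEDATA, pid, (void *)addr, (void *)new_word);
}
```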
96b8936a | 921 | #if defined CONFIG_COMPAT |
032d82d9 RM |
922 | #include <linux/compat.h> |
923 | ||
924 | int compat_ptrace_request(struct task_struct *child, compat_long_t request, | |
925 | compat_ulong_t addr, compat_ulong_t data) | |
926 | { | |
927 | compat_ulong_t __user *datap = compat_ptr(data); | |
928 | compat_ulong_t word; | |
e16b2781 | 929 | siginfo_t siginfo; |
032d82d9 RM |
930 | int ret; |
931 | ||
932 | switch (request) { | |
933 | case PTRACE_PEEKTEXT: | |
934 | case PTRACE_PEEKDATA: | |
935 | ret = access_process_vm(child, addr, &word, sizeof(word), 0); | |
936 | if (ret != sizeof(word)) | |
937 | ret = -EIO; | |
938 | else | |
939 | ret = put_user(word, datap); | |
940 | break; | |
941 | ||
942 | case PTRACE_POKETEXT: | |
943 | case PTRACE_POKEDATA: | |
944 | ret = access_process_vm(child, addr, &data, sizeof(data), 1); | |
945 | ret = (ret != sizeof(data) ? -EIO : 0); | |
946 | break; | |
947 | ||
948 | case PTRACE_GETEVENTMSG: | |
949 | ret = put_user((compat_ulong_t) child->ptrace_message, datap); | |
950 | break; | |
951 | ||
e16b2781 RM |
952 | case PTRACE_GETSIGINFO: |
953 | ret = ptrace_getsiginfo(child, &siginfo); | |
954 | if (!ret) | |
955 | ret = copy_siginfo_to_user32( | |
956 | (struct compat_siginfo __user *) datap, | |
957 | &siginfo); | |
958 | break; | |
959 | ||
960 | case PTRACE_SETSIGINFO: | |
961 | memset(&siginfo, 0, sizeof siginfo); | |
962 | if (copy_siginfo_from_user32( | |
963 | &siginfo, (struct compat_siginfo __user *) datap)) | |
964 | ret = -EFAULT; | |
965 | else | |
966 | ret = ptrace_setsiginfo(child, &siginfo); | |
967 | break; | |
2225a122 SS |
968 | #ifdef CONFIG_HAVE_ARCH_TRACEHOOK |
969 | case PTRACE_GETREGSET: | |
970 | case PTRACE_SETREGSET: | |
971 | { | |
972 | struct iovec kiov; | |
973 | struct compat_iovec __user *uiov = | |
974 | (struct compat_iovec __user *) datap; | |
975 | compat_uptr_t ptr; | |
976 | compat_size_t len; | |
977 | ||
978 | if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) | |
979 | return -EFAULT; | |
980 | ||
981 | if (__get_user(ptr, &uiov->iov_base) || | |
982 | __get_user(len, &uiov->iov_len)) | |
983 | return -EFAULT; | |
984 | ||
985 | kiov.iov_base = compat_ptr(ptr); | |
986 | kiov.iov_len = len; | |
987 | ||
988 | ret = ptrace_regset(child, request, addr, &kiov); | |
989 | if (!ret) | |
990 | ret = __put_user(kiov.iov_len, &uiov->iov_len); | |
991 | break; | |
992 | } | |
993 | #endif | |
e16b2781 | 994 | |
032d82d9 RM |
995 | default: |
996 | ret = ptrace_request(child, request, addr, data); | |
997 | } | |
998 | ||
999 | return ret; | |
1000 | } | |
c269f196 | 1001 | |
c269f196 RM |
1002 | asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, |
1003 | compat_long_t addr, compat_long_t data) | |
1004 | { | |
1005 | struct task_struct *child; | |
1006 | long ret; | |
1007 | ||
c269f196 RM |
1008 | if (request == PTRACE_TRACEME) { |
1009 | ret = ptrace_traceme(); | |
1010 | goto out; | |
1011 | } | |
1012 | ||
1013 | child = ptrace_get_task_struct(pid); | |
1014 | if (IS_ERR(child)) { | |
1015 | ret = PTR_ERR(child); | |
1016 | goto out; | |
1017 | } | |
1018 | ||
3544d72a | 1019 | if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { |
aa9147c9 | 1020 | ret = ptrace_attach(child, request, addr, data); |
c269f196 RM |
1021 | /* |
1022 | * Some architectures need to do book-keeping after | |
1023 | * a ptrace attach. | |
1024 | */ | |
1025 | if (!ret) | |
1026 | arch_ptrace_attach(child); | |
1027 | goto out_put_task_struct; | |
1028 | } | |
1029 | ||
fca26f26 TH |
1030 | ret = ptrace_check_attach(child, request == PTRACE_KILL || |
1031 | request == PTRACE_INTERRUPT); | |
c269f196 RM |
1032 | if (!ret) |
1033 | ret = compat_arch_ptrace(child, request, addr, data); | |
1034 | ||
1035 | out_put_task_struct: | |
1036 | put_task_struct(child); | |
1037 | out: | |
c269f196 RM |
1038 | return ret; |
1039 | } | |
96b8936a | 1040 | #endif /* CONFIG_COMPAT */ |
bf26c018 FW |
1041 | |
1042 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | |
1043 | int ptrace_get_breakpoints(struct task_struct *tsk) | |
1044 | { | |
1045 | if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt)) | |
1046 | return 0; | |
1047 | ||
1048 | return -1; | |
1049 | } | |
1050 | ||
1051 | void ptrace_put_breakpoints(struct task_struct *tsk) | |
1052 | { | |
1053 | if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt)) | |
1054 | flush_ptrace_hw_breakpoint(tsk); | |
1055 | } | |
1056 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |