#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H
/* ptrace.h */
/* structs and defines to help the user use the ptrace system call. */

/* has the defines to get at the registers. */

#define PTRACE_TRACEME		 0
#define PTRACE_PEEKTEXT		 1
#define PTRACE_PEEKDATA		 2
#define PTRACE_PEEKUSR		 3
#define PTRACE_POKETEXT		 4
#define PTRACE_POKEDATA		 5
#define PTRACE_POKEUSR		 6
#define PTRACE_CONT		 7
#define PTRACE_KILL		 8
#define PTRACE_SINGLESTEP	 9

#define PTRACE_ATTACH		16
#define PTRACE_DETACH		17

#define PTRACE_SYSCALL		24

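/*
 * Illustrative usage of the basic requests above, seen from the tracer
 * side (a sketch only: assumes the userspace ptrace(2) and waitpid(2)
 * wrappers from <sys/ptrace.h> and <sys/wait.h>, error handling omitted):
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *	word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
 *	ptrace(PTRACE_CONT, pid, NULL, 0);
 *	ptrace(PTRACE_DETACH, pid, NULL, 0);
 */
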
/* 0x4200-0x4300 are reserved for architecture-independent additions. */
#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201
#define PTRACE_GETSIGINFO	0x4202
#define PTRACE_SETSIGINFO	0x4203

/*
 * Generic ptrace interface that exports the architecture-specific regsets
 * using the corresponding NT_* types (which are also used in the core dump).
 * Please note that the NT_PRSTATUS note type in a core dump contains a full
 * 'struct elf_prstatus'. But the user_regset for NT_PRSTATUS contains just
 * the elf_gregset_t that is the pr_reg field of 'struct elf_prstatus'. For
 * all the other user_regset flavors, the user_regset layout and the ELF core
 * dump note payload have exactly the same layout.
 *
 * This interface is used as follows:
 *	struct iovec iov = { buf, len };
 *
 *	ret = ptrace(PTRACE_GETREGSET/PTRACE_SETREGSET, pid, NT_XXX_TYPE, &iov);
 *
 * On successful completion, iov.len will be updated by the kernel,
 * specifying how much the kernel has written/read to/from the user's
 * iov.buf.
 */
#define PTRACE_GETREGSET	0x4204
#define PTRACE_SETREGSET	0x4205

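/*
 * For example (a sketch: assumes a stopped tracee, <sys/uio.h> for
 * struct iovec, <elf.h> for NT_PRSTATUS, and the x86 user_regs_struct
 * from <sys/user.h> as the example regset):
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { &regs, sizeof(regs) };
 *
 *	ret = ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *
 * On return, iov.iov_len holds the number of bytes actually filled in.
 */
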
#define PTRACE_SEIZE		0x4206
#define PTRACE_INTERRUPT	0x4207
#define PTRACE_LISTEN		0x4208

/* flags in @data for PTRACE_SEIZE */
#define PTRACE_SEIZE_DEVEL	0x80000000 /* temp flag for development */

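/*
 * Attaching with SEIZE instead of ATTACH (a sketch; while the interface
 * is under development the PTRACE_SEIZE_DEVEL flag above must be passed
 * in @data, and the attach itself does not stop the tracee, so the stop
 * is requested explicitly):
 *
 *	ptrace(PTRACE_SEIZE, pid, NULL, PTRACE_SEIZE_DEVEL);
 *	ptrace(PTRACE_INTERRUPT, pid, NULL, 0);
 *	waitpid(pid, &status, 0);
 */
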
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

#define PTRACE_O_MASK		0x0000007f

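/*
 * A tracer enables any subset of these options with a single call, e.g.
 * (sketch; the tracee is already attached and stopped, @addr is ignored):
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0,
 *	       PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK | PTRACE_O_TRACEEXEC);
 */
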
/* Wait extended result codes for the above trace options. */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6
#define PTRACE_EVENT_STOP	7

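/*
 * The tracer sees an enabled event as a SIGTRAP stop whose wait status
 * carries the event code in the high bits: ptrace_event() below reports
 * (event << 8) | SIGTRAP, which waitpid(2) encodes so that status >> 16
 * yields the PTRACE_EVENT_* value. A decoding sketch (userspace side):
 *
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) {
 *		int event = status >> 16;
 *		if (event == PTRACE_EVENT_FORK)
 *			ptrace(PTRACE_GETEVENTMSG, pid, 0, &child_pid);
 *	}
 */
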
#include <asm/ptrace.h>

#ifdef __KERNEL__
/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace flags,
 * are simple. When a task is running, it owns its task->ptrace flags.
 * When a task is stopped, the ptracer owns task->ptrace.
 */

#define PT_SEIZED	0x00010000	/* SEIZE used, enable new behavior */
#define PT_PTRACED	0x00000001
#define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD	0x00000004
#define PT_PTRACE_CAP	0x00000008	/* ptracer can follow suid-exec */

/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG_SHIFT	4
#define PT_EVENT_FLAG(event)	(1 << (PT_EVENT_FLAG_SHIFT + (event) - 1))

#define PT_TRACE_FORK		PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK		PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE		PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC		PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE	PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT		PT_EVENT_FLAG(PTRACE_EVENT_EXIT)

#define PT_TRACE_MASK	0x000003f4

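/*
 * For instance, PT_TRACE_FORK expands to
 * 1 << (4 + PTRACE_EVENT_FORK - 1) == 1 << 4 == 0x10, so the event codes
 * 1 through 6 map to one flag each in bits 4-9 of task->ptrace;
 * PT_TRACE_MASK covers those six bits plus PT_TRACESYSGOOD.
 */
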
/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT	31
#define PT_SINGLESTEP		(1 << PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT	30
#define PT_BLOCKSTEP		(1 << PT_BLOCKSTEP_BIT)

#include <linux/compiler.h>	/* For unlikely. */
#include <linux/sched.h>	/* For struct task_struct. */
#include <linux/err.h>		/* For IS_ERR_VALUE. */
#include <linux/bug.h>		/* For BUG_ON. */

extern long arch_ptrace(struct task_struct *child, long request,
			unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src,
			   char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src,
			    unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
extern int ptrace_request(struct task_struct *child, long request,
			  unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ	0x01
#define PTRACE_MODE_ATTACH	0x02
#define PTRACE_MODE_NOAUDIT	0x04
/* Returns 0 on success, -errno on denial. */
extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);

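/*
 * A typical in-kernel caller gates access to a task like this (a sketch,
 * e.g. a /proc handler deciding whether current may read @task's state):
 *
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *		return -EPERM;
 */
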
static inline int ptrace_reparented(struct task_struct *child)
{
	return !same_thread_group(child->real_parent, child->parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);

/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be
 * kept live only by RCU.  During exec, this may be called with task_lock()
 * held on @task, still held from when check_unsafe_exec() was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
	if (unlikely(task->ptrace))
		return rcu_dereference(task->parent);
	return NULL;
}

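/*
 * Because the returned pointer may be kept live only by RCU, callers
 * dereference it inside the read-side critical section, e.g. (sketch):
 *
 *	rcu_read_lock();
 *	tracer = ptrace_parent(task);
 *	if (tracer)
 *		tracer_pid = task_pid_nr(tracer);
 *	rcu_read_unlock();
 */
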
/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
	return task->ptrace & PT_EVENT_FLAG(event);
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event: %PTRACE_EVENT_* value to report
 * @message: value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
	if (unlikely(ptrace_event_enabled(current, event))) {
		current->ptrace_message = message;
		ptrace_notify((event << 8) | SIGTRAP);
	} else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
		/* legacy EXEC report via SIGTRAP */
		send_sig(SIGTRAP, current, 0);
	}
}

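/*
 * For instance, a caller on the fork path might report the new child's
 * pid as the event message (a sketch; pid_of_new_child is a stand-in):
 *
 *	ptrace_event(PTRACE_EVENT_FORK, pid_of_new_child);
 *
 * The tracer then retrieves that value with PTRACE_GETEVENTMSG.
 */
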
/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child: new child task
 * @ptrace: true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	atomic_set(&child->ptrace_bp_refcnt, 1);
#endif
	child->jobctl = 0;
	child->ptrace = 0;
	child->parent = child->real_parent;

	if (unlikely(ptrace) && current->ptrace) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent);

		if (child->ptrace & PT_SEIZED)
			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
		else
			sigaddset(&child->pending.signal, SIGSTOP);

		set_tsk_thread_flag(child, TIF_SIGPENDING);
	}
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task: task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the
 * macro is a no-op and the spurious error condition needs to be filtered out
 * by some other means (e.g., at user level, by passing an extra argument to
 * the syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif

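/*
 * Sketch of the intended call pattern, where the handler's successful
 * result may legitimately be negative (sys_example() and compute_value()
 * are hypothetical names):
 *
 *	asmlinkage long sys_example(void)
 *	{
 *		long ret = compute_value();
 *
 *		force_successful_syscall_return();
 *		return ret;
 *	}
 */
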
#ifndef is_syscall_success
/*
 * On most systems we can tell if a syscall is a success based on whether the
 * retval is an error value.  On some systems like ia64 and powerpc they have
 * different indicators of success/failure and must define their own.
 */
#define is_syscall_success(regs) \
	(!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()	(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif	/* arch_has_single_step */

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()	(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif	/* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
				     struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
					    struct pt_regs *regs,
					    siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 */
#define arch_ptrace_stop_needed(code, info)	(0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);

#ifdef CONFIG_HAVE_HW_BREAKPOINT
extern int ptrace_get_breakpoints(struct task_struct *tsk);
extern void ptrace_put_breakpoints(struct task_struct *tsk);
#else
static inline void ptrace_put_breakpoints(struct task_struct *tsk) { }
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

#endif	/* __KERNEL__ */

#endif