/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
   1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
   2008, 2009 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "gdb_string.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "exceptions.h"
#include "breakpoint.h"
#include "gdb_wait.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "cli/cli-script.h"
#include "target.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include <signal.h>
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observer.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "gdb_assert.h"
#include "mi/mi-common.h"
#include "event-top.h"

/* Prototypes for local functions */

static void signals_info (char *, int);

static void handle_command (char *, int);

static void sig_print_info (enum target_signal);

static void sig_print_header (void);

static void resume_cleanups (void *);

static int hook_stop_stub (void *);

static int restore_selected_frame (void *);

static void build_infrun (void);

static int follow_fork (void);

static void set_schedlock_func (char *args, int from_tty,
                                struct cmd_list_element *c);

static int currently_stepping (struct thread_info *tp);

static int currently_stepping_callback (struct thread_info *tp, void *data);

static void xdb_handle_command (char *args, int from_tty);

static int prepare_to_proceed (int);

void _initialize_infrun (void);

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such a function.  */
int step_stop_if_no_debug = 0;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* In asynchronous mode, but simulating synchronous execution.  */

int sync_execution = 0;

/* wait_for_inferior and normal_stop use this to notify the user
   when the inferior stopped in a different thread than it had been
   running in.  */

static ptid_t previous_inferior_ptid;

int debug_displaced = 0;
static void
show_debug_displaced (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
}

static int debug_infrun = 0;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}

/* If the program uses ELF-style shared libraries, then calls to
   functions in shared libraries go through stubs, which live in a
   table called the PLT (Procedure Linkage Table).  The first time the
   function is called, the stub sends control to the dynamic linker,
   which looks up the function's real address, patches the stub so
   that future calls will go directly to the function, and then passes
   control to the function.

   If we are stepping at the source level, we don't want to see any of
   this --- we just want to skip over the stub and the dynamic linker.
   The simple approach is to single-step until control leaves the
   dynamic linker.

   However, on some systems (e.g., Red Hat's 5.2 distribution) the
   dynamic linker calls functions in the shared C library, so you
   can't tell from the PC alone whether the dynamic linker is still
   running.  In this case, we use a step-resume breakpoint to get us
   past the dynamic linker, as if we were using "next" to step over a
   function call.

   in_solib_dynsym_resolve_code() says whether we're in the dynamic
   linker code or not.  Normally, this means we single-step.  However,
   if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
   address where we can place a step-resume breakpoint to get past the
   linker's symbol resolution function.

   in_solib_dynsym_resolve_code() can generally be implemented in a
   pretty portable way, by comparing the PC against the address ranges
   of the dynamic linker's sections.

   SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
   it depends on internal details of the dynamic linker.  It's usually
   not too hard to figure out where to put a breakpoint, but it
   certainly isn't portable.  SKIP_SOLIB_RESOLVER should do plenty of
   sanity checking.  If it can't figure things out, returning zero and
   getting the (possibly confusing) stepping behavior is better than
   signalling an error, which will obscure the change in the
   inferior's state.  */

/* This function returns TRUE if pc is the address of an instruction
   that lies within the dynamic linker (such as the event hook, or the
   dld itself).

   This function must be used only when a dynamic linker event has
   been caught, and the inferior is being stepped out of the hook, or
   undefined results are guaranteed.  */

#ifndef SOLIB_IN_DYNAMIC_LINKER
#define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
#endif

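/* Illustration only (not part of the original infrun.c): the comment
   above notes that in_solib_dynsym_resolve_code() can usually be
   implemented portably by comparing the PC against the address ranges
   of the dynamic linker's sections.  A minimal sketch of that idea
   follows, guarded by "#if 0".  The two bounds variables are
   hypothetical stand-ins for values a solib backend would record when
   the dynamic linker's sections are mapped; they do not exist in GDB
   under these names.  */
#if 0
static CORE_ADDR example_dyn_linker_text_start;	/* Hypothetical.  */
static CORE_ADDR example_dyn_linker_text_end;	/* Hypothetical.  */

static int
example_in_dynsym_resolve_code (CORE_ADDR pc)
{
  /* A PC inside the dynamic linker's text section means the resolver
     is still running, so the caller should keep single-stepping.  */
  return (pc >= example_dyn_linker_text_start
          && pc < example_dyn_linker_text_end);
}
#endif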

/* Convert the #defines into values.  This is temporary until wfi control
   flow is completely sorted out.  */

#ifndef CANNOT_STEP_HW_WATCHPOINTS
#define CANNOT_STEP_HW_WATCHPOINTS 0
#else
#undef  CANNOT_STEP_HW_WATCHPOINTS
#define CANNOT_STEP_HW_WATCHPOINTS 1
#endif

/* Tables of how to react to signals; the user sets them.  */

static unsigned char *signal_stop;
static unsigned char *signal_print;
static unsigned char *signal_program;

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)

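/* Illustration only (not part of the original file): a sketch of how
   SET_SIGS / UNSET_SIGS above are meant to be used.  Given a scratch
   array with one flag per signal number, the macros set or clear the
   corresponding entries in one of the tables above (signal_stop,
   signal_print, signal_program).  The array size and the particular
   signal chosen here are illustrative assumptions.  */
#if 0
static void
example_pass_sigint_silently (void)
{
  unsigned char sigs[TARGET_SIGNAL_LAST];

  memset (sigs, 0, sizeof (sigs));
  sigs[TARGET_SIGNAL_INT] = 1;

  /* Don't stop or print for SIGINT, but still pass it to the program.  */
  UNSET_SIGS (TARGET_SIGNAL_LAST, sigs, signal_stop);
  UNSET_SIGS (TARGET_SIGNAL_LAST, sigs, signal_print);
  SET_SIGS (TARGET_SIGNAL_LAST, sigs, signal_program);
}
#endif
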
/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL (pid_to_ptid (-1))

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Function inferior was in as of last step command.  */

static struct symbol *step_start_function;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
static int stop_on_solib_events;
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

/* Nonzero means expecting a trace trap
   and should stop the inferior and return silently when it happens.  */

int stop_after_trap;

/* Save register contents here when executing a "finish" command or
   when about to pop a stack dummy frame, if-and-only-if
   proceed_to_finish is set.  Thus this contains the return value from
   the called function (assuming values are returned in a register).  */

struct regcache *stop_registers;

/* Nonzero after stop if current stack frame should be printed.  */

static int stop_print_frame;

/* This is a cached copy of the pid/waitstatus of the last event
   returned by target_wait()/deprecated_target_wait_hook().  This
   information is returned by get_last_target_status().  */
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

static void context_switch (ptid_t ptid);

void init_thread_stepping_state (struct thread_info *tss);

void init_infwait_state (void);

/* This is used to remember when a fork, vfork or exec event
   was caught by a catchpoint, and thus the event is to be
   followed at the next resume of the inferior, and not
   immediately.  */
static struct
{
  enum target_waitkind kind;
  struct
  {
    ptid_t parent_pid;
    ptid_t child_pid;
  }
  fork_event;
  char *execd_pathname;
}
pending_follow;

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("\
Debugger response to a program call of fork or vfork is \"%s\".\n"),
                    value);
}
\f

static int
follow_fork (void)
{
  int follow_child = (follow_fork_mode_string == follow_fork_mode_child);

  return target_follow_fork (follow_child);
}

void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->step_resume_breakpoint)
    breakpoint_re_set_thread (tp->step_resume_breakpoint);

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* EXECD_PATHNAME is assumed to be non-NULL.  */

static void
follow_exec (ptid_t pid, char *execd_pathname)
{
  struct target_ops *tgt;
  struct thread_info *th = inferior_thread ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */
  update_breakpoints_after_exec ();

  /* If there was one, it's gone now.  We cannot truly step-to-next
     statement through an exec().  */
  th->step_resume_breakpoint = NULL;
  th->step_range_start = 0;
  th->step_range_end = 0;

  /* What is this a.out's name?  */
  printf_unfiltered (_("Executing new program: %s\n"), execd_pathname);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  gdb_flush (gdb_stdout);

  breakpoint_init_inferior (inf_execd);

  if (gdb_sysroot && *gdb_sysroot)
    {
      char *name = alloca (strlen (gdb_sysroot)
                           + strlen (execd_pathname)
                           + 1);
      strcpy (name, gdb_sysroot);
      strcat (name, execd_pathname);
      execd_pathname = name;
    }

  /* That a.out is now the one to use.  */
  exec_file_attach (execd_pathname, 0);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  /* Load the main file's symbols.  */
  symbol_file_add_main (execd_pathname, 0);

#ifdef SOLIB_CREATE_INFERIOR_HOOK
  SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
#else
  solib_create_inferior_hook ();
#endif

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...)  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.)  */
}

/* Non-zero if we are just simulating a single-step.  This is needed
   because we cannot remove the breakpoints in the inferior process
   until after the `wait' in `wait_for_inferior'.  */
static int singlestep_breakpoints_inserted_p = 0;

/* The thread we inserted single-step breakpoints for.  */
static ptid_t singlestep_ptid;

/* PC when we started this single-step.  */
static CORE_ADDR singlestep_pc;

/* If another thread hit the singlestep breakpoint, we save the original
   thread here so that we can resume single-stepping it later.  */
static ptid_t saved_singlestep_ptid;
static int stepping_past_singlestep_breakpoint;

/* If not equal to null_ptid, this means that after stepping over a
   breakpoint is finished, we need to switch to deferred_step_ptid,
   and step it.

   The use case is when one thread has hit a breakpoint, and then the
   user has switched to another thread and issued 'step'.  We need to
   step over the breakpoint in the thread which hit the breakpoint,
   but then continue stepping the thread the user has selected.  */
static ptid_t deferred_step_ptid;
\f
/* Displaced stepping.  */

/* In non-stop debugging mode, we must take special care to manage
   breakpoints properly; in particular, the traditional strategy for
   stepping a thread past a breakpoint it has hit is unsuitable.
   'Displaced stepping' is a tactic for stepping one thread past a
   breakpoint it has hit while ensuring that other threads running
   concurrently will hit the breakpoint as they should.

   The traditional way to step a thread T off a breakpoint in a
   multi-threaded program in all-stop mode is as follows:

   a0) Initially, all threads are stopped, and breakpoints are not
       inserted.
   a1) We single-step T, leaving breakpoints uninserted.
   a2) We insert breakpoints, and resume all threads.

   In non-stop debugging, however, this strategy is unsuitable: we
   don't want to have to stop all threads in the system in order to
   continue or step T past a breakpoint.  Instead, we use displaced
   stepping:

   n0) Initially, T is stopped, other threads are running, and
       breakpoints are inserted.
   n1) We copy the instruction "under" the breakpoint to a separate
       location, outside the main code stream, making any adjustments
       to the instruction, register, and memory state as directed by
       T's architecture.
   n2) We single-step T over the instruction at its new location.
   n3) We adjust the resulting register and memory state as directed
       by T's architecture.  This includes resetting T's PC to point
       back into the main instruction stream.
   n4) We resume T.

   This approach depends on the following gdbarch methods:

   - gdbarch_max_insn_length and gdbarch_displaced_step_location
     indicate where to copy the instruction, and how much space must
     be reserved there.  We use these in step n1.

   - gdbarch_displaced_step_copy_insn copies an instruction to a new
     address, and makes any necessary adjustments to the instruction,
     register contents, and memory.  We use this in step n1.

   - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
     same effect the instruction would have had if we had executed it
     at its original address.  We use this in step n3.

   - gdbarch_displaced_step_free_closure provides cleanup.

   The gdbarch_displaced_step_copy_insn and
   gdbarch_displaced_step_fixup functions must be written so that
   copying an instruction with gdbarch_displaced_step_copy_insn,
   single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
   thread's memory and registers as stepping the instruction in place
   would have.  Exactly which responsibilities fall to the copy and
   which fall to the fixup is up to the author of those functions.

   See the comments in gdbarch.sh for details.

   Note that displaced stepping and software single-step cannot
   currently be used in combination, although with some care I think
   they could be made to.  Software single-step works by placing
   breakpoints on all possible subsequent instructions; if the
   displaced instruction is a PC-relative jump, those breakpoints
   could fall in very strange places --- on pages that aren't
   executable, or at addresses that are not proper instruction
   boundaries.  (We do generally let other threads run while we wait
   to hit the software single-step breakpoint, and they might
   encounter such a corrupted instruction.)  One way to work around
   this would be to have gdbarch_displaced_step_copy_insn fully
   simulate the effect of PC-relative instructions (and return NULL)
   on architectures that use software single-stepping.

   In non-stop mode, we can have independent and simultaneous step
   requests, so more than one thread may need to simultaneously step
   over a breakpoint.  The current implementation assumes there is
   only one scratch space per process.  In this case, we have to
   serialize access to the scratch space.  If thread A wants to step
   over a breakpoint, but we are currently waiting for some other
   thread to complete a displaced step, we leave thread A stopped and
   place it in the displaced_step_request_queue.  Whenever a displaced
   step finishes, we pick the next thread in the queue and start a new
   displaced step operation on it.  See displaced_step_prepare and
   displaced_step_fixup for details.  */

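/* Illustration only (not part of the original file): a condensed
   sketch of steps n1-n4 above, expressed with the gdbarch hooks just
   listed.  The real implementation -- with queueing, cleanups, saved
   scratch-area contents and debug output -- is displaced_step_prepare
   and displaced_step_fixup below; the single-step itself (resume plus
   wait) is elided here.  */
#if 0
static void
example_displaced_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  struct displaced_step_closure *closure;

  /* n1: copy the instruction "under" the breakpoint to the scratch
     location, letting the architecture adjust it as needed.  */
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                              regcache);

  /* n2: point the PC at the copy; the caller would then single-step
     the thread and wait for it to stop.  */
  regcache_write_pc (regcache, copy);

  /* n3: after the step, fix up registers and memory so the effect is
     as if the instruction had run at its original address.  */
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);

  /* n4: clean up; the thread can now be resumed normally.  */
  gdbarch_displaced_step_free_closure (gdbarch, closure);
}
#endif
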
/* If this is not null_ptid, this is the thread carrying out a
   displaced single-step.  This thread's state will require fixing up
   once it has completed its step.  */
static ptid_t displaced_step_ptid;

struct displaced_step_request
{
  ptid_t ptid;
  struct displaced_step_request *next;
};

/* A queue of pending displaced stepping requests.  */
struct displaced_step_request *displaced_step_request_queue;

/* The architecture the thread had when we stepped it.  */
static struct gdbarch *displaced_step_gdbarch;

/* The closure provided by gdbarch_displaced_step_copy_insn, to be
   used for post-step cleanup.  */
static struct displaced_step_closure *displaced_step_closure;

/* The address of the original instruction, and the copy we made.  */
static CORE_ADDR displaced_step_original, displaced_step_copy;

/* Saved contents of copy area.  */
static gdb_byte *displaced_step_saved_copy;

/* Enum strings for "set|show displaced-stepping".  */

static const char can_use_displaced_stepping_auto[] = "auto";
static const char can_use_displaced_stepping_on[] = "on";
static const char can_use_displaced_stepping_off[] = "off";
static const char *can_use_displaced_stepping_enum[] =
{
  can_use_displaced_stepping_auto,
  can_use_displaced_stepping_on,
  can_use_displaced_stepping_off,
  NULL,
};

/* If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   which of all-stop or non-stop mode is active --- displaced stepping
   in non-stop mode; hold-and-step in all-stop mode.  */

static const char *can_use_displaced_stepping =
  can_use_displaced_stepping_auto;

static void
show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
                                 struct cmd_list_element *c,
                                 const char *value)
{
  if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
    fprintf_filtered (file, _("\
Debugger's willingness to use displaced stepping to step over \
breakpoints is %s (currently %s).\n"),
                      value, non_stop ? "on" : "off");
  else
    fprintf_filtered (file, _("\
Debugger's willingness to use displaced stepping to step over \
breakpoints is %s.\n"), value);
}

/* Return non-zero if displaced stepping can/should be used to step
   over breakpoints.  */

static int
use_displaced_stepping (struct gdbarch *gdbarch)
{
  return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
            && non_stop)
           || can_use_displaced_stepping == can_use_displaced_stepping_on)
          && gdbarch_displaced_step_copy_insn_p (gdbarch));
}

/* Clean out any stray displaced stepping state.  */
static void
displaced_step_clear (void)
{
  /* Indicate that there is no cleanup pending.  */
  displaced_step_ptid = null_ptid;

  if (displaced_step_closure)
    {
      gdbarch_displaced_step_free_closure (displaced_step_gdbarch,
                                           displaced_step_closure);
      displaced_step_closure = NULL;
    }
}

static void
cleanup_displaced_step_closure (void *ptr)
{
  struct displaced_step_closure *closure = ptr;

  gdbarch_displaced_step_free_closure (current_gdbarch, closure);
}

/* Dump LEN bytes at BUF in hex to FILE, followed by a newline.  */
void
displaced_step_dump_bytes (struct ui_file *file,
                           const gdb_byte *buf,
                           size_t len)
{
  int i;

  for (i = 0; i < len; i++)
    fprintf_unfiltered (file, "%02x ", buf[i]);
  fputs_unfiltered ("\n", file);
}

/* Prepare to single-step, using displaced stepping.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns 1 if preparing was successful -- this thread is going to be
   stepped now; or 0 if displaced stepping this thread got queued.  */
static int
displaced_step_prepare (ptid_t ptid)
{
  struct cleanup *old_cleanups, *ignore_cleanups;
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original, copy;
  ULONGEST len;
  struct displaced_step_closure *closure;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));

  /* For the first cut, we're displaced stepping one thread at a
     time.  */

  if (!ptid_equal (displaced_step_ptid, null_ptid))
    {
      /* Already waiting for a displaced step to finish.  Defer this
         request and place in queue.  */
      struct displaced_step_request *req, *new_req;

      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog,
                            "displaced: deferring step of %s\n",
                            target_pid_to_str (ptid));

      new_req = xmalloc (sizeof (*new_req));
      new_req->ptid = ptid;
      new_req->next = NULL;

      if (displaced_step_request_queue)
        {
          for (req = displaced_step_request_queue;
               req && req->next;
               req = req->next)
            ;
          req->next = new_req;
        }
      else
        displaced_step_request_queue = new_req;

      return 0;
    }
  else
    {
      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog,
                            "displaced: stepping %s now\n",
                            target_pid_to_str (ptid));
    }

  displaced_step_clear ();

  old_cleanups = save_inferior_ptid ();
  inferior_ptid = ptid;

  original = regcache_read_pc (regcache);

  copy = gdbarch_displaced_step_location (gdbarch);
  len = gdbarch_max_insn_length (gdbarch);

  /* Save the original contents of the copy area.  */
  displaced_step_saved_copy = xmalloc (len);
  ignore_cleanups = make_cleanup (free_current_contents,
                                  &displaced_step_saved_copy);
  read_memory (copy, displaced_step_saved_copy, len);
  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: saved 0x%s: ",
                          paddr_nz (copy));
      displaced_step_dump_bytes (gdb_stdlog, displaced_step_saved_copy, len);
    };

  closure = gdbarch_displaced_step_copy_insn (gdbarch,
                                              original, copy, regcache);

  /* We don't support the fully-simulated case at present.  */
  gdb_assert (closure);

  make_cleanup (cleanup_displaced_step_closure, closure);

  /* Resume execution at the copy.  */
  regcache_write_pc (regcache, copy);

  discard_cleanups (ignore_cleanups);

  do_cleanups (old_cleanups);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to 0x%s\n",
                        paddr_nz (copy));

  /* Save the information we need to fix things up if the step
     succeeds.  */
  displaced_step_ptid = ptid;
  displaced_step_gdbarch = gdbarch;
  displaced_step_closure = closure;
  displaced_step_original = original;
  displaced_step_copy = copy;
  return 1;
}

static void
displaced_step_clear_cleanup (void *ignore)
{
  displaced_step_clear ();
}

static void
write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
{
  struct cleanup *ptid_cleanup = save_inferior_ptid ();
  inferior_ptid = ptid;
  write_memory (memaddr, myaddr, len);
  do_cleanups (ptid_cleanup);
}

static void
displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
{
  struct cleanup *old_cleanups;

  /* Was this event for the pid we displaced?  */
  if (ptid_equal (displaced_step_ptid, null_ptid)
      || ! ptid_equal (displaced_step_ptid, event_ptid))
    return;

  old_cleanups = make_cleanup (displaced_step_clear_cleanup, 0);

  /* Restore the contents of the copy area.  */
  {
    ULONGEST len = gdbarch_max_insn_length (displaced_step_gdbarch);
    write_memory_ptid (displaced_step_ptid, displaced_step_copy,
                       displaced_step_saved_copy, len);
    if (debug_displaced)
      fprintf_unfiltered (gdb_stdlog, "displaced: restored 0x%s\n",
                          paddr_nz (displaced_step_copy));
  }

  /* Did the instruction complete successfully?  */
  if (signal == TARGET_SIGNAL_TRAP)
    {
      /* Fix up the resulting state.  */
      gdbarch_displaced_step_fixup (displaced_step_gdbarch,
                                    displaced_step_closure,
                                    displaced_step_original,
                                    displaced_step_copy,
                                    get_thread_regcache (displaced_step_ptid));
    }
  else
    {
      /* Since the instruction didn't complete, all we can do is
         relocate the PC.  */
      struct regcache *regcache = get_thread_regcache (event_ptid);
      CORE_ADDR pc = regcache_read_pc (regcache);
      pc = displaced_step_original + (pc - displaced_step_copy);
      regcache_write_pc (regcache, pc);
    }

  do_cleanups (old_cleanups);

  displaced_step_ptid = null_ptid;

  /* Are there any pending displaced stepping requests?  If so, run
     one now.  */
  while (displaced_step_request_queue)
    {
      struct displaced_step_request *head;
      ptid_t ptid;
      CORE_ADDR actual_pc;

      head = displaced_step_request_queue;
      ptid = head->ptid;
      displaced_step_request_queue = head->next;
      xfree (head);

      context_switch (ptid);

      actual_pc = read_pc ();

      if (breakpoint_here_p (actual_pc))
        {
          if (debug_displaced)
            fprintf_unfiltered (gdb_stdlog,
                                "displaced: stepping queued %s now\n",
                                target_pid_to_str (ptid));

          displaced_step_prepare (ptid);

          if (debug_displaced)
            {
              gdb_byte buf[4];

              fprintf_unfiltered (gdb_stdlog, "displaced: run 0x%s: ",
                                  paddr_nz (actual_pc));
              read_memory (actual_pc, buf, sizeof (buf));
              displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
            }

          target_resume (ptid, 1, TARGET_SIGNAL_0);

          /* Done, we're stepping a thread.  */
          break;
        }
      else
        {
          int step;
          struct thread_info *tp = inferior_thread ();

          /* The breakpoint we were sitting under has since been
             removed.  */
          tp->trap_expected = 0;

          /* Go back to what we were trying to do.  */
          step = currently_stepping (tp);

          if (debug_displaced)
            fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
                                target_pid_to_str (tp->ptid), step);

          target_resume (ptid, step, TARGET_SIGNAL_0);
          tp->stop_signal = TARGET_SIGNAL_0;

          /* This request was discarded.  See if there's any other
             thread waiting for its turn.  */
        }
    }
}

/* Update global variables holding ptids to hold NEW_PTID if they were
   holding OLD_PTID.  */
static void
infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
{
  struct displaced_step_request *it;

  if (ptid_equal (inferior_ptid, old_ptid))
    inferior_ptid = new_ptid;

  if (ptid_equal (singlestep_ptid, old_ptid))
    singlestep_ptid = new_ptid;

  if (ptid_equal (displaced_step_ptid, old_ptid))
    displaced_step_ptid = new_ptid;

  if (ptid_equal (deferred_step_ptid, old_ptid))
    deferred_step_ptid = new_ptid;

  for (it = displaced_step_request_queue; it; it = it->next)
    if (ptid_equal (it->ptid, old_ptid))
      it->ptid = new_ptid;
}

\f
/* Resuming.  */

/* Things to clean up if we QUIT out of resume ().  */
static void
resume_cleanups (void *ignore)
{
  normal_stop ();
}

static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char *scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  NULL
};
static const char *scheduler_mode = schedlock_off;
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("\
Mode for locking scheduler during execution is \"%s\".\n"),
                    value);
}

static void
set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
{
  if (!target_can_lock_scheduler)
    {
      scheduler_mode = schedlock_off;
      error (_("Target '%s' cannot support this command."), target_shortname);
    }
}


/* Resume the inferior, but allow a QUIT.  This is useful if the user
   wants to interrupt some lengthy single-stepping operation
   (for child processes, the SIGINT goes to the inferior, and so
   we get a SIGINT random_signal, but for remote debugging and perhaps
   other targets, that's not true).

   STEP nonzero if we should step (zero to continue instead).
   SIG is the signal to give the inferior (zero for none).  */
void
resume (int step, enum target_signal sig)
{
  int should_resume = 1;
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);

  /* Note that these must be reset if we follow a fork below.  */
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct thread_info *tp = inferior_thread ();
  CORE_ADDR pc = regcache_read_pc (regcache);

  QUIT;

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
                        "infrun: resume (step=%d, signal=%d), "
                        "trap_expected=%d\n",
                        step, sig, tp->trap_expected);

  /* Some targets (e.g. Solaris x86) have a kernel bug when stepping
     over an instruction that causes a page fault without triggering
     a hardware watchpoint.  The kernel properly notices that it shouldn't
     stop, because the hardware watchpoint is not triggered, but it forgets
     the step request and continues the program normally.
     Work around the problem by removing hardware watchpoints if a step is
     requested; GDB will check for a hardware watchpoint trigger after the
     step anyway.  */
  if (CANNOT_STEP_HW_WATCHPOINTS && step)
    remove_hw_watchpoints ();


  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (pc) == permanent_breakpoint_here)
    {
      if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
        gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
      else
        error (_("\
The program is stopped at a permanent breakpoint, but GDB does not know\n\
how to step past a permanent breakpoint on this architecture.  Try using\n\
a command like `return' or `jump' to continue execution."));
    }

  /* If enabled, step over breakpoints by executing a copy of the
     instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in handle_inferior_event for dealing with 'random
     signals' explain what we do instead.  */
  if (use_displaced_stepping (gdbarch)
      && tp->trap_expected
      && sig == TARGET_SIGNAL_0)
    {
      if (!displaced_step_prepare (inferior_ptid))
        {
          /* Got placed in the displaced stepping queue.  Will be resumed
             later when all the currently queued displaced stepping
             requests finish.  The thread is not executing at this point,
             and the call to set_executing will be made later.  But we
             need to call set_running here, since from the frontend's
             point of view, the thread is running.  */
          set_running (inferior_ptid, 1);
          discard_cleanups (old_cleanups);
          return;
        }
    }

  if (step && gdbarch_software_single_step_p (gdbarch))
    {
      /* Do it the hard way, w/temp breakpoints.  */
      if (gdbarch_software_single_step (gdbarch, get_current_frame ()))
        {
          /* ...and don't ask hardware to do it.  */
          step = 0;
          /* And do not pull these breakpoints until after a `wait' in
             `wait_for_inferior'.  */
          singlestep_breakpoints_inserted_p = 1;
          singlestep_ptid = inferior_ptid;
          singlestep_pc = pc;
        }
    }

  /* If there were any forks/vforks/execs that were caught and are
     now to be followed, then do so.  */
  switch (pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
      if (follow_fork ())
        should_resume = 0;

      /* Following a child fork will change our notion of current
         thread.  */
      tp = inferior_thread ();
      regcache = get_current_regcache ();
      gdbarch = get_regcache_arch (regcache);
      pc = regcache_read_pc (regcache);
      break;

    case TARGET_WAITKIND_EXECD:
      /* follow_exec is called as soon as the exec event is seen.  */
      pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
      break;

    default:
      break;
    }

  /* Install inferior's terminal modes.  */
  target_terminal_inferior ();

  if (should_resume)
    {
      ptid_t resume_ptid;

      resume_ptid = RESUME_ALL;  /* Default */

      /* If STEP is set, it's a request to use hardware stepping
         facilities.  But in that case, we should never
         use a singlestep breakpoint.  */
      gdb_assert (!(singlestep_breakpoints_inserted_p && step));

      if (singlestep_breakpoints_inserted_p
          && stepping_past_singlestep_breakpoint)
        {
          /* The situation here is as follows.  In thread T1 we wanted to
             single-step.  Lacking hardware single-stepping we've
             set a breakpoint at the PC of the next instruction -- call it
             P.  After resuming, we've hit that breakpoint in thread T2.
             Now we've removed the original breakpoint, inserted a
             breakpoint at P+1, and are trying to step to advance T2 past
             the breakpoint.  We need to step only T2, as if T1 is allowed
             to freely run, it can run past P, and if other threads are
             allowed to run, they can hit the breakpoint at P+1, and nested
             hits of single-step breakpoints are not something we'd want --
             that's complicated to support, and has no value.  */
          resume_ptid = inferior_ptid;
        }

      if ((step || singlestep_breakpoints_inserted_p)
          && tp->trap_expected)
        {
          /* We're allowing a thread to run past a breakpoint it has
             hit, by single-stepping the thread with the breakpoint
             removed.  In which case, we need to single-step only this
             thread, and keep others stopped, as they can miss this
             breakpoint if allowed to run.

             The current code actually removes all breakpoints when
             doing this, not just the one being stepped over, so if we
             let other threads run, we can actually miss any
             breakpoint, not just the one at PC.  */
          resume_ptid = inferior_ptid;
        }

      if (non_stop)
        {
          /* With non-stop mode on, threads are always handled
             individually.  */
          resume_ptid = inferior_ptid;
        }
      else if ((scheduler_mode == schedlock_on)
               || (scheduler_mode == schedlock_step
                   && (step || singlestep_breakpoints_inserted_p)))
        {
          /* User-settable 'scheduler' mode requires solo thread resume.  */
          resume_ptid = inferior_ptid;
        }

      if (gdbarch_cannot_step_breakpoint (gdbarch))
        {
          /* Most targets can step a breakpoint instruction, thus
             executing it normally.  But if this one cannot, just
             continue and we will hit it anyway.  */
          if (step && breakpoint_inserted_here_p (pc))
            step = 0;
        }

      if (debug_displaced
          && use_displaced_stepping (gdbarch)
          && tp->trap_expected)
        {
          struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
          CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
          gdb_byte buf[4];

          fprintf_unfiltered (gdb_stdlog, "displaced: run 0x%s: ",
                              paddr_nz (actual_pc));
          read_memory (actual_pc, buf, sizeof (buf));
          displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
        }

      /* Avoid confusing the next resume, if the next stop/resume
         happens to apply to another thread.  */
      tp->stop_signal = TARGET_SIGNAL_0;

      target_resume (resume_ptid, step, sig);
    }

  discard_cleanups (old_cleanups);
}
\f
/* Proceeding.  */

/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
                        "infrun: clear_proceed_status_thread (%s)\n",
                        target_pid_to_str (tp->ptid));

  tp->trap_expected = 0;
  tp->step_range_start = 0;
  tp->step_range_end = 0;
  tp->step_frame_id = null_frame_id;
  tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->stop_requested = 0;

  tp->stop_step = 0;

  tp->proceed_to_finish = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->stop_bpstat);
}

static int
clear_proceed_status_callback (struct thread_info *tp, void *data)
{
  if (is_exited (tp->ptid))
    return 0;

  clear_proceed_status_thread (tp);
  return 0;
}

void
clear_proceed_status (void)
{
  if (!ptid_equal (inferior_ptid, null_ptid))
    {
      struct inferior *inferior;

      if (non_stop)
        {
          /* If in non-stop mode, only delete the per-thread status
             of the current thread.  */
          clear_proceed_status_thread (inferior_thread ());
        }
      else
        {
          /* In all-stop mode, delete the per-thread status of
             *all* threads.  */
          iterate_over_threads (clear_proceed_status_callback, NULL);
        }

      inferior = current_inferior ();
      inferior->stop_soon = NO_STOP_QUIETLY;
    }

  stop_after_trap = 0;

  observer_notify_about_to_proceed ();

  if (stop_registers)
    {
      regcache_xfree (stop_registers);
      stop_registers = NULL;
    }
}

/* This should be suitable for any targets that support threads.  */

static int
prepare_to_proceed (int step)
{
  ptid_t wait_ptid;
  struct target_waitstatus wait_status;

  /* Get the last target status returned by target_wait().  */
  get_last_target_status (&wait_ptid, &wait_status);

  /* Make sure we were stopped at a breakpoint.  */
  if (wait_status.kind != TARGET_WAITKIND_STOPPED
      || wait_status.value.sig != TARGET_SIGNAL_TRAP)
    {
      return 0;
    }

  /* Switched over from WAIT_PID.  */
  if (!ptid_equal (wait_ptid, minus_one_ptid)
      && !ptid_equal (inferior_ptid, wait_ptid))
    {
      struct regcache *regcache = get_thread_regcache (wait_ptid);

      if (breakpoint_here_p (regcache_read_pc (regcache)))
        {
          /* If stepping, remember current thread to switch back to.  */
          if (step)
            deferred_step_ptid = inferior_ptid;

          /* Switch back to WAIT_PID thread.  */
          switch_to_thread (wait_ptid);

          /* We return 1 to indicate that there is a breakpoint here,
             so we need to step over it before continuing to avoid
             hitting it straight away.  */
          return 1;
        }
    }

  return 0;
}

/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or 0 for none,
   or -1 to act according to how it stopped.
   STEP is nonzero if we should trap after one instruction;
   -1 means return after that and print nothing.
   You should probably set various step_... variables
   before calling here, if you are stepping.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
{
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct thread_info *tp;
  CORE_ADDR pc = regcache_read_pc (regcache);
  int oneproc = 0;

  if (step > 0)
    step_start_function = find_pc_function (pc);
  if (step < 0)
    stop_after_trap = 1;

  if (addr == (CORE_ADDR) -1)
    {
      if (pc == stop_pc && breakpoint_here_p (pc)
          && execution_direction != EXEC_REVERSE)
        /* There is a breakpoint at the address we will resume at,
           step one instruction before inserting breakpoints so that
           we do not stop right away (and report a second hit at this
           breakpoint).

           Note, we don't do this in reverse, because we won't
           actually be executing the breakpoint insn anyway.
           We'll be (un-)executing the previous instruction.  */

        oneproc = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
               && gdbarch_single_step_through_delay (gdbarch,
                                                     get_current_frame ()))
        /* We stepped onto an instruction that needs to be stepped
           again before re-inserting the breakpoint, do so.  */
        oneproc = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
                        "infrun: proceed (addr=0x%s, signal=%d, step=%d)\n",
                        paddr_nz (addr), siggnal, step);

  if (non_stop)
    /* In non-stop, each thread is handled individually.  The context
       must already be set to the right thread here.  */
    ;
  else
    {
      /* In a multi-threaded task we may select another thread and
         then continue or step.

         But if the old thread was stopped at a breakpoint, it will
         immediately cause another breakpoint stop without any
         execution (i.e. it will report a breakpoint hit incorrectly).
         So we must step over it first.

         prepare_to_proceed checks the current thread against the
         thread that reported the most recent event.  If a step-over
         is required it returns TRUE and sets the current thread to
         the old thread.  */
      if (prepare_to_proceed (step))
        oneproc = 1;
    }

  /* prepare_to_proceed may change the current thread.  */
  tp = inferior_thread ();

  if (oneproc)
    {
      tp->trap_expected = 1;
      /* If displaced stepping is enabled, we can step over the
         breakpoint without hitting it, so leave all breakpoints
         inserted.  Otherwise we need to disable all breakpoints, step
         one instruction, and then re-add them when that step is
         finished.  */
      if (!use_displaced_stepping (gdbarch))
        remove_breakpoints ();
    }

  /* We can insert breakpoints if we're not trying to step over one,
     or if we are stepping over one but we're using displaced stepping
     to do so.  */
  if (! tp->trap_expected || use_displaced_stepping (gdbarch))
    insert_breakpoints ();

  if (!non_stop)
    {
      /* Pass the last stop signal to the thread we're resuming,
         irrespective of whether the current thread is the thread that
         got the last event or not.  This was historically GDB's
         behaviour before keeping a stop_signal per thread.  */

      struct thread_info *last_thread;
      ptid_t last_ptid;
      struct target_waitstatus last_status;

      get_last_target_status (&last_ptid, &last_status);
      if (!ptid_equal (inferior_ptid, last_ptid)
          && !ptid_equal (last_ptid, null_ptid)
          && !ptid_equal (last_ptid, minus_one_ptid))
        {
          last_thread = find_thread_pid (last_ptid);
1403 | if (last_thread) | |
1404 | { | |
1405 | tp->stop_signal = last_thread->stop_signal; | |
1406 | last_thread->stop_signal = TARGET_SIGNAL_0; | |
1407 | } | |
1408 | } | |
1409 | } | |
1410 | ||
c906108c | 1411 | if (siggnal != TARGET_SIGNAL_DEFAULT) |
2020b7ab | 1412 | tp->stop_signal = siggnal; |
c906108c SS |
1413 | /* If this signal should not be seen by the program, | |
1414 | give it zero. Used for debugging signals. */ | |
2020b7ab PA |
1415 | else if (!signal_program[tp->stop_signal]) |
1416 | tp->stop_signal = TARGET_SIGNAL_0; | |
c906108c SS |
1417 | |
1418 | annotate_starting (); | |
1419 | ||
1420 | /* Make sure that output from GDB appears before output from the | |
1421 | inferior. */ | |
1422 | gdb_flush (gdb_stdout); | |
1423 | ||
e4846b08 JJ |
1424 | /* Refresh prev_pc value just prior to resuming. This used to be |
1425 | done in stop_stepping, however, setting prev_pc there did not handle | |
1426 | scenarios such as inferior function calls or returning from | |
1427 | a function via the return command. In those cases, the prev_pc | |
1428 | value was not set properly for subsequent commands. The prev_pc value | |
1429 | is used to initialize the starting line number in the ecs. With an | |
1430 | invalid value, the gdb next command ends up stopping at the position | |
1431 | represented by the next line table entry past our start position. | |
1432 | On platforms that generate one line table entry per line, this | |
1433 | is not a problem. However, on the ia64, the compiler generates | |
1434 | extraneous line table entries that do not increase the line number. | |
1435 | When we issue the gdb next command on the ia64 after an inferior call | |
1436 | or a return command, we often end up a few instructions forward, still | |
1437 | within the original line we started in. | |
1438 | ||
1439 | An attempt was made to have init_execution_control_state () refresh | |
1440 | the prev_pc value before calculating the line number. This approach | |
1441 | did not work because on platforms that use ptrace, the pc register | |
1442 | cannot be read unless the inferior is stopped. At that point, we | |
515630c5 | 1443 | are not guaranteed the inferior is stopped and so the regcache_read_pc () |
e4846b08 | 1444 | call can fail. Setting the prev_pc value here ensures the value is |
8fb3e588 | 1445 | updated correctly when the inferior is stopped. */ |
4e1c45ea | 1446 | tp->prev_pc = regcache_read_pc (get_current_regcache ()); |
e4846b08 | 1447 | |
59f0d5d9 | 1448 | /* Fill in with reasonable starting values. */ |
4e1c45ea | 1449 | init_thread_stepping_state (tp); |
59f0d5d9 | 1450 | |
59f0d5d9 PA |
1451 | /* Reset to normal state. */ |
1452 | init_infwait_state (); | |
1453 | ||
c906108c | 1454 | /* Resume inferior. */ |
2020b7ab | 1455 | resume (oneproc || step || bpstat_should_step (), tp->stop_signal); |
c906108c SS |
1456 | |
1457 | /* Wait for it to stop (if not standalone) | |
1458 | and in any case decode why it stopped, and act accordingly. */ | |
43ff13b4 JM |
1459 | /* Do this only if we are not using the event loop, or if the target |
1460 | does not support asynchronous execution. */ | |
362646f5 | 1461 | if (!target_can_async_p ()) |
43ff13b4 | 1462 | { |
ae123ec6 | 1463 | wait_for_inferior (0); |
43ff13b4 JM |
1464 | normal_stop (); |
1465 | } | |
c906108c | 1466 | } |
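
/* Editor's note: a standalone sketch, not infrun.c code, of the core check
   proceed makes above when resuming where we stopped (addr == -1): single
   step once before reinserting breakpoints ("oneproc") if the PC sits on a
   breakpoint and we are executing forward, since resuming in place would
   report a second, spurious hit.  In reverse execution the breakpoint
   instruction is not about to be executed, so no extra step is taken.  The
   names below are illustrative stand-ins for the real GDB calls.  */

#include <stdbool.h>
#include <stdio.h>

enum exec_direction_sketch { EXEC_FORWARD_SKETCH, EXEC_REVERSE_SKETCH };

static bool
must_single_step_first (unsigned long pc, unsigned long stop_pc,
                        bool breakpoint_here,
                        enum exec_direction_sketch dir)
{
  /* Mirrors: pc == stop_pc && breakpoint_here_p (pc)
              && execution_direction != EXEC_REVERSE  */
  return pc == stop_pc && breakpoint_here && dir != EXEC_REVERSE_SKETCH;
}

int
main (void)
{
  printf ("forward, on breakpoint: %d\n",
          must_single_step_first (0x4000, 0x4000, true,
                                  EXEC_FORWARD_SKETCH));
  printf ("reverse, on breakpoint: %d\n",
          must_single_step_first (0x4000, 0x4000, true,
                                  EXEC_REVERSE_SKETCH));
  return 0;
}
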
c906108c SS |
1467 | \f |
1468 | ||
1469 | /* Start remote-debugging of a machine over a serial link. */ | |
96baa820 | 1470 | |
c906108c | 1471 | void |
8621d6a9 | 1472 | start_remote (int from_tty) |
c906108c | 1473 | { |
d6b48e9c | 1474 | struct inferior *inferior; |
c906108c | 1475 | init_wait_for_inferior (); |
d6b48e9c PA |
1476 | |
1477 | inferior = current_inferior (); | |
1478 | inferior->stop_soon = STOP_QUIETLY_REMOTE; | |
43ff13b4 | 1479 | |
6426a772 JM |
1480 | /* Always go on waiting for the target, regardless of the mode. */ |
1481 | /* FIXME: cagney/1999-09-23: At present it isn't possible to | |
7e73cedf | 1482 | indicate to wait_for_inferior that a target should time out if | |
6426a772 JM |
1483 | nothing is returned (instead of just blocking). Because of this, |
1484 | targets expecting an immediate response need to, internally, set | |
1485 | things up so that the target_wait() is forced to eventually | |
1486 | timeout. */ | |
1487 | /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to | |
1488 | differentiate to its caller what the state of the target is after | |
1489 | the initial open has been performed. Here we're assuming that | |
1490 | the target has stopped. It should be possible to eventually have | |
1491 | target_open() return to the caller an indication that the target | |
1492 | is currently running and GDB state should be set to the same as | |
1493 | for an async run. */ | |
ae123ec6 | 1494 | wait_for_inferior (0); |
8621d6a9 DJ |
1495 | |
1496 | /* Now that the inferior has stopped, do any bookkeeping like | |
1497 | loading shared libraries. We want to do this before normal_stop, | |
1498 | so that the displayed frame is up to date. */ | |
1499 | post_create_inferior (¤t_target, from_tty); | |
1500 | ||
6426a772 | 1501 | normal_stop (); |
c906108c SS |
1502 | } |
1503 | ||
1504 | /* Initialize static vars when a new inferior begins. */ | |
1505 | ||
1506 | void | |
96baa820 | 1507 | init_wait_for_inferior (void) |
c906108c SS |
1508 | { |
1509 | /* These are meaningless until the first time through wait_for_inferior. */ | |
c906108c | 1510 | |
c906108c SS |
1511 | breakpoint_init_inferior (inf_starting); |
1512 | ||
c906108c SS |
1513 | /* The first resume is not following a fork/vfork/exec. */ |
1514 | pending_follow.kind = TARGET_WAITKIND_SPURIOUS; /* I.e., none. */ | |
c906108c | 1515 | |
c906108c | 1516 | clear_proceed_status (); |
9f976b41 DJ |
1517 | |
1518 | stepping_past_singlestep_breakpoint = 0; | |
ca67fcb8 | 1519 | deferred_step_ptid = null_ptid; |
ca005067 DJ |
1520 | |
1521 | target_last_wait_ptid = minus_one_ptid; | |
237fc4c9 | 1522 | |
0d1e5fa7 PA |
1523 | previous_inferior_ptid = null_ptid; |
1524 | init_infwait_state (); | |
1525 | ||
237fc4c9 | 1526 | displaced_step_clear (); |
c906108c | 1527 | } |
237fc4c9 | 1528 | |
c906108c | 1529 | \f |
b83266a0 SS |
1530 | /* This enum encodes possible reasons for doing a target_wait, so that |
1531 | wfi can call target_wait in one place. (Ultimately the call will be | |
1532 | moved out of the infinite loop entirely.) */ | |
1533 | ||
c5aa993b JM |
1534 | enum infwait_states |
1535 | { | |
cd0fc7c3 SS |
1536 | infwait_normal_state, |
1537 | infwait_thread_hop_state, | |
d983da9c | 1538 | infwait_step_watch_state, |
cd0fc7c3 | 1539 | infwait_nonstep_watch_state |
b83266a0 SS |
1540 | }; |
1541 | ||
11cf8741 JM |
1542 | /* Why did the inferior stop? Used to print the appropriate messages |
1543 | to the interface from within handle_inferior_event(). */ | |
1544 | enum inferior_stop_reason | |
1545 | { | |
11cf8741 JM |
1546 | /* Step, next, nexti, stepi finished. */ |
1547 | END_STEPPING_RANGE, | |
11cf8741 JM |
1548 | /* Inferior terminated by signal. */ |
1549 | SIGNAL_EXITED, | |
1550 | /* Inferior exited. */ | |
1551 | EXITED, | |
1552 | /* Inferior received signal, and user asked to be notified. */ | |
b2175913 MS |
1553 | SIGNAL_RECEIVED, |
1554 | /* Reverse execution -- target ran out of history info. */ | |
1555 | NO_HISTORY | |
11cf8741 JM |
1556 | }; |
1557 | ||
0d1e5fa7 PA |
1558 | /* The PTID we'll do a target_wait on. */ | |
1559 | ptid_t waiton_ptid; | |
1560 | ||
1561 | /* Current inferior wait state. */ | |
1562 | enum infwait_states infwait_state; | |
cd0fc7c3 | 1563 | |
0d1e5fa7 PA |
1564 | /* Data to be passed around while handling an event. This data is |
1565 | discarded between events. */ | |
c5aa993b | 1566 | struct execution_control_state |
488f131b | 1567 | { |
0d1e5fa7 | 1568 | ptid_t ptid; |
4e1c45ea PA |
1569 | /* The thread that got the event, if this was a thread event; NULL |
1570 | otherwise. */ | |
1571 | struct thread_info *event_thread; | |
1572 | ||
488f131b | 1573 | struct target_waitstatus ws; |
488f131b JB |
1574 | int random_signal; |
1575 | CORE_ADDR stop_func_start; | |
1576 | CORE_ADDR stop_func_end; | |
1577 | char *stop_func_name; | |
488f131b | 1578 | int new_thread_event; |
488f131b JB |
1579 | int wait_some_more; |
1580 | }; | |
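
/* Editor's note: a standalone sketch, not infrun.c code, of the idiom used
   throughout this file with struct execution_control_state: the state lives
   on the caller's stack, is zeroed before each event, filled with the bare
   minimum (ptid, wait status), and then handed to the event handler, which
   sets wait_some_more when another target_wait is needed.  The struct and
   handler below are simplified stand-ins.  */

#include <stdio.h>
#include <string.h>

struct sketch_ecs
{
  int ptid_pid;                 /* stands in for ecs->ptid */
  int ws_kind;                  /* stands in for ecs->ws.kind */
  int wait_some_more;
};

static void
sketch_handle_event (struct sketch_ecs *ecs)
{
  /* A real handler would decode ecs->ws and decide whether to keep
     going; here we simply stop.  */
  ecs->wait_some_more = 0;
}

int
main (void)
{
  struct sketch_ecs ecss;
  struct sketch_ecs *ecs = &ecss;

  memset (ecs, 0, sizeof (*ecs));   /* same idiom as the code above */
  ecs->ptid_pid = 1234;
  ecs->ws_kind = 1;                 /* e.g. "stopped" */

  sketch_handle_event (ecs);
  printf ("wait_some_more = %d\n", ecs->wait_some_more);
  return 0;
}
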
1581 | ||
1582 | void init_execution_control_state (struct execution_control_state *ecs); | |
1583 | ||
1584 | void handle_inferior_event (struct execution_control_state *ecs); | |
cd0fc7c3 | 1585 | |
b2175913 MS |
1586 | static void handle_step_into_function (struct execution_control_state *ecs); |
1587 | static void handle_step_into_function_backward (struct execution_control_state *ecs); | |
44cbf7b5 | 1588 | static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame); |
14e60db5 | 1589 | static void insert_step_resume_breakpoint_at_caller (struct frame_info *); |
44cbf7b5 AC |
1590 | static void insert_step_resume_breakpoint_at_sal (struct symtab_and_line sr_sal, |
1591 | struct frame_id sr_id); | |
611c83ae PA |
1592 | static void insert_longjmp_resume_breakpoint (CORE_ADDR); |
1593 | ||
104c1213 JM |
1594 | static void stop_stepping (struct execution_control_state *ecs); |
1595 | static void prepare_to_wait (struct execution_control_state *ecs); | |
d4f3574e | 1596 | static void keep_going (struct execution_control_state *ecs); |
488f131b JB |
1597 | static void print_stop_reason (enum inferior_stop_reason stop_reason, |
1598 | int stop_info); | |
104c1213 | 1599 | |
252fbfc8 PA |
1600 | /* Callback for iterate over threads. If the thread is stopped, but |
1601 | the user/frontend doesn't know about that yet, go through | |
1602 | normal_stop, as if the thread had just stopped now. ARG points at | |
1603 | a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If | |
1604 | ptid_is_pid(PTID) is true, applies to all threads of the process | |
1605 | pointed at by PTID. Otherwise, apply only to the thread pointed by | |
1606 | PTID. */ | |
1607 | ||
1608 | static int | |
1609 | infrun_thread_stop_requested_callback (struct thread_info *info, void *arg) | |
1610 | { | |
1611 | ptid_t ptid = * (ptid_t *) arg; | |
1612 | ||
1613 | if ((ptid_equal (info->ptid, ptid) | |
1614 | || ptid_equal (minus_one_ptid, ptid) | |
1615 | || (ptid_is_pid (ptid) | |
1616 | && ptid_get_pid (ptid) == ptid_get_pid (info->ptid))) | |
1617 | && is_running (info->ptid) | |
1618 | && !is_executing (info->ptid)) | |
1619 | { | |
1620 | struct cleanup *old_chain; | |
1621 | struct execution_control_state ecss; | |
1622 | struct execution_control_state *ecs = &ecss; | |
1623 | ||
1624 | memset (ecs, 0, sizeof (*ecs)); | |
1625 | ||
1626 | old_chain = make_cleanup_restore_current_thread (); | |
1627 | ||
1628 | switch_to_thread (info->ptid); | |
1629 | ||
1630 | /* Go through handle_inferior_event/normal_stop, so we always | |
1631 | have consistent output as if the stop event had been | |
1632 | reported. */ | |
1633 | ecs->ptid = info->ptid; | |
1634 | ecs->event_thread = find_thread_pid (info->ptid); | |
1635 | ecs->ws.kind = TARGET_WAITKIND_STOPPED; | |
1636 | ecs->ws.value.sig = TARGET_SIGNAL_0; | |
1637 | ||
1638 | handle_inferior_event (ecs); | |
1639 | ||
1640 | if (!ecs->wait_some_more) | |
1641 | { | |
1642 | struct thread_info *tp; | |
1643 | ||
1644 | normal_stop (); | |
1645 | ||
1646 | /* Finish off the continuations. The continuations | |
1647 | themselves are responsible for realising the thread | |
1648 | didn't finish what it was supposed to do. */ | |
1649 | tp = inferior_thread (); | |
1650 | do_all_intermediate_continuations_thread (tp); | |
1651 | do_all_continuations_thread (tp); | |
1652 | } | |
1653 | ||
1654 | do_cleanups (old_chain); | |
1655 | } | |
1656 | ||
1657 | return 0; | |
1658 | } | |
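
/* Editor's note: a standalone sketch, not infrun.c code, of the PTID
   matching rule the callback above implements: a filter PTID selects a
   single thread, or every thread of one process (pid-only PTID), or every
   thread (minus-one PTID).  The two-field ptid representation here is a
   simplified stand-in for GDB's ptid_t.  */

#include <stdbool.h>
#include <stdio.h>

struct sketch_ptid { int pid; long tid; };

static const struct sketch_ptid sketch_minus_one = { -1, 0 };

static bool
sketch_ptid_match (struct sketch_ptid thread, struct sketch_ptid filter)
{
  if (filter.pid == sketch_minus_one.pid && filter.tid == 0)
    return true;                        /* wildcard: all threads */
  if (filter.tid == 0)
    return thread.pid == filter.pid;    /* pid-only: whole process */
  return thread.pid == filter.pid && thread.tid == filter.tid;
}

int
main (void)
{
  struct sketch_ptid t = { 100, 7 };
  struct sketch_ptid whole_process = { 100, 0 };

  printf ("%d %d %d\n",
          sketch_ptid_match (t, t),
          sketch_ptid_match (t, whole_process),
          sketch_ptid_match (t, sketch_minus_one));
  return 0;
}
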
1659 | ||
1660 | /* This function is attached as a "thread_stop_requested" observer. | |
1661 | Cleanup local state that assumed the PTID was to be resumed, and | |
1662 | report the stop to the frontend. */ | |
1663 | ||
2c0b251b | 1664 | static void |
252fbfc8 PA |
1665 | infrun_thread_stop_requested (ptid_t ptid) |
1666 | { | |
1667 | struct displaced_step_request *it, *next, *prev = NULL; | |
1668 | ||
1669 | /* PTID was requested to stop. Remove it from the displaced | |
1670 | stepping queue, so we don't try to resume it automatically. */ | |
1671 | for (it = displaced_step_request_queue; it; it = next) | |
1672 | { | |
1673 | next = it->next; | |
1674 | ||
1675 | if (ptid_equal (it->ptid, ptid) | |
1676 | || ptid_equal (minus_one_ptid, ptid) | |
1677 | || (ptid_is_pid (ptid) | |
1678 | && ptid_get_pid (ptid) == ptid_get_pid (it->ptid))) | |
1679 | { | |
1680 | if (displaced_step_request_queue == it) | |
1681 | displaced_step_request_queue = it->next; | |
1682 | else | |
1683 | prev->next = it->next; | |
1684 | ||
1685 | xfree (it); | |
1686 | } | |
1687 | else | |
1688 | prev = it; | |
1689 | } | |
1690 | ||
1691 | iterate_over_threads (infrun_thread_stop_requested_callback, &ptid); | |
1692 | } | |
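
/* Editor's note: a standalone sketch, not infrun.c code, of the list
   surgery infrun_thread_stop_requested performs above: walk a singly
   linked queue, unlink and free every matching node, and keep a "prev"
   pointer so that removing the head and removing an interior node are
   both handled.  The node contents here are illustrative.  */

#include <stdio.h>
#include <stdlib.h>

struct req { int id; struct req *next; };

static struct req *queue;

static void
push (int id)
{
  struct req *r = malloc (sizeof *r);
  r->id = id;
  r->next = queue;
  queue = r;
}

static void
remove_matching (int id)
{
  struct req *it, *next, *prev = NULL;

  for (it = queue; it != NULL; it = next)
    {
      next = it->next;
      if (it->id == id)
        {
          if (queue == it)
            queue = it->next;       /* unlink the head */
          else
            prev->next = it->next;  /* unlink an interior node */
          free (it);
        }
      else
        prev = it;
    }
}

int
main (void)
{
  push (1); push (2); push (1);
  remove_matching (1);
  for (struct req *it = queue; it != NULL; it = it->next)
    printf ("%d\n", it->id);        /* prints only 2 */
  return 0;
}
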
1693 | ||
a07daef3 PA |
1694 | void nullify_last_target_wait_ptid (void); |
1695 | ||
1696 | static void | |
1697 | infrun_thread_thread_exit (struct thread_info *tp, int silent) | |
1698 | { | |
1699 | if (ptid_equal (target_last_wait_ptid, tp->ptid)) | |
1700 | nullify_last_target_wait_ptid (); | |
1701 | } | |
1702 | ||
4e1c45ea PA |
1703 | /* Callback for iterate_over_threads. */ |
1704 | ||
1705 | static int | |
1706 | delete_step_resume_breakpoint_callback (struct thread_info *info, void *data) | |
1707 | { | |
1708 | if (is_exited (info->ptid)) | |
1709 | return 0; | |
1710 | ||
1711 | delete_step_resume_breakpoint (info); | |
1712 | return 0; | |
1713 | } | |
1714 | ||
1715 | /* In all-stop, delete the step resume breakpoint of any thread that | |
1716 | had one. In non-stop, delete the step resume breakpoint of the | |
1717 | thread that just stopped. */ | |
1718 | ||
1719 | static void | |
1720 | delete_step_thread_step_resume_breakpoint (void) | |
1721 | { | |
1722 | if (!target_has_execution | |
1723 | || ptid_equal (inferior_ptid, null_ptid)) | |
1724 | /* If the inferior has exited, we have already deleted the step | |
1725 | resume breakpoints out of GDB's lists. */ | |
1726 | return; | |
1727 | ||
1728 | if (non_stop) | |
1729 | { | |
1730 | /* If in non-stop mode, only delete the step-resume or | |
1731 | longjmp-resume breakpoint of the thread that just stopped | |
1732 | stepping. */ | |
1733 | struct thread_info *tp = inferior_thread (); | |
1734 | delete_step_resume_breakpoint (tp); | |
1735 | } | |
1736 | else | |
1737 | /* In all-stop mode, delete all step-resume and longjmp-resume | |
1738 | breakpoints of any thread that had them. */ | |
1739 | iterate_over_threads (delete_step_resume_breakpoint_callback, NULL); | |
1740 | } | |
1741 | ||
1742 | /* A cleanup wrapper. */ | |
1743 | ||
1744 | static void | |
1745 | delete_step_thread_step_resume_breakpoint_cleanup (void *arg) | |
1746 | { | |
1747 | delete_step_thread_step_resume_breakpoint (); | |
1748 | } | |
1749 | ||
223698f8 DE |
1750 | /* Pretty print the results of target_wait, for debugging purposes. */ |
1751 | ||
1752 | static void | |
1753 | print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid, | |
1754 | const struct target_waitstatus *ws) | |
1755 | { | |
1756 | char *status_string = target_waitstatus_to_string (ws); | |
1757 | struct ui_file *tmp_stream = mem_fileopen (); | |
1758 | char *text; | |
1759 | long len; | |
1760 | ||
1761 | /* The text is split over several lines because it was getting too long. | |
1762 | Call fprintf_unfiltered (gdb_stdlog) once so that the text is still | |
1763 | output as a unit; we want only one timestamp printed if debug_timestamp | |
1764 | is set. */ | |
1765 | ||
1766 | fprintf_unfiltered (tmp_stream, | |
1767 | "infrun: target_wait (%d", PIDGET (waiton_ptid)); | |
1768 | if (PIDGET (waiton_ptid) != -1) | |
1769 | fprintf_unfiltered (tmp_stream, | |
1770 | " [%s]", target_pid_to_str (waiton_ptid)); | |
1771 | fprintf_unfiltered (tmp_stream, ", status) =\n"); | |
1772 | fprintf_unfiltered (tmp_stream, | |
1773 | "infrun: %d [%s],\n", | |
1774 | PIDGET (result_ptid), target_pid_to_str (result_ptid)); | |
1775 | fprintf_unfiltered (tmp_stream, | |
1776 | "infrun: %s\n", | |
1777 | status_string); | |
1778 | ||
1779 | text = ui_file_xstrdup (tmp_stream, &len); | |
1780 | ||
1781 | /* This uses %s in part to handle %'s in the text, but also to avoid | |
1782 | a gcc error: the format attribute requires a string literal. */ | |
1783 | fprintf_unfiltered (gdb_stdlog, "%s", text); | |
1784 | ||
1785 | xfree (status_string); | |
1786 | xfree (text); | |
1787 | ui_file_delete (tmp_stream); | |
1788 | } | |
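
/* Editor's note: a standalone sketch, not infrun.c code, of the buffering
   trick print_target_wait_results uses above: format a multi-line trace
   message into one in-memory buffer and emit it with a single write, so
   that a logger which prefixes each write with a timestamp stamps the
   whole block only once.  snprintf into a fixed buffer stands in for
   mem_fileopen/ui_file.  */

#include <stdio.h>

static void
log_once (const char *text)
{
  /* Imagine this prepends one timestamp per call.  */
  fprintf (stderr, "[ts] %s", text);
}

int
main (void)
{
  char buf[256];
  int n = 0;

  n += snprintf (buf + n, sizeof buf - n, "target_wait (%d", -1);
  n += snprintf (buf + n, sizeof buf - n, ", status) =\n");
  n += snprintf (buf + n, sizeof buf - n, "  %d [%s],\n",
                 1234, "process 1234");
  n += snprintf (buf + n, sizeof buf - n, "  %s\n", "stopped, SIGTRAP");

  log_once (buf);               /* one call, one timestamp */
  return 0;
}
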
1789 | ||
cd0fc7c3 | 1790 | /* Wait for control to return from inferior to debugger. |
ae123ec6 JB |
1791 | |
1792 | If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals | |
1793 | as if they were SIGTRAP signals. This can be useful during | |
1794 | the startup sequence on some targets such as HP/UX, where | |
1795 | we receive an EXEC event instead of the expected SIGTRAP. | |
1796 | ||
cd0fc7c3 SS |
1797 | If inferior gets a signal, we may decide to start it up again |
1798 | instead of returning. That is why there is a loop in this function. | |
1799 | When this function actually returns it means the inferior | |
1800 | should be left stopped and GDB should read more commands. */ | |
1801 | ||
1802 | void | |
ae123ec6 | 1803 | wait_for_inferior (int treat_exec_as_sigtrap) |
cd0fc7c3 SS |
1804 | { |
1805 | struct cleanup *old_cleanups; | |
0d1e5fa7 | 1806 | struct execution_control_state ecss; |
cd0fc7c3 | 1807 | struct execution_control_state *ecs; |
c906108c | 1808 | |
527159b7 | 1809 | if (debug_infrun) |
ae123ec6 JB |
1810 | fprintf_unfiltered |
1811 | (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n", | |
1812 | treat_exec_as_sigtrap); | |
527159b7 | 1813 | |
4e1c45ea PA |
1814 | old_cleanups = |
1815 | make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL); | |
cd0fc7c3 | 1816 | |
cd0fc7c3 | 1817 | ecs = &ecss; |
0d1e5fa7 PA |
1818 | memset (ecs, 0, sizeof (*ecs)); |
1819 | ||
cd0fc7c3 SS |
1820 | overlay_cache_invalid = 1; |
1821 | ||
e0bb1c1c PA |
1822 | /* We'll update this if & when we switch to a new thread. */ |
1823 | previous_inferior_ptid = inferior_ptid; | |
1824 | ||
cd0fc7c3 SS |
1825 | /* We have to invalidate the registers BEFORE calling target_wait |
1826 | because they can be loaded from the target while in target_wait. | |
1827 | This makes remote debugging a bit more efficient for those | |
1828 | targets that provide critical registers as part of their normal | |
1829 | status mechanism. */ | |
1830 | ||
1831 | registers_changed (); | |
b83266a0 | 1832 | |
c906108c SS |
1833 | while (1) |
1834 | { | |
29f49a6a PA |
1835 | struct cleanup *old_chain; |
1836 | ||
9a4105ab | 1837 | if (deprecated_target_wait_hook) |
0d1e5fa7 | 1838 | ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws); |
cd0fc7c3 | 1839 | else |
0d1e5fa7 | 1840 | ecs->ptid = target_wait (waiton_ptid, &ecs->ws); |
c906108c | 1841 | |
f00150c9 | 1842 | if (debug_infrun) |
223698f8 | 1843 | print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws); |
f00150c9 | 1844 | |
ae123ec6 JB |
1845 | if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD) |
1846 | { | |
1847 | xfree (ecs->ws.value.execd_pathname); | |
1848 | ecs->ws.kind = TARGET_WAITKIND_STOPPED; | |
1849 | ecs->ws.value.sig = TARGET_SIGNAL_TRAP; | |
1850 | } | |
1851 | ||
29f49a6a PA |
1852 | /* If an error happens while handling the event, propagate GDB's |
1853 | knowledge of the executing state to the frontend/user running | |
1854 | state. */ | |
1855 | old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid); | |
1856 | ||
cd0fc7c3 SS |
1857 | /* Now figure out what to do with the result. */ | |
1858 | handle_inferior_event (ecs); | |
c906108c | 1859 | |
29f49a6a PA |
1860 | /* No error, don't finish the state yet. */ |
1861 | discard_cleanups (old_chain); | |
1862 | ||
cd0fc7c3 SS |
1863 | if (!ecs->wait_some_more) |
1864 | break; | |
1865 | } | |
4e1c45ea | 1866 | |
cd0fc7c3 SS |
1867 | do_cleanups (old_cleanups); |
1868 | } | |
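
/* Editor's note: a standalone sketch, not infrun.c code, of the driving
   loop in wait_for_inferior above: wait for an event, hand it to the
   handler, and keep looping for as long as the handler asks for more
   events.  The canned event array below stands in for target_wait.  */

#include <stdio.h>

struct sketch_event { int kind; };
struct sketch_state { int wait_some_more; };

static const struct sketch_event canned[] = { { 1 }, { 1 }, { 0 } };
static int canned_idx;

static struct sketch_event
sketch_wait (void)
{
  return canned[canned_idx++];      /* stands in for target_wait */
}

static void
sketch_handle (struct sketch_event ev, struct sketch_state *st)
{
  /* kind 1: intermediate event, keep waiting; kind 0: final stop.  */
  st->wait_some_more = (ev.kind != 0);
}

int
main (void)
{
  struct sketch_state st = { 0 };

  while (1)
    {
      struct sketch_event ev = sketch_wait ();
      sketch_handle (ev, &st);
      if (!st.wait_some_more)
        break;
    }
  printf ("inferior stopped after %d events\n", canned_idx);
  return 0;
}
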
c906108c | 1869 | |
43ff13b4 JM |
1870 | /* Asynchronous version of wait_for_inferior. It is called by the |
1871 | event loop whenever a change of state is detected on the file | |
1872 | descriptor corresponding to the target. It can be called more than | |
1873 | once to complete a single execution command. In such cases we need | |
a474d7c2 PA |
1874 | to keep the state in a global variable ECSS. If it is the last time |
1875 | that this function is called for a single execution command, then | |
1876 | report to the user that the inferior has stopped, and do the | |
1877 | necessary cleanups. */ | |
43ff13b4 JM |
1878 | |
1879 | void | |
fba45db2 | 1880 | fetch_inferior_event (void *client_data) |
43ff13b4 | 1881 | { |
0d1e5fa7 | 1882 | struct execution_control_state ecss; |
a474d7c2 | 1883 | struct execution_control_state *ecs = &ecss; |
4f8d22e3 | 1884 | struct cleanup *old_chain = make_cleanup (null_cleanup, NULL); |
29f49a6a | 1885 | struct cleanup *ts_old_chain; |
4f8d22e3 | 1886 | int was_sync = sync_execution; |
43ff13b4 | 1887 | |
0d1e5fa7 PA |
1888 | memset (ecs, 0, sizeof (*ecs)); |
1889 | ||
59f0d5d9 | 1890 | overlay_cache_invalid = 1; |
43ff13b4 | 1891 | |
e0bb1c1c PA |
1892 | /* We can only rely on wait_some_more being correct before handling | |
1893 | the event in all-stop, but previous_inferior_ptid isn't used in | |
1894 | non-stop. */ | |
1895 | if (!ecs->wait_some_more) | |
1896 | /* We'll update this if & when we switch to a new thread. */ | |
1897 | previous_inferior_ptid = inferior_ptid; | |
1898 | ||
4f8d22e3 PA |
1899 | if (non_stop) |
1900 | /* In non-stop mode, the user/frontend should not notice a thread | |
1901 | switch due to internal events. Make sure we reverse to the | |
1902 | user selected thread and frame after handling the event and | |
1903 | running any breakpoint commands. */ | |
1904 | make_cleanup_restore_current_thread (); | |
1905 | ||
59f0d5d9 PA |
1906 | /* We have to invalidate the registers BEFORE calling target_wait |
1907 | because they can be loaded from the target while in target_wait. | |
1908 | This makes remote debugging a bit more efficient for those | |
1909 | targets that provide critical registers as part of their normal | |
1910 | status mechanism. */ | |
43ff13b4 | 1911 | |
59f0d5d9 | 1912 | registers_changed (); |
43ff13b4 | 1913 | |
9a4105ab | 1914 | if (deprecated_target_wait_hook) |
a474d7c2 | 1915 | ecs->ptid = |
0d1e5fa7 | 1916 | deprecated_target_wait_hook (waiton_ptid, &ecs->ws); |
43ff13b4 | 1917 | else |
0d1e5fa7 | 1918 | ecs->ptid = target_wait (waiton_ptid, &ecs->ws); |
43ff13b4 | 1919 | |
f00150c9 | 1920 | if (debug_infrun) |
223698f8 | 1921 | print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws); |
f00150c9 | 1922 | |
94cc34af PA |
1923 | if (non_stop |
1924 | && ecs->ws.kind != TARGET_WAITKIND_IGNORE | |
1925 | && ecs->ws.kind != TARGET_WAITKIND_EXITED | |
1926 | && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED) | |
1927 | /* In non-stop mode, each thread is handled individually. Switch | |
1928 | early, so the global state is set correctly for this | |
1929 | thread. */ | |
1930 | context_switch (ecs->ptid); | |
1931 | ||
29f49a6a PA |
1932 | /* If an error happens while handling the event, propagate GDB's |
1933 | knowledge of the executing state to the frontend/user running | |
1934 | state. */ | |
1935 | if (!non_stop) | |
1936 | ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid); | |
1937 | else | |
1938 | ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid); | |
1939 | ||
43ff13b4 | 1940 | /* Now figure out what to do with the result. */ | |
a474d7c2 | 1941 | handle_inferior_event (ecs); |
43ff13b4 | 1942 | |
a474d7c2 | 1943 | if (!ecs->wait_some_more) |
43ff13b4 | 1944 | { |
d6b48e9c PA |
1945 | struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid)); |
1946 | ||
4e1c45ea | 1947 | delete_step_thread_step_resume_breakpoint (); |
f107f563 | 1948 | |
d6b48e9c PA |
1949 | /* We may not find an inferior if this was a process exit. */ |
1950 | if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY) | |
83c265ab PA |
1951 | normal_stop (); |
1952 | ||
af679fd0 PA |
1953 | if (target_has_execution |
1954 | && ecs->ws.kind != TARGET_WAITKIND_EXITED | |
1955 | && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED | |
1956 | && ecs->event_thread->step_multi | |
414c69f7 | 1957 | && ecs->event_thread->stop_step) |
c2d11a7d JM |
1958 | inferior_event_handler (INF_EXEC_CONTINUE, NULL); |
1959 | else | |
1960 | inferior_event_handler (INF_EXEC_COMPLETE, NULL); | |
43ff13b4 | 1961 | } |
4f8d22e3 | 1962 | |
29f49a6a PA |
1963 | /* No error, don't finish the thread states yet. */ |
1964 | discard_cleanups (ts_old_chain); | |
1965 | ||
4f8d22e3 PA |
1966 | /* Revert thread and frame. */ |
1967 | do_cleanups (old_chain); | |
1968 | ||
1969 | /* If the inferior was in sync execution mode, and now isn't, | |
1970 | restore the prompt. */ | |
1971 | if (was_sync && !sync_execution) | |
1972 | display_gdb_prompt (0); | |
43ff13b4 JM |
1973 | } |
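
/* Editor's note: a standalone sketch, not infrun.c code, of the cleanup
   discipline fetch_inferior_event follows above: register an "undo"
   action before doing risky work, run it on the error path, and
   explicitly discard it once the work has succeeded.  Real GDB uses
   make_cleanup/do_cleanups/discard_cleanups; the single-slot version
   below is only illustrative.  */

#include <stdbool.h>
#include <stdio.h>

typedef void (*cleanup_fn) (void);

static cleanup_fn pending_cleanup;

static void register_cleanup (cleanup_fn fn) { pending_cleanup = fn; }
static void discard_cleanup (void) { pending_cleanup = NULL; }

static void
run_cleanup (void)
{
  if (pending_cleanup != NULL)
    {
      pending_cleanup ();
      pending_cleanup = NULL;
    }
}

static void
mark_threads_stopped (void)
{
  printf ("marking threads as stopped for the frontend\n");
}

static void
handle_event (bool fail)
{
  register_cleanup (mark_threads_stopped);

  if (fail)
    {
      run_cleanup ();           /* error path: propagate stopped state */
      return;
    }

  discard_cleanup ();           /* success: handler finishes the state */
}

int
main (void)
{
  handle_event (true);
  handle_event (false);
  return 0;
}
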
1974 | ||
cd0fc7c3 SS |
1975 | /* Prepare an execution control state for looping through a |
1976 | wait_for_inferior-type loop. */ | |
1977 | ||
1978 | void | |
96baa820 | 1979 | init_execution_control_state (struct execution_control_state *ecs) |
cd0fc7c3 SS |
1980 | { |
1981 | ecs->random_signal = 0; | |
0d1e5fa7 PA |
1982 | } |
1983 | ||
1984 | /* Clear context switchable stepping state. */ | |
1985 | ||
1986 | void | |
4e1c45ea | 1987 | init_thread_stepping_state (struct thread_info *tss) |
0d1e5fa7 | 1988 | { |
2afb61aa PA |
1989 | struct symtab_and_line sal; |
1990 | ||
0d1e5fa7 PA |
1991 | tss->stepping_over_breakpoint = 0; |
1992 | tss->step_after_step_resume_breakpoint = 0; | |
1993 | tss->stepping_through_solib_after_catch = 0; | |
1994 | tss->stepping_through_solib_catchpoints = NULL; | |
2afb61aa | 1995 | |
4e1c45ea | 1996 | sal = find_pc_line (tss->prev_pc, 0); |
2afb61aa PA |
1997 | tss->current_line = sal.line; |
1998 | tss->current_symtab = sal.symtab; | |
cd0fc7c3 SS |
1999 | } |
2000 | ||
e02bc4cc | 2001 | /* Return the cached copy of the last pid/waitstatus returned by |
9a4105ab AC |
2002 | target_wait()/deprecated_target_wait_hook(). The data is actually |
2003 | cached by handle_inferior_event(), which gets called immediately | |
2004 | after target_wait()/deprecated_target_wait_hook(). */ | |
e02bc4cc DS |
2005 | |
2006 | void | |
488f131b | 2007 | get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status) |
e02bc4cc | 2008 | { |
39f77062 | 2009 | *ptidp = target_last_wait_ptid; |
e02bc4cc DS |
2010 | *status = target_last_waitstatus; |
2011 | } | |
2012 | ||
ac264b3b MS |
2013 | void |
2014 | nullify_last_target_wait_ptid (void) | |
2015 | { | |
2016 | target_last_wait_ptid = minus_one_ptid; | |
2017 | } | |
2018 | ||
dcf4fbde | 2019 | /* Switch thread contexts. */ |
dd80620e MS |
2020 | |
2021 | static void | |
0d1e5fa7 | 2022 | context_switch (ptid_t ptid) |
dd80620e | 2023 | { |
fd48f117 DJ |
2024 | if (debug_infrun) |
2025 | { | |
2026 | fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ", | |
2027 | target_pid_to_str (inferior_ptid)); | |
2028 | fprintf_unfiltered (gdb_stdlog, "to %s\n", | |
0d1e5fa7 | 2029 | target_pid_to_str (ptid)); |
fd48f117 DJ |
2030 | } |
2031 | ||
0d1e5fa7 | 2032 | switch_to_thread (ptid); |
dd80620e MS |
2033 | } |
2034 | ||
4fa8626c DJ |
2035 | static void |
2036 | adjust_pc_after_break (struct execution_control_state *ecs) | |
2037 | { | |
24a73cce UW |
2038 | struct regcache *regcache; |
2039 | struct gdbarch *gdbarch; | |
8aad930b | 2040 | CORE_ADDR breakpoint_pc; |
4fa8626c | 2041 | |
4fa8626c DJ |
2042 | /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If |
2043 | we aren't, just return. | |
9709f61c DJ |
2044 | |
2045 | We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not | |
b798847d UW |
2046 | affected by gdbarch_decr_pc_after_break. Other waitkinds which are |
2047 | implemented by software breakpoints should be handled through the normal | |
2048 | breakpoint layer. | |
8fb3e588 | 2049 | |
4fa8626c DJ |
2050 | NOTE drow/2004-01-31: On some targets, breakpoints may generate |
2051 | different signals (SIGILL or SIGEMT for instance), but it is less | |
2052 | clear where the PC is pointing afterwards. It may not match | |
b798847d UW |
2053 | gdbarch_decr_pc_after_break. I don't know any specific target that |
2054 | generates these signals at breakpoints (the code has been in GDB since at | |
2055 | least 1992) so I can not guess how to handle them here. | |
8fb3e588 | 2056 | |
e6cf7916 UW |
2057 | In earlier versions of GDB, a target with |
2058 | gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a | |
b798847d UW |
2059 | watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any |
2060 | target with both of these set in GDB history, and it seems unlikely to be | |
2061 | correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */ | |
4fa8626c DJ |
2062 | |
2063 | if (ecs->ws.kind != TARGET_WAITKIND_STOPPED) | |
2064 | return; | |
2065 | ||
2066 | if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP) | |
2067 | return; | |
2068 | ||
4058b839 PA |
2069 | /* In reverse execution, when a breakpoint is hit, the instruction |
2070 | under it has already been de-executed. The reported PC always | |
2071 | points at the breakpoint address, so adjusting it further would | |
2072 | be wrong. E.g., consider this case on a decr_pc_after_break == 1 | |
2073 | architecture: | |
2074 | ||
2075 | B1 0x08000000 : INSN1 | |
2076 | B2 0x08000001 : INSN2 | |
2077 | 0x08000002 : INSN3 | |
2078 | PC -> 0x08000003 : INSN4 | |
2079 | ||
2080 | Say you're stopped at 0x08000003 as above. Reverse continuing | |
2081 | from that point should hit B2 as below. Reading the PC when the | |
2082 | SIGTRAP is reported should read 0x08000001 and INSN2 should have | |
2083 | been de-executed already. | |
2084 | ||
2085 | B1 0x08000000 : INSN1 | |
2086 | B2 PC -> 0x08000001 : INSN2 | |
2087 | 0x08000002 : INSN3 | |
2088 | 0x08000003 : INSN4 | |
2089 | ||
2090 | We can't apply the same logic as for forward execution, because | |
2091 | we would wrongly adjust the PC to 0x08000000, since there's a | |
2092 | breakpoint at PC - 1. We'd then report a hit on B1, although | |
2093 | INSN1 hadn't been de-executed yet. Doing nothing is the correct | |
2094 | behaviour. */ | |
2095 | if (execution_direction == EXEC_REVERSE) | |
2096 | return; | |
2097 | ||
24a73cce UW |
2098 | /* If this target does not decrement the PC after breakpoints, then |
2099 | we have nothing to do. */ | |
2100 | regcache = get_thread_regcache (ecs->ptid); | |
2101 | gdbarch = get_regcache_arch (regcache); | |
2102 | if (gdbarch_decr_pc_after_break (gdbarch) == 0) | |
2103 | return; | |
2104 | ||
8aad930b AC |
2105 | /* Find the location where (if we've hit a breakpoint) the |
2106 | breakpoint would be. */ | |
515630c5 UW |
2107 | breakpoint_pc = regcache_read_pc (regcache) |
2108 | - gdbarch_decr_pc_after_break (gdbarch); | |
8aad930b | 2109 | |
1c5cfe86 PA |
2110 | /* Check whether there actually is a software breakpoint inserted at |
2111 | that location. | |
2112 | ||
2113 | If in non-stop mode, a race condition is possible where we've | |
2114 | removed a breakpoint, but stop events for that breakpoint were | |
2115 | already queued and arrive later. To suppress those spurious | |
2116 | SIGTRAPs, we keep a list of such breakpoint locations for a bit, | |
2117 | and retire them after a number of stop events are reported. */ | |
2118 | if (software_breakpoint_inserted_here_p (breakpoint_pc) | |
2119 | || (non_stop && moribund_breakpoint_here_p (breakpoint_pc))) | |
8aad930b | 2120 | { |
1c0fdd0e UW |
2121 | /* When using hardware single-step, a SIGTRAP is reported for both |
2122 | a completed single-step and a software breakpoint. Need to | |
2123 | differentiate between the two, as the latter needs adjusting | |
2124 | but the former does not. | |
2125 | ||
2126 | The SIGTRAP can be due to a completed hardware single-step only if | |
2127 | - we didn't insert software single-step breakpoints | |
2128 | - the thread to be examined is still the current thread | |
2129 | - this thread is currently being stepped | |
2130 | ||
2131 | If any of these events did not occur, we must have stopped due | |
2132 | to hitting a software breakpoint, and have to back up to the | |
2133 | breakpoint address. | |
2134 | ||
2135 | As a special case, we could have hardware single-stepped a | |
2136 | software breakpoint. In this case (prev_pc == breakpoint_pc), | |
2137 | we also need to back up to the breakpoint address. */ | |
2138 | ||
2139 | if (singlestep_breakpoints_inserted_p | |
2140 | || !ptid_equal (ecs->ptid, inferior_ptid) | |
4e1c45ea PA |
2141 | || !currently_stepping (ecs->event_thread) |
2142 | || ecs->event_thread->prev_pc == breakpoint_pc) | |
515630c5 | 2143 | regcache_write_pc (regcache, breakpoint_pc); |
8aad930b | 2144 | } |
4fa8626c DJ |
2145 | } |
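
/* Editor's note: a standalone sketch, not infrun.c code, of the arithmetic
   adjust_pc_after_break performs above.  On targets whose trap instruction
   leaves the PC just past the breakpoint (decr_pc_after_break > 0), the
   reported PC must be wound back to the breakpoint address before GDB can
   recognise which breakpoint was hit.  The breakpoint lookup and the
   one-byte adjustment below are illustrative stand-ins.  */

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_DECR_PC_AFTER_BREAK 1   /* e.g. x86: int3 is one byte */

static bool
sw_breakpoint_at (unsigned long addr)
{
  return addr == 0x8048000;            /* pretend a breakpoint lives here */
}

static unsigned long
adjusted_pc (unsigned long reported_pc)
{
  unsigned long breakpoint_pc = reported_pc - SKETCH_DECR_PC_AFTER_BREAK;

  /* Only rewind if there really is a software breakpoint at the
     candidate address; otherwise the SIGTRAP came from something else
     (hardware single-step, a signal, ...) and the PC is already right.  */
  if (sw_breakpoint_at (breakpoint_pc))
    return breakpoint_pc;
  return reported_pc;
}

int
main (void)
{
  printf ("0x%lx\n", adjusted_pc (0x8048001));  /* rewound to 0x8048000 */
  printf ("0x%lx\n", adjusted_pc (0x8049001));  /* left untouched */
  return 0;
}
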
2146 | ||
0d1e5fa7 PA |
2147 | void |
2148 | init_infwait_state (void) | |
2149 | { | |
2150 | waiton_ptid = pid_to_ptid (-1); | |
2151 | infwait_state = infwait_normal_state; | |
2152 | } | |
2153 | ||
94cc34af PA |
2154 | void |
2155 | error_is_running (void) | |
2156 | { | |
2157 | error (_("\ | |
2158 | Cannot execute this command while the selected thread is running.")); | |
2159 | } | |
2160 | ||
2161 | void | |
2162 | ensure_not_running (void) | |
2163 | { | |
2164 | if (is_running (inferior_ptid)) | |
2165 | error_is_running (); | |
2166 | } | |
2167 | ||
cd0fc7c3 SS |
2168 | /* Given an execution control state that has been freshly filled in |
2169 | by an event from the inferior, figure out what it means and take | |
2170 | appropriate action. */ | |
c906108c | 2171 | |
cd0fc7c3 | 2172 | void |
96baa820 | 2173 | handle_inferior_event (struct execution_control_state *ecs) |
cd0fc7c3 | 2174 | { |
c8edd8b4 | 2175 | int sw_single_step_trap_p = 0; |
d983da9c DJ |
2176 | int stopped_by_watchpoint; |
2177 | int stepped_after_stopped_by_watchpoint = 0; | |
2afb61aa | 2178 | struct symtab_and_line stop_pc_sal; |
d6b48e9c PA |
2179 | enum stop_kind stop_soon; |
2180 | ||
2181 | if (ecs->ws.kind != TARGET_WAITKIND_EXITED | |
2182 | && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED | |
2183 | && ecs->ws.kind != TARGET_WAITKIND_IGNORE) | |
2184 | { | |
2185 | struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid)); | |
2186 | gdb_assert (inf); | |
2187 | stop_soon = inf->stop_soon; | |
2188 | } | |
2189 | else | |
2190 | stop_soon = NO_STOP_QUIETLY; | |
cd0fc7c3 | 2191 | |
e02bc4cc | 2192 | /* Cache the last pid/waitstatus. */ |
39f77062 | 2193 | target_last_wait_ptid = ecs->ptid; |
0d1e5fa7 | 2194 | target_last_waitstatus = ecs->ws; |
e02bc4cc | 2195 | |
ca005067 DJ |
2196 | /* Always clear state belonging to the previous time we stopped. */ |
2197 | stop_stack_dummy = 0; | |
2198 | ||
8c90c137 LM |
2199 | /* If it's a new process, add it to the thread database */ |
2200 | ||
2201 | ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid) | |
2202 | && !ptid_equal (ecs->ptid, minus_one_ptid) | |
2203 | && !in_thread_list (ecs->ptid)); | |
2204 | ||
2205 | if (ecs->ws.kind != TARGET_WAITKIND_EXITED | |
2206 | && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event) | |
2207 | add_thread (ecs->ptid); | |
2208 | ||
88ed393a JK |
2209 | ecs->event_thread = find_thread_pid (ecs->ptid); |
2210 | ||
2211 | /* Dependent on valid ECS->EVENT_THREAD. */ | |
2212 | adjust_pc_after_break (ecs); | |
2213 | ||
2214 | /* Dependent on the current PC value modified by adjust_pc_after_break. */ | |
2215 | reinit_frame_cache (); | |
2216 | ||
8c90c137 LM |
2217 | if (ecs->ws.kind != TARGET_WAITKIND_IGNORE) |
2218 | { | |
1c5cfe86 PA |
2219 | breakpoint_retire_moribund (); |
2220 | ||
48844aa6 PA |
2221 | /* Mark the non-executing threads accordingly. In all-stop, all |
2222 | threads of all processes are stopped when we get any event | |
2223 | reported. In non-stop mode, only the event thread stops. If | |
2224 | we're handling a process exit in non-stop mode, there's | |
2225 | nothing to do, as threads of the dead process are gone, and | |
2226 | threads of any other process were left running. */ | |
2227 | if (!non_stop) | |
2228 | set_executing (minus_one_ptid, 0); | |
2229 | else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED | |
2230 | && ecs->ws.kind != TARGET_WAITKIND_EXITED) | |
2231 | set_executing (inferior_ptid, 0); | |
8c90c137 LM |
2232 | } |
2233 | ||
0d1e5fa7 | 2234 | switch (infwait_state) |
488f131b JB |
2235 | { |
2236 | case infwait_thread_hop_state: | |
527159b7 | 2237 | if (debug_infrun) |
8a9de0e4 | 2238 | fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n"); |
488f131b | 2239 | /* Cancel the waiton_ptid. */ |
0d1e5fa7 | 2240 | waiton_ptid = pid_to_ptid (-1); |
65e82032 | 2241 | break; |
b83266a0 | 2242 | |
488f131b | 2243 | case infwait_normal_state: |
527159b7 | 2244 | if (debug_infrun) |
8a9de0e4 | 2245 | fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n"); |
d983da9c DJ |
2246 | break; |
2247 | ||
2248 | case infwait_step_watch_state: | |
2249 | if (debug_infrun) | |
2250 | fprintf_unfiltered (gdb_stdlog, | |
2251 | "infrun: infwait_step_watch_state\n"); | |
2252 | ||
2253 | stepped_after_stopped_by_watchpoint = 1; | |
488f131b | 2254 | break; |
b83266a0 | 2255 | |
488f131b | 2256 | case infwait_nonstep_watch_state: |
527159b7 | 2257 | if (debug_infrun) |
8a9de0e4 AC |
2258 | fprintf_unfiltered (gdb_stdlog, |
2259 | "infrun: infwait_nonstep_watch_state\n"); | |
488f131b | 2260 | insert_breakpoints (); |
c906108c | 2261 | |
488f131b JB |
2262 | /* FIXME-maybe: is this cleaner than setting a flag? Does it |
2263 | handle things like signals arriving and other things happening | |
2264 | in combination correctly? */ | |
2265 | stepped_after_stopped_by_watchpoint = 1; | |
2266 | break; | |
65e82032 AC |
2267 | |
2268 | default: | |
e2e0b3e5 | 2269 | internal_error (__FILE__, __LINE__, _("bad switch")); |
488f131b | 2270 | } |
0d1e5fa7 | 2271 | infwait_state = infwait_normal_state; |
c906108c | 2272 | |
488f131b JB |
2273 | switch (ecs->ws.kind) |
2274 | { | |
2275 | case TARGET_WAITKIND_LOADED: | |
527159b7 | 2276 | if (debug_infrun) |
8a9de0e4 | 2277 | fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n"); |
b0f4b84b DJ |
2278 | /* Ignore gracefully during startup of the inferior, as it might |
2279 | be the shell which has just loaded some objects, otherwise | |
2280 | add the symbols for the newly loaded objects. Also ignore at | |
2281 | the beginning of an attach or remote session; we will query | |
2282 | the full list of libraries once the connection is | |
2283 | established. */ | |
c0236d92 | 2284 | if (stop_soon == NO_STOP_QUIETLY) |
488f131b | 2285 | { |
488f131b JB |
2286 | /* Check for any newly added shared libraries if we're |
2287 | supposed to be adding them automatically. Switch | |
2288 | terminal for any messages produced by | |
2289 | breakpoint_re_set. */ | |
2290 | target_terminal_ours_for_output (); | |
aff6338a | 2291 | /* NOTE: cagney/2003-11-25: Make certain that the target |
8fb3e588 AC |
2292 | stack's section table is kept up-to-date. Architectures, |
2293 | (e.g., PPC64), use the section table to perform | |
2294 | operations such as address => section name and hence | |
2295 | require the table to contain all sections (including | |
2296 | those found in shared libraries). */ | |
aff6338a | 2297 | /* NOTE: cagney/2003-11-25: Pass current_target and not |
8fb3e588 AC |
2298 | exec_ops to SOLIB_ADD. This is because current GDB is |
2299 | only tooled to propagate section_table changes out from | |
2300 | the "current_target" (see target_resize_to_sections), and | |
2301 | not up from the exec stratum. This, of course, isn't | |
2302 | right. "infrun.c" should only interact with the | |
2303 | exec/process stratum, instead relying on the target stack | |
2304 | to propagate relevant changes (stop, section table | |
2305 | changed, ...) up to other layers. */ | |
b0f4b84b | 2306 | #ifdef SOLIB_ADD |
aff6338a | 2307 | SOLIB_ADD (NULL, 0, ¤t_target, auto_solib_add); |
b0f4b84b DJ |
2308 | #else |
2309 | solib_add (NULL, 0, ¤t_target, auto_solib_add); | |
2310 | #endif | |
488f131b JB |
2311 | target_terminal_inferior (); |
2312 | ||
b0f4b84b DJ |
2313 | /* If requested, stop when the dynamic linker notifies |
2314 | gdb of events. This allows the user to get control | |
2315 | and place breakpoints in initializer routines for | |
2316 | dynamically loaded objects (among other things). */ | |
2317 | if (stop_on_solib_events) | |
2318 | { | |
2319 | stop_stepping (ecs); | |
2320 | return; | |
2321 | } | |
2322 | ||
2323 | /* NOTE drow/2007-05-11: This might be a good place to check | |
2324 | for "catch load". */ | |
488f131b | 2325 | } |
b0f4b84b DJ |
2326 | |
2327 | /* If we are skipping through a shell, or through shared library | |
2328 | loading that we aren't interested in, resume the program. If | |
2329 | we're running the program normally, also resume. But stop if | |
2330 | we're attaching or setting up a remote connection. */ | |
2331 | if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY) | |
2332 | { | |
74960c60 VP |
2333 | /* Loading of shared libraries might have changed breakpoint |
2334 | addresses. Make sure new breakpoints are inserted. */ | |
0b02b92d UW |
2335 | if (stop_soon == NO_STOP_QUIETLY |
2336 | && !breakpoints_always_inserted_mode ()) | |
74960c60 | 2337 | insert_breakpoints (); |
b0f4b84b DJ |
2338 | resume (0, TARGET_SIGNAL_0); |
2339 | prepare_to_wait (ecs); | |
2340 | return; | |
2341 | } | |
2342 | ||
2343 | break; | |
c5aa993b | 2344 | |
488f131b | 2345 | case TARGET_WAITKIND_SPURIOUS: |
527159b7 | 2346 | if (debug_infrun) |
8a9de0e4 | 2347 | fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n"); |
488f131b JB |
2348 | resume (0, TARGET_SIGNAL_0); |
2349 | prepare_to_wait (ecs); | |
2350 | return; | |
c5aa993b | 2351 | |
488f131b | 2352 | case TARGET_WAITKIND_EXITED: |
527159b7 | 2353 | if (debug_infrun) |
8a9de0e4 | 2354 | fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n"); |
fb66883a | 2355 | inferior_ptid = ecs->ptid; |
488f131b JB |
2356 | target_terminal_ours (); /* Must do this before mourn anyway */ |
2357 | print_stop_reason (EXITED, ecs->ws.value.integer); | |
2358 | ||
2359 | /* Record the exit code in the convenience variable $_exitcode, so | |
2360 | that the user can inspect this again later. */ | |
2361 | set_internalvar (lookup_internalvar ("_exitcode"), | |
8b9b9e1a | 2362 | value_from_longest (builtin_type_int32, |
488f131b JB |
2363 | (LONGEST) ecs->ws.value.integer)); |
2364 | gdb_flush (gdb_stdout); | |
2365 | target_mourn_inferior (); | |
1c0fdd0e | 2366 | singlestep_breakpoints_inserted_p = 0; |
488f131b JB |
2367 | stop_print_frame = 0; |
2368 | stop_stepping (ecs); | |
2369 | return; | |
c5aa993b | 2370 | |
488f131b | 2371 | case TARGET_WAITKIND_SIGNALLED: |
527159b7 | 2372 | if (debug_infrun) |
8a9de0e4 | 2373 | fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n"); |
fb66883a | 2374 | inferior_ptid = ecs->ptid; |
488f131b | 2375 | stop_print_frame = 0; |
488f131b | 2376 | target_terminal_ours (); /* Must do this before mourn anyway */ |
c5aa993b | 2377 | |
488f131b JB |
2378 | /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't |
2379 | reach here unless the inferior is dead. However, for years | |
2380 | target_kill() was called here, which hints that fatal signals aren't | |
2381 | really fatal on some systems. If that's true, then some changes | |
2382 | may be needed. */ | |
2383 | target_mourn_inferior (); | |
c906108c | 2384 | |
2020b7ab | 2385 | print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig); |
1c0fdd0e | 2386 | singlestep_breakpoints_inserted_p = 0; |
488f131b JB |
2387 | stop_stepping (ecs); |
2388 | return; | |
c906108c | 2389 | |
488f131b JB |
2390 | /* The following are the only cases in which we keep going; |
2391 | the above cases end in a continue or goto. */ | |
2392 | case TARGET_WAITKIND_FORKED: | |
deb3b17b | 2393 | case TARGET_WAITKIND_VFORKED: |
527159b7 | 2394 | if (debug_infrun) |
8a9de0e4 | 2395 | fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n"); |
488f131b JB |
2396 | pending_follow.kind = ecs->ws.kind; |
2397 | ||
3a3e9ee3 | 2398 | pending_follow.fork_event.parent_pid = ecs->ptid; |
8e7d2c16 | 2399 | pending_follow.fork_event.child_pid = ecs->ws.value.related_pid; |
c906108c | 2400 | |
5a2901d9 DJ |
2401 | if (!ptid_equal (ecs->ptid, inferior_ptid)) |
2402 | { | |
0d1e5fa7 | 2403 | context_switch (ecs->ptid); |
35f196d9 | 2404 | reinit_frame_cache (); |
5a2901d9 DJ |
2405 | } |
2406 | ||
488f131b | 2407 | stop_pc = read_pc (); |
675bf4cb | 2408 | |
347bddb7 | 2409 | ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid); |
675bf4cb | 2410 | |
347bddb7 | 2411 | ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat); |
04e68871 DJ |
2412 | |
2413 | /* If no catchpoint triggered for this, then keep going. */ | |
2414 | if (ecs->random_signal) | |
2415 | { | |
2020b7ab | 2416 | ecs->event_thread->stop_signal = TARGET_SIGNAL_0; |
04e68871 DJ |
2417 | keep_going (ecs); |
2418 | return; | |
2419 | } | |
2020b7ab | 2420 | ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP; |
488f131b JB |
2421 | goto process_event_stop_test; |
2422 | ||
2423 | case TARGET_WAITKIND_EXECD: | |
527159b7 | 2424 | if (debug_infrun) |
fc5261f2 | 2425 | fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n"); |
488f131b JB |
2426 | pending_follow.execd_pathname = |
2427 | savestring (ecs->ws.value.execd_pathname, | |
2428 | strlen (ecs->ws.value.execd_pathname)); | |
2429 | ||
5a2901d9 DJ |
2430 | if (!ptid_equal (ecs->ptid, inferior_ptid)) |
2431 | { | |
0d1e5fa7 | 2432 | context_switch (ecs->ptid); |
35f196d9 | 2433 | reinit_frame_cache (); |
5a2901d9 DJ |
2434 | } |
2435 | ||
795e548f PA |
2436 | stop_pc = read_pc (); |
2437 | ||
2438 | /* This causes the eventpoints and symbol table to be reset. | |
2439 | Must do this now, before trying to determine whether to | |
2440 | stop. */ | |
2441 | follow_exec (inferior_ptid, pending_follow.execd_pathname); | |
2442 | xfree (pending_follow.execd_pathname); | |
2443 | ||
2444 | ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid); | |
2445 | ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat); | |
2446 | ||
04e68871 DJ |
2447 | /* If no catchpoint triggered for this, then keep going. */ |
2448 | if (ecs->random_signal) | |
2449 | { | |
2020b7ab | 2450 | ecs->event_thread->stop_signal = TARGET_SIGNAL_0; |
04e68871 DJ |
2451 | keep_going (ecs); |
2452 | return; | |
2453 | } | |
2020b7ab | 2454 | ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP; |
488f131b JB |
2455 | goto process_event_stop_test; |
2456 | ||
b4dc5ffa MK |
2457 | /* Be careful not to try to gather much state about a thread |
2458 | that's in a syscall. It's frequently a losing proposition. */ | |
488f131b | 2459 | case TARGET_WAITKIND_SYSCALL_ENTRY: |
527159b7 | 2460 | if (debug_infrun) |
8a9de0e4 | 2461 | fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n"); |
488f131b JB |
2462 | resume (0, TARGET_SIGNAL_0); |
2463 | prepare_to_wait (ecs); | |
2464 | return; | |
c906108c | 2465 | |
488f131b JB |
2466 | /* Before examining the threads further, step this thread to |
2467 | get it entirely out of the syscall. (We get notice of the | |
2468 | event when the thread is just on the verge of exiting a | |
2469 | syscall. Stepping one instruction seems to get it back | |
b4dc5ffa | 2470 | into user code.) */ |
488f131b | 2471 | case TARGET_WAITKIND_SYSCALL_RETURN: |
527159b7 | 2472 | if (debug_infrun) |
8a9de0e4 | 2473 | fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n"); |
488f131b | 2474 | target_resume (ecs->ptid, 1, TARGET_SIGNAL_0); |
488f131b JB |
2475 | prepare_to_wait (ecs); |
2476 | return; | |
c906108c | 2477 | |
488f131b | 2478 | case TARGET_WAITKIND_STOPPED: |
527159b7 | 2479 | if (debug_infrun) |
8a9de0e4 | 2480 | fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n"); |
2020b7ab | 2481 | ecs->event_thread->stop_signal = ecs->ws.value.sig; |
488f131b | 2482 | break; |
c906108c | 2483 | |
b2175913 MS |
2484 | case TARGET_WAITKIND_NO_HISTORY: |
2485 | /* Reverse execution: target ran out of history info. */ | |
40e12b06 | 2486 | stop_pc = read_pc (); |
b2175913 MS |
2487 | print_stop_reason (NO_HISTORY, 0); |
2488 | stop_stepping (ecs); | |
2489 | return; | |
2490 | ||
488f131b JB |
2491 | /* We had an event in the inferior, but we are not interested |
2492 | in handling it at this level. The lower layers have already | |
8e7d2c16 | 2493 | done what needs to be done, if anything. |
8fb3e588 AC |
2494 | |
2495 | One of the possible circumstances for this is when the | |
2496 | inferior produces output for the console. The inferior has | |
2497 | not stopped, and we are ignoring the event. Another possible | |
2498 | circumstance is any event which the lower level knows will be | |
2499 | reported multiple times without an intervening resume. */ | |
488f131b | 2500 | case TARGET_WAITKIND_IGNORE: |
527159b7 | 2501 | if (debug_infrun) |
8a9de0e4 | 2502 | fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n"); |
8e7d2c16 | 2503 | prepare_to_wait (ecs); |
488f131b JB |
2504 | return; |
2505 | } | |
c906108c | 2506 | |
488f131b JB |
2507 | if (ecs->new_thread_event) |
2508 | { | |
94cc34af PA |
2509 | if (non_stop) |
2510 | /* Non-stop assumes that the target handles adding new threads | |
2511 | to the thread list. */ | |
2512 | internal_error (__FILE__, __LINE__, "\ | |
2513 | targets should add new threads to the thread list themselves in non-stop mode."); | |
2514 | ||
2515 | /* We may want to consider not doing a resume here in order to | |
2516 | give the user a chance to play with the new thread. It might | |
2517 | be good to make that a user-settable option. */ | |
2518 | ||
2519 | /* At this point, all threads are stopped (happens automatically | |
2520 | in either the OS or the native code). Therefore we need to | |
2521 | continue all threads in order to make progress. */ | |
2522 | ||
488f131b JB |
2523 | target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0); |
2524 | prepare_to_wait (ecs); | |
2525 | return; | |
2526 | } | |
c906108c | 2527 | |
2020b7ab | 2528 | if (ecs->ws.kind == TARGET_WAITKIND_STOPPED) |
252fbfc8 PA |
2529 | { |
2530 | /* Do we need to clean up the state of a thread that has | |
2531 | completed a displaced single-step? (Doing so usually affects | |
2532 | the PC, so do it here, before we set stop_pc.) */ | |
2533 | displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal); | |
2534 | ||
2535 | /* If we either finished a single-step or hit a breakpoint, but | |
2536 | the user wanted this thread to be stopped, pretend we got a | |
2537 | SIG0 (generic unsignaled stop). */ | |
2538 | ||
2539 | if (ecs->event_thread->stop_requested | |
2540 | && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP) | |
2541 | ecs->event_thread->stop_signal = TARGET_SIGNAL_0; | |
2542 | } | |
237fc4c9 | 2543 | |
515630c5 | 2544 | stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid)); |
488f131b | 2545 | |
527159b7 | 2546 | if (debug_infrun) |
237fc4c9 PA |
2547 | { |
2548 | fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = 0x%s\n", | |
2549 | paddr_nz (stop_pc)); | |
2550 | if (STOPPED_BY_WATCHPOINT (&ecs->ws)) | |
2551 | { | |
2552 | CORE_ADDR addr; | |
2553 | fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n"); | |
2554 | ||
2555 | if (target_stopped_data_address (¤t_target, &addr)) | |
2556 | fprintf_unfiltered (gdb_stdlog, | |
2557 | "infrun: stopped data address = 0x%s\n", | |
2558 | paddr_nz (addr)); | |
2559 | else | |
2560 | fprintf_unfiltered (gdb_stdlog, | |
2561 | "infrun: (no data address available)\n"); | |
2562 | } | |
2563 | } | |
527159b7 | 2564 | |
9f976b41 DJ |
2565 | if (stepping_past_singlestep_breakpoint) |
2566 | { | |
1c0fdd0e | 2567 | gdb_assert (singlestep_breakpoints_inserted_p); |
9f976b41 DJ |
2568 | gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid)); |
2569 | gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid)); | |
2570 | ||
2571 | stepping_past_singlestep_breakpoint = 0; | |
2572 | ||
2573 | /* We've either finished single-stepping past the single-step | |
8fb3e588 AC |
2574 | breakpoint, or stopped for some other reason. It would be nice if |
2575 | we could tell, but we can't reliably. */ | |
2020b7ab | 2576 | if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP) |
8fb3e588 | 2577 | { |
527159b7 | 2578 | if (debug_infrun) |
8a9de0e4 | 2579 | fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n"); |
9f976b41 | 2580 | /* Pull the single step breakpoints out of the target. */ |
e0cd558a | 2581 | remove_single_step_breakpoints (); |
9f976b41 DJ |
2582 | singlestep_breakpoints_inserted_p = 0; |
2583 | ||
2584 | ecs->random_signal = 0; | |
2585 | ||
0d1e5fa7 | 2586 | context_switch (saved_singlestep_ptid); |
9a4105ab AC |
2587 | if (deprecated_context_hook) |
2588 | deprecated_context_hook (pid_to_thread_id (ecs->ptid)); | |
9f976b41 DJ |
2589 | |
2590 | resume (1, TARGET_SIGNAL_0); | |
2591 | prepare_to_wait (ecs); | |
2592 | return; | |
2593 | } | |
2594 | } | |
2595 | ||
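/* A non-null deferred_step_ptid means we earlier switched threads
   to step over a breakpoint, and are expected to switch back to the
   original stepping thread once that step completes.  */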
ca67fcb8 | 2596 | if (!ptid_equal (deferred_step_ptid, null_ptid)) |
6a6b96b9 | 2597 | { |
94cc34af PA |
2598 | /* In non-stop mode, there's never a deferred_step_ptid set. */ |
2599 | gdb_assert (!non_stop); | |
2600 | ||
6a6b96b9 UW |
2601 | /* If we stopped for some other reason than single-stepping, ignore |
2602 | the fact that we were supposed to switch back. */ | |
2020b7ab | 2603 | if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP) |
6a6b96b9 UW |
2604 | { |
2605 | if (debug_infrun) | |
2606 | fprintf_unfiltered (gdb_stdlog, | |
ca67fcb8 | 2607 | "infrun: handling deferred step\n"); |
6a6b96b9 UW |
2608 | |
2609 | /* Pull the single step breakpoints out of the target. */ | |
2610 | if (singlestep_breakpoints_inserted_p) | |
2611 | { | |
2612 | remove_single_step_breakpoints (); | |
2613 | singlestep_breakpoints_inserted_p = 0; | |
2614 | } | |
2615 | ||
2616 | /* Note: We do not call context_switch at this point, as the | |
2617 | context is already set up for stepping the original thread. */ | |
ca67fcb8 VP |
2618 | switch_to_thread (deferred_step_ptid); |
2619 | deferred_step_ptid = null_ptid; | |
6a6b96b9 UW |
2620 | /* Suppress spurious "Switching to ..." message. */ |
2621 | previous_inferior_ptid = inferior_ptid; | |
2622 | ||
2623 | resume (1, TARGET_SIGNAL_0); | |
2624 | prepare_to_wait (ecs); | |
2625 | return; | |
2626 | } | |
ca67fcb8 VP |
2627 | |
2628 | deferred_step_ptid = null_ptid; | |
6a6b96b9 UW |
2629 | } |
2630 | ||
488f131b JB |
2631 | /* See if a thread hit a thread-specific breakpoint that was meant for |
2632 | another thread. If so, then step that thread past the breakpoint, | |
2633 | and continue it. */ | |
2634 | ||
2020b7ab | 2635 | if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP) |
488f131b | 2636 | { |
9f976b41 DJ |
2637 | int thread_hop_needed = 0; |
2638 | ||
f8d40ec8 JB |
2639 | /* Check if a regular breakpoint has been hit before checking |
2640 | for a potential single step breakpoint. Otherwise, GDB will | |
2641 | not see this breakpoint hit when stepping onto breakpoints. */ | |
c36b740a | 2642 | if (regular_breakpoint_inserted_here_p (stop_pc)) |
488f131b | 2643 | { |
c5aa993b | 2644 | ecs->random_signal = 0; |
4fa8626c | 2645 | if (!breakpoint_thread_match (stop_pc, ecs->ptid)) |
9f976b41 DJ |
2646 | thread_hop_needed = 1; |
2647 | } | |
1c0fdd0e | 2648 | else if (singlestep_breakpoints_inserted_p) |
9f976b41 | 2649 | { |
fd48f117 DJ |
2650 | /* We have not context switched yet, so this should be true |
2651 | no matter which thread hit the singlestep breakpoint. */ | |
2652 | gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid)); | |
2653 | if (debug_infrun) | |
2654 | fprintf_unfiltered (gdb_stdlog, "infrun: software single step " | |
2655 | "trap for %s\n", | |
2656 | target_pid_to_str (ecs->ptid)); | |
2657 | ||
9f976b41 DJ |
2658 | ecs->random_signal = 0; |
2659 | /* The call to in_thread_list is necessary because PTIDs sometimes | |
2660 | change when we go from single-threaded to multi-threaded. If | |
2661 | the singlestep_ptid is still in the list, assume that it is | |
2662 | really different from ecs->ptid. */ | |
2663 | if (!ptid_equal (singlestep_ptid, ecs->ptid) | |
2664 | && in_thread_list (singlestep_ptid)) | |
2665 | { | |
fd48f117 DJ |
2666 | /* If the PC of the thread we were trying to single-step |
2667 | has changed, discard this event (which we were going | |
2668 | to ignore anyway), and pretend we saw that thread | |
2669 | trap. This prevents us from continuously moving the | 
2670 | single-step breakpoint forward, one instruction at a | |
2671 | time. If the PC has changed, then the thread we were | |
2672 | trying to single-step has trapped or been signalled, | |
2673 | but the event has not been reported to GDB yet. | |
2674 | ||
2675 | There might be some cases where this loses signal | |
2676 | information, if a signal has arrived at exactly the | |
2677 | same time that the PC changed, but this is the best | |
2678 | we can do with the information available. Perhaps we | |
2679 | should arrange to report all events for all threads | |
2680 | when they stop, or to re-poll the remote looking for | |
2681 | this particular thread (i.e. temporarily enable | |
2682 | schedlock). */ | |
515630c5 UW |
2683 | |
2684 | CORE_ADDR new_singlestep_pc | |
2685 | = regcache_read_pc (get_thread_regcache (singlestep_ptid)); | |
2686 | ||
2687 | if (new_singlestep_pc != singlestep_pc) | |
fd48f117 | 2688 | { |
2020b7ab PA |
2689 | enum target_signal stop_signal; |
2690 | ||
fd48f117 DJ |
2691 | if (debug_infrun) |
2692 | fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread," | |
2693 | " but expected thread advanced also\n"); | |
2694 | ||
2695 | /* The current context still belongs to | |
2696 | singlestep_ptid. Don't swap here, since that's | |
2697 | the context we want to use. Just fudge our | |
2698 | state and continue. */ | |
2020b7ab PA |
2699 | stop_signal = ecs->event_thread->stop_signal; |
2700 | ecs->event_thread->stop_signal = TARGET_SIGNAL_0; | |
fd48f117 | 2701 | ecs->ptid = singlestep_ptid; |
4e1c45ea | 2702 | ecs->event_thread = find_thread_pid (ecs->ptid); |
2020b7ab | 2703 | ecs->event_thread->stop_signal = stop_signal; |
515630c5 | 2704 | stop_pc = new_singlestep_pc; |
fd48f117 DJ |
2705 | } |
2706 | else | |
2707 | { | |
2708 | if (debug_infrun) | |
2709 | fprintf_unfiltered (gdb_stdlog, | |
2710 | "infrun: unexpected thread\n"); | |
2711 | ||
2712 | thread_hop_needed = 1; | |
2713 | stepping_past_singlestep_breakpoint = 1; | |
2714 | saved_singlestep_ptid = singlestep_ptid; | |
2715 | } | |
9f976b41 DJ |
2716 | } |
2717 | } | |
2718 | ||
2719 | if (thread_hop_needed) | |
8fb3e588 | 2720 | { |
237fc4c9 | 2721 | int remove_status = 0; |
8fb3e588 | 2722 | |
527159b7 | 2723 | if (debug_infrun) |
8a9de0e4 | 2724 | fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n"); |
527159b7 | 2725 | |
8fb3e588 AC |
2726 | /* Saw a breakpoint, but it was hit by the wrong thread. |
2727 | Just continue. */ | |
2728 | ||
1c0fdd0e | 2729 | if (singlestep_breakpoints_inserted_p) |
488f131b | 2730 | { |
8fb3e588 | 2731 | /* Pull the single step breakpoints out of the target. */ |
e0cd558a | 2732 | remove_single_step_breakpoints (); |
8fb3e588 AC |
2733 | singlestep_breakpoints_inserted_p = 0; |
2734 | } | |
2735 | ||
237fc4c9 PA |
2736 | /* If the arch can displace step, don't remove the |
2737 | breakpoints. */ | |
2738 | if (!use_displaced_stepping (current_gdbarch)) | |
2739 | remove_status = remove_breakpoints (); | |
2740 | ||
8fb3e588 AC |
2741 | /* Did we fail to remove breakpoints? If so, try |
2742 | to set the PC past the bp. (There's at least | |
2743 | one situation in which we can fail to remove | |
2744 | the bp's: On HP-UX's that use ttrace, we can't | |
2745 | change the address space of a vforking child | |
2746 | process until the child exits (well, okay, not | |
2747 | then either :-) or execs. */ | |
2748 | if (remove_status != 0) | |
9d9cd7ac | 2749 | error (_("Cannot step over breakpoint hit in wrong thread")); |
8fb3e588 AC |
2750 | else |
2751 | { /* Single step */ | |
8fb3e588 | 2752 | if (!ptid_equal (inferior_ptid, ecs->ptid)) |
0d1e5fa7 PA |
2753 | context_switch (ecs->ptid); |
2754 | ||
94cc34af PA |
2755 | if (!non_stop) |
2756 | { | |
2757 | /* Only need to require the next event from this | |
2758 | thread in all-stop mode. */ | |
2759 | waiton_ptid = ecs->ptid; | |
2760 | infwait_state = infwait_thread_hop_state; | |
2761 | } | |
8fb3e588 | 2762 | |
4e1c45ea | 2763 | ecs->event_thread->stepping_over_breakpoint = 1; |
8fb3e588 AC |
2764 | keep_going (ecs); |
2765 | registers_changed (); | |
2766 | return; | |
2767 | } | |
488f131b | 2768 | } |
1c0fdd0e | 2769 | else if (singlestep_breakpoints_inserted_p) |
8fb3e588 AC |
2770 | { |
2771 | sw_single_step_trap_p = 1; | |
2772 | ecs->random_signal = 0; | |
2773 | } | |
488f131b JB |
2774 | } |
2775 | else | |
2776 | ecs->random_signal = 1; | |
c906108c | 2777 | |
488f131b | 2778 | /* See if something interesting happened to the non-current thread. If |
b40c7d58 DJ |
2779 | so, then switch to that thread. */ |
2780 | if (!ptid_equal (ecs->ptid, inferior_ptid)) | |
488f131b | 2781 | { |
527159b7 | 2782 | if (debug_infrun) |
8a9de0e4 | 2783 | fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n"); |
527159b7 | 2784 | |
0d1e5fa7 | 2785 | context_switch (ecs->ptid); |
c5aa993b | 2786 | |
9a4105ab AC |
2787 | if (deprecated_context_hook) |
2788 | deprecated_context_hook (pid_to_thread_id (ecs->ptid)); | |
488f131b | 2789 | } |
c906108c | 2790 | |
1c0fdd0e | 2791 | if (singlestep_breakpoints_inserted_p) |
488f131b JB |
2792 | { |
2793 | /* Pull the single step breakpoints out of the target. */ | |
e0cd558a | 2794 | remove_single_step_breakpoints (); |
488f131b JB |
2795 | singlestep_breakpoints_inserted_p = 0; |
2796 | } | |
c906108c | 2797 | |
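/* Decide whether a watchpoint explains this stop.  If this event
   merely completes a single-step done to get past a
   watchpoint-triggering instruction, don't report a new watchpoint
   hit; otherwise ask the target whether any watchpoint triggered.  */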
d983da9c DJ |
2798 | if (stepped_after_stopped_by_watchpoint) |
2799 | stopped_by_watchpoint = 0; | |
2800 | else | |
2801 | stopped_by_watchpoint = watchpoints_triggered (&ecs->ws); | |
2802 | ||
2803 | /* If necessary, step over this watchpoint. We'll be back to display | |
2804 | it in a moment. */ | |
2805 | if (stopped_by_watchpoint | |
2806 | && (HAVE_STEPPABLE_WATCHPOINT | |
2807 | || gdbarch_have_nonsteppable_watchpoint (current_gdbarch))) | |
488f131b | 2808 | { |
488f131b JB |
2809 | /* At this point, we are stopped at an instruction which has |
2810 | attempted to write to a piece of memory under control of | |
2811 | a watchpoint. The instruction hasn't actually executed | |
2812 | yet. If we were to evaluate the watchpoint expression | |
2813 | now, we would get the old value, and therefore no change | |
2814 | would seem to have occurred. | |
2815 | ||
2816 | In order to make watchpoints work `right', we really need | |
2817 | to complete the memory write, and then evaluate the | |
d983da9c DJ |
2818 | watchpoint expression. We do this by single-stepping the |
2819 | target. | |
2820 | ||
2821 | It may not be necessary to disable the watchpoint to stop over | |
2822 | it. For example, the PA can (with some kernel cooperation) | |
2823 | single step over a watchpoint without disabling the watchpoint. | |
2824 | ||
2825 | It is far more common to need to disable a watchpoint to step | |
2826 | the inferior over it. If we have non-steppable watchpoints, | |
2827 | we must disable the current watchpoint; it's simplest to | |
2828 | disable all watchpoints and breakpoints. */ | |
2829 | ||
2830 | if (!HAVE_STEPPABLE_WATCHPOINT) | |
2831 | remove_breakpoints (); | |
488f131b JB |
2832 | registers_changed (); |
2833 | target_resume (ecs->ptid, 1, TARGET_SIGNAL_0); /* Single step */ | |
0d1e5fa7 | 2834 | waiton_ptid = ecs->ptid; |
d983da9c | 2835 | if (HAVE_STEPPABLE_WATCHPOINT) |
0d1e5fa7 | 2836 | infwait_state = infwait_step_watch_state; |
d983da9c | 2837 | else |
0d1e5fa7 | 2838 | infwait_state = infwait_nonstep_watch_state; |
488f131b JB |
2839 | prepare_to_wait (ecs); |
2840 | return; | |
2841 | } | |
2842 | ||
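/* Reset the per-event state and look up the function containing
   stop_pc, so the stepping logic below knows which function the
   inferior stopped in.  */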
488f131b JB |
2843 | ecs->stop_func_start = 0; |
2844 | ecs->stop_func_end = 0; | |
2845 | ecs->stop_func_name = 0; | |
2846 | /* Don't care about return value; stop_func_start and stop_func_name | |
2847 | will both be 0 if it doesn't work. */ | |
2848 | find_pc_partial_function (stop_pc, &ecs->stop_func_name, | |
2849 | &ecs->stop_func_start, &ecs->stop_func_end); | |
cbf3b44a UW |
2850 | ecs->stop_func_start |
2851 | += gdbarch_deprecated_function_start_offset (current_gdbarch); | |
4e1c45ea | 2852 | ecs->event_thread->stepping_over_breakpoint = 0; |
347bddb7 | 2853 | bpstat_clear (&ecs->event_thread->stop_bpstat); |
414c69f7 | 2854 | ecs->event_thread->stop_step = 0; |
488f131b JB |
2855 | stop_print_frame = 1; |
2856 | ecs->random_signal = 0; | |
2857 | stopped_by_random_signal = 0; | |
488f131b | 2858 | |
2020b7ab | 2859 | if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP |
4e1c45ea | 2860 | && ecs->event_thread->trap_expected |
3352ef37 | 2861 | && gdbarch_single_step_through_delay_p (current_gdbarch) |
4e1c45ea | 2862 | && currently_stepping (ecs->event_thread)) |
3352ef37 | 2863 | { |
b50d7442 | 2864 | /* We're trying to step off a breakpoint. Turns out that we're |
3352ef37 AC |
2865 | also on an instruction that needs to be stepped multiple |
2866 | times before it has been fully executed. E.g., architectures | 
2867 | with a delay slot. It needs to be stepped twice, once for | |
2868 | the instruction and once for the delay slot. */ | |
2869 | int step_through_delay | |
2870 | = gdbarch_single_step_through_delay (current_gdbarch, | |
2871 | get_current_frame ()); | |
527159b7 | 2872 | if (debug_infrun && step_through_delay) |
8a9de0e4 | 2873 | fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n"); |
4e1c45ea | 2874 | if (ecs->event_thread->step_range_end == 0 && step_through_delay) |
3352ef37 AC |
2875 | { |
2876 | /* The user issued a continue when stopped at a breakpoint. | |
2877 | Set up for another trap and get out of here. */ | |
4e1c45ea | 2878 | ecs->event_thread->stepping_over_breakpoint = 1; |
3352ef37 AC |
2879 | keep_going (ecs); |
2880 | return; | |
2881 | } | |
2882 | else if (step_through_delay) | |
2883 | { | |
2884 | /* The user issued a step when stopped at a breakpoint. | |
2885 | Maybe we should stop, maybe we should not - the delay | |
2886 | slot *might* correspond to a line of source. In any | |
ca67fcb8 VP |
2887 | case, don't decide that here, just set |
2888 | ecs->stepping_over_breakpoint, making sure we | |
2889 | single-step again before breakpoints are re-inserted. */ | |
4e1c45ea | 2890 | ecs->event_thread->stepping_over_breakpoint = 1; |
3352ef37 AC |
2891 | } |
2892 | } | |
2893 | ||
488f131b JB |
2894 | /* Look at the cause of the stop, and decide what to do. |
2895 | The alternatives are: | |
0d1e5fa7 PA |
2896 | 1) stop_stepping and return, to really stop and return to the debugger, | 
2897 | 2) keep_going and return to start up again | |
4e1c45ea | 2898 | (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once) |
488f131b JB |
2899 | 3) set ecs->random_signal to 1, and the decision between 1 and 2 |
2900 | will be made according to the signal handling tables. */ | |
2901 | ||
2902 | /* First, distinguish signals caused by the debugger from signals | |
03cebad2 MK |
2903 | that have to do with the program's own actions. Note that |
2904 | breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending | |
2905 | on the operating system version. Here we detect when a SIGILL or | |
2906 | SIGEMT is really a breakpoint and change it to SIGTRAP. We do | |
2907 | something similar for SIGSEGV, since a SIGSEGV will be generated | |
2908 | when we're trying to execute a breakpoint instruction on a | |
2909 | non-executable stack. This happens for call dummy breakpoints | |
2910 | for architectures like SPARC that place call dummies on the | |
237fc4c9 | 2911 | stack. |
488f131b | 2912 | |
237fc4c9 PA |
2913 | If we're doing a displaced step past a breakpoint, then the |
2914 | breakpoint is always inserted at the original instruction; | |
2915 | non-standard signals can't be explained by the breakpoint. */ | |
2020b7ab | 2916 | if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP |
4e1c45ea | 2917 | || (! ecs->event_thread->trap_expected |
237fc4c9 | 2918 | && breakpoint_inserted_here_p (stop_pc) |
2020b7ab PA |
2919 | && (ecs->event_thread->stop_signal == TARGET_SIGNAL_ILL |
2920 | || ecs->event_thread->stop_signal == TARGET_SIGNAL_SEGV | |
2921 | || ecs->event_thread->stop_signal == TARGET_SIGNAL_EMT)) | |
b0f4b84b DJ |
2922 | || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP |
2923 | || stop_soon == STOP_QUIETLY_REMOTE) | |
488f131b | 2924 | { |
2020b7ab | 2925 | if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap) |
488f131b | 2926 | { |
527159b7 | 2927 | if (debug_infrun) |
8a9de0e4 | 2928 | fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n"); |
488f131b JB |
2929 | stop_print_frame = 0; |
2930 | stop_stepping (ecs); | |
2931 | return; | |
2932 | } | |
c54cfec8 EZ |
2933 | |
2934 | /* This originates from start_remote(), start_inferior() and | 
2935 | shared library hook functions. */ | 
b0f4b84b | 2936 | if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE) |
488f131b | 2937 | { |
527159b7 | 2938 | if (debug_infrun) |
8a9de0e4 | 2939 | fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n"); |
488f131b JB |
2940 | stop_stepping (ecs); |
2941 | return; | |
2942 | } | |
2943 | ||
c54cfec8 | 2944 | /* This originates from attach_command(). We need to overwrite |
a0d21d28 PA |
2945 | the stop_signal here, because some kernels don't ignore a |
2946 | SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call. | |
2947 | See more comments in inferior.h. On the other hand, if we | |
a0ef4274 | 2948 | get a non-SIGSTOP, report it to the user - assume the backend |
a0d21d28 PA |
2949 | will handle the SIGSTOP if it should show up later. |
2950 | ||
2951 | Also consider that the attach is complete when we see a | |
2952 | SIGTRAP. Some systems (e.g. Windows), and stubs supporting | |
2953 | target extended-remote report it instead of a SIGSTOP | |
2954 | (e.g. gdbserver). We already rely on SIGTRAP being our | |
e0ba6746 PA |
2955 | signal, so this is no exception. |
2956 | ||
2957 | Also consider that the attach is complete when we see a | |
2958 | TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell | |
2959 | the target to stop all threads of the inferior, in case the | |
2960 | low level attach operation doesn't stop them implicitly. If | |
2961 | they weren't stopped implicitly, then the stub will report a | |
2962 | TARGET_SIGNAL_0, meaning: stopped for no particular reason | |
2963 | other than GDB's request. */ | |
a0ef4274 | 2964 | if (stop_soon == STOP_QUIETLY_NO_SIGSTOP |
2020b7ab | 2965 | && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP |
e0ba6746 PA |
2966 | || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP |
2967 | || ecs->event_thread->stop_signal == TARGET_SIGNAL_0)) | |
c54cfec8 EZ |
2968 | { |
2969 | stop_stepping (ecs); | |
2020b7ab | 2970 | ecs->event_thread->stop_signal = TARGET_SIGNAL_0; |
c54cfec8 EZ |
2971 | return; |
2972 | } | |
2973 | ||
fba57f8f | 2974 | /* See if there is a breakpoint at the current PC. */ |
347bddb7 | 2975 | ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid); |
fba57f8f VP |
2976 | |
2977 | /* Following in case break condition called a | |
2978 | function. */ | |
2979 | stop_print_frame = 1; | |
488f131b | 2980 | |
73dd234f | 2981 | /* NOTE: cagney/2003-03-29: These two checks for a random signal |
8fb3e588 AC |
2982 | at one stage in the past included checks for an inferior |
2983 | function call's call dummy's return breakpoint. The original | |
2984 | comment, that went with the test, read: | |
73dd234f | 2985 | |
8fb3e588 AC |
2986 | ``End of a stack dummy. Some systems (e.g. Sony news) give |
2987 | another signal besides SIGTRAP, so check here as well as | |
2988 | above.'' | |
73dd234f | 2989 | |
8002d778 | 2990 | If someone ever tries to get call dummies on a | 
73dd234f | 2991 | non-executable stack to work (where the target would stop |
03cebad2 MK |
2992 | with something like a SIGSEGV), then those tests might need |
2993 | to be re-instated. Given, however, that the tests were only | |
73dd234f | 2994 | enabled when momentary breakpoints were not being used, I |
03cebad2 MK |
2995 | suspect that it won't be the case. |
2996 | ||
8fb3e588 AC |
2997 | NOTE: kettenis/2004-02-05: Indeed such checks don't seem to |
2998 | be necessary for call dummies on a non-executable stack on | |
2999 | SPARC. */ | |
73dd234f | 3000 | |
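/* A SIGTRAP is not "random" if a breakpoint explains it, if we were
   expecting a trap from stepping over a breakpoint, or if we were
   range-stepping with no step-resume breakpoint outstanding.  Any
   other signal is random unless its bpstat explains it, in which
   case it is treated as a trap.  */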
2020b7ab | 3001 | if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP) |
488f131b | 3002 | ecs->random_signal |
347bddb7 | 3003 | = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat) |
4e1c45ea PA |
3004 | || ecs->event_thread->trap_expected |
3005 | || (ecs->event_thread->step_range_end | |
3006 | && ecs->event_thread->step_resume_breakpoint == NULL)); | |
488f131b JB |
3007 | else |
3008 | { | |
347bddb7 | 3009 | ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat); |
488f131b | 3010 | if (!ecs->random_signal) |
2020b7ab | 3011 | ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP; |
488f131b JB |
3012 | } |
3013 | } | |
3014 | ||
3015 | /* When we reach this point, we've pretty much decided | |
3016 | that the reason for stopping must've been a random | |
3017 | (unexpected) signal. */ | |
3018 | ||
3019 | else | |
3020 | ecs->random_signal = 1; | |
488f131b | 3021 | |
04e68871 | 3022 | process_event_stop_test: |
488f131b JB |
3023 | /* For the program's own signals, act according to |
3024 | the signal handling tables. */ | |
3025 | ||
3026 | if (ecs->random_signal) | |
3027 | { | |
3028 | /* Signal not for debugging purposes. */ | |
3029 | int printed = 0; | |
3030 | ||
527159b7 | 3031 | if (debug_infrun) |
2020b7ab PA |
3032 | fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n", |
3033 | ecs->event_thread->stop_signal); | |
527159b7 | 3034 | |
488f131b JB |
3035 | stopped_by_random_signal = 1; |
3036 | ||
2020b7ab | 3037 | if (signal_print[ecs->event_thread->stop_signal]) |
488f131b JB |
3038 | { |
3039 | printed = 1; | |
3040 | target_terminal_ours_for_output (); | |
2020b7ab | 3041 | print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal); |
488f131b | 3042 | } |
252fbfc8 PA |
3043 | /* Always stop on signals if we're either just gaining control |
3044 | of the program, or the user explicitly requested this thread | |
3045 | to remain stopped. */ | |
d6b48e9c | 3046 | if (stop_soon != NO_STOP_QUIETLY |
252fbfc8 | 3047 | || ecs->event_thread->stop_requested |
d6b48e9c | 3048 | || signal_stop_state (ecs->event_thread->stop_signal)) |
488f131b JB |
3049 | { |
3050 | stop_stepping (ecs); | |
3051 | return; | |
3052 | } | |
3053 | /* If not going to stop, give terminal back | |
3054 | if we took it away. */ | |
3055 | else if (printed) | |
3056 | target_terminal_inferior (); | |
3057 | ||
3058 | /* Clear the signal if it should not be passed. */ | |
2020b7ab PA |
3059 | if (signal_program[ecs->event_thread->stop_signal] == 0) |
3060 | ecs->event_thread->stop_signal = TARGET_SIGNAL_0; | |
488f131b | 3061 | |
4e1c45ea PA |
3062 | if (ecs->event_thread->prev_pc == read_pc () |
3063 | && ecs->event_thread->trap_expected | |
3064 | && ecs->event_thread->step_resume_breakpoint == NULL) | |
68f53502 AC |
3065 | { |
3066 | /* We were just starting a new sequence, attempting to | |
3067 | single-step off of a breakpoint and expecting a SIGTRAP. | |
237fc4c9 | 3068 | Instead this signal arrives. This signal will take us out |
68f53502 AC |
3069 | of the stepping range so GDB needs to remember to, when |
3070 | the signal handler returns, resume stepping off that | |
3071 | breakpoint. */ | |
3072 | /* To simplify things, "continue" is forced to use the same | |
3073 | code paths as single-step - set a breakpoint at the | |
3074 | signal return address and then, once hit, step off that | |
3075 | breakpoint. */ | |
237fc4c9 PA |
3076 | if (debug_infrun) |
3077 | fprintf_unfiltered (gdb_stdlog, | |
3078 | "infrun: signal arrived while stepping over " | |
3079 | "breakpoint\n"); | |
d3169d93 | 3080 | |
44cbf7b5 | 3081 | insert_step_resume_breakpoint_at_frame (get_current_frame ()); |
4e1c45ea | 3082 | ecs->event_thread->step_after_step_resume_breakpoint = 1; |
9d799f85 AC |
3083 | keep_going (ecs); |
3084 | return; | |
68f53502 | 3085 | } |
9d799f85 | 3086 | |
4e1c45ea | 3087 | if (ecs->event_thread->step_range_end != 0 |
2020b7ab | 3088 | && ecs->event_thread->stop_signal != TARGET_SIGNAL_0 |
4e1c45ea PA |
3089 | && (ecs->event_thread->step_range_start <= stop_pc |
3090 | && stop_pc < ecs->event_thread->step_range_end) | |
9d799f85 | 3091 | && frame_id_eq (get_frame_id (get_current_frame ()), |
4e1c45ea PA |
3092 | ecs->event_thread->step_frame_id) |
3093 | && ecs->event_thread->step_resume_breakpoint == NULL) | |
d303a6c7 AC |
3094 | { |
3095 | /* The inferior is about to take a signal that will take it | |
3096 | out of the single step range. Set a breakpoint at the | |
3097 | current PC (which is presumably where the signal handler | |
3098 | will eventually return) and then allow the inferior to | |
3099 | run free. | |
3100 | ||
3101 | Note that this is only needed for a signal delivered | |
3102 | while in the single-step range. Nested signals aren't a | |
3103 | problem as they eventually all return. */ | |
237fc4c9 PA |
3104 | if (debug_infrun) |
3105 | fprintf_unfiltered (gdb_stdlog, | |
3106 | "infrun: signal may take us out of " | |
3107 | "single-step range\n"); | |
3108 | ||
44cbf7b5 | 3109 | insert_step_resume_breakpoint_at_frame (get_current_frame ()); |
9d799f85 AC |
3110 | keep_going (ecs); |
3111 | return; | |
d303a6c7 | 3112 | } |
9d799f85 AC |
3113 | |
3114 | /* Note: step_resume_breakpoint may be non-NULL. This occurs | 
3115 | when either there's a nested signal, or when there's a | |
3116 | pending signal enabled just as the signal handler returns | |
3117 | (leaving the inferior at the step-resume-breakpoint without | |
3118 | actually executing it). Either way continue until the | |
3119 | breakpoint is really hit. */ | |
488f131b JB |
3120 | keep_going (ecs); |
3121 | return; | |
3122 | } | |
3123 | ||
3124 | /* Handle cases caused by hitting a breakpoint. */ | |
3125 | { | |
3126 | CORE_ADDR jmp_buf_pc; | |
3127 | struct bpstat_what what; | |
3128 | ||
347bddb7 | 3129 | what = bpstat_what (ecs->event_thread->stop_bpstat); |
488f131b JB |
3130 | |
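/* If this bpstat includes the breakpoint that terminates an
   inferior function call (a "call dummy"), record that in
   stop_stack_dummy so the stop code knows the call has completed.  */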
3131 | if (what.call_dummy) | |
3132 | { | |
3133 | stop_stack_dummy = 1; | |
c5aa993b | 3134 | } |
c906108c | 3135 | |
488f131b | 3136 | switch (what.main_action) |
c5aa993b | 3137 | { |
488f131b | 3138 | case BPSTAT_WHAT_SET_LONGJMP_RESUME: |
611c83ae PA |
3139 | /* If we hit the breakpoint at longjmp while stepping, we |
3140 | install a momentary breakpoint at the target of the | |
3141 | jmp_buf. */ | |
3142 | ||
3143 | if (debug_infrun) | |
3144 | fprintf_unfiltered (gdb_stdlog, | |
3145 | "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n"); | |
3146 | ||
4e1c45ea | 3147 | ecs->event_thread->stepping_over_breakpoint = 1; |
611c83ae | 3148 | |
91104499 | 3149 | if (!gdbarch_get_longjmp_target_p (current_gdbarch) |
60ade65d UW |
3150 | || !gdbarch_get_longjmp_target (current_gdbarch, |
3151 | get_current_frame (), &jmp_buf_pc)) | |
c5aa993b | 3152 | { |
611c83ae PA |
3153 | if (debug_infrun) |
3154 | fprintf_unfiltered (gdb_stdlog, "\ | |
3155 | infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n"); | |
488f131b | 3156 | keep_going (ecs); |
104c1213 | 3157 | return; |
c5aa993b | 3158 | } |
488f131b | 3159 | |
611c83ae PA |
3160 | /* We're going to replace the current step-resume breakpoint |
3161 | with a longjmp-resume breakpoint. */ | |
4e1c45ea | 3162 | delete_step_resume_breakpoint (ecs->event_thread); |
611c83ae PA |
3163 | |
3164 | /* Insert a breakpoint at resume address. */ | |
3165 | insert_longjmp_resume_breakpoint (jmp_buf_pc); | |
c906108c | 3166 | |
488f131b JB |
3167 | keep_going (ecs); |
3168 | return; | |
c906108c | 3169 | |
488f131b | 3170 | case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME: |
527159b7 | 3171 | if (debug_infrun) |
611c83ae PA |
3172 | fprintf_unfiltered (gdb_stdlog, |
3173 | "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n"); | |
3174 | ||
4e1c45ea PA |
3175 | gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL); |
3176 | delete_step_resume_breakpoint (ecs->event_thread); | |
611c83ae | 3177 | |
414c69f7 | 3178 | ecs->event_thread->stop_step = 1; |
611c83ae PA |
3179 | print_stop_reason (END_STEPPING_RANGE, 0); |
3180 | stop_stepping (ecs); | |
3181 | return; | |
488f131b JB |
3182 | |
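/* A breakpoint is inserted at the current PC and must be stepped
   over before resuming, but there is nothing to stop for; set the
   flag and fall through to the stepping checks below.  */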
3183 | case BPSTAT_WHAT_SINGLE: | |
527159b7 | 3184 | if (debug_infrun) |
8802d8ed | 3185 | fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n"); |
4e1c45ea | 3186 | ecs->event_thread->stepping_over_breakpoint = 1; |
488f131b JB |
3187 | /* Still need to check other stuff, at least the case |
3188 | where we are stepping and step out of the right range. */ | |
3189 | break; | |
c906108c | 3190 | |
488f131b | 3191 | case BPSTAT_WHAT_STOP_NOISY: |
527159b7 | 3192 | if (debug_infrun) |
8802d8ed | 3193 | fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n"); |
488f131b | 3194 | stop_print_frame = 1; |
c906108c | 3195 | |
d303a6c7 AC |
3196 | /* We are about to nuke the step_resume_breakpoint via the | 
3197 | cleanup chain, so no need to worry about it here. */ | |
c5aa993b | 3198 | |
488f131b JB |
3199 | stop_stepping (ecs); |
3200 | return; | |
c5aa993b | 3201 | |
488f131b | 3202 | case BPSTAT_WHAT_STOP_SILENT: |
527159b7 | 3203 | if (debug_infrun) |
8802d8ed | 3204 | fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n"); |
488f131b | 3205 | stop_print_frame = 0; |
c5aa993b | 3206 | |
d303a6c7 AC |
3207 | /* We are about to nuke the step_resume_breakpoint via the | 
3208 | cleanup chain, so no need to worry about it here. */ | |
c5aa993b | 3209 | |
488f131b | 3210 | stop_stepping (ecs); |
e441088d | 3211 | return; |
c5aa993b | 3212 | |
488f131b | 3213 | case BPSTAT_WHAT_STEP_RESUME: |
527159b7 | 3214 | if (debug_infrun) |
8802d8ed | 3215 | fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n"); |
527159b7 | 3216 | |
4e1c45ea PA |
3217 | delete_step_resume_breakpoint (ecs->event_thread); |
3218 | if (ecs->event_thread->step_after_step_resume_breakpoint) | |
68f53502 AC |
3219 | { |
3220 | /* Back when the step-resume breakpoint was inserted, we | |
3221 | were trying to single-step off a breakpoint. Go back | |
3222 | to doing that. */ | |
4e1c45ea PA |
3223 | ecs->event_thread->step_after_step_resume_breakpoint = 0; |
3224 | ecs->event_thread->stepping_over_breakpoint = 1; | |
68f53502 AC |
3225 | keep_going (ecs); |
3226 | return; | |
3227 | } | |
b2175913 MS |
3228 | if (stop_pc == ecs->stop_func_start |
3229 | && execution_direction == EXEC_REVERSE) | |
3230 | { | |
3231 | /* We are stepping over a function call in reverse, and | |
3232 | just hit the step-resume breakpoint at the start | |
3233 | address of the function. Go back to single-stepping, | |
3234 | which should take us back to the function call. */ | |
3235 | ecs->event_thread->stepping_over_breakpoint = 1; | |
3236 | keep_going (ecs); | |
3237 | return; | |
3238 | } | |
488f131b JB |
3239 | break; |
3240 | ||
488f131b | 3241 | case BPSTAT_WHAT_CHECK_SHLIBS: |
c906108c | 3242 | { |
527159b7 | 3243 | if (debug_infrun) |
8802d8ed | 3244 | fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n"); |
488f131b JB |
3245 | |
3246 | /* Check for any newly added shared libraries if we're | |
3247 | supposed to be adding them automatically. Switch | |
3248 | terminal for any messages produced by | |
3249 | breakpoint_re_set. */ | |
3250 | target_terminal_ours_for_output (); | |
aff6338a | 3251 | /* NOTE: cagney/2003-11-25: Make certain that the target |
8fb3e588 AC |
3252 | stack's section table is kept up-to-date. Architectures, |
3253 | (e.g., PPC64), use the section table to perform | |
3254 | operations such as address => section name and hence | |
3255 | require the table to contain all sections (including | |
3256 | those found in shared libraries). */ | |
aff6338a | 3257 | /* NOTE: cagney/2003-11-25: Pass current_target and not |
8fb3e588 AC |
3258 | exec_ops to SOLIB_ADD. This is because current GDB is |
3259 | only tooled to propagate section_table changes out from | |
3260 | the "current_target" (see target_resize_to_sections), and | |
3261 | not up from the exec stratum. This, of course, isn't | |
3262 | right. "infrun.c" should only interact with the | |
3263 | exec/process stratum, instead relying on the target stack | |
3264 | to propagate relevant changes (stop, section table | |
3265 | changed, ...) up to other layers. */ | |
a77053c2 | 3266 | #ifdef SOLIB_ADD |
aff6338a | 3267 | SOLIB_ADD (NULL, 0, &current_target, auto_solib_add); | 
a77053c2 MK |
3268 | #else |
3269 | solib_add (NULL, 0, &current_target, auto_solib_add); | 
3270 | #endif | |
488f131b JB |
3271 | target_terminal_inferior (); |
3272 | ||
488f131b JB |
3273 | /* If requested, stop when the dynamic linker notifies |
3274 | gdb of events. This allows the user to get control | |
3275 | and place breakpoints in initializer routines for | |
3276 | dynamically loaded objects (among other things). */ | |
877522db | 3277 | if (stop_on_solib_events || stop_stack_dummy) |
d4f3574e | 3278 | { |
488f131b | 3279 | stop_stepping (ecs); |
d4f3574e SS |
3280 | return; |
3281 | } | |
c5aa993b | 3282 | else |
c5aa993b | 3283 | { |
488f131b | 3284 | /* We want to step over this breakpoint, then keep going. */ |
4e1c45ea | 3285 | ecs->event_thread->stepping_over_breakpoint = 1; |
488f131b | 3286 | break; |
c5aa993b | 3287 | } |
488f131b | 3288 | } |
488f131b | 3289 | break; |
c906108c | 3290 | |
488f131b JB |
3291 | case BPSTAT_WHAT_LAST: |
3292 | /* Not a real code, but listed here to shut up gcc -Wall. */ | |
c906108c | 3293 | |
488f131b JB |
3294 | case BPSTAT_WHAT_KEEP_CHECKING: |
3295 | break; | |
3296 | } | |
3297 | } | |
c906108c | 3298 | |
488f131b JB |
3299 | /* We come here if we hit a breakpoint but should not |
3300 | stop for it. Possibly we also were stepping | |
3301 | and should stop for that. So fall through and | |
3302 | test for stepping. But, if not stepping, | |
3303 | do not stop. */ | |
c906108c | 3304 | |
a7212384 UW |
3305 | /* In all-stop mode, if we're currently stepping but have stopped in |
3306 | some other thread, we need to switch back to the stepped thread. */ | |
3307 | if (!non_stop) | |
3308 | { | |
3309 | struct thread_info *tp; | |
3310 | tp = iterate_over_threads (currently_stepping_callback, | |
3311 | ecs->event_thread); | |
3312 | if (tp) | |
3313 | { | |
3314 | /* However, if the current thread is blocked on some internal | |
3315 | breakpoint, and we simply need to step over that breakpoint | |
3316 | to get it going again, do that first. */ | |
3317 | if ((ecs->event_thread->trap_expected | |
3318 | && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP) | |
3319 | || ecs->event_thread->stepping_over_breakpoint) | |
3320 | { | |
3321 | keep_going (ecs); | |
3322 | return; | |
3323 | } | |
3324 | ||
3325 | /* Otherwise, we no longer expect a trap in the current thread. | |
3326 | Clear the trap_expected flag before switching back -- this is | |
3327 | what keep_going would do as well, if we called it. */ | |
3328 | ecs->event_thread->trap_expected = 0; | |
3329 | ||
3330 | if (debug_infrun) | |
3331 | fprintf_unfiltered (gdb_stdlog, | |
3332 | "infrun: switching back to stepped thread\n"); | |
3333 | ||
3334 | ecs->event_thread = tp; | |
3335 | ecs->ptid = tp->ptid; | |
3336 | context_switch (ecs->ptid); | |
3337 | keep_going (ecs); | |
3338 | return; | |
3339 | } | |
3340 | } | |
3341 | ||
9d1ff73f MS |
3342 | /* Are we stepping to get the inferior out of the dynamic linker's |
3343 | hook (and possibly the dld itself) after catching a shlib | |
3344 | event? */ | |
4e1c45ea | 3345 | if (ecs->event_thread->stepping_through_solib_after_catch) |
488f131b JB |
3346 | { |
3347 | #if defined(SOLIB_ADD) | |
3348 | /* Have we reached our destination? If not, keep going. */ | |
3349 | if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc)) | |
3350 | { | |
527159b7 | 3351 | if (debug_infrun) |
8a9de0e4 | 3352 | fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n"); |
4e1c45ea | 3353 | ecs->event_thread->stepping_over_breakpoint = 1; |
488f131b | 3354 | keep_going (ecs); |
104c1213 | 3355 | return; |
488f131b JB |
3356 | } |
3357 | #endif | |
527159b7 | 3358 | if (debug_infrun) |
8a9de0e4 | 3359 | fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n"); |
488f131b JB |
3360 | /* Else, stop and report the catchpoint(s) whose triggering |
3361 | caused us to begin stepping. */ | |
4e1c45ea | 3362 | ecs->event_thread->stepping_through_solib_after_catch = 0; |
347bddb7 PA |
3363 | bpstat_clear (&ecs->event_thread->stop_bpstat); |
3364 | ecs->event_thread->stop_bpstat | |
3365 | = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints); | |
4e1c45ea | 3366 | bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints); |
488f131b JB |
3367 | stop_print_frame = 1; |
3368 | stop_stepping (ecs); | |
3369 | return; | |
3370 | } | |
c906108c | 3371 | |
4e1c45ea | 3372 | if (ecs->event_thread->step_resume_breakpoint) |
488f131b | 3373 | { |
527159b7 | 3374 | if (debug_infrun) |
d3169d93 DJ |
3375 | fprintf_unfiltered (gdb_stdlog, |
3376 | "infrun: step-resume breakpoint is inserted\n"); | |
527159b7 | 3377 | |
488f131b JB |
3378 | /* Having a step-resume breakpoint overrides anything |
3379 | else having to do with stepping commands until | |
3380 | that breakpoint is reached. */ | |
488f131b JB |
3381 | keep_going (ecs); |
3382 | return; | |
3383 | } | |
c5aa993b | 3384 | |
4e1c45ea | 3385 | if (ecs->event_thread->step_range_end == 0) |
488f131b | 3386 | { |
527159b7 | 3387 | if (debug_infrun) |
8a9de0e4 | 3388 | fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n"); |
488f131b | 3389 | /* Likewise if we aren't even stepping. */ |
488f131b JB |
3390 | keep_going (ecs); |
3391 | return; | |
3392 | } | |
c5aa993b | 3393 | |
488f131b | 3394 | /* If stepping through a line, keep going if still within it. |
c906108c | 3395 | |
488f131b JB |
3396 | Note that step_range_end is the address of the first instruction |
3397 | beyond the step range, and NOT the address of the last instruction | |
3398 | within it! */ | |
4e1c45ea PA |
3399 | if (stop_pc >= ecs->event_thread->step_range_start |
3400 | && stop_pc < ecs->event_thread->step_range_end) | |
488f131b | 3401 | { |
527159b7 | 3402 | if (debug_infrun) |
b2175913 | 3403 | fprintf_unfiltered (gdb_stdlog, "infrun: stepping inside range [0x%s-0x%s]\n", |
4e1c45ea PA |
3404 | paddr_nz (ecs->event_thread->step_range_start), |
3405 | paddr_nz (ecs->event_thread->step_range_end)); | |
b2175913 MS |
3406 | |
3407 | /* When stepping backward, stop at beginning of line range | |
3408 | (unless it's the function entry point, in which case | |
3409 | keep going back to the call point). */ | |
3410 | if (stop_pc == ecs->event_thread->step_range_start | |
3411 | && stop_pc != ecs->stop_func_start | |
3412 | && execution_direction == EXEC_REVERSE) | |
3413 | { | |
3414 | ecs->event_thread->stop_step = 1; | |
3415 | print_stop_reason (END_STEPPING_RANGE, 0); | |
3416 | stop_stepping (ecs); | |
3417 | } | |
3418 | else | |
3419 | keep_going (ecs); | |
3420 | ||
488f131b JB |
3421 | return; |
3422 | } | |
c5aa993b | 3423 | |
488f131b | 3424 | /* We stepped out of the stepping range. */ |
c906108c | 3425 | |
488f131b JB |
3426 | /* If we are stepping at the source level and entered the runtime |
3427 | loader dynamic symbol resolution code, we keep on single stepping | |
3428 | until we exit the run time loader code and reach the callee's | |
3429 | address. */ | |
078130d0 | 3430 | if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE |
cfd8ab24 | 3431 | && in_solib_dynsym_resolve_code (stop_pc)) |
488f131b | 3432 | { |
4c8c40e6 MK |
3433 | CORE_ADDR pc_after_resolver = |
3434 | gdbarch_skip_solib_resolver (current_gdbarch, stop_pc); | |
c906108c | 3435 | |
527159b7 | 3436 | if (debug_infrun) |
8a9de0e4 | 3437 | fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n"); |
527159b7 | 3438 | |
488f131b JB |
3439 | if (pc_after_resolver) |
3440 | { | |
3441 | /* Set up a step-resume breakpoint at the address | |
3442 | indicated by SKIP_SOLIB_RESOLVER. */ | |
3443 | struct symtab_and_line sr_sal; | |
fe39c653 | 3444 | init_sal (&sr_sal); |
488f131b JB |
3445 | sr_sal.pc = pc_after_resolver; |
3446 | ||
44cbf7b5 | 3447 | insert_step_resume_breakpoint_at_sal (sr_sal, null_frame_id); |
c5aa993b | 3448 | } |
c906108c | 3449 | |
488f131b JB |
3450 | keep_going (ecs); |
3451 | return; | |
3452 | } | |
c906108c | 3453 | |
4e1c45ea | 3454 | if (ecs->event_thread->step_range_end != 1 |
078130d0 PA |
3455 | && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE |
3456 | || ecs->event_thread->step_over_calls == STEP_OVER_ALL) | |
42edda50 | 3457 | && get_frame_type (get_current_frame ()) == SIGTRAMP_FRAME) |
488f131b | 3458 | { |
527159b7 | 3459 | if (debug_infrun) |
8a9de0e4 | 3460 | fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n"); |
42edda50 | 3461 | /* The inferior, while doing a "step" or "next", has ended up in |
8fb3e588 AC |
3462 | a signal trampoline (either by a signal being delivered or by |
3463 | the signal handler returning). Just single-step until the | |
3464 | inferior leaves the trampoline (either by calling the handler | |
3465 | or returning). */ | |
488f131b JB |
3466 | keep_going (ecs); |
3467 | return; | |
3468 | } | |
c906108c | 3469 | |
c17eaafe DJ |
3470 | /* Check for subroutine calls. The check for the current frame |
3471 | equalling the step ID is not necessary - the check of the | |
3472 | previous frame's ID is sufficient - but it is a common case and | |
3473 | cheaper than checking the previous frame's ID. | |
14e60db5 DJ |
3474 | |
3475 | NOTE: frame_id_eq will never report two invalid frame IDs as | |
3476 | being equal, so to get into this block, both the current and | |
3477 | previous frame must have valid frame IDs. */ | |
4e1c45ea PA |
3478 | if (!frame_id_eq (get_frame_id (get_current_frame ()), |
3479 | ecs->event_thread->step_frame_id) | |
b2175913 MS |
3480 | && (frame_id_eq (frame_unwind_id (get_current_frame ()), |
3481 | ecs->event_thread->step_frame_id) | |
3482 | || execution_direction == EXEC_REVERSE)) | |
488f131b | 3483 | { |
95918acb | 3484 | CORE_ADDR real_stop_pc; |
8fb3e588 | 3485 | |
527159b7 | 3486 | if (debug_infrun) |
8a9de0e4 | 3487 | fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n"); |
527159b7 | 3488 | |
078130d0 | 3489 | if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE) |
4e1c45ea PA |
3490 | || ((ecs->event_thread->step_range_end == 1) |
3491 | && in_prologue (ecs->event_thread->prev_pc, | |
3492 | ecs->stop_func_start))) | |
95918acb AC |
3493 | { |
3494 | /* I presume that step_over_calls is only 0 when we're | |
3495 | supposed to be stepping at the assembly language level | |
3496 | ("stepi"). Just stop. */ | |
3497 | /* Also, maybe we just did a "nexti" inside a prologue, so we | 
3498 | thought it was a subroutine call but it was not. Stop as | |
3499 | well. FENN */ | |
414c69f7 | 3500 | ecs->event_thread->stop_step = 1; |
95918acb AC |
3501 | print_stop_reason (END_STEPPING_RANGE, 0); |
3502 | stop_stepping (ecs); | |
3503 | return; | |
3504 | } | |
8fb3e588 | 3505 | |
078130d0 | 3506 | if (ecs->event_thread->step_over_calls == STEP_OVER_ALL) |
8567c30f | 3507 | { |
b2175913 MS |
3508 | /* We're doing a "next". |
3509 | ||
3510 | Normal (forward) execution: set a breakpoint at the | |
3511 | callee's return address (the address at which the caller | |
3512 | will resume). | |
3513 | ||
3514 | Reverse (backward) execution: set the step-resume | 
3515 | breakpoint at the start of the function that we just | |
3516 | stepped into (backwards), and continue to there. When we | |
6130d0b7 | 3517 | get there, we'll need to single-step back to the caller. */ |
b2175913 MS |
3518 | |
3519 | if (execution_direction == EXEC_REVERSE) | |
3520 | { | |
3521 | struct symtab_and_line sr_sal; | |
3067f6e5 MS |
3522 | |
3523 | if (ecs->stop_func_start == 0 | |
3524 | && in_solib_dynsym_resolve_code (stop_pc)) | |
3525 | { | |
3526 | /* Stepped into runtime loader dynamic symbol | |
3527 | resolution code. Since we're in reverse, | |
3528 | we have already backed up through the runtime | |
3529 | loader and the dynamic function. This is just | |
3530 | the trampoline (jump table). | |
3531 | ||
3532 | Just keep stepping, we'll soon be home. | |
3533 | */ | |
3534 | keep_going (ecs); | |
3535 | return; | |
3536 | } | |
3537 | /* Normal (statically linked) function call return. */ | 
b2175913 MS |
3538 | init_sal (&sr_sal); |
3539 | sr_sal.pc = ecs->stop_func_start; | |
3540 | insert_step_resume_breakpoint_at_sal (sr_sal, null_frame_id); | |
3541 | } | |
3542 | else | |
3543 | insert_step_resume_breakpoint_at_caller (get_current_frame ()); | |
3544 | ||
8567c30f AC |
3545 | keep_going (ecs); |
3546 | return; | |
3547 | } | |
a53c66de | 3548 | |
95918acb | 3549 | /* If we are in a function call trampoline (a stub between the |
8fb3e588 AC |
3550 | calling routine and the real function), locate the real |
3551 | function. That's what tells us (a) whether we want to step | |
3552 | into it at all, and (b) what prologue we want to run to the | |
3553 | end of, if we do step into it. */ | |
52f729a7 | 3554 | real_stop_pc = skip_language_trampoline (get_current_frame (), stop_pc); |
95918acb | 3555 | if (real_stop_pc == 0) |
52f729a7 UW |
3556 | real_stop_pc = gdbarch_skip_trampoline_code |
3557 | (current_gdbarch, get_current_frame (), stop_pc); | |
95918acb AC |
3558 | if (real_stop_pc != 0) |
3559 | ecs->stop_func_start = real_stop_pc; | |
8fb3e588 | 3560 | |
db5f024e | 3561 | if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc)) |
1b2bfbb9 RC |
3562 | { |
3563 | struct symtab_and_line sr_sal; | |
3564 | init_sal (&sr_sal); | |
3565 | sr_sal.pc = ecs->stop_func_start; | |
3566 | ||
44cbf7b5 | 3567 | insert_step_resume_breakpoint_at_sal (sr_sal, null_frame_id); |
8fb3e588 AC |
3568 | keep_going (ecs); |
3569 | return; | |
1b2bfbb9 RC |
3570 | } |
3571 | ||
95918acb | 3572 | /* If we have line number information for the function we are |
8fb3e588 | 3573 | thinking of stepping into, step into it. |
95918acb | 3574 | |
8fb3e588 AC |
3575 | If there are several symtabs at that PC (e.g. with include |
3576 | files), we just want to know whether *any* of them have line | 
3577 | numbers. find_pc_line handles this. */ | |
95918acb AC |
3578 | { |
3579 | struct symtab_and_line tmp_sal; | |
8fb3e588 | 3580 | |
95918acb AC |
3581 | tmp_sal = find_pc_line (ecs->stop_func_start, 0); |
3582 | if (tmp_sal.line != 0) | |
3583 | { | |
b2175913 MS |
3584 | if (execution_direction == EXEC_REVERSE) |
3585 | handle_step_into_function_backward (ecs); | |
3586 | else | |
3587 | handle_step_into_function (ecs); | |
95918acb AC |
3588 | return; |
3589 | } | |
3590 | } | |
3591 | ||
3592 | /* If we have no line number and the step-stop-if-no-debug is | |
8fb3e588 AC |
3593 | set, we stop the step so that the user has a chance to switch |
3594 | to assembly mode. */ | 
078130d0 PA |
3595 | if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE |
3596 | && step_stop_if_no_debug) | |
95918acb | 3597 | { |
414c69f7 | 3598 | ecs->event_thread->stop_step = 1; |
95918acb AC |
3599 | print_stop_reason (END_STEPPING_RANGE, 0); |
3600 | stop_stepping (ecs); | |
3601 | return; | |
3602 | } | |
3603 | ||
b2175913 MS |
3604 | if (execution_direction == EXEC_REVERSE) |
3605 | { | |
3606 | /* Set a breakpoint at callee's start address. | |
3607 | From there we can step once and be back in the caller. */ | |
3608 | struct symtab_and_line sr_sal; | |
3609 | init_sal (&sr_sal); | |
3610 | sr_sal.pc = ecs->stop_func_start; | |
3611 | insert_step_resume_breakpoint_at_sal (sr_sal, null_frame_id); | |
3612 | } | |
3613 | else | |
3614 | /* Set a breakpoint at callee's return address (the address | |
3615 | at which the caller will resume). */ | |
3616 | insert_step_resume_breakpoint_at_caller (get_current_frame ()); | |
3617 | ||
95918acb | 3618 | keep_going (ecs); |
488f131b | 3619 | return; |
488f131b | 3620 | } |
c906108c | 3621 | |
488f131b JB |
3622 | /* If we're in the return path from a shared library trampoline, |
3623 | we want to proceed through the trampoline when stepping. */ | |
e76f05fa UW |
3624 | if (gdbarch_in_solib_return_trampoline (current_gdbarch, |
3625 | stop_pc, ecs->stop_func_name)) | |
488f131b | 3626 | { |
488f131b | 3627 | /* Determine where this trampoline returns. */ |
52f729a7 UW |
3628 | CORE_ADDR real_stop_pc; |
3629 | real_stop_pc = gdbarch_skip_trampoline_code | |
3630 | (current_gdbarch, get_current_frame (), stop_pc); | |
c906108c | 3631 | |
527159b7 | 3632 | if (debug_infrun) |
8a9de0e4 | 3633 | fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n"); |
527159b7 | 3634 | |
488f131b | 3635 | /* Only proceed through if we know where it's going. */ |
d764a824 | 3636 | if (real_stop_pc) |
488f131b JB |
3637 | { |
3638 | /* And put the step-breakpoint there and go until there. */ | |
3639 | struct symtab_and_line sr_sal; | |
3640 | ||
fe39c653 | 3641 | init_sal (&sr_sal); /* initialize to zeroes */ |
d764a824 | 3642 | sr_sal.pc = real_stop_pc; |
488f131b | 3643 | sr_sal.section = find_pc_overlay (sr_sal.pc); |
44cbf7b5 AC |
3644 | |
3645 | /* Do not specify what the fp should be when we stop since | |
3646 | on some machines the prologue is where the new fp value | |
3647 | is established. */ | |
3648 | insert_step_resume_breakpoint_at_sal (sr_sal, null_frame_id); | |
c906108c | 3649 | |
488f131b JB |
3650 | /* Restart without fiddling with the step ranges or |
3651 | other state. */ | |
3652 | keep_going (ecs); | |
3653 | return; | |
3654 | } | |
3655 | } | |
c906108c | 3656 | |
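/* Look up the source line containing stop_pc; the tests below use
   it to decide whether we have stepped out of the line being
   stepped.  */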
2afb61aa | 3657 | stop_pc_sal = find_pc_line (stop_pc, 0); |
7ed0fe66 | 3658 | |
1b2bfbb9 RC |
3659 | /* NOTE: tausq/2004-05-24: This if block used to be done before all |
3660 | the trampoline processing logic; however, there are some trampolines | 
3661 | that have no names, so we should do trampoline handling first. */ | |
078130d0 | 3662 | if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE |
7ed0fe66 | 3663 | && ecs->stop_func_name == NULL |
2afb61aa | 3664 | && stop_pc_sal.line == 0) |
1b2bfbb9 | 3665 | { |
527159b7 | 3666 | if (debug_infrun) |
8a9de0e4 | 3667 | fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n"); |
527159b7 | 3668 | |
1b2bfbb9 | 3669 | /* The inferior just stepped into, or returned to, an |
7ed0fe66 DJ |
3670 | undebuggable function (where there is no debugging information |
3671 | and no line number corresponding to the address where the | |
1b2bfbb9 RC |
3672 | inferior stopped). Since we want to skip this kind of code, |
3673 | we keep going until the inferior returns from this | |
14e60db5 DJ |
3674 | function - unless the user has asked us not to (via |
3675 | set step-mode) or we no longer know how to get back | |
3676 | to the call site. */ | |
3677 | if (step_stop_if_no_debug | |
eb2f4a08 | 3678 | || !frame_id_p (frame_unwind_id (get_current_frame ()))) |
1b2bfbb9 RC |
3679 | { |
3680 | /* If we have no line number and the step-stop-if-no-debug | |
3681 | is set, we stop the step so that the user has a chance to | |
3682 | switch in assembly mode. */ | |
414c69f7 | 3683 | ecs->event_thread->stop_step = 1; |
1b2bfbb9 RC |
3684 | print_stop_reason (END_STEPPING_RANGE, 0); |
3685 | stop_stepping (ecs); | |
3686 | return; | |
3687 | } | |
3688 | else | |
3689 | { | |
3690 | /* Set a breakpoint at callee's return address (the address | |
3691 | at which the caller will resume). */ | |
14e60db5 | 3692 | insert_step_resume_breakpoint_at_caller (get_current_frame ()); |
1b2bfbb9 RC |
3693 | keep_going (ecs); |
3694 | return; | |
3695 | } | |
3696 | } | |
3697 | ||
4e1c45ea | 3698 | if (ecs->event_thread->step_range_end == 1) |
1b2bfbb9 RC |
3699 | { |
3700 | /* It is stepi or nexti. We always want to stop stepping after | |
3701 | one instruction. */ | |
527159b7 | 3702 | if (debug_infrun) |
8a9de0e4 | 3703 | fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n"); |
414c69f7 | 3704 | ecs->event_thread->stop_step = 1; |
1b2bfbb9 RC |
3705 | print_stop_reason (END_STEPPING_RANGE, 0); |
3706 | stop_stepping (ecs); | |
3707 | return; | |
3708 | } | |
3709 | ||
2afb61aa | 3710 | if (stop_pc_sal.line == 0) |
488f131b JB |
3711 | { |
3712 | /* We have no line number information. That means to stop | |
3713 | stepping (does this always happen right after one instruction, | |
3714 | when we do "s" in a function with no line numbers, | |
3715 | or can this happen as a result of a return or longjmp?). */ | |
527159b7 | 3716 | if (debug_infrun) |
8a9de0e4 | 3717 | fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n"); |
414c69f7 | 3718 | ecs->event_thread->stop_step = 1; |
488f131b JB |
3719 | print_stop_reason (END_STEPPING_RANGE, 0); |
3720 | stop_stepping (ecs); | |
3721 | return; | |
3722 | } | |
c906108c | 3723 | |
2afb61aa | 3724 | if ((stop_pc == stop_pc_sal.pc) |
4e1c45ea PA |
3725 | && (ecs->event_thread->current_line != stop_pc_sal.line |
3726 | || ecs->event_thread->current_symtab != stop_pc_sal.symtab)) | |
488f131b JB |
3727 | { |
3728 | /* We are at the start of a different line. So stop. Note that | |
3729 | we don't stop if we step into the middle of a different line. | |
3730 | That is said to make things like for (;;) statements work | |
3731 | better. */ | |
527159b7 | 3732 | if (debug_infrun) |
8a9de0e4 | 3733 | fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n"); |
414c69f7 | 3734 | ecs->event_thread->stop_step = 1; |
488f131b JB |
3735 | print_stop_reason (END_STEPPING_RANGE, 0); |
3736 | stop_stepping (ecs); | |
3737 | return; | |
3738 | } | |
c906108c | 3739 | |
488f131b | 3740 | /* We aren't done stepping. |
c906108c | 3741 | |
488f131b JB |
3742 | Optimize by setting the stepping range to the line. |
3743 | (We might not be in the original line, but if we entered a | |
3744 | new line in mid-statement, we continue stepping. This makes | |
3745 | things like for(;;) statements work better.) */ | |
c906108c | 3746 | |
4e1c45ea PA |
3747 | ecs->event_thread->step_range_start = stop_pc_sal.pc; |
3748 | ecs->event_thread->step_range_end = stop_pc_sal.end; | |
3749 | ecs->event_thread->step_frame_id = get_frame_id (get_current_frame ()); | |
3750 | ecs->event_thread->current_line = stop_pc_sal.line; | |
3751 | ecs->event_thread->current_symtab = stop_pc_sal.symtab; | |
488f131b | 3752 | |
527159b7 | 3753 | if (debug_infrun) |
8a9de0e4 | 3754 | fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n"); |
488f131b | 3755 | keep_going (ecs); |
104c1213 JM |
3756 | } |
3757 | ||
3758 | /* Are we in the middle of stepping? */ | |
3759 | ||
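/* Return non-zero if TP is in the middle of a step operation of its
   own: range-stepping with no step-resume breakpoint outstanding,
   stepping over a breakpoint, or stepping through the dynamic
   linker after a catchpoint.  */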
a7212384 UW |
3760 | static int |
3761 | currently_stepping_thread (struct thread_info *tp) | |
3762 | { | |
3763 | return (tp->step_range_end && tp->step_resume_breakpoint == NULL) | |
3764 | || tp->trap_expected | |
3765 | || tp->stepping_through_solib_after_catch; | |
3766 | } | |
3767 | ||
3768 | static int | |
3769 | currently_stepping_callback (struct thread_info *tp, void *data) | |
3770 | { | |
3771 | /* Return true if any thread *but* the one passed in "data" is | |
3772 | in the middle of stepping. */ | |
3773 | return tp != data && currently_stepping_thread (tp); | |
3774 | } | |
3775 | ||
104c1213 | 3776 | static int |
4e1c45ea | 3777 | currently_stepping (struct thread_info *tp) |
104c1213 | 3778 | { |
a7212384 | 3779 | return currently_stepping_thread (tp) || bpstat_should_step (); |
104c1213 | 3780 | } |
c906108c | 3781 | |
b2175913 MS |
3782 | /* Inferior has stepped into a subroutine call with source code that |
3783 | we should not step over. Do step to the first line of code in | |
3784 | it. */ | |
c2c6d25f JM |
3785 | |
3786 | static void | |
b2175913 | 3787 | handle_step_into_function (struct execution_control_state *ecs) |
c2c6d25f JM |
3788 | { |
3789 | struct symtab *s; | |
2afb61aa | 3790 | struct symtab_and_line stop_func_sal, sr_sal; |
c2c6d25f JM |
3791 | |
3792 | s = find_pc_symtab (stop_pc); | |
3793 | if (s && s->language != language_asm) | |
b2175913 MS |
3794 | ecs->stop_func_start = gdbarch_skip_prologue (current_gdbarch, |
3795 | ecs->stop_func_start); | |
c2c6d25f | 3796 | |
2afb61aa | 3797 | stop_func_sal = find_pc_line (ecs->stop_func_start, 0); |
c2c6d25f JM |
3798 | /* Use the step_resume_break to step until the end of the prologue, |
3799 | even if that involves jumps (as it seems to on the vax under | |
3800 | 4.2). */ | |
3801 | /* If the prologue ends in the middle of a source line, continue to | |
3802 | the end of that source line (if it is still within the function). | |
3803 | Otherwise, just go to end of prologue. */ | |
2afb61aa PA |
3804 | if (stop_func_sal.end |
3805 | && stop_func_sal.pc != ecs->stop_func_start | |
3806 | && stop_func_sal.end < ecs->stop_func_end) | |
3807 | ecs->stop_func_start = stop_func_sal.end; | |
c2c6d25f | 3808 | |
2dbd5e30 KB |
3809 | /* Architectures which require breakpoint adjustment might not be able |
3810 | to place a breakpoint at the computed address. If so, the test | |
3811 | ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust | |
3812 | ecs->stop_func_start to an address at which a breakpoint may be | |
3813 | legitimately placed. | |
8fb3e588 | 3814 | |
2dbd5e30 KB |
3815 | Note: kevinb/2004-01-19: On FR-V, if this adjustment is not |
3816 | made, GDB will enter an infinite loop when stepping through | |
3817 | optimized code consisting of VLIW instructions which contain | |
3818 | subinstructions corresponding to different source lines. On | |
3819 | FR-V, it's not permitted to place a breakpoint on any but the | |
3820 | first subinstruction of a VLIW instruction. When a breakpoint is | |
3821 | set, GDB will adjust the breakpoint address to the beginning of | |
3822 | the VLIW instruction. Thus, we need to make the corresponding | |
3823 | adjustment here when computing the stop address. */ | |
8fb3e588 | 3824 | |
2dbd5e30 KB |
3825 | if (gdbarch_adjust_breakpoint_address_p (current_gdbarch)) |
3826 | { | |
3827 | ecs->stop_func_start | |
3828 | = gdbarch_adjust_breakpoint_address (current_gdbarch, | |
8fb3e588 | 3829 | ecs->stop_func_start); |
2dbd5e30 KB |
3830 | } |
3831 | ||
c2c6d25f JM |
3832 | if (ecs->stop_func_start == stop_pc) |
3833 | { | |
3834 | /* We are already there: stop now. */ | |
414c69f7 | 3835 | ecs->event_thread->stop_step = 1; |
488f131b | 3836 | print_stop_reason (END_STEPPING_RANGE, 0); |
c2c6d25f JM |
3837 | stop_stepping (ecs); |
3838 | return; | |
3839 | } | |
3840 | else | |
3841 | { | |
3842 | /* Put the step-breakpoint there and go until there. */ | |
fe39c653 | 3843 | init_sal (&sr_sal); /* initialize to zeroes */ |
c2c6d25f JM |
3844 | sr_sal.pc = ecs->stop_func_start; |
3845 | sr_sal.section = find_pc_overlay (ecs->stop_func_start); | |
44cbf7b5 | 3846 | |
c2c6d25f | 3847 | /* Do not specify what the fp should be when we stop since on |
488f131b JB |
3848 | some machines the prologue is where the new fp value is |
3849 | established. */ | |
44cbf7b5 | 3850 | insert_step_resume_breakpoint_at_sal (sr_sal, null_frame_id); |
c2c6d25f JM |
3851 | |
3852 | /* And make sure stepping stops right away then. */ | |
4e1c45ea | 3853 | ecs->event_thread->step_range_end = ecs->event_thread->step_range_start; |
c2c6d25f JM |
3854 | } |
3855 | keep_going (ecs); | |
3856 | } | |
d4f3574e | 3857 | |
b2175913 MS |
3858 | /* Inferior has stepped backward into a subroutine call with source |
3859 | code that we should not step over. Do step to the beginning of the | |
3860 | last line of code in it. */ | |
3861 | ||
3862 | static void | |
3863 | handle_step_into_function_backward (struct execution_control_state *ecs) | |
3864 | { | |
3865 | struct symtab *s; | |
3866 | struct symtab_and_line stop_func_sal, sr_sal; | |
3867 | ||
3868 | s = find_pc_symtab (stop_pc); | |
3869 | if (s && s->language != language_asm) | |
3870 | ecs->stop_func_start = gdbarch_skip_prologue (current_gdbarch, | |
3871 | ecs->stop_func_start); | |
3872 | ||
3873 | stop_func_sal = find_pc_line (stop_pc, 0); | |
3874 | ||
3875 | /* OK, we're just going to keep stepping here. */ | |
3876 | if (stop_func_sal.pc == stop_pc) | |
3877 | { | |
3878 | /* We're there already. Just stop stepping now. */ | |
3879 | ecs->event_thread->stop_step = 1; | |
3880 | print_stop_reason (END_STEPPING_RANGE, 0); | |
3881 | stop_stepping (ecs); | |
3882 | } | |
3883 | else | |
3884 | { | |
3885 | /* Else just reset the step range and keep going. | |
3886 | No step-resume breakpoint, they don't work for | |
3887 | epilogues, which can have multiple entry paths. */ | |
3888 | ecs->event_thread->step_range_start = stop_func_sal.pc; | |
3889 | ecs->event_thread->step_range_end = stop_func_sal.end; | |
3890 | keep_going (ecs); | |
3891 | } | |
3892 | return; | |
3893 | } | |
3894 | ||
d3169d93 | 3895 | /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID. |
44cbf7b5 AC |
3896 | This is used both to step into functions and to skip over code. */
3897 | ||
3898 | static void | |
3899 | insert_step_resume_breakpoint_at_sal (struct symtab_and_line sr_sal, | |
3900 | struct frame_id sr_id) | |
3901 | { | |
611c83ae PA |
3902 | /* There should never be more than one step-resume or longjmp-resume |
3903 | breakpoint per thread, so we should never be setting a new | |
44cbf7b5 | 3904 | step_resume_breakpoint when one is already active. */ |
4e1c45ea | 3905 | gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL); |
d3169d93 DJ |
3906 | |
3907 | if (debug_infrun) | |
3908 | fprintf_unfiltered (gdb_stdlog, | |
3909 | "infrun: inserting step-resume breakpoint at 0x%s\n", | |
3910 | paddr_nz (sr_sal.pc)); | |
3911 | ||
4e1c45ea PA |
3912 | inferior_thread ()->step_resume_breakpoint |
3913 | = set_momentary_breakpoint (sr_sal, sr_id, bp_step_resume); | |
44cbf7b5 | 3914 | } |
7ce450bd | 3915 | |
d3169d93 | 3916 | /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used |
14e60db5 | 3917 | to skip a potential signal handler. |
7ce450bd | 3918 | |
14e60db5 DJ |
3919 | This is called with the interrupted function's frame. The signal |
3920 | handler, when it returns, will resume the interrupted function at | |
3921 | RETURN_FRAME.pc. */ | |
d303a6c7 AC |
3922 | |
3923 | static void | |
44cbf7b5 | 3924 | insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame) |
d303a6c7 AC |
3925 | { |
3926 | struct symtab_and_line sr_sal; | |
3927 | ||
f4c1edd8 | 3928 | gdb_assert (return_frame != NULL); |
d303a6c7 AC |
3929 | init_sal (&sr_sal); /* initialize to zeros */ |
3930 | ||
bf6ae464 UW |
3931 | sr_sal.pc = gdbarch_addr_bits_remove |
3932 | (current_gdbarch, get_frame_pc (return_frame)); | |
d303a6c7 AC |
3933 | sr_sal.section = find_pc_overlay (sr_sal.pc); |
3934 | ||
44cbf7b5 | 3935 | insert_step_resume_breakpoint_at_sal (sr_sal, get_frame_id (return_frame)); |
d303a6c7 AC |
3936 | } |
3937 | ||
14e60db5 DJ |
3938 | /* Similar to insert_step_resume_breakpoint_at_frame, except |
3939 | that it places the breakpoint at the previous frame's PC. This is used to
3940 | skip a function after stepping into it (for "next" or if the called | |
3941 | function has no debugging information). | |
3942 | ||
3943 | The current function has almost always been reached by single | |
3944 | stepping a call or return instruction. NEXT_FRAME belongs to the | |
3945 | current function, and the breakpoint will be set at the caller's | |
3946 | resume address. | |
3947 | ||
3948 | This is a separate function rather than reusing | |
3949 | insert_step_resume_breakpoint_at_frame in order to avoid | |
3950 | get_prev_frame, which may stop prematurely (see the implementation | |
eb2f4a08 | 3951 | of frame_unwind_id for an example). */ |
14e60db5 DJ |
3952 | |
3953 | static void | |
3954 | insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame) | |
3955 | { | |
3956 | struct symtab_and_line sr_sal; | |
3957 | ||
3958 | /* We shouldn't have gotten here if we don't know where the call site | |
3959 | is. */ | |
eb2f4a08 | 3960 | gdb_assert (frame_id_p (frame_unwind_id (next_frame))); |
14e60db5 DJ |
3961 | |
3962 | init_sal (&sr_sal); /* initialize to zeros */ | |
3963 | ||
bf6ae464 | 3964 | sr_sal.pc = gdbarch_addr_bits_remove |
eb2f4a08 | 3965 | (current_gdbarch, frame_pc_unwind (next_frame)); |
14e60db5 DJ |
3966 | sr_sal.section = find_pc_overlay (sr_sal.pc); |
3967 | ||
eb2f4a08 | 3968 | insert_step_resume_breakpoint_at_sal (sr_sal, frame_unwind_id (next_frame)); |
14e60db5 DJ |
3969 | } |
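A sketch of the typical call site for the helper above: when the stepping logic in handle_inferior_event decides a just-entered function should be skipped, it passes that function's (current) frame, and the breakpoint lands at the caller's resume address:

   /* Sketch: run to the caller of the function we just stepped into.  */
   insert_step_resume_breakpoint_at_caller (get_current_frame ());
   keep_going (ecs);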
3970 | ||
611c83ae PA |
3971 | /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a |
3972 | new breakpoint at the target of a jmp_buf. The handling of | |
3973 | longjmp-resume uses the same mechanisms used for handling | |
3974 | "step-resume" breakpoints. */ | |
3975 | ||
3976 | static void | |
3977 | insert_longjmp_resume_breakpoint (CORE_ADDR pc) | |
3978 | { | |
3979 | /* There should never be more than one step-resume or longjmp-resume | |
3980 | breakpoint per thread, so we should never be setting a new | |
3981 | longjmp_resume_breakpoint when one is already active. */ | |
4e1c45ea | 3982 | gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL); |
611c83ae PA |
3983 | |
3984 | if (debug_infrun) | |
3985 | fprintf_unfiltered (gdb_stdlog, | |
3986 | "infrun: inserting longjmp-resume breakpoint at 0x%s\n", | |
3987 | paddr_nz (pc)); | |
3988 | ||
4e1c45ea | 3989 | inferior_thread ()->step_resume_breakpoint = |
611c83ae PA |
3990 | set_momentary_breakpoint_at_pc (pc, bp_longjmp_resume); |
3991 | } | |
3992 | ||
104c1213 JM |
3993 | static void |
3994 | stop_stepping (struct execution_control_state *ecs) | |
3995 | { | |
527159b7 | 3996 | if (debug_infrun) |
8a9de0e4 | 3997 | fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n"); |
527159b7 | 3998 | |
cd0fc7c3 SS |
3999 | /* Let callers know we don't want to wait for the inferior anymore. */ |
4000 | ecs->wait_some_more = 0; | |
4001 | } | |
4002 | ||
d4f3574e SS |
4003 | /* This function handles various cases where we need to continue |
4004 | waiting for the inferior. */ | |
4005 | /* (Used to be the keep_going: label in the old wait_for_inferior) */ | |
4006 | ||
4007 | static void | |
4008 | keep_going (struct execution_control_state *ecs) | |
4009 | { | |
d4f3574e | 4010 | /* Save the pc before execution, to compare with pc after stop. */ |
4e1c45ea | 4011 | ecs->event_thread->prev_pc = read_pc (); /* Might have been DECR_AFTER_BREAK */ |
d4f3574e | 4012 | |
d4f3574e SS |
4013 | /* If we got this far (i.e. we did not decide to stop above), we should
4014 | keep running the inferior and not return to the debugger. */
4015 | ||
2020b7ab PA |
4016 | if (ecs->event_thread->trap_expected |
4017 | && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP) | |
d4f3574e SS |
4018 | { |
4019 | /* We took a signal (which we are supposed to pass through to | |
4e1c45ea PA |
4020 | the inferior, else we'd not get here) and we haven't yet |
4021 | gotten our trap. Simply continue. */ | |
2020b7ab PA |
4022 | resume (currently_stepping (ecs->event_thread), |
4023 | ecs->event_thread->stop_signal); | |
d4f3574e SS |
4024 | } |
4025 | else | |
4026 | { | |
4027 | /* Either the trap was not expected, but we are continuing | |
488f131b JB |
4028 | anyway (the user asked that this signal be passed to the |
4029 | child) | |
4030 | -- or -- | |
4031 | The signal was SIGTRAP, e.g. it was our signal, but we | |
4032 | decided we should resume from it. | |
d4f3574e | 4033 | |
c36b740a | 4034 | We're going to run this baby now! |
d4f3574e | 4035 | |
c36b740a VP |
4036 | Note that insert_breakpoints won't try to re-insert |
4037 | already inserted breakpoints. Therefore, we don't | |
4038 | care if breakpoints were already inserted, or not. */ | |
4039 | ||
4e1c45ea | 4040 | if (ecs->event_thread->stepping_over_breakpoint) |
45e8c884 | 4041 | { |
237fc4c9 PA |
4042 | if (! use_displaced_stepping (current_gdbarch)) |
4043 | /* Since we can't do a displaced step, we have to remove | |
4044 | the breakpoint while we step it. To keep things | |
4045 | simple, we remove them all. */ | |
4046 | remove_breakpoints (); | |
45e8c884 VP |
4047 | } |
4048 | else | |
d4f3574e | 4049 | { |
e236ba44 | 4050 | struct gdb_exception e; |
569631c6 UW |
4051 | /* Stop stepping when inserting breakpoints |
4052 | has failed. */ | |
e236ba44 VP |
4053 | TRY_CATCH (e, RETURN_MASK_ERROR) |
4054 | { | |
4055 | insert_breakpoints (); | |
4056 | } | |
4057 | if (e.reason < 0) | |
d4f3574e SS |
4058 | { |
4059 | stop_stepping (ecs); | |
4060 | return; | |
4061 | } | |
d4f3574e SS |
4062 | } |
4063 | ||
4e1c45ea | 4064 | ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint; |
d4f3574e SS |
4065 | |
4066 | /* Do not deliver SIGNAL_TRAP (except when the user explicitly | |
488f131b JB |
4067 | specifies that such a signal should be delivered to the |
4068 | target program). | |
4069 | ||
4070 | Typically, this would occur when a user is debugging a
4071 | target monitor on a simulator: the target monitor sets a
4072 | breakpoint; the simulator encounters this breakpoint and
4073 | halts the simulation, handing control to GDB; GDB, noting
4074 | that the breakpoint isn't valid, returns control back to the
4075 | simulator; the simulator then delivers the hardware | |
4076 | equivalent of a SIGNAL_TRAP to the program being debugged. */ | |
4077 | ||
2020b7ab PA |
4078 | if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP |
4079 | && !signal_program[ecs->event_thread->stop_signal]) | |
4080 | ecs->event_thread->stop_signal = TARGET_SIGNAL_0; | |
d4f3574e | 4081 | |
2020b7ab PA |
4082 | resume (currently_stepping (ecs->event_thread), |
4083 | ecs->event_thread->stop_signal); | |
d4f3574e SS |
4084 | } |
4085 | ||
488f131b | 4086 | prepare_to_wait (ecs); |
d4f3574e SS |
4087 | } |
4088 | ||
104c1213 JM |
4089 | /* This function normally comes after a resume, before |
4090 | handle_inferior_event exits. It takes care of any last bits of | |
4091 | housekeeping, and sets the all-important wait_some_more flag. */ | |
cd0fc7c3 | 4092 | |
104c1213 JM |
4093 | static void |
4094 | prepare_to_wait (struct execution_control_state *ecs) | |
cd0fc7c3 | 4095 | { |
527159b7 | 4096 | if (debug_infrun) |
8a9de0e4 | 4097 | fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n"); |
0d1e5fa7 | 4098 | if (infwait_state == infwait_normal_state) |
104c1213 JM |
4099 | { |
4100 | overlay_cache_invalid = 1; | |
4101 | ||
4102 | /* We have to invalidate the registers BEFORE calling | |
488f131b JB |
4103 | target_wait because they can be loaded from the target while |
4104 | in target_wait. This makes remote debugging a bit more | |
4105 | efficient for those targets that provide critical registers | |
4106 | as part of their normal status mechanism. */ | |
104c1213 JM |
4107 | |
4108 | registers_changed (); | |
0d1e5fa7 | 4109 | waiton_ptid = pid_to_ptid (-1); |
104c1213 JM |
4110 | } |
4111 | /* This is the old end of the while loop. Let everybody know we | |
4112 | want to wait for the inferior some more and get called again | |
4113 | soon. */ | |
4114 | ecs->wait_some_more = 1; | |
c906108c | 4115 | } |
11cf8741 JM |
4116 | |
4117 | /* Print why the inferior has stopped. We always print something when | |
4118 | the inferior exits, or receives a signal. The rest of the cases are | |
4119 | dealt with later on in normal_stop() and print_it_typical(). Ideally | |
4120 | there should be a call to this function from handle_inferior_event() | |
4121 | each time stop_stepping() is called. */
4122 | static void | |
4123 | print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info) | |
4124 | { | |
4125 | switch (stop_reason) | |
4126 | { | |
11cf8741 JM |
4127 | case END_STEPPING_RANGE: |
4128 | /* We are done with a step/next/si/ni command. */ | |
4129 | /* For now print nothing. */ | |
fb40c209 | 4130 | /* Print a message only if not in the middle of doing a "step n" |
488f131b | 4131 | operation for n > 1 */ |
414c69f7 PA |
4132 | if (!inferior_thread ()->step_multi |
4133 | || !inferior_thread ()->stop_step) | |
9dc5e2a9 | 4134 | if (ui_out_is_mi_like_p (uiout)) |
034dad6f BR |
4135 | ui_out_field_string |
4136 | (uiout, "reason", | |
4137 | async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE)); | |
11cf8741 | 4138 | break; |
11cf8741 JM |
4139 | case SIGNAL_EXITED: |
4140 | /* The inferior was terminated by a signal. */ | |
8b93c638 | 4141 | annotate_signalled (); |
9dc5e2a9 | 4142 | if (ui_out_is_mi_like_p (uiout)) |
034dad6f BR |
4143 | ui_out_field_string |
4144 | (uiout, "reason", | |
4145 | async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED)); | |
8b93c638 JM |
4146 | ui_out_text (uiout, "\nProgram terminated with signal "); |
4147 | annotate_signal_name (); | |
488f131b JB |
4148 | ui_out_field_string (uiout, "signal-name", |
4149 | target_signal_to_name (stop_info)); | |
8b93c638 JM |
4150 | annotate_signal_name_end (); |
4151 | ui_out_text (uiout, ", "); | |
4152 | annotate_signal_string (); | |
488f131b JB |
4153 | ui_out_field_string (uiout, "signal-meaning", |
4154 | target_signal_to_string (stop_info)); | |
8b93c638 JM |
4155 | annotate_signal_string_end (); |
4156 | ui_out_text (uiout, ".\n"); | |
4157 | ui_out_text (uiout, "The program no longer exists.\n"); | |
11cf8741 JM |
4158 | break; |
4159 | case EXITED: | |
4160 | /* The inferior program is finished. */ | |
8b93c638 JM |
4161 | annotate_exited (stop_info); |
4162 | if (stop_info) | |
4163 | { | |
9dc5e2a9 | 4164 | if (ui_out_is_mi_like_p (uiout)) |
034dad6f BR |
4165 | ui_out_field_string (uiout, "reason", |
4166 | async_reason_lookup (EXEC_ASYNC_EXITED)); | |
8b93c638 | 4167 | ui_out_text (uiout, "\nProgram exited with code "); |
488f131b JB |
4168 | ui_out_field_fmt (uiout, "exit-code", "0%o", |
4169 | (unsigned int) stop_info); | |
8b93c638 JM |
4170 | ui_out_text (uiout, ".\n"); |
4171 | } | |
4172 | else | |
4173 | { | |
9dc5e2a9 | 4174 | if (ui_out_is_mi_like_p (uiout)) |
034dad6f BR |
4175 | ui_out_field_string |
4176 | (uiout, "reason", | |
4177 | async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY)); | |
8b93c638 JM |
4178 | ui_out_text (uiout, "\nProgram exited normally.\n"); |
4179 | } | |
f17517ea AS |
4180 | /* Support the --return-child-result option. */ |
4181 | return_child_result_value = stop_info; | |
11cf8741 JM |
4182 | break; |
4183 | case SIGNAL_RECEIVED: | |
252fbfc8 PA |
4184 | /* Signal received. The signal table tells us to print about |
4185 | it. */ | |
8b93c638 | 4186 | annotate_signal (); |
252fbfc8 PA |
4187 | |
4188 | if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout)) | |
4189 | { | |
4190 | struct thread_info *t = inferior_thread (); | |
4191 | ||
4192 | ui_out_text (uiout, "\n["); | |
4193 | ui_out_field_string (uiout, "thread-name", | |
4194 | target_pid_to_str (t->ptid)); | |
4195 | ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num); | |
4196 | ui_out_text (uiout, " stopped"); | |
4197 | } | |
4198 | else | |
4199 | { | |
4200 | ui_out_text (uiout, "\nProgram received signal "); | |
4201 | annotate_signal_name (); | |
4202 | if (ui_out_is_mi_like_p (uiout)) | |
4203 | ui_out_field_string | |
4204 | (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED)); | |
4205 | ui_out_field_string (uiout, "signal-name", | |
4206 | target_signal_to_name (stop_info)); | |
4207 | annotate_signal_name_end (); | |
4208 | ui_out_text (uiout, ", "); | |
4209 | annotate_signal_string (); | |
4210 | ui_out_field_string (uiout, "signal-meaning", | |
4211 | target_signal_to_string (stop_info)); | |
4212 | annotate_signal_string_end (); | |
4213 | } | |
8b93c638 | 4214 | ui_out_text (uiout, ".\n"); |
11cf8741 | 4215 | break; |
b2175913 MS |
4216 | case NO_HISTORY: |
4217 | /* Reverse execution: target ran out of history info. */ | |
4218 | ui_out_text (uiout, "\nNo more reverse-execution history.\n"); | |
4219 | break; | |
11cf8741 | 4220 | default: |
8e65ff28 | 4221 | internal_error (__FILE__, __LINE__, |
e2e0b3e5 | 4222 | _("print_stop_reason: unrecognized enum value")); |
11cf8741 JM |
4223 | break; |
4224 | } | |
4225 | } | |
c906108c | 4226 | \f |
43ff13b4 | 4227 | |
c906108c SS |
4228 | /* Here to return control to GDB when the inferior stops for real. |
4229 | Print appropriate messages, remove breakpoints, give terminal our modes. | |
4230 | ||
4231 | The global STOP_PRINT_FRAME, if nonzero, means print the executing
4232 | frame (pc, function, args, file, line number and line text); it is
4233 | consulted further down when deciding whether to print the stack
4234 | frame. */
4235 | ||
4236 | void | |
96baa820 | 4237 | normal_stop (void) |
c906108c | 4238 | { |
73b65bb0 DJ |
4239 | struct target_waitstatus last; |
4240 | ptid_t last_ptid; | |
29f49a6a | 4241 | struct cleanup *old_chain = make_cleanup (null_cleanup, NULL); |
73b65bb0 DJ |
4242 | |
4243 | get_last_target_status (&last_ptid, &last); | |
4244 | ||
29f49a6a PA |
4245 | /* If an exception is thrown from this point on, make sure to |
4246 | propagate GDB's knowledge of the executing state to the | |
4247 | frontend/user running state. A QUIT is an easy exception to see | |
4248 | here, so do this before any filtered output. */ | |
4249 | if (target_has_execution) | |
4250 | { | |
4251 | if (!non_stop) | |
fee0be5d | 4252 | make_cleanup (finish_thread_state_cleanup, &minus_one_ptid); |
29f49a6a PA |
4253 | else if (last.kind != TARGET_WAITKIND_SIGNALLED |
4254 | && last.kind != TARGET_WAITKIND_EXITED) | |
fee0be5d | 4255 | make_cleanup (finish_thread_state_cleanup, &inferior_ptid); |
29f49a6a PA |
4256 | } |
4257 | ||
4f8d22e3 PA |
4258 | /* In non-stop mode, we don't want GDB to switch threads behind the |
4259 | user's back, to avoid races where the user is typing a command to | |
4260 | apply to thread x, but GDB switches to thread y before the user | |
4261 | finishes entering the command. */ | |
4262 | ||
c906108c SS |
4263 | /* As with the notification of thread events, we want to delay |
4264 | notifying the user that we've switched thread context until | |
4265 | the inferior actually stops. | |
4266 | ||
73b65bb0 DJ |
4267 | There's no point in saying anything if the inferior has exited. |
4268 | Note that SIGNALLED here means "exited with a signal", not | |
4269 | "received a signal". */ | |
4f8d22e3 PA |
4270 | if (!non_stop |
4271 | && !ptid_equal (previous_inferior_ptid, inferior_ptid) | |
73b65bb0 DJ |
4272 | && target_has_execution |
4273 | && last.kind != TARGET_WAITKIND_SIGNALLED | |
4274 | && last.kind != TARGET_WAITKIND_EXITED) | |
c906108c SS |
4275 | { |
4276 | target_terminal_ours_for_output (); | |
a3f17187 | 4277 | printf_filtered (_("[Switching to %s]\n"), |
c95310c6 | 4278 | target_pid_to_str (inferior_ptid)); |
b8fa951a | 4279 | annotate_thread_changed (); |
39f77062 | 4280 | previous_inferior_ptid = inferior_ptid; |
c906108c | 4281 | } |
c906108c | 4282 | |
74960c60 | 4283 | if (!breakpoints_always_inserted_mode () && target_has_execution) |
c906108c SS |
4284 | { |
4285 | if (remove_breakpoints ()) | |
4286 | { | |
4287 | target_terminal_ours_for_output (); | |
a3f17187 AC |
4288 | printf_filtered (_("\ |
4289 | Cannot remove breakpoints because program is no longer writable.\n\ | |
a3f17187 | 4290 | Further execution is probably impossible.\n")); |
c906108c SS |
4291 | } |
4292 | } | |
c906108c | 4293 | |
c906108c SS |
4294 | /* If an auto-display called a function and that got a signal, |
4295 | delete that auto-display to avoid an infinite recursion. */ | |
4296 | ||
4297 | if (stopped_by_random_signal) | |
4298 | disable_current_display (); | |
4299 | ||
4300 | /* Don't print a message if in the middle of doing a "step n" | |
4301 | operation for n > 1 */ | |
af679fd0 PA |
4302 | if (target_has_execution |
4303 | && last.kind != TARGET_WAITKIND_SIGNALLED | |
4304 | && last.kind != TARGET_WAITKIND_EXITED | |
4305 | && inferior_thread ()->step_multi | |
414c69f7 | 4306 | && inferior_thread ()->stop_step) |
c906108c SS |
4307 | goto done; |
4308 | ||
4309 | target_terminal_ours (); | |
4310 | ||
7abfe014 DJ |
4311 | /* Set the current source location. This will also happen if we |
4312 | display the frame below, but the current SAL will be incorrect | |
4313 | during a user hook-stop function. */ | |
d729566a | 4314 | if (has_stack_frames () && !stop_stack_dummy) |
7abfe014 DJ |
4315 | set_current_sal_from_frame (get_current_frame (), 1); |
4316 | ||
dd7e2d2b PA |
4317 | /* Let the user/frontend see the threads as stopped. */ |
4318 | do_cleanups (old_chain); | |
4319 | ||
4320 | /* Look up the hook_stop and run it (CLI internally handles problem | |
4321 | of stop_command's pre-hook not existing). */ | |
4322 | if (stop_command) | |
4323 | catch_errors (hook_stop_stub, stop_command, | |
4324 | "Error while running hook_stop:\n", RETURN_MASK_ALL); | |
4325 | ||
d729566a | 4326 | if (!has_stack_frames ()) |
d51fd4c8 | 4327 | goto done; |
c906108c | 4328 | |
32400beb PA |
4329 | if (last.kind == TARGET_WAITKIND_SIGNALLED |
4330 | || last.kind == TARGET_WAITKIND_EXITED) | |
4331 | goto done; | |
4332 | ||
c906108c SS |
4333 | /* Select innermost stack frame - i.e., current frame is frame 0, |
4334 | and current location is based on that. | |
4335 | Don't do this on return from a stack dummy routine, | |
4336 | or if the program has exited. */ | |
4337 | ||
4338 | if (!stop_stack_dummy) | |
4339 | { | |
0f7d239c | 4340 | select_frame (get_current_frame ()); |
c906108c SS |
4341 | |
4342 | /* Print current location without a level number, if | |
c5aa993b JM |
4343 | we have changed functions or hit a breakpoint. |
4344 | Print source line if we have one. | |
4345 | bpstat_print() contains the logic deciding in detail | |
4346 | what to print, based on the event(s) that just occurred. */ | |
c906108c | 4347 | |
d01a8610 AS |
4348 | /* If --batch-silent is enabled then there's no need to print the current |
4349 | source location, and trying to do so risks causing an error message about
4350 | missing source files. */ | |
4351 | if (stop_print_frame && !batch_silent) | |
c906108c SS |
4352 | { |
4353 | int bpstat_ret; | |
4354 | int source_flag; | |
917317f4 | 4355 | int do_frame_printing = 1; |
347bddb7 | 4356 | struct thread_info *tp = inferior_thread (); |
c906108c | 4357 | |
347bddb7 | 4358 | bpstat_ret = bpstat_print (tp->stop_bpstat); |
917317f4 JM |
4359 | switch (bpstat_ret) |
4360 | { | |
4361 | case PRINT_UNKNOWN: | |
b0f4b84b DJ |
4362 | /* If we had hit a shared library event breakpoint, |
4363 | bpstat_print would print out this message. If we hit | |
4364 | an OS-level shared library event, do the same | |
4365 | thing. */ | |
4366 | if (last.kind == TARGET_WAITKIND_LOADED) | |
4367 | { | |
4368 | printf_filtered (_("Stopped due to shared library event\n")); | |
4369 | source_flag = SRC_LINE; /* something bogus */ | |
4370 | do_frame_printing = 0; | |
4371 | break; | |
4372 | } | |
4373 | ||
aa0cd9c1 | 4374 | /* FIXME: cagney/2002-12-01: Given that a frame ID does |
8fb3e588 AC |
4375 | (or should) carry around the function and does (or |
4376 | should) use that when doing a frame comparison. */ | |
414c69f7 | 4377 | if (tp->stop_step |
347bddb7 | 4378 | && frame_id_eq (tp->step_frame_id, |
aa0cd9c1 | 4379 | get_frame_id (get_current_frame ())) |
917317f4 | 4380 | && step_start_function == find_pc_function (stop_pc)) |
488f131b | 4381 | source_flag = SRC_LINE; /* finished step, just print source line */ |
917317f4 | 4382 | else |
488f131b | 4383 | source_flag = SRC_AND_LOC; /* print location and source line */ |
917317f4 JM |
4384 | break; |
4385 | case PRINT_SRC_AND_LOC: | |
488f131b | 4386 | source_flag = SRC_AND_LOC; /* print location and source line */ |
917317f4 JM |
4387 | break; |
4388 | case PRINT_SRC_ONLY: | |
c5394b80 | 4389 | source_flag = SRC_LINE; |
917317f4 JM |
4390 | break; |
4391 | case PRINT_NOTHING: | |
488f131b | 4392 | source_flag = SRC_LINE; /* something bogus */ |
917317f4 JM |
4393 | do_frame_printing = 0; |
4394 | break; | |
4395 | default: | |
e2e0b3e5 | 4396 | internal_error (__FILE__, __LINE__, _("Unknown value.")); |
917317f4 | 4397 | } |
c906108c SS |
4398 | |
4399 | /* The behavior of this routine with respect to the source | |
4400 | flag is: | |
c5394b80 JM |
4401 | SRC_LINE: Print only source line |
4402 | LOCATION: Print only location | |
4403 | SRC_AND_LOC: Print location and source line */ | |
917317f4 | 4404 | if (do_frame_printing) |
b04f3ab4 | 4405 | print_stack_frame (get_selected_frame (NULL), 0, source_flag); |
c906108c SS |
4406 | |
4407 | /* Display the auto-display expressions. */ | |
4408 | do_displays (); | |
4409 | } | |
4410 | } | |
4411 | ||
4412 | /* Save the function value return registers, if we care. | |
4413 | We might be about to restore their previous contents. */ | |
32400beb | 4414 | if (inferior_thread ()->proceed_to_finish) |
d5c31457 UW |
4415 | { |
4416 | /* This should not be necessary. */ | |
4417 | if (stop_registers) | |
4418 | regcache_xfree (stop_registers); | |
4419 | ||
4420 | /* NB: The copy goes through to the target picking up the value of | |
4421 | all the registers. */ | |
4422 | stop_registers = regcache_dup (get_current_regcache ()); | |
4423 | } | |
c906108c SS |
4424 | |
4425 | if (stop_stack_dummy) | |
4426 | { | |
b89667eb DE |
4427 | /* Pop the empty frame that contains the stack dummy. |
4428 | This also restores inferior state prior to the call | |
4429 | (struct inferior_thread_state). */ | |
4430 | struct frame_info *frame = get_current_frame (); | |
4431 | gdb_assert (get_frame_type (frame) == DUMMY_FRAME); | |
4432 | frame_pop (frame); | |
4433 | /* frame_pop() calls reinit_frame_cache as the last thing it does | |
4434 | which means there's currently no selected frame. We don't need | |
4435 | to re-establish a selected frame if the dummy call returns normally, | |
4436 | that will be done by restore_inferior_status. However, we do have | |
4437 | to handle the case where the dummy call is returning after being | |
4438 | stopped (e.g. the dummy call previously hit a breakpoint). We | |
4439 | can't know which case we have so just always re-establish a | |
4440 | selected frame here. */ | |
0f7d239c | 4441 | select_frame (get_current_frame ()); |
c906108c SS |
4442 | } |
4443 | ||
c906108c SS |
4444 | done: |
4445 | annotate_stopped (); | |
41d2bdb4 PA |
4446 | |
4447 | /* Suppress the stop observer if we're in the middle of: | |
4448 | ||
4449 | - a step n (n > 1), as there are still more steps to be done.
4450 | ||
4451 | - a "finish" command, as the observer will be called in | |
4452 | finish_command_continuation, so it can include the inferior | |
4453 | function's return value. | |
4454 | ||
4455 | - calling an inferior function, as we pretend the inferior didn't
4456 | run at all. The return value of the call is handled by the | |
4457 | expression evaluator, through call_function_by_hand. */ | |
4458 | ||
4459 | if (!target_has_execution | |
4460 | || last.kind == TARGET_WAITKIND_SIGNALLED | |
4461 | || last.kind == TARGET_WAITKIND_EXITED | |
4462 | || (!inferior_thread ()->step_multi | |
4463 | && !(inferior_thread ()->stop_bpstat | |
c5a4d20b PA |
4464 | && inferior_thread ()->proceed_to_finish) |
4465 | && !inferior_thread ()->in_infcall)) | |
347bddb7 PA |
4466 | { |
4467 | if (!ptid_equal (inferior_ptid, null_ptid)) | |
1d33d6ba VP |
4468 | observer_notify_normal_stop (inferior_thread ()->stop_bpstat, |
4469 | stop_print_frame); | |
347bddb7 | 4470 | else |
1d33d6ba | 4471 | observer_notify_normal_stop (NULL, stop_print_frame); |
347bddb7 | 4472 | } |
347bddb7 | 4473 | |
48844aa6 PA |
4474 | if (target_has_execution) |
4475 | { | |
4476 | if (last.kind != TARGET_WAITKIND_SIGNALLED | |
4477 | && last.kind != TARGET_WAITKIND_EXITED) | |
4478 | /* Delete the breakpoint we stopped at, if it wants to be deleted. | |
4479 | Delete any breakpoint that is to be deleted at the next stop. */ | |
4480 | breakpoint_auto_delete (inferior_thread ()->stop_bpstat); | |
94cc34af | 4481 | } |
c906108c SS |
4482 | } |
4483 | ||
4484 | static int | |
96baa820 | 4485 | hook_stop_stub (void *cmd) |
c906108c | 4486 | { |
5913bcb0 | 4487 | execute_cmd_pre_hook ((struct cmd_list_element *) cmd); |
c906108c SS |
4488 | return (0); |
4489 | } | |
4490 | \f | |
c5aa993b | 4491 | int |
96baa820 | 4492 | signal_stop_state (int signo) |
c906108c | 4493 | { |
d6b48e9c | 4494 | return signal_stop[signo]; |
c906108c SS |
4495 | } |
4496 | ||
c5aa993b | 4497 | int |
96baa820 | 4498 | signal_print_state (int signo) |
c906108c SS |
4499 | { |
4500 | return signal_print[signo]; | |
4501 | } | |
4502 | ||
c5aa993b | 4503 | int |
96baa820 | 4504 | signal_pass_state (int signo) |
c906108c SS |
4505 | { |
4506 | return signal_program[signo]; | |
4507 | } | |
4508 | ||
488f131b | 4509 | int |
7bda5e4a | 4510 | signal_stop_update (int signo, int state) |
d4f3574e SS |
4511 | { |
4512 | int ret = signal_stop[signo]; | |
4513 | signal_stop[signo] = state; | |
4514 | return ret; | |
4515 | } | |
4516 | ||
488f131b | 4517 | int |
7bda5e4a | 4518 | signal_print_update (int signo, int state) |
d4f3574e SS |
4519 | { |
4520 | int ret = signal_print[signo]; | |
4521 | signal_print[signo] = state; | |
4522 | return ret; | |
4523 | } | |
4524 | ||
488f131b | 4525 | int |
7bda5e4a | 4526 | signal_pass_update (int signo, int state) |
d4f3574e SS |
4527 | { |
4528 | int ret = signal_program[signo]; | |
4529 | signal_program[signo] = state; | |
4530 | return ret; | |
4531 | } | |
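Because each of these update routines returns the previous setting, a caller can save and restore a signal's disposition around an operation.  A small illustrative sketch; TARGET_SIGNAL_ALRM is just an example member of enum target_signal:

   /* Sketch: temporarily let SIGALRM through to the inferior without
      stopping, then put both tables back the way they were.  */
   int old_pass = signal_pass_update (TARGET_SIGNAL_ALRM, 1);
   int old_stop = signal_stop_update (TARGET_SIGNAL_ALRM, 0);
   /* ... resume and wait for the interesting event ... */
   signal_stop_update (TARGET_SIGNAL_ALRM, old_stop);
   signal_pass_update (TARGET_SIGNAL_ALRM, old_pass);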
4532 | ||
c906108c | 4533 | static void |
96baa820 | 4534 | sig_print_header (void) |
c906108c | 4535 | { |
a3f17187 AC |
4536 | printf_filtered (_("\ |
4537 | Signal Stop\tPrint\tPass to program\tDescription\n")); | |
c906108c SS |
4538 | } |
4539 | ||
4540 | static void | |
96baa820 | 4541 | sig_print_info (enum target_signal oursig) |
c906108c | 4542 | { |
54363045 | 4543 | const char *name = target_signal_to_name (oursig); |
c906108c | 4544 | int name_padding = 13 - strlen (name); |
96baa820 | 4545 | |
c906108c SS |
4546 | if (name_padding <= 0) |
4547 | name_padding = 0; | |
4548 | ||
4549 | printf_filtered ("%s", name); | |
488f131b | 4550 | printf_filtered ("%*.*s ", name_padding, name_padding, " "); |
c906108c SS |
4551 | printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No"); |
4552 | printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No"); | |
4553 | printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No"); | |
4554 | printf_filtered ("%s\n", target_signal_to_string (oursig)); | |
4555 | } | |
4556 | ||
4557 | /* Specify how various signals in the inferior should be handled. */ | |
4558 | ||
4559 | static void | |
96baa820 | 4560 | handle_command (char *args, int from_tty) |
c906108c SS |
4561 | { |
4562 | char **argv; | |
4563 | int digits, wordlen; | |
4564 | int sigfirst, signum, siglast; | |
4565 | enum target_signal oursig; | |
4566 | int allsigs; | |
4567 | int nsigs; | |
4568 | unsigned char *sigs; | |
4569 | struct cleanup *old_chain; | |
4570 | ||
4571 | if (args == NULL) | |
4572 | { | |
e2e0b3e5 | 4573 | error_no_arg (_("signal to handle")); |
c906108c SS |
4574 | } |
4575 | ||
4576 | /* Allocate and zero an array of flags for which signals to handle. */ | |
4577 | ||
4578 | nsigs = (int) TARGET_SIGNAL_LAST; | |
4579 | sigs = (unsigned char *) alloca (nsigs); | |
4580 | memset (sigs, 0, nsigs); | |
4581 | ||
4582 | /* Break the command line up into args. */ | |
4583 | ||
d1a41061 | 4584 | argv = gdb_buildargv (args); |
7a292a7a | 4585 | old_chain = make_cleanup_freeargv (argv); |
c906108c SS |
4586 | |
4587 | /* Walk through the args, looking for signal oursigs, signal names, and | |
4588 | actions. Signal numbers and signal names may be interspersed with | |
4589 | actions, with the actions being performed for all signals cumulatively | |
4590 | specified. Signal ranges can be specified as <LOW>-<HIGH>. */ | |
4591 | ||
4592 | while (*argv != NULL) | |
4593 | { | |
4594 | wordlen = strlen (*argv); | |
4595 | for (digits = 0; isdigit ((*argv)[digits]); digits++) | |
4596 | {; | |
4597 | } | |
4598 | allsigs = 0; | |
4599 | sigfirst = siglast = -1; | |
4600 | ||
4601 | if (wordlen >= 1 && !strncmp (*argv, "all", wordlen)) | |
4602 | { | |
4603 | /* Apply action to all signals except those used by the | |
4604 | debugger. Silently skip those. */ | |
4605 | allsigs = 1; | |
4606 | sigfirst = 0; | |
4607 | siglast = nsigs - 1; | |
4608 | } | |
4609 | else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen)) | |
4610 | { | |
4611 | SET_SIGS (nsigs, sigs, signal_stop); | |
4612 | SET_SIGS (nsigs, sigs, signal_print); | |
4613 | } | |
4614 | else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen)) | |
4615 | { | |
4616 | UNSET_SIGS (nsigs, sigs, signal_program); | |
4617 | } | |
4618 | else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen)) | |
4619 | { | |
4620 | SET_SIGS (nsigs, sigs, signal_print); | |
4621 | } | |
4622 | else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen)) | |
4623 | { | |
4624 | SET_SIGS (nsigs, sigs, signal_program); | |
4625 | } | |
4626 | else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen)) | |
4627 | { | |
4628 | UNSET_SIGS (nsigs, sigs, signal_stop); | |
4629 | } | |
4630 | else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen)) | |
4631 | { | |
4632 | SET_SIGS (nsigs, sigs, signal_program); | |
4633 | } | |
4634 | else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen)) | |
4635 | { | |
4636 | UNSET_SIGS (nsigs, sigs, signal_print); | |
4637 | UNSET_SIGS (nsigs, sigs, signal_stop); | |
4638 | } | |
4639 | else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen)) | |
4640 | { | |
4641 | UNSET_SIGS (nsigs, sigs, signal_program); | |
4642 | } | |
4643 | else if (digits > 0) | |
4644 | { | |
4645 | /* It is numeric. The numeric signal refers to our own | |
4646 | internal signal numbering from target.h, not to host/target | |
4647 | signal number. This is a feature; users really should be | |
4648 | using symbolic names anyway, and the common ones like | |
4649 | SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */ | |
4650 | ||
4651 | sigfirst = siglast = (int) | |
4652 | target_signal_from_command (atoi (*argv)); | |
4653 | if ((*argv)[digits] == '-') | |
4654 | { | |
4655 | siglast = (int) | |
4656 | target_signal_from_command (atoi ((*argv) + digits + 1)); | |
4657 | } | |
4658 | if (sigfirst > siglast) | |
4659 | { | |
4660 | /* Bet he didn't figure we'd think of this case... */ | |
4661 | signum = sigfirst; | |
4662 | sigfirst = siglast; | |
4663 | siglast = signum; | |
4664 | } | |
4665 | } | |
4666 | else | |
4667 | { | |
4668 | oursig = target_signal_from_name (*argv); | |
4669 | if (oursig != TARGET_SIGNAL_UNKNOWN) | |
4670 | { | |
4671 | sigfirst = siglast = (int) oursig; | |
4672 | } | |
4673 | else | |
4674 | { | |
4675 | /* Not a number and not a recognized flag word => complain. */ | |
8a3fe4f8 | 4676 | error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv); |
c906108c SS |
4677 | } |
4678 | } | |
4679 | ||
4680 | /* If any signal numbers or symbol names were found, set flags for | |
c5aa993b | 4681 | which signals to apply actions to. */ |
c906108c SS |
4682 | |
4683 | for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++) | |
4684 | { | |
4685 | switch ((enum target_signal) signum) | |
4686 | { | |
4687 | case TARGET_SIGNAL_TRAP: | |
4688 | case TARGET_SIGNAL_INT: | |
4689 | if (!allsigs && !sigs[signum]) | |
4690 | { | |
9e2f0ad4 HZ |
4691 | if (query (_("%s is used by the debugger.\n\ |
4692 | Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum))) | |
c906108c SS |
4693 | { |
4694 | sigs[signum] = 1; | |
4695 | } | |
4696 | else | |
4697 | { | |
a3f17187 | 4698 | printf_unfiltered (_("Not confirmed, unchanged.\n")); |
c906108c SS |
4699 | gdb_flush (gdb_stdout); |
4700 | } | |
4701 | } | |
4702 | break; | |
4703 | case TARGET_SIGNAL_0: | |
4704 | case TARGET_SIGNAL_DEFAULT: | |
4705 | case TARGET_SIGNAL_UNKNOWN: | |
4706 | /* Make sure that "all" doesn't print these. */ | |
4707 | break; | |
4708 | default: | |
4709 | sigs[signum] = 1; | |
4710 | break; | |
4711 | } | |
4712 | } | |
4713 | ||
4714 | argv++; | |
4715 | } | |
4716 | ||
3a031f65 PA |
4717 | for (signum = 0; signum < nsigs; signum++) |
4718 | if (sigs[signum]) | |
4719 | { | |
4720 | target_notice_signals (inferior_ptid); | |
c906108c | 4721 | |
3a031f65 PA |
4722 | if (from_tty) |
4723 | { | |
4724 | /* Show the results. */ | |
4725 | sig_print_header (); | |
4726 | for (; signum < nsigs; signum++) | |
4727 | if (sigs[signum]) | |
4728 | sig_print_info (signum); | |
4729 | } | |
4730 | ||
4731 | break; | |
4732 | } | |
c906108c SS |
4733 | |
4734 | do_cleanups (old_chain); | |
4735 | } | |
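For illustration only, the parser above accepts the familiar CLI forms; the argument strings below are examples, not taken from this file:

   /* Sketch: programmatic equivalents of typing
        handle SIGUSR1 nostop noprint pass
        handle 14-15 pass
      at the prompt (the numeric form uses GDB's own signal numbering,
      as noted in the comment inside the parser).  */
   handle_command ("SIGUSR1 nostop noprint pass", 0);
   handle_command ("14-15 pass", 0);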
4736 | ||
4737 | static void | |
96baa820 | 4738 | xdb_handle_command (char *args, int from_tty) |
c906108c SS |
4739 | { |
4740 | char **argv; | |
4741 | struct cleanup *old_chain; | |
4742 | ||
d1a41061 PP |
4743 | if (args == NULL) |
4744 | error_no_arg (_("xdb command")); | |
4745 | ||
c906108c SS |
4746 | /* Break the command line up into args. */ |
4747 | ||
d1a41061 | 4748 | argv = gdb_buildargv (args); |
7a292a7a | 4749 | old_chain = make_cleanup_freeargv (argv); |
c906108c SS |
4750 | if (argv[1] != (char *) NULL) |
4751 | { | |
4752 | char *argBuf; | |
4753 | int bufLen; | |
4754 | ||
4755 | bufLen = strlen (argv[0]) + 20; | |
4756 | argBuf = (char *) xmalloc (bufLen); | |
4757 | if (argBuf) | |
4758 | { | |
4759 | int validFlag = 1; | |
4760 | enum target_signal oursig; | |
4761 | ||
4762 | oursig = target_signal_from_name (argv[0]); | |
4763 | memset (argBuf, 0, bufLen); | |
4764 | if (strcmp (argv[1], "Q") == 0) | |
4765 | sprintf (argBuf, "%s %s", argv[0], "noprint"); | |
4766 | else | |
4767 | { | |
4768 | if (strcmp (argv[1], "s") == 0) | |
4769 | { | |
4770 | if (!signal_stop[oursig]) | |
4771 | sprintf (argBuf, "%s %s", argv[0], "stop"); | |
4772 | else | |
4773 | sprintf (argBuf, "%s %s", argv[0], "nostop"); | |
4774 | } | |
4775 | else if (strcmp (argv[1], "i") == 0) | |
4776 | { | |
4777 | if (!signal_program[oursig]) | |
4778 | sprintf (argBuf, "%s %s", argv[0], "pass"); | |
4779 | else | |
4780 | sprintf (argBuf, "%s %s", argv[0], "nopass"); | |
4781 | } | |
4782 | else if (strcmp (argv[1], "r") == 0) | |
4783 | { | |
4784 | if (!signal_print[oursig]) | |
4785 | sprintf (argBuf, "%s %s", argv[0], "print"); | |
4786 | else | |
4787 | sprintf (argBuf, "%s %s", argv[0], "noprint"); | |
4788 | } | |
4789 | else | |
4790 | validFlag = 0; | |
4791 | } | |
4792 | if (validFlag) | |
4793 | handle_command (argBuf, from_tty); | |
4794 | else | |
a3f17187 | 4795 | printf_filtered (_("Invalid signal handling flag.\n")); |
c906108c | 4796 | if (argBuf) |
b8c9b27d | 4797 | xfree (argBuf); |
c906108c SS |
4798 | } |
4799 | } | |
4800 | do_cleanups (old_chain); | |
4801 | } | |
4802 | ||
4803 | /* Print current contents of the tables set by the handle command. | |
4804 | It is possible we should just be printing signals actually used | |
4805 | by the current target (but for things to work right when switching | |
4806 | targets, all signals should be in the signal tables). */ | |
4807 | ||
4808 | static void | |
96baa820 | 4809 | signals_info (char *signum_exp, int from_tty) |
c906108c SS |
4810 | { |
4811 | enum target_signal oursig; | |
4812 | sig_print_header (); | |
4813 | ||
4814 | if (signum_exp) | |
4815 | { | |
4816 | /* First see if this is a symbol name. */ | |
4817 | oursig = target_signal_from_name (signum_exp); | |
4818 | if (oursig == TARGET_SIGNAL_UNKNOWN) | |
4819 | { | |
4820 | /* No, try numeric. */ | |
4821 | oursig = | |
bb518678 | 4822 | target_signal_from_command (parse_and_eval_long (signum_exp)); |
c906108c SS |
4823 | } |
4824 | sig_print_info (oursig); | |
4825 | return; | |
4826 | } | |
4827 | ||
4828 | printf_filtered ("\n"); | |
4829 | /* These ugly casts brought to you by the native VAX compiler. */ | |
4830 | for (oursig = TARGET_SIGNAL_FIRST; | |
4831 | (int) oursig < (int) TARGET_SIGNAL_LAST; | |
4832 | oursig = (enum target_signal) ((int) oursig + 1)) | |
4833 | { | |
4834 | QUIT; | |
4835 | ||
4836 | if (oursig != TARGET_SIGNAL_UNKNOWN | |
488f131b | 4837 | && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0) |
c906108c SS |
4838 | sig_print_info (oursig); |
4839 | } | |
4840 | ||
a3f17187 | 4841 | printf_filtered (_("\nUse the \"handle\" command to change these tables.\n")); |
c906108c | 4842 | } |
4aa995e1 PA |
4843 | |
4844 | /* The $_siginfo convenience variable is a bit special. We don't know | |
4845 | for sure the type of the value until we actually have a chance to | |
4846 | fetch the data. The type can change depending on gdbarch, so it is
4847 | also dependent on which thread you have selected. This is handled by combining:
4848 | ||
4849 | 1. making $_siginfo be an internalvar that creates a new value on | |
4850 | access. | |
4851 | ||
4852 | 2. making the value of $_siginfo be an lval_computed value. */ | |
4853 | ||
4854 | /* This function implements the lval_computed support for reading a | |
4855 | $_siginfo value. */ | |
4856 | ||
4857 | static void | |
4858 | siginfo_value_read (struct value *v) | |
4859 | { | |
4860 | LONGEST transferred; | |
4861 | ||
4862 | transferred = | |
4863 | target_read (¤t_target, TARGET_OBJECT_SIGNAL_INFO, | |
4864 | NULL, | |
4865 | value_contents_all_raw (v), | |
4866 | value_offset (v), | |
4867 | TYPE_LENGTH (value_type (v))); | |
4868 | ||
4869 | if (transferred != TYPE_LENGTH (value_type (v))) | |
4870 | error (_("Unable to read siginfo")); | |
4871 | } | |
4872 | ||
4873 | /* This function implements the lval_computed support for writing a | |
4874 | $_siginfo value. */ | |
4875 | ||
4876 | static void | |
4877 | siginfo_value_write (struct value *v, struct value *fromval) | |
4878 | { | |
4879 | LONGEST transferred; | |
4880 | ||
4881 | transferred = target_write (¤t_target, | |
4882 | TARGET_OBJECT_SIGNAL_INFO, | |
4883 | NULL, | |
4884 | value_contents_all_raw (fromval), | |
4885 | value_offset (v), | |
4886 | TYPE_LENGTH (value_type (fromval))); | |
4887 | ||
4888 | if (transferred != TYPE_LENGTH (value_type (fromval))) | |
4889 | error (_("Unable to write siginfo")); | |
4890 | } | |
4891 | ||
4892 | static struct lval_funcs siginfo_value_funcs = | |
4893 | { | |
4894 | siginfo_value_read, | |
4895 | siginfo_value_write | |
4896 | }; | |
4897 | ||
4898 | /* Return a new value with the correct type for the siginfo object of | |
4899 | the current thread. Return a void value if there's no object | |
4900 | available. */ | |
4901 | ||
2c0b251b | 4902 | static struct value * |
4aa995e1 PA |
4903 | siginfo_make_value (struct internalvar *var) |
4904 | { | |
4905 | struct type *type; | |
4906 | struct gdbarch *gdbarch; | |
4907 | ||
4908 | if (target_has_stack | |
4909 | && !ptid_equal (inferior_ptid, null_ptid)) | |
4910 | { | |
4911 | gdbarch = get_frame_arch (get_current_frame ()); | |
4912 | ||
4913 | if (gdbarch_get_siginfo_type_p (gdbarch)) | |
4914 | { | |
4915 | type = gdbarch_get_siginfo_type (gdbarch); | |
4916 | ||
4917 | return allocate_computed_value (type, &siginfo_value_funcs, NULL); | |
4918 | } | |
4919 | } | |
4920 | ||
4921 | return allocate_value (builtin_type_void); | |
4922 | } | |
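A make-value routine like the one above only takes effect once it is registered as the factory for the convenience variable.  A sketch of that wiring, assuming value.c's create_internalvar_type_lazy; the registration itself would normally live in this file's _initialize routine:

   /* Sketch: have "$_siginfo" recompute its value, via siginfo_make_value,
      each time it is referenced.  */
   create_internalvar_type_lazy ("_siginfo", siginfo_make_value);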
4923 | ||
c906108c | 4924 | \f |
b89667eb DE |
4925 | /* Inferior thread state. |
4926 | These are details related to the inferior itself, and don't include | |
4927 | things like what frame the user had selected or what gdb was doing | |
4928 | with the target at the time. | |
4929 | For inferior function calls these are things we want to restore | |
4930 | regardless of whether the function call successfully completes | |
4931 | or the dummy frame has to be manually popped. */ | |
4932 | ||
4933 | struct inferior_thread_state | |
7a292a7a SS |
4934 | { |
4935 | enum target_signal stop_signal; | |
4936 | CORE_ADDR stop_pc; | |
b89667eb DE |
4937 | struct regcache *registers; |
4938 | }; | |
4939 | ||
4940 | struct inferior_thread_state * | |
4941 | save_inferior_thread_state (void) | |
4942 | { | |
4943 | struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state); | |
4944 | struct thread_info *tp = inferior_thread (); | |
4945 | ||
4946 | inf_state->stop_signal = tp->stop_signal; | |
4947 | inf_state->stop_pc = stop_pc; | |
4948 | ||
4949 | inf_state->registers = regcache_dup (get_current_regcache ()); | |
4950 | ||
4951 | return inf_state; | |
4952 | } | |
4953 | ||
4954 | /* Restore inferior session state to INF_STATE. */ | |
4955 | ||
4956 | void | |
4957 | restore_inferior_thread_state (struct inferior_thread_state *inf_state) | |
4958 | { | |
4959 | struct thread_info *tp = inferior_thread (); | |
4960 | ||
4961 | tp->stop_signal = inf_state->stop_signal; | |
4962 | stop_pc = inf_state->stop_pc; | |
4963 | ||
4964 | /* The inferior can be gone if the user types "print exit(0)" | |
4965 | (and perhaps other times). */ | |
4966 | if (target_has_execution) | |
4967 | /* NB: The register write goes through to the target. */ | |
4968 | regcache_cpy (get_current_regcache (), inf_state->registers); | |
4969 | regcache_xfree (inf_state->registers); | |
4970 | xfree (inf_state); | |
4971 | } | |
4972 | ||
4973 | static void | |
4974 | do_restore_inferior_thread_state_cleanup (void *state) | |
4975 | { | |
4976 | restore_inferior_thread_state (state); | |
4977 | } | |
4978 | ||
4979 | struct cleanup * | |
4980 | make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state) | |
4981 | { | |
4982 | return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state); | |
4983 | } | |
4984 | ||
4985 | void | |
4986 | discard_inferior_thread_state (struct inferior_thread_state *inf_state) | |
4987 | { | |
4988 | regcache_xfree (inf_state->registers); | |
4989 | xfree (inf_state); | |
4990 | } | |
4991 | ||
4992 | struct regcache * | |
4993 | get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state) | |
4994 | { | |
4995 | return inf_state->registers; | |
4996 | } | |
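Taken together, these routines support the usual save/cleanup/restore pattern around code that may clobber the thread's registers or stop state.  A minimal sketch; a caller that wanted to keep the clobbered state instead would use discard_cleanups plus discard_inferior_thread_state (discard_cleanups comes from GDB's cleanup utilities):

   /* Sketch: snapshot the state, restore it automatically if an error
      propagates, and restore it explicitly on the normal path.  */
   struct inferior_thread_state *saved = save_inferior_thread_state ();
   struct cleanup *back_to = make_cleanup_restore_inferior_thread_state (saved);
   /* ... run something that may change registers or the stop signal ... */
   do_cleanups (back_to);   /* restores the state and frees SAVED */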
4997 | ||
4998 | /* Session related state for inferior function calls. | |
4999 | These are the additional bits of state that need to be restored | |
5000 | when an inferior function call successfully completes. */ | |
5001 | ||
5002 | struct inferior_status | |
5003 | { | |
7a292a7a SS |
5004 | bpstat stop_bpstat; |
5005 | int stop_step; | |
5006 | int stop_stack_dummy; | |
5007 | int stopped_by_random_signal; | |
ca67fcb8 | 5008 | int stepping_over_breakpoint; |
7a292a7a SS |
5009 | CORE_ADDR step_range_start; |
5010 | CORE_ADDR step_range_end; | |
aa0cd9c1 | 5011 | struct frame_id step_frame_id; |
5fbbeb29 | 5012 | enum step_over_calls_kind step_over_calls; |
7a292a7a SS |
5013 | CORE_ADDR step_resume_break_address; |
5014 | int stop_after_trap; | |
c0236d92 | 5015 | int stop_soon; |
7a292a7a | 5016 | |
b89667eb | 5017 | /* ID of the selected frame when the inferior function call was made. */
101dcfbe AC |
5018 | struct frame_id selected_frame_id; |
5019 | ||
7a292a7a | 5020 | int proceed_to_finish; |
c5a4d20b | 5021 | int in_infcall; |
7a292a7a SS |
5022 | }; |
5023 | ||
c906108c | 5024 | /* Save all of the information associated with the inferior<==>gdb |
b89667eb | 5025 | connection. */ |
c906108c | 5026 | |
7a292a7a | 5027 | struct inferior_status * |
b89667eb | 5028 | save_inferior_status (void) |
c906108c | 5029 | { |
72cec141 | 5030 | struct inferior_status *inf_status = XMALLOC (struct inferior_status); |
4e1c45ea | 5031 | struct thread_info *tp = inferior_thread (); |
d6b48e9c | 5032 | struct inferior *inf = current_inferior (); |
7a292a7a | 5033 | |
414c69f7 | 5034 | inf_status->stop_step = tp->stop_step; |
c906108c SS |
5035 | inf_status->stop_stack_dummy = stop_stack_dummy; |
5036 | inf_status->stopped_by_random_signal = stopped_by_random_signal; | |
4e1c45ea PA |
5037 | inf_status->stepping_over_breakpoint = tp->trap_expected; |
5038 | inf_status->step_range_start = tp->step_range_start; | |
5039 | inf_status->step_range_end = tp->step_range_end; | |
5040 | inf_status->step_frame_id = tp->step_frame_id; | |
078130d0 | 5041 | inf_status->step_over_calls = tp->step_over_calls; |
c906108c | 5042 | inf_status->stop_after_trap = stop_after_trap; |
d6b48e9c | 5043 | inf_status->stop_soon = inf->stop_soon; |
c906108c SS |
5044 | /* Save original bpstat chain here; replace it with copy of chain. |
5045 | If caller's caller is walking the chain, they'll be happier if we | |
7a292a7a SS |
5046 | hand them back the original chain when restore_inferior_status is |
5047 | called. */ | |
347bddb7 PA |
5048 | inf_status->stop_bpstat = tp->stop_bpstat; |
5049 | tp->stop_bpstat = bpstat_copy (tp->stop_bpstat); | |
32400beb | 5050 | inf_status->proceed_to_finish = tp->proceed_to_finish; |
c5a4d20b | 5051 | inf_status->in_infcall = tp->in_infcall; |
c5aa993b | 5052 | |
206415a3 | 5053 | inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL)); |
b89667eb | 5054 | |
7a292a7a | 5055 | return inf_status; |
c906108c SS |
5056 | } |
5057 | ||
c906108c | 5058 | static int |
96baa820 | 5059 | restore_selected_frame (void *args) |
c906108c | 5060 | { |
488f131b | 5061 | struct frame_id *fid = (struct frame_id *) args; |
c906108c | 5062 | struct frame_info *frame; |
c906108c | 5063 | |
101dcfbe | 5064 | frame = frame_find_by_id (*fid); |
c906108c | 5065 | |
aa0cd9c1 AC |
5066 | /* If frame_find_by_id () fails, the previously selected frame is
5067 | no longer available (e.g. its stack frame has gone away). */
101dcfbe | 5068 | if (frame == NULL) |
c906108c | 5069 | { |
8a3fe4f8 | 5070 | warning (_("Unable to restore previously selected frame.")); |
c906108c SS |
5071 | return 0; |
5072 | } | |
5073 | ||
0f7d239c | 5074 | select_frame (frame); |
c906108c SS |
5075 | |
5076 | return (1); | |
5077 | } | |
5078 | ||
b89667eb DE |
5079 | /* Restore inferior session state to INF_STATUS. */ |
5080 | ||
c906108c | 5081 | void |
96baa820 | 5082 | restore_inferior_status (struct inferior_status *inf_status) |
c906108c | 5083 | { |
4e1c45ea | 5084 | struct thread_info *tp = inferior_thread (); |
d6b48e9c | 5085 | struct inferior *inf = current_inferior (); |
4e1c45ea | 5086 | |
414c69f7 | 5087 | tp->stop_step = inf_status->stop_step; |
c906108c SS |
5088 | stop_stack_dummy = inf_status->stop_stack_dummy; |
5089 | stopped_by_random_signal = inf_status->stopped_by_random_signal; | |
4e1c45ea PA |
5090 | tp->trap_expected = inf_status->stepping_over_breakpoint; |
5091 | tp->step_range_start = inf_status->step_range_start; | |
5092 | tp->step_range_end = inf_status->step_range_end; | |
5093 | tp->step_frame_id = inf_status->step_frame_id; | |
078130d0 | 5094 | tp->step_over_calls = inf_status->step_over_calls; |
c906108c | 5095 | stop_after_trap = inf_status->stop_after_trap; |
d6b48e9c | 5096 | inf->stop_soon = inf_status->stop_soon; |
347bddb7 PA |
5097 | bpstat_clear (&tp->stop_bpstat); |
5098 | tp->stop_bpstat = inf_status->stop_bpstat; | |
b89667eb | 5099 | inf_status->stop_bpstat = NULL; |
32400beb | 5100 | tp->proceed_to_finish = inf_status->proceed_to_finish; |
c5a4d20b | 5101 | tp->in_infcall = inf_status->in_infcall; |
c906108c | 5102 | |
b89667eb | 5103 | if (target_has_stack) |
c906108c | 5104 | { |
c906108c | 5105 | /* The point of catch_errors is that if the stack is clobbered, |
101dcfbe AC |
5106 | walking the stack might encounter a garbage pointer and |
5107 | error() trying to dereference it. */ | |
488f131b JB |
5108 | if (catch_errors |
5109 | (restore_selected_frame, &inf_status->selected_frame_id, | |
5110 | "Unable to restore previously selected frame:\n", | |
5111 | RETURN_MASK_ERROR) == 0) | |
c906108c SS |
5112 | /* Error in restoring the selected frame. Select the innermost |
5113 | frame. */ | |
0f7d239c | 5114 | select_frame (get_current_frame ()); |
c906108c | 5115 | } |
c906108c | 5116 | |
72cec141 | 5117 | xfree (inf_status); |
7a292a7a | 5118 | } |
c906108c | 5119 | |
74b7792f AC |
5120 | static void |
5121 | do_restore_inferior_status_cleanup (void *sts) | |
5122 | { | |
5123 | restore_inferior_status (sts); | |
5124 | } | |
5125 | ||
5126 | struct cleanup * | |
5127 | make_cleanup_restore_inferior_status (struct inferior_status *inf_status) | |
5128 | { | |
5129 | return make_cleanup (do_restore_inferior_status_cleanup, inf_status); | |
5130 | } | |
5131 | ||
c906108c | 5132 | void |
96baa820 | 5133 | discard_inferior_status (struct inferior_status *inf_status) |
7a292a7a SS |
5134 | { |
5135 | /* See save_inferior_status for info on stop_bpstat. */ | |
5136 | bpstat_clear (&inf_status->stop_bpstat); | |
72cec141 | 5137 | xfree (inf_status); |
7a292a7a | 5138 | } |
b89667eb | 5139 | \f |
47932f85 | 5140 | int |
3a3e9ee3 | 5141 | inferior_has_forked (ptid_t pid, ptid_t *child_pid) |
47932f85 DJ |
5142 | { |
5143 | struct target_waitstatus last; | |
5144 | ptid_t last_ptid; | |
5145 | ||
5146 | get_last_target_status (&last_ptid, &last); | |
5147 | ||
5148 | if (last.kind != TARGET_WAITKIND_FORKED) | |
5149 | return 0; | |
5150 | ||
3a3e9ee3 | 5151 | if (!ptid_equal (last_ptid, pid)) |
47932f85 DJ |
5152 | return 0; |
5153 | ||
5154 | *child_pid = last.value.related_pid; | |
5155 | return 1; | |
5156 | } | |
5157 | ||
5158 | int | |
3a3e9ee3 | 5159 | inferior_has_vforked (ptid_t pid, ptid_t *child_pid) |
47932f85 DJ |
5160 | { |
5161 | struct target_waitstatus last; | |
5162 | ptid_t last_ptid; | |
5163 | ||
5164 | get_last_target_status (&last_ptid, &last); | |
5165 | ||
5166 | if (last.kind != TARGET_WAITKIND_VFORKED) | |
5167 | return 0; | |
5168 | ||
3a3e9ee3 | 5169 | if (!ptid_equal (last_ptid, pid)) |
47932f85 DJ |
5170 | return 0; |
5171 | ||
5172 | *child_pid = last.value.related_pid; | |
5173 | return 1; | |
5174 | } | |
5175 | ||
5176 | int | |
3a3e9ee3 | 5177 | inferior_has_execd (ptid_t pid, char **execd_pathname) |
47932f85 DJ |
5178 | { |
5179 | struct target_waitstatus last; | |
5180 | ptid_t last_ptid; | |
5181 | ||
5182 | get_last_target_status (&last_ptid, &last); | |
5183 | ||
5184 | if (last.kind != TARGET_WAITKIND_EXECD) | |
5185 | return 0; | |
5186 | ||
3a3e9ee3 | 5187 | if (!ptid_equal (last_ptid, pid)) |
47932f85 DJ |
5188 | return 0; |
5189 | ||
5190 | *execd_pathname = xstrdup (last.value.execd_pathname); | |
5191 | return 1; | |
5192 | } | |
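A hypothetical usage sketch (not from the original file) of the three predicates above, using only symbols visible in infrun.c; the function name and message text are made up:

static void
example_report_last_event (void)
{
  ptid_t child;
  char *pathname;

  if (inferior_has_forked (inferior_ptid, &child)
      || inferior_has_vforked (inferior_ptid, &child))
    printf_filtered ("Last event was a (v)fork; child pid %d.\n",
		     ptid_get_pid (child));
  else if (inferior_has_execd (inferior_ptid, &pathname))
    {
      printf_filtered ("Last event was an exec of %s.\n", pathname);
      xfree (pathname);
    }
}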
5193 | ||
ca6724c1 KB |
5194 | /* Oft used ptids */ |
5195 | ptid_t null_ptid; | |
5196 | ptid_t minus_one_ptid; | |
5197 | ||
5198 | /* Create a ptid given the necessary PID, LWP, and TID components. */ | |
488f131b | 5199 | |
ca6724c1 KB |
5200 | ptid_t |
5201 | ptid_build (int pid, long lwp, long tid) | |
5202 | { | |
5203 | ptid_t ptid; | |
5204 | ||
5205 | ptid.pid = pid; | |
5206 | ptid.lwp = lwp; | |
5207 | ptid.tid = tid; | |
5208 | return ptid; | |
5209 | } | |
5210 | ||
5211 | /* Create a ptid from just a pid. */ | |
5212 | ||
5213 | ptid_t | |
5214 | pid_to_ptid (int pid) | |
5215 | { | |
5216 | return ptid_build (pid, 0, 0); | |
5217 | } | |
5218 | ||
5219 | /* Fetch the pid (process id) component from a ptid. */ | |
5220 | ||
5221 | int | |
5222 | ptid_get_pid (ptid_t ptid) | |
5223 | { | |
5224 | return ptid.pid; | |
5225 | } | |
5226 | ||
5227 | /* Fetch the lwp (lightweight process) component from a ptid. */ | |
5228 | ||
5229 | long | |
5230 | ptid_get_lwp (ptid_t ptid) | |
5231 | { | |
5232 | return ptid.lwp; | |
5233 | } | |
5234 | ||
5235 | /* Fetch the tid (thread id) component from a ptid. */ | |
5236 | ||
5237 | long | |
5238 | ptid_get_tid (ptid_t ptid) | |
5239 | { | |
5240 | return ptid.tid; | |
5241 | } | |
5242 | ||
5243 | /* ptid_equal() is used to test equality of two ptids. */ | |
5244 | ||
5245 | int | |
5246 | ptid_equal (ptid_t ptid1, ptid_t ptid2) | |
5247 | { | |
5248 | return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp | |
488f131b | 5249 | && ptid1.tid == ptid2.tid); |
ca6724c1 KB |
5250 | } |
5251 | ||
252fbfc8 PA |
5252 | /* Returns true if PTID represents a process. */ |
5253 | ||
5254 | int | |
5255 | ptid_is_pid (ptid_t ptid) | |
5256 | { | |
5257 | if (ptid_equal (minus_one_ptid, ptid)) | |
5258 | return 0; | |
5259 | if (ptid_equal (null_ptid, ptid)) | |
5260 | return 0; | |
5261 | ||
5262 | return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0); | |
5263 | } | |
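An illustrative sketch (not in the original source) showing how the ptid constructors and accessors above compose; the pid/lwp numbers are arbitrary:

static void
example_ptid_usage (void)
{
  ptid_t whole_process = pid_to_ptid (1234);      /* pid component only */
  ptid_t one_lwp = ptid_build (1234, 56, 0);      /* pid plus lwp */

  gdb_assert (ptid_is_pid (whole_process));
  gdb_assert (!ptid_is_pid (one_lwp));
  gdb_assert (!ptid_equal (whole_process, one_lwp));
  gdb_assert (ptid_get_pid (one_lwp) == 1234);
  gdb_assert (ptid_get_lwp (one_lwp) == 56);
  gdb_assert (ptid_get_tid (one_lwp) == 0);
}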
5264 | ||
ca6724c1 KB |
5265 | /* restore_inferior_ptid() will be used by the cleanup machinery |
5266 | to restore the inferior_ptid value saved in a call to | |
5267 | save_inferior_ptid(). */ | |
ce696e05 KB |
5268 | |
5269 | static void | |
5270 | restore_inferior_ptid (void *arg) | |
5271 | { | |
5272 | ptid_t *saved_ptid_ptr = arg; | |
5273 | inferior_ptid = *saved_ptid_ptr; | |
5274 | xfree (arg); | |
5275 | } | |
5276 | ||
5277 | /* Save the value of inferior_ptid so that it may be restored by a | |
5278 | later call to do_cleanups(). Returns the struct cleanup pointer | |
5279 | needed for later doing the cleanup. */ | |
5280 | ||
5281 | struct cleanup * | |
5282 | save_inferior_ptid (void) | |
5283 | { | |
5284 | ptid_t *saved_ptid_ptr; | |
5285 | ||
5286 | saved_ptid_ptr = xmalloc (sizeof (ptid_t)); | |
5287 | *saved_ptid_ptr = inferior_ptid; | |
5288 | return make_cleanup (restore_inferior_ptid, saved_ptid_ptr); | |
5289 | } | |
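A minimal sketch (not part of the original source) of the usual save/switch/restore idiom these two functions support; `other' stands in for whatever ptid the caller wants to operate on, and the helper name is hypothetical:

static void
example_switch_inferior_ptid (ptid_t other)
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = other;
  /* ... do per-thread work that reads inferior_ptid ...  */

  /* This runs restore_inferior_ptid; if an error is thrown before this
     point, the error machinery runs the cleanup instead, so the saved
     value is restored either way.  */
  do_cleanups (old_chain);
}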
c5aa993b | 5290 | \f |
488f131b | 5291 | |
b2175913 MS |
5292 | /* User interface for reverse debugging: |
5293 | Set exec-direction / show exec-direction commands | |
5294 | (the new direction only takes effect when the target supports reverse execution). */ |
5295 | ||
5296 | enum exec_direction_kind execution_direction = EXEC_FORWARD; | |
5297 | static const char exec_forward[] = "forward"; | |
5298 | static const char exec_reverse[] = "reverse"; | |
5299 | static const char *exec_direction = exec_forward; | |
5300 | static const char *exec_direction_names[] = { | |
5301 | exec_forward, | |
5302 | exec_reverse, | |
5303 | NULL | |
5304 | }; | |
5305 | ||
5306 | static void | |
5307 | set_exec_direction_func (char *args, int from_tty, | |
5308 | struct cmd_list_element *cmd) | |
5309 | { | |
5310 | if (target_can_execute_reverse) | |
5311 | { | |
5312 | if (!strcmp (exec_direction, exec_forward)) | |
5313 | execution_direction = EXEC_FORWARD; | |
5314 | else if (!strcmp (exec_direction, exec_reverse)) | |
5315 | execution_direction = EXEC_REVERSE; | |
5316 | } | |
5317 | } | |
5318 | ||
5319 | static void | |
5320 | show_exec_direction_func (struct ui_file *out, int from_tty, | |
5321 | struct cmd_list_element *cmd, const char *value) | |
5322 | { | |
5323 | switch (execution_direction) { | |
5324 | case EXEC_FORWARD: | |
5325 | fprintf_filtered (out, _("Forward.\n")); | |
5326 | break; | |
5327 | case EXEC_REVERSE: | |
5328 | fprintf_filtered (out, _("Reverse.\n")); | |
5329 | break; | |
5330 | case EXEC_ERROR: | |
5331 | default: | |
5332 | fprintf_filtered (out, | |
5333 | _("Forward (target `%s' does not support exec-direction).\n"), | |
5334 | target_shortname); | |
5335 | break; | |
5336 | } | |
5337 | } | |
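A brief illustrative sketch (not in the original source) of how execution code elsewhere can branch on the global these handlers maintain; the function name is hypothetical:

static int
example_resuming_in_reverse (void)
{
  /* set_exec_direction_func above only flips this to EXEC_REVERSE when
     target_can_execute_reverse is true.  */
  return execution_direction == EXEC_REVERSE;
}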
5338 | ||
5339 | /* User interface for non-stop mode. */ | |
5340 | ||
ad52ddc6 PA |
5341 | int non_stop = 0; |
5342 | static int non_stop_1 = 0; | |
5343 | ||
5344 | static void | |
5345 | set_non_stop (char *args, int from_tty, | |
5346 | struct cmd_list_element *c) | |
5347 | { | |
5348 | if (target_has_execution) | |
5349 | { | |
5350 | non_stop_1 = non_stop; | |
5351 | error (_("Cannot change this setting while the inferior is running.")); | |
5352 | } | |
5353 | ||
5354 | non_stop = non_stop_1; | |
5355 | } | |
5356 | ||
5357 | static void | |
5358 | show_non_stop (struct ui_file *file, int from_tty, | |
5359 | struct cmd_list_element *c, const char *value) | |
5360 | { | |
5361 | fprintf_filtered (file, | |
5362 | _("Controlling the inferior in non-stop mode is %s.\n"), | |
5363 | value); | |
5364 | } | |
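Another small sketch (not part of the original source): the rest of infrun consults the non_stop flag roughly like this when deciding whether a stop event should halt every thread; the helper name is made up:

static int
example_stop_all_threads_p (void)
{
  /* All-stop mode: every thread is stopped when one reports an event.
     Non-stop mode: only the reporting thread stops.  */
  return !non_stop;
}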
5365 | ||
5366 | ||
c906108c | 5367 | void |
96baa820 | 5368 | _initialize_infrun (void) |
c906108c | 5369 | { |
52f0bd74 AC |
5370 | int i; |
5371 | int numsigs; | |
c906108c SS |
5372 | struct cmd_list_element *c; |
5373 | ||
1bedd215 AC |
5374 | add_info ("signals", signals_info, _("\ |
5375 | What debugger does when program gets various signals.\n\ | |
5376 | Specify a signal as argument to print info on that signal only.")); | |
c906108c SS |
5377 | add_info_alias ("handle", "signals", 0); |
5378 | ||
1bedd215 AC |
5379 | add_com ("handle", class_run, handle_command, _("\ |
5380 | Specify how to handle a signal.\n\ | |
c906108c SS |
5381 | Args are signals and actions to apply to those signals.\n\ |
5382 | Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\ | |
5383 | from 1-15 are allowed for compatibility with old versions of GDB.\n\ | |
5384 | Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\ | |
5385 | The special arg \"all\" is recognized to mean all signals except those\n\ | |
1bedd215 AC |
5386 | used by the debugger, typically SIGTRAP and SIGINT.\n\ |
5387 | Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\ | |
c906108c SS |
5388 | \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\ |
5389 | Stop means reenter debugger if this signal happens (implies print).\n\ | |
5390 | Print means print a message if this signal happens.\n\ | |
5391 | Pass means let program see this signal; otherwise program doesn't know.\n\ | |
5392 | Ignore is a synonym for nopass and noignore is a synonym for pass.\n\ | |
1bedd215 | 5393 | Pass and Stop may be combined.")); |
c906108c SS |
5394 | if (xdb_commands) |
5395 | { | |
1bedd215 AC |
5396 | add_com ("lz", class_info, signals_info, _("\ |
5397 | What debugger does when program gets various signals.\n\ | |
5398 | Specify a signal as argument to print info on that signal only.")); | |
5399 | add_com ("z", class_run, xdb_handle_command, _("\ | |
5400 | Specify how to handle a signal.\n\ | |
c906108c SS |
5401 | Args are signals and actions to apply to those signals.\n\ |
5402 | Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\ | |
5403 | from 1-15 are allowed for compatibility with old versions of GDB.\n\ | |
5404 | Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\ | |
5405 | The special arg \"all\" is recognized to mean all signals except those\n\ | |
1bedd215 AC |
5406 | used by the debugger, typically SIGTRAP and SIGINT.\n\ |
5407 | Recognized actions include \"s\" (toggles between stop and nostop), \n\ | |
c906108c SS |
5408 | \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \ |
5409 | nopass), \"Q\" (noprint)\n\ | |
5410 | Stop means reenter debugger if this signal happens (implies print).\n\ | |
5411 | Print means print a message if this signal happens.\n\ | |
5412 | Pass means let program see this signal; otherwise program doesn't know.\n\ | |
5413 | Ignore is a synonym for nopass and noignore is a synonym for pass.\n\ | |
1bedd215 | 5414 | Pass and Stop may be combined.")); |
c906108c SS |
5415 | } |
5416 | ||
5417 | if (!dbx_commands) | |
1a966eab AC |
5418 | stop_command = add_cmd ("stop", class_obscure, |
5419 | not_just_help_class_command, _("\ | |
5420 | There is no `stop' command, but you can set a hook on `stop'.\n\ | |
c906108c | 5421 | This allows you to set a list of commands to be run each time execution\n\ |
1a966eab | 5422 | of the program stops."), &cmdlist); |
c906108c | 5423 | |
85c07804 AC |
5424 | add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\ |
5425 | Set inferior debugging."), _("\ | |
5426 | Show inferior debugging."), _("\ | |
5427 | When non-zero, inferior specific debugging is enabled."), | |
5428 | NULL, | |
920d2a44 | 5429 | show_debug_infrun, |
85c07804 | 5430 | &setdebuglist, &showdebuglist); |
527159b7 | 5431 | |
237fc4c9 PA |
5432 | add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\ |
5433 | Set displaced stepping debugging."), _("\ | |
5434 | Show displaced stepping debugging."), _("\ | |
5435 | When non-zero, displaced stepping specific debugging is enabled."), | |
5436 | NULL, | |
5437 | show_debug_displaced, | |
5438 | &setdebuglist, &showdebuglist); | |
5439 | ||
ad52ddc6 PA |
5440 | add_setshow_boolean_cmd ("non-stop", no_class, |
5441 | &non_stop_1, _("\ | |
5442 | Set whether gdb controls the inferior in non-stop mode."), _("\ | |
5443 | Show whether gdb controls the inferior in non-stop mode."), _("\ | |
5444 | When debugging a multi-threaded program and this setting is\n\ | |
5445 | off (the default, also called all-stop mode), when one thread stops\n\ | |
5446 | (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\ | |
5447 | all other threads in the program while you interact with the thread of\n\ | |
5448 | interest. When you continue or step a thread, you can allow the other\n\ | |
5449 | threads to run, or have them remain stopped, but while you inspect any\n\ | |
5450 | thread's state, all threads stop.\n\ | |
5451 | \n\ | |
5452 | In non-stop mode, when one thread stops, other threads can continue\n\ | |
5453 | to run freely. You'll be able to step each thread independently,\n\ | |
5454 | leave it stopped or free to run as needed."), | |
5455 | set_non_stop, | |
5456 | show_non_stop, | |
5457 | &setlist, | |
5458 | &showlist); | |
5459 | ||
c906108c | 5460 | numsigs = (int) TARGET_SIGNAL_LAST; |
488f131b | 5461 | signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs); |
c906108c SS |
5462 | signal_print = (unsigned char *) |
5463 | xmalloc (sizeof (signal_print[0]) * numsigs); | |
5464 | signal_program = (unsigned char *) | |
5465 | xmalloc (sizeof (signal_program[0]) * numsigs); | |
5466 | for (i = 0; i < numsigs; i++) | |
5467 | { | |
5468 | signal_stop[i] = 1; | |
5469 | signal_print[i] = 1; | |
5470 | signal_program[i] = 1; | |
5471 | } | |
5472 | ||
5473 | /* Signals caused by debugger's own actions | |
5474 | should not be given to the program afterwards. */ | |
5475 | signal_program[TARGET_SIGNAL_TRAP] = 0; | |
5476 | signal_program[TARGET_SIGNAL_INT] = 0; | |
5477 | ||
5478 | /* Signals that are not errors should not normally enter the debugger. */ | |
5479 | signal_stop[TARGET_SIGNAL_ALRM] = 0; | |
5480 | signal_print[TARGET_SIGNAL_ALRM] = 0; | |
5481 | signal_stop[TARGET_SIGNAL_VTALRM] = 0; | |
5482 | signal_print[TARGET_SIGNAL_VTALRM] = 0; | |
5483 | signal_stop[TARGET_SIGNAL_PROF] = 0; | |
5484 | signal_print[TARGET_SIGNAL_PROF] = 0; | |
5485 | signal_stop[TARGET_SIGNAL_CHLD] = 0; | |
5486 | signal_print[TARGET_SIGNAL_CHLD] = 0; | |
5487 | signal_stop[TARGET_SIGNAL_IO] = 0; | |
5488 | signal_print[TARGET_SIGNAL_IO] = 0; | |
5489 | signal_stop[TARGET_SIGNAL_POLL] = 0; | |
5490 | signal_print[TARGET_SIGNAL_POLL] = 0; | |
5491 | signal_stop[TARGET_SIGNAL_URG] = 0; | |
5492 | signal_print[TARGET_SIGNAL_URG] = 0; | |
5493 | signal_stop[TARGET_SIGNAL_WINCH] = 0; | |
5494 | signal_print[TARGET_SIGNAL_WINCH] = 0; | |
5495 | ||
cd0fc7c3 SS |
5496 | /* These signals are used internally by user-level thread |
5497 | implementations. (See signal(5) on Solaris.) Like the above | |
5498 | signals, a healthy program receives and handles them as part of | |
5499 | its normal operation. */ | |
5500 | signal_stop[TARGET_SIGNAL_LWP] = 0; | |
5501 | signal_print[TARGET_SIGNAL_LWP] = 0; | |
5502 | signal_stop[TARGET_SIGNAL_WAITING] = 0; | |
5503 | signal_print[TARGET_SIGNAL_WAITING] = 0; | |
5504 | signal_stop[TARGET_SIGNAL_CANCEL] = 0; | |
5505 | signal_print[TARGET_SIGNAL_CANCEL] = 0; | |
5506 | ||
85c07804 AC |
5507 | add_setshow_zinteger_cmd ("stop-on-solib-events", class_support, |
5508 | &stop_on_solib_events, _("\ | |
5509 | Set stopping for shared library events."), _("\ | |
5510 | Show stopping for shared library events."), _("\ | |
c906108c SS |
5511 | If nonzero, gdb will give control to the user when the dynamic linker\n\ |
5512 | notifies gdb of shared library events. The most common event of interest\n\ | |
85c07804 AC |
5513 | to the user would be loading/unloading of a new library."), |
5514 | NULL, | |
920d2a44 | 5515 | show_stop_on_solib_events, |
85c07804 | 5516 | &setlist, &showlist); |
c906108c | 5517 | |
7ab04401 AC |
5518 | add_setshow_enum_cmd ("follow-fork-mode", class_run, |
5519 | follow_fork_mode_kind_names, | |
5520 | &follow_fork_mode_string, _("\ | |
5521 | Set debugger response to a program call of fork or vfork."), _("\ | |
5522 | Show debugger response to a program call of fork or vfork."), _("\ | |
c906108c SS |
5523 | A fork or vfork creates a new process. follow-fork-mode can be:\n\ |
5524 | parent - the original process is debugged after a fork\n\ | |
5525 | child - the new process is debugged after a fork\n\ | |
ea1dd7bc | 5526 | The unfollowed process will continue to run.\n\ |
7ab04401 AC |
5527 | By default, the debugger will follow the parent process."), |
5528 | NULL, | |
920d2a44 | 5529 | show_follow_fork_mode_string, |
7ab04401 AC |
5530 | &setlist, &showlist); |
5531 | ||
5532 | add_setshow_enum_cmd ("scheduler-locking", class_run, | |
5533 | scheduler_enums, &scheduler_mode, _("\ | |
5534 | Set mode for locking scheduler during execution."), _("\ | |
5535 | Show mode for locking scheduler during execution."), _("\ | |
c906108c SS |
5536 | off == no locking (threads may preempt at any time)\n\ |
5537 | on == full locking (no thread except the current thread may run)\n\ | |
5538 | step == scheduler locked during every single-step operation.\n\ | |
5539 | In this mode, no other thread may run during a step command.\n\ | |
7ab04401 AC |
5540 | Other threads may run while stepping over a function call ('next')."), |
5541 | set_schedlock_func, /* traps on target vector */ | |
920d2a44 | 5542 | show_scheduler_mode, |
7ab04401 | 5543 | &setlist, &showlist); |
5fbbeb29 | 5544 | |
5bf193a2 AC |
5545 | add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\ |
5546 | Set mode of the step operation."), _("\ | |
5547 | Show mode of the step operation."), _("\ | |
5548 | When set, doing a step over a function without debug line information\n\ | |
5549 | will stop at the first instruction of that function. Otherwise, the\n\ | |
5550 | function is skipped and the step command stops at a different source line."), | |
5551 | NULL, | |
920d2a44 | 5552 | show_step_stop_if_no_debug, |
5bf193a2 | 5553 | &setlist, &showlist); |
ca6724c1 | 5554 | |
fff08868 HZ |
5555 | add_setshow_enum_cmd ("displaced-stepping", class_run, |
5556 | can_use_displaced_stepping_enum, | |
5557 | &can_use_displaced_stepping, _("\ | |
237fc4c9 PA |
5558 | Set debugger's willingness to use displaced stepping."), _("\ |
5559 | Show debugger's willingness to use displaced stepping."), _("\ | |
fff08868 HZ |
5560 | If on, gdb will use displaced stepping to step over breakpoints if it is\n\ |
5561 | supported by the target architecture. If off, gdb will not use displaced\n\ | |
5562 | stepping to step over breakpoints, even if such is supported by the target\n\ | |
5563 | architecture. If auto (which is the default), gdb will use displaced stepping\n\ | |
5564 | if the target architecture supports it and non-stop mode is active, but will not\n\ | |
5565 | use it in all-stop mode (see help set non-stop)."), | |
5566 | NULL, | |
5567 | show_can_use_displaced_stepping, | |
5568 | &setlist, &showlist); | |
237fc4c9 | 5569 | |
b2175913 MS |
5570 | add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names, |
5571 | &exec_direction, _("Set direction of execution.\n\ | |
5572 | Options are 'forward' or 'reverse'."), | |
5573 | _("Show direction of execution (forward/reverse)."), | |
5574 | _("Tells gdb whether to execute forward or backward."), | |
5575 | set_exec_direction_func, show_exec_direction_func, | |
5576 | &setlist, &showlist); | |
5577 | ||
ca6724c1 KB |
5578 | /* ptid initializations */ |
5579 | null_ptid = ptid_build (0, 0, 0); | |
5580 | minus_one_ptid = ptid_build (-1, 0, 0); | |
5581 | inferior_ptid = null_ptid; | |
5582 | target_last_wait_ptid = minus_one_ptid; | |
237fc4c9 | 5583 | displaced_step_ptid = null_ptid; |
5231c1fd PA |
5584 | |
5585 | observer_attach_thread_ptid_changed (infrun_thread_ptid_changed); | |
252fbfc8 | 5586 | observer_attach_thread_stop_requested (infrun_thread_stop_requested); |
a07daef3 | 5587 | observer_attach_thread_exit (infrun_thread_thread_exit); |
4aa995e1 PA |
5588 | |
5589 | /* Create the variable explicitly rather than via lookup: a lookup |
5590 | would try to create a void-typed value for it, and gdbarch is not |
5591 | initialized yet at this point. We can also be quite sure that no |
5592 | other convenience variable of the same name exists yet. */ |
5593 | create_internalvar_type_lazy ("_siginfo", siginfo_make_value); | |
c906108c | 5594 | } |
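Finally, an illustrative sketch (not part of infrun.c) of how the per-signal tables initialized above are meant to be read: entries are indexed by TARGET_SIGNAL_* values, a non-zero entry enables the behaviour, and the `handle' command updates the same arrays at runtime. The function name is hypothetical:

static void
example_show_signal_disposition (enum target_signal sig)
{
  printf_filtered ("stop: %d  print: %d  pass to program: %d\n",
		   signal_stop[sig], signal_print[sig],
		   signal_program[sig]);
}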