Commit | Line | Data |
---|---|---|
c906108c | 1 | /* Select target systems and architectures at runtime for GDB. |
7998dfc3 | 2 | |
32d0add0 | 3 | Copyright (C) 1990-2015 Free Software Foundation, Inc. |
7998dfc3 | 4 | |
c906108c SS | 5 | Contributed by Cygnus Support. |
c906108c SS | 6 | |
c5aa993b | 7 | This file is part of GDB. |
c906108c | 8 | |
c5aa993b JM | 9 | This program is free software; you can redistribute it and/or modify |
c5aa993b JM | 10 | it under the terms of the GNU General Public License as published by |
a9762ec7 | 11 | the Free Software Foundation; either version 3 of the License, or |
c5aa993b | 12 | (at your option) any later version. |
c906108c | 13 | |
c5aa993b JM | 14 | This program is distributed in the hope that it will be useful, |
c5aa993b JM | 15 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
c5aa993b JM | 16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |
c5aa993b JM | 17 | GNU General Public License for more details. |
c906108c | 18 | |
c5aa993b | 19 | You should have received a copy of the GNU General Public License |
a9762ec7 | 20 | along with this program.  If not, see <http://www.gnu.org/licenses/>. */ |
c906108c SS |
21 | |
22 | #include "defs.h" | |
c906108c | 23 | #include "target.h" |
68c765e2 | 24 | #include "target-dcache.h" |
c906108c SS |
25 | #include "gdbcmd.h" |
26 | #include "symtab.h" | |
27 | #include "inferior.h" | |
45741a9c | 28 | #include "infrun.h" |
c906108c SS |
29 | #include "bfd.h" |
30 | #include "symfile.h" | |
31 | #include "objfiles.h" | |
4930751a | 32 | #include "dcache.h" |
c906108c | 33 | #include <signal.h> |
4e052eda | 34 | #include "regcache.h" |
b6591e8b | 35 | #include "gdbcore.h" |
424163ea | 36 | #include "target-descriptions.h" |
e1ac3328 | 37 | #include "gdbthread.h" |
b9db4ced | 38 | #include "solib.h" |
07b82ea5 | 39 | #include "exec.h" |
edb3359d | 40 | #include "inline-frame.h" |
2f4d8875 | 41 | #include "tracepoint.h" |
7313baad | 42 | #include "gdb/fileio.h" |
8ffcbaaf | 43 | #include "agent.h" |
8de71aab | 44 | #include "auxv.h" |
a7068b60 | 45 | #include "target-debug.h" |
c906108c | 46 | |
a14ed312 | 47 | static void target_info (char *, int); |
c906108c | 48 | |
f0f9ff95 TT |
49 | static void generic_tls_error (void) ATTRIBUTE_NORETURN; |
50 | ||
0a4f40a2 | 51 | static void default_terminal_info (struct target_ops *, const char *, int); |
c906108c | 52 | |
5009afc5 AS |
53 | static int default_watchpoint_addr_within_range (struct target_ops *, |
54 | CORE_ADDR, CORE_ADDR, int); | |
55 | ||
31568a15 TT |
56 | static int default_region_ok_for_hw_watchpoint (struct target_ops *, |
57 | CORE_ADDR, int); | |
e0d24f8d | 58 | |
a30bf1f1 | 59 | static void default_rcmd (struct target_ops *, const char *, struct ui_file *); |
a53f3625 | 60 | |
4229b31d TT |
61 | static ptid_t default_get_ada_task_ptid (struct target_ops *self, |
62 | long lwp, long tid); | |
63 | ||
098dba18 TT |
64 | static int default_follow_fork (struct target_ops *self, int follow_child, |
65 | int detach_fork); | |
66 | ||
8d657035 TT |
67 | static void default_mourn_inferior (struct target_ops *self); |
68 | ||
58a5184e TT |
69 | static int default_search_memory (struct target_ops *ops, |
70 | CORE_ADDR start_addr, | |
71 | ULONGEST search_space_len, | |
72 | const gdb_byte *pattern, | |
73 | ULONGEST pattern_len, | |
74 | CORE_ADDR *found_addrp); | |
75 | ||
936d2992 PA |
76 | static int default_verify_memory (struct target_ops *self, |
77 | const gdb_byte *data, | |
78 | CORE_ADDR memaddr, ULONGEST size); | |
79 | ||
8eaff7cd TT |
80 | static struct address_space *default_thread_address_space |
81 | (struct target_ops *self, ptid_t ptid); | |
82 | ||
c25c4a8b | 83 | static void tcomplain (void) ATTRIBUTE_NORETURN; |
c906108c | 84 | |
555bbdeb TT |
85 | static int return_zero (struct target_ops *); |
86 | ||
87 | static int return_zero_has_execution (struct target_ops *, ptid_t); | |
c906108c | 88 | |
a14ed312 | 89 | static void target_command (char *, int); |
c906108c | 90 | |
a14ed312 | 91 | static struct target_ops *find_default_run_target (char *); |
c906108c | 92 | |
c2250ad1 UW |
93 | static struct gdbarch *default_thread_architecture (struct target_ops *ops, |
94 | ptid_t ptid); | |
95 | ||
0b5a2719 TT |
96 | static int dummy_find_memory_regions (struct target_ops *self, |
97 | find_memory_region_ftype ignore1, | |
98 | void *ignore2); | |
99 | ||
16f796b1 TT |
100 | static char *dummy_make_corefile_notes (struct target_ops *self, |
101 | bfd *ignore1, int *ignore2); | |
102 | ||
770234d3 TT |
103 | static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid); |
104 | ||
fe31bf5b TT |
105 | static enum exec_direction_kind default_execution_direction |
106 | (struct target_ops *self); | |
107 | ||
a7068b60 TT |
108 | static struct target_ops debug_target; |
109 | ||
1101cb7b TT |
110 | #include "target-delegates.c" |
111 | ||
a14ed312 | 112 | static void init_dummy_target (void); |
c906108c | 113 | |
3cecbbbe TT |
114 | static void update_current_target (void); |
115 | ||
89a1c21a SM |
116 | /* Vector of existing target structures. */ |
117 | typedef struct target_ops *target_ops_p; | |
118 | DEF_VEC_P (target_ops_p); | |
119 | static VEC (target_ops_p) *target_structs; | |
c906108c SS |
120 | |
121 | /* The initial current target, so that there is always a semi-valid | |
122 | current target. */ | |
123 | ||
124 | static struct target_ops dummy_target; | |
125 | ||
126 | /* Top of target stack. */ | |
127 | ||
258b763a | 128 | static struct target_ops *target_stack; |
c906108c SS |
129 | |
130 | /* The target structure we are currently using to talk to a process | |
131 | or file or whatever "inferior" we have. */ | |
132 | ||
133 | struct target_ops current_target; | |
134 | ||
135 | /* Command list for target. */ | |
136 | ||
137 | static struct cmd_list_element *targetlist = NULL; | |
138 | ||
cf7a04e8 DJ |
139 | /* Nonzero if we should trust readonly sections from the |
140 | executable when reading memory. */ | |
141 | ||
142 | static int trust_readonly = 0; | |
143 | ||
8defab1a DJ |
144 | /* Nonzero if we should show true memory contents, including |
145 | any memory breakpoints inserted by GDB. */ |
146 | ||
147 | static int show_memory_breakpoints = 0; | |
148 | ||
d914c394 SS |
149 | /* These globals control whether GDB attempts to perform these |
150 | operations; they are useful for targets that need to prevent | |
151 | inadvertent disruption, such as in non-stop mode. */ |
152 | ||
153 | int may_write_registers = 1; | |
154 | ||
155 | int may_write_memory = 1; | |
156 | ||
157 | int may_insert_breakpoints = 1; | |
158 | ||
159 | int may_insert_tracepoints = 1; | |
160 | ||
161 | int may_insert_fast_tracepoints = 1; | |
162 | ||
163 | int may_stop = 1; | |
164 | ||
c906108c SS |
165 | /* Non-zero if we want to see trace of target level stuff. */ |
166 | ||
ccce17b0 | 167 | static unsigned int targetdebug = 0; |
3cecbbbe TT |
168 | |
169 | static void | |
170 | set_targetdebug (char *args, int from_tty, struct cmd_list_element *c) | |
171 | { | |
172 | update_current_target (); | |
173 | } | |
174 | ||
920d2a44 AC |
175 | static void |
176 | show_targetdebug (struct ui_file *file, int from_tty, | |
177 | struct cmd_list_element *c, const char *value) | |
178 | { | |
179 | fprintf_filtered (file, _("Target debugging is %s.\n"), value); | |
180 | } | |
c906108c | 181 | |
a14ed312 | 182 | static void setup_target_debug (void); |
c906108c | 183 | |
c906108c SS |
184 | /* The user just typed 'target' without the name of a target. */ |
185 | ||
c906108c | 186 | static void |
fba45db2 | 187 | target_command (char *arg, int from_tty) |
c906108c SS |
188 | { |
189 | fputs_filtered ("Argument required (target name). Try `help target'\n", | |
190 | gdb_stdout); | |
191 | } | |
192 | ||
c35b1492 PA |
193 | /* Default target_has_* methods for process_stratum targets. */ |
194 | ||
195 | int | |
196 | default_child_has_all_memory (struct target_ops *ops) | |
197 | { | |
198 | /* If no inferior selected, then we can't read memory here. */ | |
199 | if (ptid_equal (inferior_ptid, null_ptid)) | |
200 | return 0; | |
201 | ||
202 | return 1; | |
203 | } | |
204 | ||
205 | int | |
206 | default_child_has_memory (struct target_ops *ops) | |
207 | { | |
208 | /* If no inferior selected, then we can't read memory here. */ | |
209 | if (ptid_equal (inferior_ptid, null_ptid)) | |
210 | return 0; | |
211 | ||
212 | return 1; | |
213 | } | |
214 | ||
215 | int | |
216 | default_child_has_stack (struct target_ops *ops) | |
217 | { | |
218 | /* If no inferior selected, there's no stack. */ | |
219 | if (ptid_equal (inferior_ptid, null_ptid)) | |
220 | return 0; | |
221 | ||
222 | return 1; | |
223 | } | |
224 | ||
225 | int | |
226 | default_child_has_registers (struct target_ops *ops) | |
227 | { | |
228 | /* Can't read registers from no inferior. */ | |
229 | if (ptid_equal (inferior_ptid, null_ptid)) | |
230 | return 0; | |
231 | ||
232 | return 1; | |
233 | } | |
234 | ||
235 | int | |
aeaec162 | 236 | default_child_has_execution (struct target_ops *ops, ptid_t the_ptid) |
c35b1492 PA |
237 | { |
238 | /* If there's no thread selected, then we can't make it run through | |
239 | hoops. */ | |
aeaec162 | 240 | if (ptid_equal (the_ptid, null_ptid)) |
c35b1492 PA |
241 | return 0; |
242 | ||
243 | return 1; | |
244 | } | |
245 | ||
246 | ||
247 | int | |
248 | target_has_all_memory_1 (void) | |
249 | { | |
250 | struct target_ops *t; | |
251 | ||
252 | for (t = current_target.beneath; t != NULL; t = t->beneath) | |
253 | if (t->to_has_all_memory (t)) | |
254 | return 1; | |
255 | ||
256 | return 0; | |
257 | } | |
258 | ||
259 | int | |
260 | target_has_memory_1 (void) | |
261 | { | |
262 | struct target_ops *t; | |
263 | ||
264 | for (t = current_target.beneath; t != NULL; t = t->beneath) | |
265 | if (t->to_has_memory (t)) | |
266 | return 1; | |
267 | ||
268 | return 0; | |
269 | } | |
270 | ||
271 | int | |
272 | target_has_stack_1 (void) | |
273 | { | |
274 | struct target_ops *t; | |
275 | ||
276 | for (t = current_target.beneath; t != NULL; t = t->beneath) | |
277 | if (t->to_has_stack (t)) | |
278 | return 1; | |
279 | ||
280 | return 0; | |
281 | } | |
282 | ||
283 | int | |
284 | target_has_registers_1 (void) | |
285 | { | |
286 | struct target_ops *t; | |
287 | ||
288 | for (t = current_target.beneath; t != NULL; t = t->beneath) | |
289 | if (t->to_has_registers (t)) | |
290 | return 1; | |
291 | ||
292 | return 0; | |
293 | } | |
294 | ||
295 | int | |
aeaec162 | 296 | target_has_execution_1 (ptid_t the_ptid) |
c35b1492 PA |
297 | { |
298 | struct target_ops *t; | |
299 | ||
300 | for (t = current_target.beneath; t != NULL; t = t->beneath) | |
aeaec162 | 301 | if (t->to_has_execution (t, the_ptid)) |
c35b1492 PA |
302 | return 1; |
303 | ||
304 | return 0; | |
305 | } | |
306 | ||
aeaec162 TT |
307 | int |
308 | target_has_execution_current (void) | |
309 | { | |
310 | return target_has_execution_1 (inferior_ptid); | |
311 | } | |
312 | ||
c22a2b88 TT |
313 | /* Complete initialization of T. This ensures that various fields in |
314 | T are set, if needed by the target implementation. */ | |
c906108c SS |
315 | |
316 | void | |
c22a2b88 | 317 | complete_target_initialization (struct target_ops *t) |
c906108c | 318 | { |
0088c768 | 319 | /* Provide default values for all "must have" methods. */ |
0088c768 | 320 | |
c35b1492 | 321 | if (t->to_has_all_memory == NULL) |
555bbdeb | 322 | t->to_has_all_memory = return_zero; |
c35b1492 PA |
323 | |
324 | if (t->to_has_memory == NULL) | |
555bbdeb | 325 | t->to_has_memory = return_zero; |
c35b1492 PA |
326 | |
327 | if (t->to_has_stack == NULL) | |
555bbdeb | 328 | t->to_has_stack = return_zero; |
c35b1492 PA |
329 | |
330 | if (t->to_has_registers == NULL) | |
555bbdeb | 331 | t->to_has_registers = return_zero; |
c35b1492 PA |
332 | |
333 | if (t->to_has_execution == NULL) | |
555bbdeb | 334 | t->to_has_execution = return_zero_has_execution; |
1101cb7b | 335 | |
b3ccfe11 TT |
336 | /* These methods can be called on an unpushed target and so require |
337 | a default implementation if the target might plausibly be the | |
338 | default run target. */ | |
339 | gdb_assert (t->to_can_run == NULL || (t->to_can_async_p != NULL | |
340 | && t->to_supports_non_stop != NULL)); | |
341 | ||
1101cb7b | 342 | install_delegators (t); |
c22a2b88 TT |
343 | } |
344 | ||
8981c758 TT |
345 | /* This is used to implement the various target commands. */ |
346 | ||
347 | static void | |
348 | open_target (char *args, int from_tty, struct cmd_list_element *command) | |
349 | { | |
19ba03f4 | 350 | struct target_ops *ops = (struct target_ops *) get_cmd_context (command); |
8981c758 TT |
351 | |
352 | if (targetdebug) | |
353 | fprintf_unfiltered (gdb_stdlog, "-> %s->to_open (...)\n", | |
354 | ops->to_shortname); | |
355 | ||
356 | ops->to_open (args, from_tty); | |
357 | ||
358 | if (targetdebug) | |
359 | fprintf_unfiltered (gdb_stdlog, "<- %s->to_open (%s, %d)\n", | |
360 | ops->to_shortname, args, from_tty); | |
361 | } | |
362 | ||
c22a2b88 TT |
363 | /* Add possible target architecture T to the list and add a new |
364 | command 'target T->to_shortname'. Set COMPLETER as the command's | |
365 | completer if not NULL. */ | |
366 | ||
367 | void | |
368 | add_target_with_completer (struct target_ops *t, | |
369 | completer_ftype *completer) | |
370 | { | |
371 | struct cmd_list_element *c; | |
372 | ||
373 | complete_target_initialization (t); | |
c35b1492 | 374 | |
89a1c21a | 375 | VEC_safe_push (target_ops_p, target_structs, t); |
c906108c SS |
376 | |
377 | if (targetlist == NULL) | |
1bedd215 AC |
378 | add_prefix_cmd ("target", class_run, target_command, _("\ |
379 | Connect to a target machine or process.\n\ | |
c906108c SS |
380 | The first argument is the type or protocol of the target machine.\n\ |
381 | Remaining arguments are interpreted by the target protocol. For more\n\ | |
382 | information on the arguments for a particular protocol, type\n\ | |
1bedd215 | 383 | `help target ' followed by the protocol name."), |
c906108c | 384 | &targetlist, "target ", 0, &cmdlist); |
8981c758 TT |
385 | c = add_cmd (t->to_shortname, no_class, NULL, t->to_doc, &targetlist); |
386 | set_cmd_sfunc (c, open_target); | |
387 | set_cmd_context (c, t); | |
9852c492 YQ |
388 | if (completer != NULL) |
389 | set_cmd_completer (c, completer); | |
390 | } | |
391 | ||
392 | /* Add a possible target architecture to the list. */ | |
393 | ||
394 | void | |
395 | add_target (struct target_ops *t) | |
396 | { | |
397 | add_target_with_completer (t, NULL); | |
c906108c SS |
398 | } |
399 | ||
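The registration path above (complete_target_initialization plus a "target <shortname>" command wired to open_target) is how a backend makes itself available. Below is a minimal sketch of what a backend's _initialize routine might look like in this era of GDB; it assumes defs.h/target.h are included, and the "example_*" names are hypothetical, not part of this file.

/* Hypothetical backend registration sketch.  */

static struct target_ops example_ops;

/* to_open callback: connect to the backend, then push it.  */
static void
example_open (char *args, int from_tty)
{
  /* ... establish the connection here ...  */
  push_target (&example_ops);
}

static void
init_example_ops (void)
{
  example_ops.to_shortname = "example";
  example_ops.to_longname = "Hypothetical example target";
  example_ops.to_doc = "Connect to the hypothetical example backend.";
  example_ops.to_open = example_open;
  example_ops.to_stratum = process_stratum;
  example_ops.to_magic = OPS_MAGIC;
}

void
_initialize_example_target (void)
{
  init_example_ops ();
  /* add_target runs complete_target_initialization and creates the
     "target example" command.  */
  add_target (&example_ops);
}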
b48d48eb MM |
400 | /* See target.h. */ |
401 | ||
402 | void | |
403 | add_deprecated_target_alias (struct target_ops *t, char *alias) | |
404 | { | |
405 | struct cmd_list_element *c; | |
406 | char *alt; | |
407 | ||
408 | /* If we use add_alias_cmd, here, we do not get the deprecated warning, | |
409 | see PR cli/15104. */ | |
8981c758 TT |
410 | c = add_cmd (alias, no_class, NULL, t->to_doc, &targetlist); |
411 | set_cmd_sfunc (c, open_target); | |
412 | set_cmd_context (c, t); | |
b48d48eb MM |
413 | alt = xstrprintf ("target %s", t->to_shortname); |
414 | deprecate_cmd (c, alt); | |
415 | } | |
416 | ||
c906108c SS |
417 | /* Stub functions */ |
418 | ||
7d85a9c0 JB |
419 | void |
420 | target_kill (void) | |
421 | { | |
423a4807 | 422 | current_target.to_kill (¤t_target); |
7d85a9c0 JB |
423 | } |
424 | ||
11cf8741 | 425 | void |
9cbe5fff | 426 | target_load (const char *arg, int from_tty) |
11cf8741 | 427 | { |
4e5d721f | 428 | target_dcache_invalidate (); |
71a9f134 | 429 | (*current_target.to_load) (¤t_target, arg, from_tty); |
11cf8741 JM |
430 | } |
431 | ||
5842f62a PA |
432 | /* Possible terminal states. */ |
433 | ||
434 | enum terminal_state | |
435 | { | |
436 | /* The inferior's terminal settings are in effect. */ | |
437 | terminal_is_inferior = 0, | |
438 | ||
439 | /* Some of our terminal settings are in effect, enough to get | |
440 | proper output. */ | |
441 | terminal_is_ours_for_output = 1, | |
442 | ||
443 | /* Our terminal settings are in effect, for output and input. */ | |
444 | terminal_is_ours = 2 | |
445 | }; | |
446 | ||
7afa63c6 | 447 | static enum terminal_state terminal_state = terminal_is_ours; |
5842f62a PA |
448 | |
449 | /* See target.h. */ | |
450 | ||
451 | void | |
452 | target_terminal_init (void) | |
453 | { | |
454 | (*current_target.to_terminal_init) (¤t_target); | |
455 | ||
456 | terminal_state = terminal_is_ours; | |
457 | } | |
458 | ||
459 | /* See target.h. */ | |
460 | ||
6fdebc3d PA |
461 | int |
462 | target_terminal_is_inferior (void) | |
463 | { | |
464 | return (terminal_state == terminal_is_inferior); | |
465 | } | |
466 | ||
467 | /* See target.h. */ | |
468 | ||
d9d2d8b6 PA |
469 | void |
470 | target_terminal_inferior (void) | |
471 | { | |
472 | /* A background resume (``run&'') should leave GDB in control of the | |
c378eb4e | 473 | terminal. Use target_can_async_p, not target_is_async_p, since at |
ba7f6c64 VP |
474 | this point the target is not async yet. However, if sync_execution |
475 | is not set, we know it will become async prior to resume. */ | |
476 | if (target_can_async_p () && !sync_execution) | |
d9d2d8b6 PA |
477 | return; |
478 | ||
5842f62a PA |
479 | if (terminal_state == terminal_is_inferior) |
480 | return; | |
481 | ||
d9d2d8b6 PA |
482 | /* If GDB is resuming the inferior in the foreground, install |
483 | inferior's terminal modes. */ | |
d2f640d4 | 484 | (*current_target.to_terminal_inferior) (¤t_target); |
5842f62a PA |
485 | terminal_state = terminal_is_inferior; |
486 | } | |
487 | ||
488 | /* See target.h. */ | |
489 | ||
490 | void | |
491 | target_terminal_ours (void) | |
492 | { | |
493 | if (terminal_state == terminal_is_ours) | |
494 | return; | |
495 | ||
496 | (*current_target.to_terminal_ours) (¤t_target); | |
497 | terminal_state = terminal_is_ours; | |
498 | } | |
499 | ||
500 | /* See target.h. */ | |
501 | ||
502 | void | |
503 | target_terminal_ours_for_output (void) | |
504 | { | |
505 | if (terminal_state != terminal_is_inferior) | |
506 | return; | |
507 | (*current_target.to_terminal_ours_for_output) (¤t_target); | |
508 | terminal_state = terminal_is_ours_for_output; | |
d9d2d8b6 | 509 | } |
136d6dae | 510 | |
b0ed115f TT |
511 | /* See target.h. */ |
512 | ||
513 | int | |
514 | target_supports_terminal_ours (void) | |
515 | { | |
516 | struct target_ops *t; | |
517 | ||
518 | for (t = current_target.beneath; t != NULL; t = t->beneath) | |
519 | { | |
520 | if (t->to_terminal_ours != delegate_terminal_ours | |
521 | && t->to_terminal_ours != tdefault_terminal_ours) | |
522 | return 1; | |
523 | } | |
524 | ||
525 | return 0; | |
526 | } | |
527 | ||
1abf3a14 SM |
528 | /* Restore the terminal to its previous state (helper for |
529 | make_cleanup_restore_target_terminal). */ | |
530 | ||
531 | static void | |
532 | cleanup_restore_target_terminal (void *arg) | |
533 | { | |
19ba03f4 | 534 | enum terminal_state *previous_state = (enum terminal_state *) arg; |
1abf3a14 SM |
535 | |
536 | switch (*previous_state) | |
537 | { | |
538 | case terminal_is_ours: | |
539 | target_terminal_ours (); | |
540 | break; | |
541 | case terminal_is_ours_for_output: | |
542 | target_terminal_ours_for_output (); | |
543 | break; | |
544 | case terminal_is_inferior: | |
545 | target_terminal_inferior (); | |
546 | break; | |
547 | } | |
548 | } | |
549 | ||
550 | /* See target.h. */ | |
551 | ||
552 | struct cleanup * | |
553 | make_cleanup_restore_target_terminal (void) | |
554 | { | |
8d749320 | 555 | enum terminal_state *ts = XNEW (enum terminal_state); |
1abf3a14 SM |
556 | |
557 | *ts = terminal_state; | |
558 | ||
559 | return make_cleanup_dtor (cleanup_restore_target_terminal, ts, xfree); | |
560 | } | |
561 | ||
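A sketch of how a caller might use the cleanup above to temporarily take the terminal for GDB output and then restore whatever state was previously in effect; the helper name is hypothetical.

/* Hypothetical caller of make_cleanup_restore_target_terminal.  */
static void
example_report_progress (const char *msg)
{
  struct cleanup *old_chain = make_cleanup_restore_target_terminal ();

  /* Make sure GDB's terminal settings are in effect for output.  */
  target_terminal_ours_for_output ();
  printf_unfiltered ("%s\n", msg);

  /* Restores terminal_is_inferior/ours/ours_for_output as recorded.  */
  do_cleanups (old_chain);
}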
c906108c | 562 | static void |
fba45db2 | 563 | tcomplain (void) |
c906108c | 564 | { |
8a3fe4f8 | 565 | error (_("You can't do that when your target is `%s'"), |
c906108c SS |
566 | current_target.to_shortname); |
567 | } | |
568 | ||
569 | void | |
fba45db2 | 570 | noprocess (void) |
c906108c | 571 | { |
8a3fe4f8 | 572 | error (_("You can't do that without a process to debug.")); |
c906108c SS |
573 | } |
574 | ||
c906108c | 575 | static void |
0a4f40a2 | 576 | default_terminal_info (struct target_ops *self, const char *args, int from_tty) |
c906108c | 577 | { |
a3f17187 | 578 | printf_unfiltered (_("No saved terminal information.\n")); |
c906108c SS |
579 | } |
580 | ||
0ef643c8 JB |
581 | /* A default implementation for the to_get_ada_task_ptid target method. |
582 | ||
583 | This function builds the PTID by using both LWP and TID as part of | |
584 | the PTID lwp and tid elements. The pid used is the pid of the | |
585 | inferior_ptid. */ | |
586 | ||
2c0b251b | 587 | static ptid_t |
1e6b91a4 | 588 | default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid) |
0ef643c8 JB |
589 | { |
590 | return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid); | |
591 | } | |
592 | ||
32231432 | 593 | static enum exec_direction_kind |
4c612759 | 594 | default_execution_direction (struct target_ops *self) |
32231432 PA |
595 | { |
596 | if (!target_can_execute_reverse) | |
597 | return EXEC_FORWARD; | |
598 | else if (!target_can_async_p ()) | |
599 | return EXEC_FORWARD; | |
600 | else | |
601 | gdb_assert_not_reached ("\ | |
602 | to_execution_direction must be implemented for reverse async"); | |
603 | } | |
604 | ||
7998dfc3 AC |
605 | /* Go through the target stack from top to bottom, copying over zero |
606 | entries in current_target, then filling in still empty entries. In | |
607 | effect, we are doing class inheritance through the pushed target | |
608 | vectors. | |
609 | ||
610 | NOTE: cagney/2003-10-17: The problem with this inheritance, as it | |
611 | is currently implemented, is that it discards any knowledge of | |
612 | which target an inherited method originally belonged to. | |
613 | Consequently, new target methods should instead explicitly and |
614 | locally search the target stack for the target that can handle the | |
615 | request. */ | |
c906108c SS |
616 | |
617 | static void | |
7998dfc3 | 618 | update_current_target (void) |
c906108c | 619 | { |
7998dfc3 AC |
620 | struct target_ops *t; |
621 | ||
08d8bcd7 | 622 | /* First, reset current's contents. */ |
7998dfc3 AC |
623 | memset (¤t_target, 0, sizeof (current_target)); |
624 | ||
1101cb7b TT |
625 | /* Install the delegators. */ |
626 | install_delegators (¤t_target); | |
627 | ||
be4ddd36 TT |
628 | current_target.to_stratum = target_stack->to_stratum; |
629 | ||
7998dfc3 AC |
630 | #define INHERIT(FIELD, TARGET) \ |
631 | if (!current_target.FIELD) \ | |
632 | current_target.FIELD = (TARGET)->FIELD | |
633 | ||
be4ddd36 TT |
634 | /* Do not add any new INHERITs here. Instead, use the delegation |
635 | mechanism provided by make-target-delegates. */ | |
7998dfc3 AC |
636 | for (t = target_stack; t; t = t->beneath) |
637 | { | |
638 | INHERIT (to_shortname, t); | |
639 | INHERIT (to_longname, t); | |
dc177b7a | 640 | INHERIT (to_attach_no_wait, t); |
74174d2e | 641 | INHERIT (to_have_steppable_watchpoint, t); |
7998dfc3 | 642 | INHERIT (to_have_continuable_watchpoint, t); |
7998dfc3 | 643 | INHERIT (to_has_thread_control, t); |
7998dfc3 AC |
644 | } |
645 | #undef INHERIT | |
646 | ||
7998dfc3 AC |
647 | /* Finally, position the target-stack beneath the squashed |
648 | "current_target". That way code looking for a non-inherited | |
649 | target method can quickly and simply find it. */ | |
650 | current_target.beneath = target_stack; | |
b4b61fdb DJ |
651 | |
652 | if (targetdebug) | |
653 | setup_target_debug (); | |
c906108c SS |
654 | } |
655 | ||
656 | /* Push a new target type into the stack of the existing target accessors, | |
657 | possibly superseding some of the existing accessors. | |
658 | ||
c906108c SS |
659 | Rather than allow an empty stack, we always have the dummy target at |
660 | the bottom stratum, so we can call the function vectors without | |
661 | checking them. */ | |
662 | ||
b26a4dcb | 663 | void |
fba45db2 | 664 | push_target (struct target_ops *t) |
c906108c | 665 | { |
258b763a | 666 | struct target_ops **cur; |
c906108c SS |
667 | |
668 | /* Check magic number. If wrong, it probably means someone changed | |
669 | the struct definition, but not all the places that initialize one. */ | |
670 | if (t->to_magic != OPS_MAGIC) | |
671 | { | |
c5aa993b JM |
672 | fprintf_unfiltered (gdb_stderr, |
673 | "Magic number of %s target struct wrong\n", | |
674 | t->to_shortname); | |
3e43a32a MS |
675 | internal_error (__FILE__, __LINE__, |
676 | _("failed internal consistency check")); | |
c906108c SS |
677 | } |
678 | ||
258b763a AC |
679 | /* Find the proper stratum to install this target in. */ |
680 | for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath) | |
c906108c | 681 | { |
258b763a | 682 | if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum) |
c906108c SS |
683 | break; |
684 | } | |
685 | ||
258b763a | 686 | /* If there's already targets at this stratum, remove them. */ |
88c231eb | 687 | /* FIXME: cagney/2003-10-15: I think this should be popping all |
258b763a AC |
688 | targets to CUR, and not just those at this stratum level. */ |
689 | while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum) | |
690 | { | |
691 | /* There's already something at this stratum level. Close it, | |
692 | and un-hook it from the stack. */ | |
693 | struct target_ops *tmp = (*cur); | |
5d502164 | 694 | |
258b763a AC |
695 | (*cur) = (*cur)->beneath; |
696 | tmp->beneath = NULL; | |
460014f5 | 697 | target_close (tmp); |
258b763a | 698 | } |
c906108c SS |
699 | |
700 | /* We have removed all targets in our stratum, now add the new one. */ | |
258b763a AC |
701 | t->beneath = (*cur); |
702 | (*cur) = t; | |
c906108c SS |
703 | |
704 | update_current_target (); | |
c906108c SS |
705 | } |
706 | ||
2bc416ba | 707 | /* Remove a target_ops vector from the stack, wherever it may be. |
c906108c SS |
708 | Return how many times it was removed (0 or 1). */ |
709 | ||
710 | int | |
fba45db2 | 711 | unpush_target (struct target_ops *t) |
c906108c | 712 | { |
258b763a AC |
713 | struct target_ops **cur; |
714 | struct target_ops *tmp; | |
c906108c | 715 | |
c8d104ad PA |
716 | if (t->to_stratum == dummy_stratum) |
717 | internal_error (__FILE__, __LINE__, | |
9b20d036 | 718 | _("Attempt to unpush the dummy target")); |
c8d104ad | 719 | |
c906108c | 720 | /* Look for the specified target. Note that we assume that a target |
c378eb4e | 721 | can only occur once in the target stack. */ |
c906108c | 722 | |
258b763a AC |
723 | for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath) |
724 | { | |
725 | if ((*cur) == t) | |
726 | break; | |
727 | } | |
c906108c | 728 | |
305436e0 PA |
729 | /* If we don't find target_ops, quit. Only open targets should be |
730 | closed. */ | |
258b763a | 731 | if ((*cur) == NULL) |
305436e0 | 732 | return 0; |
5269965e | 733 | |
c378eb4e | 734 | /* Unchain the target. */ |
258b763a AC |
735 | tmp = (*cur); |
736 | (*cur) = (*cur)->beneath; | |
737 | tmp->beneath = NULL; | |
c906108c SS |
738 | |
739 | update_current_target (); | |
c906108c | 740 | |
305436e0 PA |
741 | /* Finally close the target. Note we do this after unchaining, so |
742 | any target method calls from within the target_close | |
743 | implementation don't end up in T anymore. */ | |
460014f5 | 744 | target_close (t); |
305436e0 | 745 | |
c906108c SS |
746 | return 1; |
747 | } | |
748 | ||
915ef8b1 PA |
749 | /* Unpush TARGET and assert that it worked. */ |
750 | ||
751 | static void | |
752 | unpush_target_and_assert (struct target_ops *target) | |
753 | { | |
754 | if (!unpush_target (target)) | |
755 | { | |
756 | fprintf_unfiltered (gdb_stderr, | |
757 | "pop_all_targets couldn't find target %s\n", | |
758 | target->to_shortname); | |
759 | internal_error (__FILE__, __LINE__, | |
760 | _("failed internal consistency check")); | |
761 | } | |
762 | } | |
763 | ||
aa76d38d | 764 | void |
460014f5 | 765 | pop_all_targets_above (enum strata above_stratum) |
aa76d38d | 766 | { |
87ab71f0 | 767 | while ((int) (current_target.to_stratum) > (int) above_stratum) |
915ef8b1 PA |
768 | unpush_target_and_assert (target_stack); |
769 | } | |
770 | ||
771 | /* See target.h. */ | |
772 | ||
773 | void | |
774 | pop_all_targets_at_and_above (enum strata stratum) | |
775 | { | |
776 | while ((int) (current_target.to_stratum) >= (int) stratum) | |
777 | unpush_target_and_assert (target_stack); | |
aa76d38d PA |
778 | } |
779 | ||
87ab71f0 | 780 | void |
460014f5 | 781 | pop_all_targets (void) |
87ab71f0 | 782 | { |
460014f5 | 783 | pop_all_targets_above (dummy_stratum); |
87ab71f0 PA |
784 | } |
785 | ||
c0edd9ed JK |
786 | /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */ |
787 | ||
788 | int | |
789 | target_is_pushed (struct target_ops *t) | |
790 | { | |
84202f9c | 791 | struct target_ops *cur; |
c0edd9ed JK |
792 | |
793 | /* Check magic number. If wrong, it probably means someone changed | |
794 | the struct definition, but not all the places that initialize one. */ | |
795 | if (t->to_magic != OPS_MAGIC) | |
796 | { | |
797 | fprintf_unfiltered (gdb_stderr, | |
798 | "Magic number of %s target struct wrong\n", | |
799 | t->to_shortname); | |
3e43a32a MS |
800 | internal_error (__FILE__, __LINE__, |
801 | _("failed internal consistency check")); | |
c0edd9ed JK |
802 | } |
803 | ||
84202f9c TT |
804 | for (cur = target_stack; cur != NULL; cur = cur->beneath) |
805 | if (cur == t) | |
c0edd9ed JK |
806 | return 1; |
807 | ||
808 | return 0; | |
809 | } | |
810 | ||
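As a rough illustration of the stack primitives above: pushing replaces any target already at the same stratum, and unpushing closes a target only after it has been unchained. A hedged sketch of a backend teardown path (the function name is hypothetical):

/* Hypothetical teardown path for a backend.  */
static void
example_disconnect (struct target_ops *ops)
{
  /* Only open (pushed) targets should be closed; unpush_target
     returns 0 if the target was not found on the stack.  */
  if (target_is_pushed (ops))
    unpush_target (ops);
}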
f0f9ff95 TT |
811 | /* Default implementation of to_get_thread_local_address. */ |
812 | ||
813 | static void | |
814 | generic_tls_error (void) | |
815 | { | |
816 | throw_error (TLS_GENERIC_ERROR, | |
817 | _("Cannot find thread-local variables on this target")); | |
818 | } | |
819 | ||
72f5cf0e | 820 | /* Using the objfile specified in OBJFILE, find the address for the |
9e35dae4 DJ |
821 | current thread's thread-local storage with offset OFFSET. */ |
822 | CORE_ADDR | |
823 | target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset) | |
824 | { | |
825 | volatile CORE_ADDR addr = 0; | |
f0f9ff95 | 826 | struct target_ops *target = ¤t_target; |
9e35dae4 | 827 | |
f0f9ff95 | 828 | if (gdbarch_fetch_tls_load_module_address_p (target_gdbarch ())) |
9e35dae4 DJ |
829 | { |
830 | ptid_t ptid = inferior_ptid; | |
9e35dae4 | 831 | |
492d29ea | 832 | TRY |
9e35dae4 DJ |
833 | { |
834 | CORE_ADDR lm_addr; | |
835 | ||
836 | /* Fetch the load module address for this objfile. */ | |
f5656ead | 837 | lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (), |
9e35dae4 | 838 | objfile); |
9e35dae4 | 839 | |
3e43a32a MS |
840 | addr = target->to_get_thread_local_address (target, ptid, |
841 | lm_addr, offset); | |
9e35dae4 DJ |
842 | } |
843 | /* If an error occurred, print TLS related messages here. Otherwise, | |
844 | throw the error to some higher catcher. */ | |
492d29ea | 845 | CATCH (ex, RETURN_MASK_ALL) |
9e35dae4 DJ |
846 | { |
847 | int objfile_is_library = (objfile->flags & OBJF_SHARED); | |
848 | ||
849 | switch (ex.error) | |
850 | { | |
851 | case TLS_NO_LIBRARY_SUPPORT_ERROR: | |
3e43a32a MS |
852 | error (_("Cannot find thread-local variables " |
853 | "in this thread library.")); | |
9e35dae4 DJ |
854 | break; |
855 | case TLS_LOAD_MODULE_NOT_FOUND_ERROR: | |
856 | if (objfile_is_library) | |
857 | error (_("Cannot find shared library `%s' in dynamic" | |
4262abfb | 858 | " linker's load module list"), objfile_name (objfile)); |
9e35dae4 DJ |
859 | else |
860 | error (_("Cannot find executable file `%s' in dynamic" | |
4262abfb | 861 | " linker's load module list"), objfile_name (objfile)); |
9e35dae4 DJ |
862 | break; |
863 | case TLS_NOT_ALLOCATED_YET_ERROR: | |
864 | if (objfile_is_library) | |
865 | error (_("The inferior has not yet allocated storage for" | |
866 | " thread-local variables in\n" | |
867 | "the shared library `%s'\n" | |
868 | "for %s"), | |
4262abfb | 869 | objfile_name (objfile), target_pid_to_str (ptid)); |
9e35dae4 DJ |
870 | else |
871 | error (_("The inferior has not yet allocated storage for" | |
872 | " thread-local variables in\n" | |
873 | "the executable `%s'\n" | |
874 | "for %s"), | |
4262abfb | 875 | objfile_name (objfile), target_pid_to_str (ptid)); |
9e35dae4 DJ |
876 | break; |
877 | case TLS_GENERIC_ERROR: | |
878 | if (objfile_is_library) | |
879 | error (_("Cannot find thread-local storage for %s, " | |
880 | "shared library %s:\n%s"), | |
881 | target_pid_to_str (ptid), | |
4262abfb | 882 | objfile_name (objfile), ex.message); |
9e35dae4 DJ |
883 | else |
884 | error (_("Cannot find thread-local storage for %s, " | |
885 | "executable file %s:\n%s"), | |
886 | target_pid_to_str (ptid), | |
4262abfb | 887 | objfile_name (objfile), ex.message); |
9e35dae4 DJ |
888 | break; |
889 | default: | |
890 | throw_exception (ex); | |
891 | break; | |
892 | } | |
893 | } | |
492d29ea | 894 | END_CATCH |
9e35dae4 DJ |
895 | } |
896 | /* It wouldn't be wrong here to try a gdbarch method, too; finding | |
897 | TLS is an ABI-specific thing. But we don't do that yet. */ | |
898 | else | |
899 | error (_("Cannot find thread-local variables on this target")); | |
900 | ||
901 | return addr; | |
902 | } | |
903 | ||
6be7b56e | 904 | const char * |
01cb8804 | 905 | target_xfer_status_to_string (enum target_xfer_status status) |
6be7b56e PA |
906 | { |
907 | #define CASE(X) case X: return #X | |
01cb8804 | 908 | switch (status) |
6be7b56e PA |
909 | { |
910 | CASE(TARGET_XFER_E_IO); | |
bc113b4e | 911 | CASE(TARGET_XFER_UNAVAILABLE); |
6be7b56e PA |
912 | default: |
913 | return "<unknown>"; | |
914 | } | |
915 | #undef CASE | |
916 | }; | |
917 | ||
918 | ||
c906108c SS |
919 | #undef MIN |
920 | #define MIN(A, B) (((A) <= (B)) ? (A) : (B)) | |
921 | ||
922 | /* target_read_string -- read a null terminated string, up to LEN bytes, | |
923 | from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful. | |
924 | Set *STRING to a pointer to malloc'd memory containing the data; the caller | |
925 | is responsible for freeing it. Return the number of bytes successfully | |
926 | read. */ | |
927 | ||
928 | int | |
fba45db2 | 929 | target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop) |
c906108c | 930 | { |
c2e8b827 | 931 | int tlen, offset, i; |
1b0ba102 | 932 | gdb_byte buf[4]; |
c906108c SS |
933 | int errcode = 0; |
934 | char *buffer; | |
935 | int buffer_allocated; | |
936 | char *bufptr; | |
937 | unsigned int nbytes_read = 0; | |
938 | ||
6217bf3e MS |
939 | gdb_assert (string); |
940 | ||
c906108c SS |
941 | /* Small for testing. */ |
942 | buffer_allocated = 4; | |
224c3ddb | 943 | buffer = (char *) xmalloc (buffer_allocated); |
c906108c SS |
944 | bufptr = buffer; |
945 | ||
c906108c SS |
946 | while (len > 0) |
947 | { | |
948 | tlen = MIN (len, 4 - (memaddr & 3)); | |
949 | offset = memaddr & 3; | |
950 | ||
1b0ba102 | 951 | errcode = target_read_memory (memaddr & ~3, buf, sizeof buf); |
c906108c SS |
952 | if (errcode != 0) |
953 | { | |
954 | /* The transfer request might have crossed the boundary to an | |
c378eb4e | 955 | unallocated region of memory. Retry the transfer, requesting |
c906108c SS |
956 | a single byte. */ |
957 | tlen = 1; | |
958 | offset = 0; | |
b8eb5af0 | 959 | errcode = target_read_memory (memaddr, buf, 1); |
c906108c SS |
960 | if (errcode != 0) |
961 | goto done; | |
962 | } | |
963 | ||
964 | if (bufptr - buffer + tlen > buffer_allocated) | |
965 | { | |
966 | unsigned int bytes; | |
5d502164 | 967 | |
c906108c SS |
968 | bytes = bufptr - buffer; |
969 | buffer_allocated *= 2; | |
224c3ddb | 970 | buffer = (char *) xrealloc (buffer, buffer_allocated); |
c906108c SS |
971 | bufptr = buffer + bytes; |
972 | } | |
973 | ||
974 | for (i = 0; i < tlen; i++) | |
975 | { | |
976 | *bufptr++ = buf[i + offset]; | |
977 | if (buf[i + offset] == '\000') | |
978 | { | |
979 | nbytes_read += i + 1; | |
980 | goto done; | |
981 | } | |
982 | } | |
983 | ||
984 | memaddr += tlen; | |
985 | len -= tlen; | |
986 | nbytes_read += tlen; | |
987 | } | |
c5aa993b | 988 | done: |
6217bf3e | 989 | *string = buffer; |
c906108c SS |
990 | if (errnop != NULL) |
991 | *errnop = errcode; | |
c906108c SS |
992 | return nbytes_read; |
993 | } | |
994 | ||
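A sketch of a typical caller of target_read_string, assuming the usual GDB utility routines; example_print_c_string is hypothetical. Note that *STRING is set even on error, so the caller always frees the buffer.

/* Hypothetical caller: print a C string from the inferior.  */
static void
example_print_c_string (CORE_ADDR addr)
{
  char *str;
  int err;
  int nread = target_read_string (addr, &str, 200, &err);

  if (err == 0)
    printf_filtered ("\"%s\"\n", str);
  else
    printf_filtered (_("only %d bytes were readable (error %d)\n"),
                     nread, err);
  xfree (str);
}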
07b82ea5 PA |
995 | struct target_section_table * |
996 | target_get_section_table (struct target_ops *target) | |
997 | { | |
7e35c012 | 998 | return (*target->to_get_section_table) (target); |
07b82ea5 PA |
999 | } |
1000 | ||
8db32d44 | 1001 | /* Find a section containing ADDR. */ |
07b82ea5 | 1002 | |
0542c86d | 1003 | struct target_section * |
8db32d44 AC |
1004 | target_section_by_addr (struct target_ops *target, CORE_ADDR addr) |
1005 | { | |
07b82ea5 | 1006 | struct target_section_table *table = target_get_section_table (target); |
0542c86d | 1007 | struct target_section *secp; |
07b82ea5 PA |
1008 | |
1009 | if (table == NULL) | |
1010 | return NULL; | |
1011 | ||
1012 | for (secp = table->sections; secp < table->sections_end; secp++) | |
8db32d44 AC |
1013 | { |
1014 | if (addr >= secp->addr && addr < secp->endaddr) | |
1015 | return secp; | |
1016 | } | |
1017 | return NULL; | |
1018 | } | |
1019 | ||
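For illustration, a caller could map an address back to the name of the section containing it along these lines; the helper is hypothetical and relies only on fields already used elsewhere in this file.

/* Hypothetical helper: name of the target section containing ADDR.  */
static const char *
example_section_name_for_addr (CORE_ADDR addr)
{
  struct target_section *sec
    = target_section_by_addr (&current_target, addr);

  if (sec == NULL || sec->the_bfd_section == NULL)
    return NULL;
  return sec->the_bfd_section->name;
}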
0fec99e8 PA |
1020 | |
1021 | /* Helper for the memory xfer routines. Checks the attributes of the | |
1022 | memory region of MEMADDR against the read or write being attempted. | |
1023 | If the access is permitted returns true, otherwise returns false. | |
1024 | REGION_P is an optional output parameter. If not-NULL, it is | |
1025 | filled with a pointer to the memory region of MEMADDR. REG_LEN | |
1026 | returns LEN trimmed to the end of the region. This is how much the | |
1027 | caller can continue requesting, if the access is permitted. A | |
1028 | single xfer request must not straddle memory region boundaries. */ | |
1029 | ||
1030 | static int | |
1031 | memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf, | |
1032 | ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len, | |
1033 | struct mem_region **region_p) | |
1034 | { | |
1035 | struct mem_region *region; | |
1036 | ||
1037 | region = lookup_mem_region (memaddr); | |
1038 | ||
1039 | if (region_p != NULL) | |
1040 | *region_p = region; | |
1041 | ||
1042 | switch (region->attrib.mode) | |
1043 | { | |
1044 | case MEM_RO: | |
1045 | if (writebuf != NULL) | |
1046 | return 0; | |
1047 | break; | |
1048 | ||
1049 | case MEM_WO: | |
1050 | if (readbuf != NULL) | |
1051 | return 0; | |
1052 | break; | |
1053 | ||
1054 | case MEM_FLASH: | |
1055 | /* We only support writing to flash during "load" for now. */ | |
1056 | if (writebuf != NULL) | |
1057 | error (_("Writing to flash memory forbidden in this context")); | |
1058 | break; | |
1059 | ||
1060 | case MEM_NONE: | |
1061 | return 0; | |
1062 | } | |
1063 | ||
1064 | /* region->hi == 0 means there's no upper bound. */ | |
1065 | if (memaddr + len < region->hi || region->hi == 0) | |
1066 | *reg_len = len; | |
1067 | else | |
1068 | *reg_len = region->hi - memaddr; | |
1069 | ||
1070 | return 1; | |
1071 | } | |
1072 | ||
9f713294 YQ |
1073 | /* Read memory from more than one valid target. A core file, for |
1074 | instance, could have some of memory but delegate other bits to | |
1075 | the target below it. So, we must manually try all targets. */ | |
1076 | ||
cc9f16aa | 1077 | enum target_xfer_status |
17fde6d0 | 1078 | raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf, |
9b409511 YQ |
1079 | const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len, |
1080 | ULONGEST *xfered_len) | |
9f713294 | 1081 | { |
9b409511 | 1082 | enum target_xfer_status res; |
9f713294 YQ |
1083 | |
1084 | do | |
1085 | { | |
1086 | res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL, | |
9b409511 YQ |
1087 | readbuf, writebuf, memaddr, len, |
1088 | xfered_len); | |
1089 | if (res == TARGET_XFER_OK) | |
9f713294 YQ |
1090 | break; |
1091 | ||
633785ff | 1092 | /* Stop if the target reports that the memory is not available. */ |
bc113b4e | 1093 | if (res == TARGET_XFER_UNAVAILABLE) |
633785ff MM |
1094 | break; |
1095 | ||
9f713294 YQ |
1096 | /* We want to continue past core files to executables, but not |
1097 | past a running target's memory. */ | |
1098 | if (ops->to_has_all_memory (ops)) | |
1099 | break; | |
1100 | ||
1101 | ops = ops->beneath; | |
1102 | } | |
1103 | while (ops != NULL); | |
1104 | ||
0f26cec1 PA |
1105 | /* The cache works at the raw memory level. Make sure the cache |
1106 | gets updated with raw contents no matter what kind of memory | |
1107 | object was originally being written. Note we do write-through | |
1108 | first, so that if it fails, we don't write to the cache contents | |
1109 | that never made it to the target. */ | |
1110 | if (writebuf != NULL | |
1111 | && !ptid_equal (inferior_ptid, null_ptid) | |
1112 | && target_dcache_init_p () | |
1113 | && (stack_cache_enabled_p () || code_cache_enabled_p ())) | |
1114 | { | |
1115 | DCACHE *dcache = target_dcache_get (); | |
1116 | ||
1117 | /* Note that writing to an area of memory which wasn't present | |
1118 | in the cache doesn't cause it to be loaded in. */ | |
1119 | dcache_update (dcache, res, memaddr, writebuf, *xfered_len); | |
1120 | } | |
1121 | ||
9f713294 YQ |
1122 | return res; |
1123 | } | |
1124 | ||
7f79c47e DE |
1125 | /* Perform a partial memory transfer. |
1126 | For docs see target.h, to_xfer_partial. */ | |
cf7a04e8 | 1127 | |
9b409511 | 1128 | static enum target_xfer_status |
f0ba3972 | 1129 | memory_xfer_partial_1 (struct target_ops *ops, enum target_object object, |
17fde6d0 | 1130 | gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr, |
9b409511 | 1131 | ULONGEST len, ULONGEST *xfered_len) |
0779438d | 1132 | { |
9b409511 | 1133 | enum target_xfer_status res; |
0fec99e8 | 1134 | ULONGEST reg_len; |
cf7a04e8 | 1135 | struct mem_region *region; |
4e5d721f | 1136 | struct inferior *inf; |
cf7a04e8 | 1137 | |
07b82ea5 PA |
1138 | /* For accesses to unmapped overlay sections, read directly from |
1139 | files. Must do this first, as MEMADDR may need adjustment. */ | |
1140 | if (readbuf != NULL && overlay_debugging) | |
1141 | { | |
1142 | struct obj_section *section = find_pc_overlay (memaddr); | |
5d502164 | 1143 | |
07b82ea5 PA |
1144 | if (pc_in_unmapped_range (memaddr, section)) |
1145 | { | |
1146 | struct target_section_table *table | |
1147 | = target_get_section_table (ops); | |
1148 | const char *section_name = section->the_bfd_section->name; | |
5d502164 | 1149 | |
07b82ea5 PA |
1150 | memaddr = overlay_mapped_address (memaddr, section); |
1151 | return section_table_xfer_memory_partial (readbuf, writebuf, | |
9b409511 | 1152 | memaddr, len, xfered_len, |
07b82ea5 PA |
1153 | table->sections, |
1154 | table->sections_end, | |
1155 | section_name); | |
1156 | } | |
1157 | } | |
1158 | ||
1159 | /* Try the executable files, if "trust-readonly-sections" is set. */ | |
cf7a04e8 DJ |
1160 | if (readbuf != NULL && trust_readonly) |
1161 | { | |
0542c86d | 1162 | struct target_section *secp; |
07b82ea5 | 1163 | struct target_section_table *table; |
cf7a04e8 DJ |
1164 | |
1165 | secp = target_section_by_addr (ops, memaddr); | |
1166 | if (secp != NULL | |
2b2848e2 DE |
1167 | && (bfd_get_section_flags (secp->the_bfd_section->owner, |
1168 | secp->the_bfd_section) | |
cf7a04e8 | 1169 | & SEC_READONLY)) |
07b82ea5 PA |
1170 | { |
1171 | table = target_get_section_table (ops); | |
1172 | return section_table_xfer_memory_partial (readbuf, writebuf, | |
9b409511 | 1173 | memaddr, len, xfered_len, |
07b82ea5 PA |
1174 | table->sections, |
1175 | table->sections_end, | |
1176 | NULL); | |
1177 | } | |
98646950 UW |
1178 | } |
1179 | ||
cf7a04e8 | 1180 | /* Try GDB's internal data cache. */ |
cf7a04e8 | 1181 | |
0fec99e8 PA |
1182 | if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, ®_len, |
1183 | ®ion)) | |
1184 | return TARGET_XFER_E_IO; | |
cf7a04e8 | 1185 | |
6c95b8df | 1186 | if (!ptid_equal (inferior_ptid, null_ptid)) |
c9657e70 | 1187 | inf = find_inferior_ptid (inferior_ptid); |
6c95b8df PA |
1188 | else |
1189 | inf = NULL; | |
4e5d721f DE |
1190 | |
1191 | if (inf != NULL | |
0f26cec1 | 1192 | && readbuf != NULL |
2f4d8875 PA |
1193 | /* The dcache reads whole cache lines; that doesn't play well |
1194 | with reading from a trace buffer, because reading outside of | |
1195 | the collected memory range fails. */ | |
1196 | && get_traceframe_number () == -1 | |
4e5d721f | 1197 | && (region->attrib.cache |
29453a14 YQ |
1198 | || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY) |
1199 | || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY))) | |
cf7a04e8 | 1200 | { |
2a2f9fe4 YQ |
1201 | DCACHE *dcache = target_dcache_get_or_init (); |
1202 | ||
0f26cec1 PA |
1203 | return dcache_read_memory_partial (ops, dcache, memaddr, readbuf, |
1204 | reg_len, xfered_len); | |
cf7a04e8 DJ |
1205 | } |
1206 | ||
1207 | /* If none of those methods found the memory we wanted, fall back | |
1208 | to a target partial transfer. Normally a single call to | |
1209 | to_xfer_partial is enough; if it doesn't recognize an object | |
1210 | it will call the to_xfer_partial of the next target down. | |
1211 | But for memory this won't do. Memory is the only target | |
9b409511 YQ |
1212 | object which can be read from more than one valid target. |
1213 | A core file, for instance, could have some of memory but | |
1214 | delegate other bits to the target below it. So, we must | |
1215 | manually try all targets. */ | |
1216 | ||
1217 | res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len, | |
1218 | xfered_len); | |
cf7a04e8 DJ |
1219 | |
1220 | /* If we still haven't got anything, return the last error. We | |
1221 | give up. */ | |
1222 | return res; | |
0779438d AC |
1223 | } |
1224 | ||
f0ba3972 PA |
1225 | /* Perform a partial memory transfer. For docs see target.h, |
1226 | to_xfer_partial. */ | |
1227 | ||
9b409511 | 1228 | static enum target_xfer_status |
f0ba3972 | 1229 | memory_xfer_partial (struct target_ops *ops, enum target_object object, |
9b409511 YQ |
1230 | gdb_byte *readbuf, const gdb_byte *writebuf, |
1231 | ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len) | |
f0ba3972 | 1232 | { |
9b409511 | 1233 | enum target_xfer_status res; |
f0ba3972 PA |
1234 | |
1235 | /* Zero length requests are ok and require no work. */ | |
1236 | if (len == 0) | |
9b409511 | 1237 | return TARGET_XFER_EOF; |
f0ba3972 PA |
1238 | |
1239 | /* Fill in READBUF with breakpoint shadows, or WRITEBUF with | |
1240 | breakpoint insns, thus hiding out from higher layers whether | |
1241 | there are software breakpoints inserted in the code stream. */ | |
1242 | if (readbuf != NULL) | |
1243 | { | |
9b409511 YQ |
1244 | res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len, |
1245 | xfered_len); | |
f0ba3972 | 1246 | |
9b409511 | 1247 | if (res == TARGET_XFER_OK && !show_memory_breakpoints) |
c63528fc | 1248 | breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len); |
f0ba3972 PA |
1249 | } |
1250 | else | |
1251 | { | |
d7f3ff3e | 1252 | gdb_byte *buf; |
f0ba3972 PA |
1253 | struct cleanup *old_chain; |
1254 | ||
67c059c2 AB |
1255 | /* A large write request is likely to be partially satisfied |
1256 | by memory_xfer_partial_1. We will continually malloc | |
1257 | and free a copy of the entire write request for breakpoint | |
1258 | shadow handling even though we only end up writing a small | |
1259 | subset of it. Cap writes to 4KB to mitigate this. */ | |
1260 | len = min (4096, len); | |
1261 | ||
d7f3ff3e | 1262 | buf = (gdb_byte *) xmalloc (len); |
f0ba3972 PA |
1263 | old_chain = make_cleanup (xfree, buf); |
1264 | memcpy (buf, writebuf, len); | |
1265 | ||
1266 | breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len); | |
9b409511 YQ |
1267 | res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len, |
1268 | xfered_len); | |
f0ba3972 PA |
1269 | |
1270 | do_cleanups (old_chain); | |
1271 | } | |
1272 | ||
1273 | return res; | |
1274 | } | |
1275 | ||
8defab1a DJ |
1276 | static void |
1277 | restore_show_memory_breakpoints (void *arg) | |
1278 | { | |
1279 | show_memory_breakpoints = (uintptr_t) arg; | |
1280 | } | |
1281 | ||
1282 | struct cleanup * | |
1283 | make_show_memory_breakpoints_cleanup (int show) | |
1284 | { | |
1285 | int current = show_memory_breakpoints; | |
8defab1a | 1286 | |
5d502164 | 1287 | show_memory_breakpoints = show; |
8defab1a DJ |
1288 | return make_cleanup (restore_show_memory_breakpoints, |
1289 | (void *) (uintptr_t) current); | |
1290 | } | |
1291 | ||
7f79c47e DE |
1292 | /* For docs see target.h, to_xfer_partial. */ |
1293 | ||
9b409511 | 1294 | enum target_xfer_status |
27394598 AC |
1295 | target_xfer_partial (struct target_ops *ops, |
1296 | enum target_object object, const char *annex, | |
4ac248ca | 1297 | gdb_byte *readbuf, const gdb_byte *writebuf, |
9b409511 YQ |
1298 | ULONGEST offset, ULONGEST len, |
1299 | ULONGEST *xfered_len) | |
27394598 | 1300 | { |
9b409511 | 1301 | enum target_xfer_status retval; |
27394598 AC |
1302 | |
1303 | gdb_assert (ops->to_xfer_partial != NULL); | |
cf7a04e8 | 1304 | |
ce6d0892 YQ |
1305 | /* Transfer is done when LEN is zero. */ |
1306 | if (len == 0) | |
9b409511 | 1307 | return TARGET_XFER_EOF; |
ce6d0892 | 1308 | |
d914c394 SS |
1309 | if (writebuf && !may_write_memory) |
1310 | error (_("Writing to memory is not allowed (addr %s, len %s)"), | |
1311 | core_addr_to_string_nz (offset), plongest (len)); | |
1312 | ||
9b409511 YQ |
1313 | *xfered_len = 0; |
1314 | ||
cf7a04e8 DJ |
1315 | /* If this is a memory transfer, let the memory-specific code |
1316 | have a look at it instead. Memory transfers are more | |
1317 | complicated. */ | |
29453a14 YQ |
1318 | if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY |
1319 | || object == TARGET_OBJECT_CODE_MEMORY) | |
4e5d721f | 1320 | retval = memory_xfer_partial (ops, object, readbuf, |
9b409511 | 1321 | writebuf, offset, len, xfered_len); |
9f713294 | 1322 | else if (object == TARGET_OBJECT_RAW_MEMORY) |
cf7a04e8 | 1323 | { |
0fec99e8 PA |
1324 | /* Skip/avoid accessing the target if the memory region |
1325 | attributes block the access. Check this here instead of in | |
1326 | raw_memory_xfer_partial as otherwise we'd end up checking | |
1327 | this twice in the case of the memory_xfer_partial path is | |
1328 | taken; once before checking the dcache, and another in the | |
1329 | tail call to raw_memory_xfer_partial. */ | |
1330 | if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len, | |
1331 | NULL)) | |
1332 | return TARGET_XFER_E_IO; | |
1333 | ||
9f713294 | 1334 | /* Request the normal memory object from other layers. */ |
9b409511 YQ |
1335 | retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len, |
1336 | xfered_len); | |
cf7a04e8 | 1337 | } |
9f713294 YQ |
1338 | else |
1339 | retval = ops->to_xfer_partial (ops, object, annex, readbuf, | |
9b409511 | 1340 | writebuf, offset, len, xfered_len); |
cf7a04e8 | 1341 | |
27394598 AC |
1342 | if (targetdebug) |
1343 | { | |
1344 | const unsigned char *myaddr = NULL; | |
1345 | ||
1346 | fprintf_unfiltered (gdb_stdlog, | |
3e43a32a | 1347 | "%s:target_xfer_partial " |
9b409511 | 1348 | "(%d, %s, %s, %s, %s, %s) = %d, %s", |
27394598 AC |
1349 | ops->to_shortname, |
1350 | (int) object, | |
1351 | (annex ? annex : "(null)"), | |
53b71562 JB |
1352 | host_address_to_string (readbuf), |
1353 | host_address_to_string (writebuf), | |
0b1553bc | 1354 | core_addr_to_string_nz (offset), |
9b409511 YQ |
1355 | pulongest (len), retval, |
1356 | pulongest (*xfered_len)); | |
27394598 AC |
1357 | |
1358 | if (readbuf) | |
1359 | myaddr = readbuf; | |
1360 | if (writebuf) | |
1361 | myaddr = writebuf; | |
9b409511 | 1362 | if (retval == TARGET_XFER_OK && myaddr != NULL) |
27394598 AC |
1363 | { |
1364 | int i; | |
2bc416ba | 1365 | |
27394598 | 1366 | fputs_unfiltered (", bytes =", gdb_stdlog); |
9b409511 | 1367 | for (i = 0; i < *xfered_len; i++) |
27394598 | 1368 | { |
53b71562 | 1369 | if ((((intptr_t) &(myaddr[i])) & 0xf) == 0) |
27394598 AC |
1370 | { |
1371 | if (targetdebug < 2 && i > 0) | |
1372 | { | |
1373 | fprintf_unfiltered (gdb_stdlog, " ..."); | |
1374 | break; | |
1375 | } | |
1376 | fprintf_unfiltered (gdb_stdlog, "\n"); | |
1377 | } | |
2bc416ba | 1378 | |
27394598 AC |
1379 | fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff); |
1380 | } | |
1381 | } | |
2bc416ba | 1382 | |
27394598 AC |
1383 | fputc_unfiltered ('\n', gdb_stdlog); |
1384 | } | |
9b409511 YQ |
1385 | |
1386 | /* Check implementations of to_xfer_partial update *XFERED_LEN | |
1387 | properly. Do assertion after printing debug messages, so that we | |
1388 | can find more clues on assertion failure from debugging messages. */ | |
bc113b4e | 1389 | if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE) |
9b409511 YQ |
1390 | gdb_assert (*xfered_len > 0); |
1391 | ||
27394598 AC |
1392 | return retval; |
1393 | } | |
1394 | ||
578d3588 PA |
1395 | /* Read LEN bytes of target memory at address MEMADDR, placing the |
1396 | results in GDB's memory at MYADDR. Returns either 0 for success or | |
d09f2c3f | 1397 | -1 if any error occurs. |
c906108c SS |
1398 | |
1399 | If an error occurs, no guarantee is made about the contents of the data at | |
1400 | MYADDR. In particular, the caller should not depend upon partial reads | |
1401 | filling the buffer with good data. There is no way for the caller to know | |
1402 | how much good data might have been transferred anyway. Callers that can |
cf7a04e8 | 1403 | deal with partial reads should call target_read (which will retry until |
c378eb4e | 1404 | it makes no progress, and then return how much was transferred). */ |
c906108c SS |
1405 | |
1406 | int | |
1b162304 | 1407 | target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len) |
c906108c | 1408 | { |
c35b1492 PA |
1409 | /* Dispatch to the topmost target, not the flattened current_target. |
1410 | Memory accesses check target->to_has_(all_)memory, and the | |
1411 | flattened target doesn't inherit those. */ | |
1412 | if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL, | |
cf7a04e8 DJ |
1413 | myaddr, memaddr, len) == len) |
1414 | return 0; | |
0779438d | 1415 | else |
d09f2c3f | 1416 | return -1; |
c906108c SS |
1417 | } |
1418 | ||
721ec300 GB |
1419 | /* See target/target.h. */ |
1420 | ||
1421 | int | |
1422 | target_read_uint32 (CORE_ADDR memaddr, uint32_t *result) | |
1423 | { | |
1424 | gdb_byte buf[4]; | |
1425 | int r; | |
1426 | ||
1427 | r = target_read_memory (memaddr, buf, sizeof buf); | |
1428 | if (r != 0) | |
1429 | return r; | |
1430 | *result = extract_unsigned_integer (buf, sizeof buf, | |
1431 | gdbarch_byte_order (target_gdbarch ())); | |
1432 | return 0; | |
1433 | } | |
1434 | ||
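The read helpers above report success through their return value rather than through the output buffer, so callers check the result first. A hedged sketch of such a caller (the helper name is hypothetical):

/* Hypothetical caller of target_read_uint32.  */
static int
example_read_flag_word (CORE_ADDR addr, uint32_t *flags)
{
  if (target_read_uint32 (addr, flags) != 0)
    {
      warning (_("could not read flag word at %s"),
               paddress (target_gdbarch (), addr));
      return -1;
    }
  return 0;
}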
aee4bf85 PA |
1435 | /* Like target_read_memory, but specify explicitly that this is a read |
1436 | from the target's raw memory. That is, this read bypasses the | |
1437 | dcache, breakpoint shadowing, etc. */ | |
1438 | ||
1439 | int | |
1440 | target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len) | |
1441 | { | |
1442 | /* See comment in target_read_memory about why the request starts at | |
1443 | current_target.beneath. */ | |
1444 | if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL, | |
1445 | myaddr, memaddr, len) == len) | |
1446 | return 0; | |
1447 | else | |
d09f2c3f | 1448 | return -1; |
aee4bf85 PA |
1449 | } |
1450 | ||
4e5d721f DE |
1451 | /* Like target_read_memory, but specify explicitly that this is a read from |
1452 | the target's stack. This may trigger different cache behavior. */ | |
1453 | ||
1454 | int | |
45aa4659 | 1455 | target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len) |
4e5d721f | 1456 | { |
aee4bf85 PA |
1457 | /* See comment in target_read_memory about why the request starts at |
1458 | current_target.beneath. */ | |
4e5d721f DE |
1459 | if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL, |
1460 | myaddr, memaddr, len) == len) | |
1461 | return 0; | |
1462 | else | |
d09f2c3f | 1463 | return -1; |
4e5d721f DE |
1464 | } |
1465 | ||
29453a14 YQ |
1466 | /* Like target_read_memory, but specify explicitly that this is a read from |
1467 | the target's code. This may trigger different cache behavior. */ | |
1468 | ||
1469 | int | |
1470 | target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len) | |
1471 | { | |
aee4bf85 PA |
1472 | /* See comment in target_read_memory about why the request starts at |
1473 | current_target.beneath. */ | |
29453a14 YQ |
1474 | if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL, |
1475 | myaddr, memaddr, len) == len) | |
1476 | return 0; | |
1477 | else | |
d09f2c3f | 1478 | return -1; |
29453a14 YQ |
1479 | } |
1480 | ||
7f79c47e | 1481 | /* Write LEN bytes from MYADDR to target memory at address MEMADDR. |
d09f2c3f PA |
1482 | Returns either 0 for success or -1 if any error occurs. If an |
1483 | error occurs, no guarantee is made about how much data got written. | |
1484 | Callers that can deal with partial writes should call | |
1485 | target_write. */ | |
7f79c47e | 1486 | |
c906108c | 1487 | int |
45aa4659 | 1488 | target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len) |
c906108c | 1489 | { |
aee4bf85 PA |
1490 | /* See comment in target_read_memory about why the request starts at |
1491 | current_target.beneath. */ | |
c35b1492 | 1492 | if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL, |
cf7a04e8 DJ |
1493 | myaddr, memaddr, len) == len) |
1494 | return 0; | |
0779438d | 1495 | else |
d09f2c3f | 1496 | return -1; |
c906108c | 1497 | } |
c5aa993b | 1498 | |
f0ba3972 | 1499 | /* Write LEN bytes from MYADDR to target raw memory at address |
d09f2c3f PA |
1500 | MEMADDR. Returns either 0 for success or -1 if any error occurs. |
1501 | If an error occurs, no guarantee is made about how much data got | |
1502 | written. Callers that can deal with partial writes should call | |
1503 | target_write. */ | |
f0ba3972 PA |
1504 | |
1505 | int | |
45aa4659 | 1506 | target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len) |
f0ba3972 | 1507 | { |
aee4bf85 PA |
1508 | /* See comment in target_read_memory about why the request starts at |
1509 | current_target.beneath. */ | |
f0ba3972 PA |
1510 | if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL, |
1511 | myaddr, memaddr, len) == len) | |
1512 | return 0; | |
1513 | else | |
d09f2c3f | 1514 | return -1; |
f0ba3972 PA |
1515 | } |
1516 | ||
fd79ecee DJ |
1517 | /* Fetch the target's memory map. */ |
1518 | ||
1519 | VEC(mem_region_s) * | |
1520 | target_memory_map (void) | |
1521 | { | |
1522 | VEC(mem_region_s) *result; | |
1523 | struct mem_region *last_one, *this_one; | |
1524 | int ix; | |
1525 | struct target_ops *t; | |
1526 | ||
6b2c5a57 | 1527 | result = current_target.to_memory_map (&current_target); |
fd79ecee DJ |
1528 | if (result == NULL) |
1529 | return NULL; | |
1530 | ||
1531 | qsort (VEC_address (mem_region_s, result), | |
1532 | VEC_length (mem_region_s, result), | |
1533 | sizeof (struct mem_region), mem_region_cmp); | |
1534 | ||
1535 | /* Check that regions do not overlap. Simultaneously assign | |
1536 | a numbering for the "mem" commands to use to refer to | |
1537 | each region. */ | |
1538 | last_one = NULL; | |
1539 | for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++) | |
1540 | { | |
1541 | this_one->number = ix; | |
1542 | ||
1543 | if (last_one && last_one->hi > this_one->lo) | |
1544 | { | |
1545 | warning (_("Overlapping regions in memory map: ignoring")); | |
1546 | VEC_free (mem_region_s, result); | |
1547 | return NULL; | |
1548 | } | |
1549 | last_one = this_one; | |
1550 | } | |
1551 | ||
1552 | return result; | |
1553 | } | |
1554 | ||
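A sketch of consuming the sorted, numbered map returned above; the printing is illustrative and the usual VEC accessors are assumed:

  VEC(mem_region_s) *map = target_memory_map ();
  struct mem_region *r;
  int ix;

  if (map != NULL)
    {
      /* Regions are already sorted and numbered for the "mem" commands.  */
      for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
        printf_filtered ("mem region %d: %s..%s\n", r->number,
                         hex_string (r->lo), hex_string (r->hi));
      VEC_free (mem_region_s, map);
    }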
a76d924d DJ |
1555 | void |
1556 | target_flash_erase (ULONGEST address, LONGEST length) | |
1557 | { | |
e8a6c6ac | 1558 | current_target.to_flash_erase (&current_target, address, length); |
a76d924d DJ |
1559 | } |
1560 | ||
1561 | void | |
1562 | target_flash_done (void) | |
1563 | { | |
f6fb2925 | 1564 | current_target.to_flash_done (&current_target); |
a76d924d DJ |
1565 | } |
1566 | ||
920d2a44 AC |
1567 | static void |
1568 | show_trust_readonly (struct ui_file *file, int from_tty, | |
1569 | struct cmd_list_element *c, const char *value) | |
1570 | { | |
3e43a32a MS |
1571 | fprintf_filtered (file, |
1572 | _("Mode for reading from readonly sections is %s.\n"), | |
920d2a44 AC |
1573 | value); |
1574 | } | |
3a11626d | 1575 | |
7f79c47e | 1576 | /* Target vector read/write partial wrapper functions. */ |
0088c768 | 1577 | |
9b409511 | 1578 | static enum target_xfer_status |
1e3ff5ad AC |
1579 | target_read_partial (struct target_ops *ops, |
1580 | enum target_object object, | |
1b0ba102 | 1581 | const char *annex, gdb_byte *buf, |
9b409511 YQ |
1582 | ULONGEST offset, ULONGEST len, |
1583 | ULONGEST *xfered_len) | |
1e3ff5ad | 1584 | { |
9b409511 YQ |
1585 | return target_xfer_partial (ops, object, annex, buf, NULL, offset, len, |
1586 | xfered_len); | |
1e3ff5ad AC |
1587 | } |
1588 | ||
8a55ffb0 | 1589 | static enum target_xfer_status |
1e3ff5ad AC |
1590 | target_write_partial (struct target_ops *ops, |
1591 | enum target_object object, | |
1b0ba102 | 1592 | const char *annex, const gdb_byte *buf, |
9b409511 | 1593 | ULONGEST offset, LONGEST len, ULONGEST *xfered_len) |
1e3ff5ad | 1594 | { |
9b409511 YQ |
1595 | return target_xfer_partial (ops, object, annex, NULL, buf, offset, len, |
1596 | xfered_len); | |
1e3ff5ad AC |
1597 | } |
1598 | ||
1599 | /* Wrappers to perform the full transfer. */ | |
7f79c47e DE |
1600 | |
1601 | /* For docs on target_read see target.h. */ | |
1602 | ||
1e3ff5ad AC |
1603 | LONGEST |
1604 | target_read (struct target_ops *ops, | |
1605 | enum target_object object, | |
1b0ba102 | 1606 | const char *annex, gdb_byte *buf, |
1e3ff5ad AC |
1607 | ULONGEST offset, LONGEST len) |
1608 | { | |
279a6fed | 1609 | LONGEST xfered_total = 0; |
d309493c SM |
1610 | int unit_size = 1; |
1611 | ||
1612 | /* If we are reading from a memory object, find the length of an addressable | |
1613 | unit for that architecture. */ | |
1614 | if (object == TARGET_OBJECT_MEMORY | |
1615 | || object == TARGET_OBJECT_STACK_MEMORY | |
1616 | || object == TARGET_OBJECT_CODE_MEMORY | |
1617 | || object == TARGET_OBJECT_RAW_MEMORY) | |
1618 | unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ()); | |
5d502164 | 1619 | |
279a6fed | 1620 | while (xfered_total < len) |
1e3ff5ad | 1621 | { |
279a6fed | 1622 | ULONGEST xfered_partial; |
9b409511 YQ |
1623 | enum target_xfer_status status; |
1624 | ||
1625 | status = target_read_partial (ops, object, annex, | |
d309493c | 1626 | buf + xfered_total * unit_size, |
279a6fed SM |
1627 | offset + xfered_total, len - xfered_total, |
1628 | &xfered_partial); | |
5d502164 | 1629 | |
1e3ff5ad | 1630 | /* Call an observer, notifying them of the xfer progress? */ |
9b409511 | 1631 | if (status == TARGET_XFER_EOF) |
279a6fed | 1632 | return xfered_total; |
9b409511 YQ |
1633 | else if (status == TARGET_XFER_OK) |
1634 | { | |
279a6fed | 1635 | xfered_total += xfered_partial; |
9b409511 YQ |
1636 | QUIT; |
1637 | } | |
1638 | else | |
279a6fed | 1639 | return TARGET_XFER_E_IO; |
9b409511 | 1640 | |
1e3ff5ad AC |
1641 | } |
1642 | return len; | |
1643 | } | |
1644 | ||
f1a507a1 JB |
1645 | /* Assuming that the entire [begin, end) range of memory cannot be |
1646 | read, try to read whatever subrange is possible to read. | |
1647 | ||
1648 | The function returns, in RESULT, either zero or one memory block. | |
1649 | If there's a readable subrange at the beginning, it is completely | |
1650 | read and returned. Any further readable subrange will not be read. | |
1651 | Otherwise, if there's a readable subrange at the end, it will be | |
1652 | completely read and returned. Any readable subranges before it | |
1653 | (obviously, not starting at the beginning), will be ignored. In | |
1654 | other cases -- either no readable subrange, or readable subrange(s) | |
1655 | that are neither at the beginning nor at the end -- nothing is returned. | |
1656 | ||
1657 | The purpose of this function is to handle a read across a boundary | |
1658 | of accessible memory in a case when memory map is not available. | |
1659 | The above restrictions are fine for this case, but will give | |
1660 | incorrect results if the memory is 'patchy'. However, supporting | |
1661 | 'patchy' memory would require trying to read every single byte, | |
1662 | and that seems an unacceptable solution. Explicit memory map is | |
1663 | recommended for this case -- and target_read_memory_robust will | |
1664 | take care of reading multiple ranges then. */ | |
8dedea02 VP |
1665 | |
1666 | static void | |
3e43a32a | 1667 | read_whatever_is_readable (struct target_ops *ops, |
279a6fed | 1668 | const ULONGEST begin, const ULONGEST end, |
d309493c | 1669 | int unit_size, |
8dedea02 | 1670 | VEC(memory_read_result_s) **result) |
d5086790 | 1671 | { |
224c3ddb | 1672 | gdb_byte *buf = (gdb_byte *) xmalloc (end - begin); |
8dedea02 VP |
1673 | ULONGEST current_begin = begin; |
1674 | ULONGEST current_end = end; | |
1675 | int forward; | |
1676 | memory_read_result_s r; | |
9b409511 | 1677 | ULONGEST xfered_len; |
8dedea02 VP |
1678 | |
1679 | /* If we previously failed to read 1 byte, nothing can be done here. */ | |
1680 | if (end - begin <= 1) | |
13b3fd9b MS |
1681 | { |
1682 | xfree (buf); | |
1683 | return; | |
1684 | } | |
8dedea02 VP |
1685 | |
1686 | /* Check that either the first or the last byte is readable, and give up | |
c378eb4e | 1687 | if not. This heuristic is meant to permit reading accessible memory |
8dedea02 VP |
1688 | at the boundary of an accessible region. */ |
1689 | if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL, | |
9b409511 | 1690 | buf, begin, 1, &xfered_len) == TARGET_XFER_OK) |
8dedea02 VP |
1691 | { |
1692 | forward = 1; | |
1693 | ++current_begin; | |
1694 | } | |
1695 | else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL, | |
279a6fed | 1696 | buf + (end - begin) - 1, end - 1, 1, |
9b409511 | 1697 | &xfered_len) == TARGET_XFER_OK) |
8dedea02 VP |
1698 | { |
1699 | forward = 0; | |
1700 | --current_end; | |
1701 | } | |
1702 | else | |
1703 | { | |
13b3fd9b | 1704 | xfree (buf); |
8dedea02 VP |
1705 | return; |
1706 | } | |
1707 | ||
1708 | /* Loop invariant is that the [current_begin, current_end) was previously | |
1709 | found to be not readable as a whole. | |
1710 | ||
1711 | Note loop condition -- if the range has 1 byte, we can't divide the range | |
1712 | so there's no point trying further. */ | |
1713 | while (current_end - current_begin > 1) | |
1714 | { | |
1715 | ULONGEST first_half_begin, first_half_end; | |
1716 | ULONGEST second_half_begin, second_half_end; | |
1717 | LONGEST xfer; | |
279a6fed | 1718 | ULONGEST middle = current_begin + (current_end - current_begin) / 2; |
f1a507a1 | 1719 | |
8dedea02 VP |
1720 | if (forward) |
1721 | { | |
1722 | first_half_begin = current_begin; | |
1723 | first_half_end = middle; | |
1724 | second_half_begin = middle; | |
1725 | second_half_end = current_end; | |
1726 | } | |
1727 | else | |
1728 | { | |
1729 | first_half_begin = middle; | |
1730 | first_half_end = current_end; | |
1731 | second_half_begin = current_begin; | |
1732 | second_half_end = middle; | |
1733 | } | |
1734 | ||
1735 | xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL, | |
d309493c | 1736 | buf + (first_half_begin - begin) * unit_size, |
8dedea02 VP |
1737 | first_half_begin, |
1738 | first_half_end - first_half_begin); | |
1739 | ||
1740 | if (xfer == first_half_end - first_half_begin) | |
1741 | { | |
c378eb4e | 1742 | /* This half reads up fine. So, the error must be in the |
3e43a32a | 1743 | other half. */ |
8dedea02 VP |
1744 | current_begin = second_half_begin; |
1745 | current_end = second_half_end; | |
1746 | } | |
1747 | else | |
1748 | { | |
c378eb4e | 1749 | /* This half is not readable. Because we've tried one byte, we |
279a6fed | 1750 | know some part of this half is actually readable. Go to the next |
8dedea02 VP |
1751 | iteration to divide again and try to read. |
1752 | ||
1753 | We don't handle the other half, because this function only tries | |
1754 | to read a single readable subrange. */ | |
1755 | current_begin = first_half_begin; | |
1756 | current_end = first_half_end; | |
1757 | } | |
1758 | } | |
1759 | ||
1760 | if (forward) | |
1761 | { | |
1762 | /* The [begin, current_begin) range has been read. */ | |
1763 | r.begin = begin; | |
1764 | r.end = current_begin; | |
1765 | r.data = buf; | |
1766 | } | |
1767 | else | |
1768 | { | |
1769 | /* The [current_end, end) range has been read. */ | |
279a6fed | 1770 | LONGEST region_len = end - current_end; |
f1a507a1 | 1771 | |
224c3ddb | 1772 | r.data = (gdb_byte *) xmalloc (region_len * unit_size); |
d309493c SM |
1773 | memcpy (r.data, buf + (current_end - begin) * unit_size, |
1774 | region_len * unit_size); | |
8dedea02 VP |
1775 | r.begin = current_end; |
1776 | r.end = end; | |
1777 | xfree (buf); | |
1778 | } | |
1779 | VEC_safe_push(memory_read_result_s, (*result), &r); | |
1780 | } | |
1781 | ||
1782 | void | |
1783 | free_memory_read_result_vector (void *x) | |
1784 | { | |
19ba03f4 | 1785 | VEC(memory_read_result_s) *v = (VEC(memory_read_result_s) *) x; |
8dedea02 VP |
1786 | memory_read_result_s *current; |
1787 | int ix; | |
1788 | ||
1789 | for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix) | |
1790 | { | |
1791 | xfree (current->data); | |
1792 | } | |
1793 | VEC_free (memory_read_result_s, v); | |
1794 | } | |
1795 | ||
1796 | VEC(memory_read_result_s) * | |
279a6fed SM |
1797 | read_memory_robust (struct target_ops *ops, |
1798 | const ULONGEST offset, const LONGEST len) | |
8dedea02 VP |
1799 | { |
1800 | VEC(memory_read_result_s) *result = 0; | |
d309493c | 1801 | int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ()); |
8dedea02 | 1802 | |
279a6fed SM |
1803 | LONGEST xfered_total = 0; |
1804 | while (xfered_total < len) | |
d5086790 | 1805 | { |
279a6fed SM |
1806 | struct mem_region *region = lookup_mem_region (offset + xfered_total); |
1807 | LONGEST region_len; | |
5d502164 | 1808 | |
8dedea02 VP |
1809 | /* If there is no explicit region, a fake one should be created. */ |
1810 | gdb_assert (region); | |
1811 | ||
1812 | if (region->hi == 0) | |
279a6fed | 1813 | region_len = len - xfered_total; |
8dedea02 | 1814 | else |
279a6fed | 1815 | region_len = region->hi - offset; |
8dedea02 VP |
1816 | |
1817 | if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO) | |
d5086790 | 1818 | { |
c378eb4e | 1819 | /* Cannot read this region. Note that we can end up here only |
8dedea02 VP |
1820 | if the region is explicitly marked inaccessible, or |
1821 | 'inaccessible-by-default' is in effect. */ | |
279a6fed | 1822 | xfered_total += region_len; |
8dedea02 VP |
1823 | } |
1824 | else | |
1825 | { | |
279a6fed | 1826 | LONGEST to_read = min (len - xfered_total, region_len); |
d309493c | 1827 | gdb_byte *buffer = (gdb_byte *) xmalloc (to_read * unit_size); |
8dedea02 | 1828 | |
279a6fed SM |
1829 | LONGEST xfered_partial = |
1830 | target_read (ops, TARGET_OBJECT_MEMORY, NULL, | |
1831 | (gdb_byte *) buffer, | |
1832 | offset + xfered_total, to_read); | |
8dedea02 | 1833 | /* Call an observer, notifying them of the xfer progress? */ |
279a6fed | 1834 | if (xfered_partial <= 0) |
d5086790 | 1835 | { |
c378eb4e | 1836 | /* Got an error reading full chunk. See if maybe we can read |
8dedea02 VP |
1837 | some subrange. */ |
1838 | xfree (buffer); | |
e084c964 DB |
1839 | read_whatever_is_readable (ops, offset + xfered_total, |
1840 | offset + xfered_total + to_read, | |
1841 | unit_size, &result); | |
279a6fed | 1842 | xfered_total += to_read; |
d5086790 | 1843 | } |
8dedea02 VP |
1844 | else |
1845 | { | |
1846 | struct memory_read_result r; | |
1847 | r.data = buffer; | |
279a6fed SM |
1848 | r.begin = offset + xfered_total; |
1849 | r.end = r.begin + xfered_partial; | |
8dedea02 | 1850 | VEC_safe_push (memory_read_result_s, result, &r); |
279a6fed | 1851 | xfered_total += xfered_partial; |
8dedea02 VP |
1852 | } |
1853 | QUIT; | |
d5086790 | 1854 | } |
d5086790 | 1855 | } |
8dedea02 | 1856 | return result; |
d5086790 VP |
1857 | } |
1858 | ||
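A usage sketch for read_memory_robust; START, LENGTH and process_chunk are hypothetical, and the vector is released with free_memory_read_result_vector defined above:

  VEC(memory_read_result_s) *chunks
    = read_memory_robust (current_target.beneath, start, length);
  struct cleanup *back_to
    = make_cleanup (free_memory_read_result_vector, chunks);
  memory_read_result_s *chunk;
  int ix;

  /* Each element describes one readable [begin, end) range and owns
     its data buffer.  */
  for (ix = 0; VEC_iterate (memory_read_result_s, chunks, ix, chunk); ix++)
    process_chunk (chunk->begin, chunk->data, chunk->end - chunk->begin);

  do_cleanups (back_to);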
8dedea02 | 1859 | |
cf7a04e8 DJ |
1860 | /* An alternative to target_write with progress callbacks. */ |
1861 | ||
1e3ff5ad | 1862 | LONGEST |
cf7a04e8 DJ |
1863 | target_write_with_progress (struct target_ops *ops, |
1864 | enum target_object object, | |
1865 | const char *annex, const gdb_byte *buf, | |
1866 | ULONGEST offset, LONGEST len, | |
1867 | void (*progress) (ULONGEST, void *), void *baton) | |
1e3ff5ad | 1868 | { |
279a6fed | 1869 | LONGEST xfered_total = 0; |
d309493c SM |
1870 | int unit_size = 1; |
1871 | ||
1872 | /* If we are writing to a memory object, find the length of an addressable | |
1873 | unit for that architecture. */ | |
1874 | if (object == TARGET_OBJECT_MEMORY | |
1875 | || object == TARGET_OBJECT_STACK_MEMORY | |
1876 | || object == TARGET_OBJECT_CODE_MEMORY | |
1877 | || object == TARGET_OBJECT_RAW_MEMORY) | |
1878 | unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ()); | |
a76d924d DJ |
1879 | |
1880 | /* Give the progress callback a chance to set up. */ | |
1881 | if (progress) | |
1882 | (*progress) (0, baton); | |
1883 | ||
279a6fed | 1884 | while (xfered_total < len) |
1e3ff5ad | 1885 | { |
279a6fed | 1886 | ULONGEST xfered_partial; |
9b409511 YQ |
1887 | enum target_xfer_status status; |
1888 | ||
1889 | status = target_write_partial (ops, object, annex, | |
d309493c | 1890 | buf + xfered_total * unit_size, |
279a6fed SM |
1891 | offset + xfered_total, len - xfered_total, |
1892 | &xfered_partial); | |
cf7a04e8 | 1893 | |
5c328c05 | 1894 | if (status != TARGET_XFER_OK) |
279a6fed | 1895 | return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO; |
cf7a04e8 DJ |
1896 | |
1897 | if (progress) | |
279a6fed | 1898 | (*progress) (xfered_partial, baton); |
cf7a04e8 | 1899 | |
279a6fed | 1900 | xfered_total += xfered_partial; |
1e3ff5ad AC |
1901 | QUIT; |
1902 | } | |
1903 | return len; | |
1904 | } | |
1905 | ||
7f79c47e DE |
1906 | /* For docs on target_write see target.h. */ |
1907 | ||
cf7a04e8 DJ |
1908 | LONGEST |
1909 | target_write (struct target_ops *ops, | |
1910 | enum target_object object, | |
1911 | const char *annex, const gdb_byte *buf, | |
1912 | ULONGEST offset, LONGEST len) | |
1913 | { | |
1914 | return target_write_with_progress (ops, object, annex, buf, offset, len, | |
1915 | NULL, NULL); | |
1916 | } | |
1917 | ||
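A sketch of the progress-callback variant; the callback, BUF, ADDRESS and LENGTH are illustrative. Note that the callback also receives one initial call with 0 before any data is written:

  static void
  note_progress (ULONGEST just_written, void *baton)
  {
    ULONGEST *total = (ULONGEST *) baton;

    *total += just_written;
  }

  /* ... in the caller ...  */
  ULONGEST total = 0;

  if (target_write_with_progress (current_target.beneath,
                                  TARGET_OBJECT_FLASH, NULL,
                                  buf, address, length,
                                  note_progress, &total) != length)
    error (_("Flash write failed"));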
159f81f3 DJ |
1918 | /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return |
1919 | the size of the transferred data. PADDING additional bytes are | |
1920 | available in *BUF_P. This is a helper function for | |
1921 | target_read_alloc; see the declaration of that function for more | |
1922 | information. */ | |
13547ab6 | 1923 | |
159f81f3 DJ |
1924 | static LONGEST |
1925 | target_read_alloc_1 (struct target_ops *ops, enum target_object object, | |
1926 | const char *annex, gdb_byte **buf_p, int padding) | |
13547ab6 DJ |
1927 | { |
1928 | size_t buf_alloc, buf_pos; | |
1929 | gdb_byte *buf; | |
13547ab6 DJ |
1930 | |
1931 | /* This function does not have a length parameter; it reads the | |
1932 | entire OBJECT. Also, it doesn't support objects fetched partly | |
1933 | from one target and partly from another (in a different stratum, | |
1934 | e.g. a core file and an executable). Both reasons make it | |
1935 | unsuitable for reading memory. */ | |
1936 | gdb_assert (object != TARGET_OBJECT_MEMORY); | |
1937 | ||
1938 | /* Start by reading up to 4K at a time. The target will throttle | |
1939 | this number down if necessary. */ | |
1940 | buf_alloc = 4096; | |
224c3ddb | 1941 | buf = (gdb_byte *) xmalloc (buf_alloc); |
13547ab6 DJ |
1942 | buf_pos = 0; |
1943 | while (1) | |
1944 | { | |
9b409511 YQ |
1945 | ULONGEST xfered_len; |
1946 | enum target_xfer_status status; | |
1947 | ||
1948 | status = target_read_partial (ops, object, annex, &buf[buf_pos], | |
1949 | buf_pos, buf_alloc - buf_pos - padding, | |
1950 | &xfered_len); | |
1951 | ||
1952 | if (status == TARGET_XFER_EOF) | |
13547ab6 DJ |
1953 | { |
1954 | /* Read all there was. */ | |
1955 | if (buf_pos == 0) | |
1956 | xfree (buf); | |
1957 | else | |
1958 | *buf_p = buf; | |
1959 | return buf_pos; | |
1960 | } | |
9b409511 YQ |
1961 | else if (status != TARGET_XFER_OK) |
1962 | { | |
1963 | /* An error occurred. */ | |
1964 | xfree (buf); | |
1965 | return TARGET_XFER_E_IO; | |
1966 | } | |
13547ab6 | 1967 | |
9b409511 | 1968 | buf_pos += xfered_len; |
13547ab6 DJ |
1969 | |
1970 | /* If the buffer is filling up, expand it. */ | |
1971 | if (buf_alloc < buf_pos * 2) | |
1972 | { | |
1973 | buf_alloc *= 2; | |
224c3ddb | 1974 | buf = (gdb_byte *) xrealloc (buf, buf_alloc); |
13547ab6 DJ |
1975 | } |
1976 | ||
1977 | QUIT; | |
1978 | } | |
1979 | } | |
1980 | ||
159f81f3 DJ |
1981 | /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return |
1982 | the size of the transferred data. See the declaration in "target.h" | |
1983 | for more information about the return value. */ |
1984 | ||
1985 | LONGEST | |
1986 | target_read_alloc (struct target_ops *ops, enum target_object object, | |
1987 | const char *annex, gdb_byte **buf_p) | |
1988 | { | |
1989 | return target_read_alloc_1 (ops, object, annex, buf_p, 0); | |
1990 | } | |
1991 | ||
1992 | /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and | |
1993 | returned as a string, allocated using xmalloc. If an error occurs | |
1994 | or the transfer is unsupported, NULL is returned. Empty objects | |
1995 | are returned as allocated but empty strings. A warning is issued | |
1996 | if the result contains any embedded NUL bytes. */ | |
1997 | ||
1998 | char * | |
1999 | target_read_stralloc (struct target_ops *ops, enum target_object object, | |
2000 | const char *annex) | |
2001 | { | |
39086a0e PA |
2002 | gdb_byte *buffer; |
2003 | char *bufstr; | |
7313baad | 2004 | LONGEST i, transferred; |
159f81f3 | 2005 | |
39086a0e PA |
2006 | transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1); |
2007 | bufstr = (char *) buffer; | |
159f81f3 DJ |
2008 | |
2009 | if (transferred < 0) | |
2010 | return NULL; | |
2011 | ||
2012 | if (transferred == 0) | |
2013 | return xstrdup (""); | |
2014 | ||
39086a0e | 2015 | bufstr[transferred] = 0; |
7313baad UW |
2016 | |
2017 | /* Check for embedded NUL bytes; but allow trailing NULs. */ | |
39086a0e PA |
2018 | for (i = strlen (bufstr); i < transferred; i++) |
2019 | if (bufstr[i] != 0) | |
7313baad UW |
2020 | { |
2021 | warning (_("target object %d, annex %s, " | |
2022 | "contained unexpected null characters"), | |
2023 | (int) object, annex ? annex : "(none)"); | |
2024 | break; | |
2025 | } | |
159f81f3 | 2026 | |
39086a0e | 2027 | return bufstr; |
159f81f3 DJ |
2028 | } |
2029 | ||
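A sketch of the usual calling pattern; the "processes" annex is only an example, and the returned string must be freed with xfree:

  char *osdata = target_read_stralloc (&current_target, TARGET_OBJECT_OSDATA,
                                       "processes");

  if (osdata != NULL)
    {
      struct cleanup *back_to = make_cleanup (xfree, osdata);

      /* ... use the NUL-terminated string ...  */
      do_cleanups (back_to);
    }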
b6591e8b AC |
2030 | /* Memory transfer methods. */ |
2031 | ||
2032 | void | |
1b0ba102 | 2033 | get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf, |
b6591e8b AC |
2034 | LONGEST len) |
2035 | { | |
07b82ea5 PA |
2036 | /* This method is used to read from an alternate, non-current |
2037 | target. This read must bypass the overlay support (as symbols | |
2038 | don't match this target), and GDB's internal cache (wrong cache | |
2039 | for this target). */ | |
2040 | if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len) | |
b6591e8b | 2041 | != len) |
578d3588 | 2042 | memory_error (TARGET_XFER_E_IO, addr); |
b6591e8b AC |
2043 | } |
2044 | ||
2045 | ULONGEST | |
5d502164 MS |
2046 | get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr, |
2047 | int len, enum bfd_endian byte_order) | |
b6591e8b | 2048 | { |
f6519ebc | 2049 | gdb_byte buf[sizeof (ULONGEST)]; |
b6591e8b AC |
2050 | |
2051 | gdb_assert (len <= sizeof (buf)); | |
2052 | get_target_memory (ops, addr, buf, len); | |
e17a4113 | 2053 | return extract_unsigned_integer (buf, len, byte_order); |
b6591e8b AC |
2054 | } |
2055 | ||
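For instance (a sketch; OPS is assumed to be the alternate target and ADDR is hypothetical), a 4-byte unsigned word can be fetched in the current architecture's byte order:

  ULONGEST word
    = get_target_memory_unsigned (ops, addr, 4,
                                  gdbarch_byte_order (target_gdbarch ()));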
3db08215 MM |
2056 | /* See target.h. */ |
2057 | ||
d914c394 SS |
2058 | int |
2059 | target_insert_breakpoint (struct gdbarch *gdbarch, | |
2060 | struct bp_target_info *bp_tgt) | |
2061 | { | |
2062 | if (!may_insert_breakpoints) | |
2063 | { | |
2064 | warning (_("May not insert breakpoints")); | |
2065 | return 1; | |
2066 | } | |
2067 | ||
6b84065d TT |
2068 | return current_target.to_insert_breakpoint (&current_target, |
2069 | gdbarch, bp_tgt); | |
d914c394 SS |
2070 | } |
2071 | ||
3db08215 MM |
2072 | /* See target.h. */ |
2073 | ||
d914c394 | 2074 | int |
6b84065d TT |
2075 | target_remove_breakpoint (struct gdbarch *gdbarch, |
2076 | struct bp_target_info *bp_tgt) | |
d914c394 SS |
2077 | { |
2078 | /* This is kind of a weird case to handle, but the permission might | |
2079 | have been changed after breakpoints were inserted - in which case | |
2080 | we should just take the user literally and assume that any | |
2081 | breakpoints should be left in place. */ | |
2082 | if (!may_insert_breakpoints) | |
2083 | { | |
2084 | warning (_("May not remove breakpoints")); | |
2085 | return 1; | |
2086 | } | |
2087 | ||
6b84065d TT |
2088 | return current_target.to_remove_breakpoint (&current_target, |
2089 | gdbarch, bp_tgt); | |
d914c394 SS |
2090 | } |
2091 | ||
c906108c | 2092 | static void |
fba45db2 | 2093 | target_info (char *args, int from_tty) |
c906108c SS |
2094 | { |
2095 | struct target_ops *t; | |
c906108c | 2096 | int has_all_mem = 0; |
c5aa993b | 2097 | |
c906108c | 2098 | if (symfile_objfile != NULL) |
4262abfb JK |
2099 | printf_unfiltered (_("Symbols from \"%s\".\n"), |
2100 | objfile_name (symfile_objfile)); | |
c906108c | 2101 | |
258b763a | 2102 | for (t = target_stack; t != NULL; t = t->beneath) |
c906108c | 2103 | { |
c35b1492 | 2104 | if (!(*t->to_has_memory) (t)) |
c906108c SS |
2105 | continue; |
2106 | ||
c5aa993b | 2107 | if ((int) (t->to_stratum) <= (int) dummy_stratum) |
c906108c SS |
2108 | continue; |
2109 | if (has_all_mem) | |
3e43a32a MS |
2110 | printf_unfiltered (_("\tWhile running this, " |
2111 | "GDB does not access memory from...\n")); | |
c5aa993b JM |
2112 | printf_unfiltered ("%s:\n", t->to_longname); |
2113 | (t->to_files_info) (t); | |
c35b1492 | 2114 | has_all_mem = (*t->to_has_all_memory) (t); |
c906108c SS |
2115 | } |
2116 | } | |
2117 | ||
fd79ecee DJ |
2118 | /* This function is called before any new inferior is created, e.g. |
2119 | by running a program, attaching, or connecting to a target. | |
2120 | It cleans up any state from previous invocations which might | |
2121 | change between runs. This is a subset of what target_preopen | |
2122 | resets (things which might change between targets). */ | |
2123 | ||
2124 | void | |
2125 | target_pre_inferior (int from_tty) | |
2126 | { | |
c378eb4e | 2127 | /* Clear out solib state. Otherwise the solib state of the previous |
b9db4ced | 2128 | inferior might have survived and is entirely wrong for the new |
c378eb4e | 2129 | target. This has been observed on GNU/Linux using glibc 2.3. How |
b9db4ced UW |
2130 | to reproduce: |
2131 | ||
2132 | bash$ ./foo& | |
2133 | [1] 4711 | |
2134 | bash$ ./foo& | |
2135 | [1] 4712 | |
2136 | bash$ gdb ./foo | |
2137 | [...] | |
2138 | (gdb) attach 4711 | |
2139 | (gdb) detach | |
2140 | (gdb) attach 4712 | |
2141 | Cannot access memory at address 0xdeadbeef | |
2142 | */ | |
b9db4ced | 2143 | |
50c71eaf PA |
2144 | /* In some OSs, the shared library list is the same/global/shared |
2145 | across inferiors. If code is shared between processes, so are | |
2146 | memory regions and features. */ | |
f5656ead | 2147 | if (!gdbarch_has_global_solist (target_gdbarch ())) |
50c71eaf PA |
2148 | { |
2149 | no_shared_libraries (NULL, from_tty); | |
2150 | ||
2151 | invalidate_target_mem_regions (); | |
424163ea | 2152 | |
50c71eaf PA |
2153 | target_clear_description (); |
2154 | } | |
8ffcbaaf | 2155 | |
e9756d52 PP |
2156 | /* attach_flag may be set if the previous process associated with |
2157 | the inferior was attached to. */ | |
2158 | current_inferior ()->attach_flag = 0; | |
2159 | ||
8ffcbaaf | 2160 | agent_capability_invalidate (); |
fd79ecee DJ |
2161 | } |
2162 | ||
b8fa0bfa PA |
2163 | /* Callback for iterate_over_inferiors. Gets rid of the given |
2164 | inferior. */ | |
2165 | ||
2166 | static int | |
2167 | dispose_inferior (struct inferior *inf, void *args) | |
2168 | { | |
2169 | struct thread_info *thread; | |
2170 | ||
2171 | thread = any_thread_of_process (inf->pid); | |
2172 | if (thread) | |
2173 | { | |
2174 | switch_to_thread (thread->ptid); | |
2175 | ||
2176 | /* Core inferiors actually should be detached, not killed. */ | |
2177 | if (target_has_execution) | |
2178 | target_kill (); | |
2179 | else | |
2180 | target_detach (NULL, 0); | |
2181 | } | |
2182 | ||
2183 | return 0; | |
2184 | } | |
2185 | ||
c906108c SS |
2186 | /* This is to be called by the open routine before it does |
2187 | anything. */ | |
2188 | ||
2189 | void | |
fba45db2 | 2190 | target_preopen (int from_tty) |
c906108c | 2191 | { |
c5aa993b | 2192 | dont_repeat (); |
c906108c | 2193 | |
b8fa0bfa | 2194 | if (have_inferiors ()) |
c5aa993b | 2195 | { |
adf40b2e | 2196 | if (!from_tty |
b8fa0bfa PA |
2197 | || !have_live_inferiors () |
2198 | || query (_("A program is being debugged already. Kill it? "))) | |
2199 | iterate_over_inferiors (dispose_inferior, NULL); | |
c906108c | 2200 | else |
8a3fe4f8 | 2201 | error (_("Program not killed.")); |
c906108c SS |
2202 | } |
2203 | ||
2204 | /* Calling target_kill may remove the target from the stack. But if | |
2205 | it doesn't (which seems like a win for UDI), remove it now. */ | |
87ab71f0 PA |
2206 | /* Leave the exec target, though. The user may be switching from a |
2207 | live process to a core of the same program. */ | |
460014f5 | 2208 | pop_all_targets_above (file_stratum); |
fd79ecee DJ |
2209 | |
2210 | target_pre_inferior (from_tty); | |
c906108c SS |
2211 | } |
2212 | ||
2213 | /* Detach a target after doing deferred register stores. */ | |
2214 | ||
2215 | void | |
52554a0e | 2216 | target_detach (const char *args, int from_tty) |
c906108c | 2217 | { |
136d6dae VP |
2218 | struct target_ops* t; |
2219 | ||
f5656ead | 2220 | if (gdbarch_has_global_breakpoints (target_gdbarch ())) |
50c71eaf PA |
2221 | /* Don't remove global breakpoints here. They're removed on |
2222 | disconnection from the target. */ | |
2223 | ; | |
2224 | else | |
2225 | /* If we're in breakpoints-always-inserted mode, have to remove | |
2226 | them before detaching. */ | |
dfd4cc63 | 2227 | remove_breakpoints_pid (ptid_get_pid (inferior_ptid)); |
74960c60 | 2228 | |
24291992 PA |
2229 | prepare_for_detach (); |
2230 | ||
09da0d0a | 2231 | current_target.to_detach (&current_target, args, from_tty); |
c906108c SS |
2232 | } |
2233 | ||
6ad8ae5c | 2234 | void |
fee354ee | 2235 | target_disconnect (const char *args, int from_tty) |
6ad8ae5c | 2236 | { |
50c71eaf PA |
2237 | /* If we're in breakpoints-always-inserted mode or if breakpoints |
2238 | are global across processes, we have to remove them before | |
2239 | disconnecting. */ | |
74960c60 VP |
2240 | remove_breakpoints (); |
2241 | ||
86a0854a | 2242 | current_target.to_disconnect (&current_target, args, from_tty); |
6ad8ae5c DJ |
2243 | } |
2244 | ||
117de6a9 | 2245 | ptid_t |
47608cb1 | 2246 | target_wait (ptid_t ptid, struct target_waitstatus *status, int options) |
117de6a9 | 2247 | { |
a7068b60 | 2248 | return (current_target.to_wait) (&current_target, ptid, status, options); |
117de6a9 PA |
2249 | } |
2250 | ||
0b333c5e PA |
2251 | /* See target.h. */ |
2252 | ||
2253 | ptid_t | |
2254 | default_target_wait (struct target_ops *ops, | |
2255 | ptid_t ptid, struct target_waitstatus *status, | |
2256 | int options) | |
2257 | { | |
2258 | status->kind = TARGET_WAITKIND_IGNORE; | |
2259 | return minus_one_ptid; | |
2260 | } | |
2261 | ||
117de6a9 PA |
2262 | char * |
2263 | target_pid_to_str (ptid_t ptid) | |
2264 | { | |
770234d3 | 2265 | return (*current_target.to_pid_to_str) (&current_target, ptid); |
117de6a9 PA |
2266 | } |
2267 | ||
73ede765 | 2268 | const char * |
4694da01 TT |
2269 | target_thread_name (struct thread_info *info) |
2270 | { | |
825828fc | 2271 | return current_target.to_thread_name (&current_target, info); |
4694da01 TT |
2272 | } |
2273 | ||
e1ac3328 | 2274 | void |
2ea28649 | 2275 | target_resume (ptid_t ptid, int step, enum gdb_signal signal) |
e1ac3328 | 2276 | { |
28439f5e PA |
2277 | struct target_ops *t; |
2278 | ||
4e5d721f | 2279 | target_dcache_invalidate (); |
28439f5e | 2280 | |
6b84065d | 2281 | current_target.to_resume (&current_target, ptid, step, signal); |
28439f5e | 2282 | |
6b84065d | 2283 | registers_changed_ptid (ptid); |
251bde03 PA |
2284 | /* We only set the internal executing state here. The user/frontend |
2285 | running state is set at a higher level. */ | |
6b84065d | 2286 | set_executing (ptid, 1); |
6b84065d | 2287 | clear_inline_frame_state (ptid); |
e1ac3328 | 2288 | } |
2455069d UW |
2289 | |
2290 | void | |
2291 | target_pass_signals (int numsigs, unsigned char *pass_signals) | |
2292 | { | |
035cad7f | 2293 | (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals); |
2455069d UW |
2294 | } |
2295 | ||
9b224c5e PA |
2296 | void |
2297 | target_program_signals (int numsigs, unsigned char *program_signals) | |
2298 | { | |
7d4f8efa TT |
2299 | (*current_target.to_program_signals) (&current_target, |
2300 | numsigs, program_signals); | |
9b224c5e PA |
2301 | } |
2302 | ||
098dba18 TT |
2303 | static int |
2304 | default_follow_fork (struct target_ops *self, int follow_child, | |
2305 | int detach_fork) | |
2306 | { | |
2307 | /* Some target returned a fork event, but did not know how to follow it. */ | |
2308 | internal_error (__FILE__, __LINE__, | |
2309 | _("could not find a target to follow fork")); | |
2310 | } | |
2311 | ||
ee057212 DJ |
2312 | /* Look through the list of possible targets for a target that can |
2313 | follow forks. */ | |
2314 | ||
2315 | int | |
07107ca6 | 2316 | target_follow_fork (int follow_child, int detach_fork) |
ee057212 | 2317 | { |
a7068b60 TT |
2318 | return current_target.to_follow_fork (&current_target, |
2319 | follow_child, detach_fork); | |
ee057212 DJ |
2320 | } |
2321 | ||
94585166 DB |
2322 | /* Target wrapper for follow exec hook. */ |
2323 | ||
2324 | void | |
2325 | target_follow_exec (struct inferior *inf, char *execd_pathname) | |
2326 | { | |
2327 | current_target.to_follow_exec (&current_target, inf, execd_pathname); | |
2328 | } | |
2329 | ||
8d657035 TT |
2330 | static void |
2331 | default_mourn_inferior (struct target_ops *self) | |
2332 | { | |
2333 | internal_error (__FILE__, __LINE__, | |
2334 | _("could not find a target to follow mourn inferior")); | |
2335 | } | |
2336 | ||
136d6dae VP |
2337 | void |
2338 | target_mourn_inferior (void) | |
2339 | { | |
8d657035 | 2340 | current_target.to_mourn_inferior (&current_target); |
136d6dae | 2341 | |
8d657035 TT |
2342 | /* We no longer need to keep handles on any of the object files. |
2343 | Make sure to release them to avoid unnecessarily locking any | |
2344 | of them while we're not actually debugging. */ | |
2345 | bfd_cache_close_all (); | |
136d6dae VP |
2346 | } |
2347 | ||
424163ea DJ |
2348 | /* Look for a target which can describe architectural features, starting |
2349 | from TARGET. If we find one, return its description. */ | |
2350 | ||
2351 | const struct target_desc * | |
2352 | target_read_description (struct target_ops *target) | |
2353 | { | |
2117c711 | 2354 | return target->to_read_description (target); |
424163ea DJ |
2355 | } |
2356 | ||
58a5184e | 2357 | /* This implements a basic search of memory, reading target memory and |
08388c79 DE |
2358 | performing the search here (as opposed to performing the search on the |
2359 | target side with, for example, gdbserver). */ | |
2360 | ||
2361 | int | |
2362 | simple_search_memory (struct target_ops *ops, | |
2363 | CORE_ADDR start_addr, ULONGEST search_space_len, | |
2364 | const gdb_byte *pattern, ULONGEST pattern_len, | |
2365 | CORE_ADDR *found_addrp) | |
2366 | { | |
2367 | /* NOTE: also defined in find.c testcase. */ | |
2368 | #define SEARCH_CHUNK_SIZE 16000 | |
2369 | const unsigned chunk_size = SEARCH_CHUNK_SIZE; | |
2370 | /* Buffer to hold memory contents for searching. */ | |
2371 | gdb_byte *search_buf; | |
2372 | unsigned search_buf_size; | |
2373 | struct cleanup *old_cleanups; | |
2374 | ||
2375 | search_buf_size = chunk_size + pattern_len - 1; | |
2376 | ||
2377 | /* No point in trying to allocate a buffer larger than the search space. */ | |
2378 | if (search_space_len < search_buf_size) | |
2379 | search_buf_size = search_space_len; | |
2380 | ||
224c3ddb | 2381 | search_buf = (gdb_byte *) malloc (search_buf_size); |
08388c79 | 2382 | if (search_buf == NULL) |
5e1471f5 | 2383 | error (_("Unable to allocate memory to perform the search.")); |
08388c79 DE |
2384 | old_cleanups = make_cleanup (free_current_contents, &search_buf); |
2385 | ||
2386 | /* Prime the search buffer. */ | |
2387 | ||
2388 | if (target_read (ops, TARGET_OBJECT_MEMORY, NULL, | |
2389 | search_buf, start_addr, search_buf_size) != search_buf_size) | |
2390 | { | |
b3dc46ff AB |
2391 | warning (_("Unable to access %s bytes of target " |
2392 | "memory at %s, halting search."), | |
2393 | pulongest (search_buf_size), hex_string (start_addr)); | |
08388c79 DE |
2394 | do_cleanups (old_cleanups); |
2395 | return -1; | |
2396 | } | |
2397 | ||
2398 | /* Perform the search. | |
2399 | ||
2400 | The loop is kept simple by allocating [N + pattern-length - 1] bytes. | |
2401 | When we've scanned N bytes we copy the trailing bytes to the start and | |
2402 | read in another N bytes. */ | |
2403 | ||
2404 | while (search_space_len >= pattern_len) | |
2405 | { | |
2406 | gdb_byte *found_ptr; | |
2407 | unsigned nr_search_bytes = min (search_space_len, search_buf_size); | |
2408 | ||
d7f3ff3e SM |
2409 | found_ptr = (gdb_byte *) memmem (search_buf, nr_search_bytes, |
2410 | pattern, pattern_len); | |
08388c79 DE |
2411 | |
2412 | if (found_ptr != NULL) | |
2413 | { | |
2414 | CORE_ADDR found_addr = start_addr + (found_ptr - search_buf); | |
5d502164 | 2415 | |
08388c79 DE |
2416 | *found_addrp = found_addr; |
2417 | do_cleanups (old_cleanups); | |
2418 | return 1; | |
2419 | } | |
2420 | ||
2421 | /* Not found in this chunk, skip to next chunk. */ | |
2422 | ||
2423 | /* Don't let search_space_len wrap here, it's unsigned. */ | |
2424 | if (search_space_len >= chunk_size) | |
2425 | search_space_len -= chunk_size; | |
2426 | else | |
2427 | search_space_len = 0; | |
2428 | ||
2429 | if (search_space_len >= pattern_len) | |
2430 | { | |
2431 | unsigned keep_len = search_buf_size - chunk_size; | |
8a35fb51 | 2432 | CORE_ADDR read_addr = start_addr + chunk_size + keep_len; |
08388c79 DE |
2433 | int nr_to_read; |
2434 | ||
2435 | /* Copy the trailing part of the previous iteration to the front | |
2436 | of the buffer for the next iteration. */ | |
2437 | gdb_assert (keep_len == pattern_len - 1); | |
2438 | memcpy (search_buf, search_buf + chunk_size, keep_len); | |
2439 | ||
2440 | nr_to_read = min (search_space_len - keep_len, chunk_size); | |
2441 | ||
2442 | if (target_read (ops, TARGET_OBJECT_MEMORY, NULL, | |
2443 | search_buf + keep_len, read_addr, | |
2444 | nr_to_read) != nr_to_read) | |
2445 | { | |
b3dc46ff | 2446 | warning (_("Unable to access %s bytes of target " |
9b20d036 | 2447 | "memory at %s, halting search."), |
b3dc46ff | 2448 | plongest (nr_to_read), |
08388c79 DE |
2449 | hex_string (read_addr)); |
2450 | do_cleanups (old_cleanups); | |
2451 | return -1; | |
2452 | } | |
2453 | ||
2454 | start_addr += chunk_size; | |
2455 | } | |
2456 | } | |
2457 | ||
2458 | /* Not found. */ | |
2459 | ||
2460 | do_cleanups (old_cleanups); | |
2461 | return 0; | |
2462 | } | |
2463 | ||
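A short worked illustration (the numbers are only an example) of why the buffer holds pattern_len - 1 bytes beyond each chunk:

  /* With SEARCH_CHUNK_SIZE == 16000 and pattern_len == 4:
     search_buf_size = 16000 + 4 - 1 = 16003.
     The first scan considers match starts 0 .. 15999, so a match that
     begins at offset 15998 and ends at 16001 -- straddling the 16000-byte
     chunk boundary -- is still found, because the buffer extends 3 bytes
     past the chunk.  For the next chunk, keep_len = 3 trailing bytes are
     copied to the front and the following read begins at
     start_addr + 16000 + 3, so no potential match start is scanned twice
     and none is skipped.  */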
58a5184e TT |
2464 | /* Default implementation of memory-searching. */ |
2465 | ||
2466 | static int | |
2467 | default_search_memory (struct target_ops *self, | |
2468 | CORE_ADDR start_addr, ULONGEST search_space_len, | |
2469 | const gdb_byte *pattern, ULONGEST pattern_len, | |
2470 | CORE_ADDR *found_addrp) | |
2471 | { | |
2472 | /* Start over from the top of the target stack. */ | |
2473 | return simple_search_memory (current_target.beneath, | |
2474 | start_addr, search_space_len, | |
2475 | pattern, pattern_len, found_addrp); | |
2476 | } | |
2477 | ||
08388c79 DE |
2478 | /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the |
2479 | sequence of bytes in PATTERN with length PATTERN_LEN. | |
2480 | ||
2481 | The result is 1 if found, 0 if not found, and -1 if there was an error | |
2482 | requiring halting of the search (e.g. memory read error). | |
2483 | If the pattern is found the address is recorded in FOUND_ADDRP. */ | |
2484 | ||
2485 | int | |
2486 | target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len, | |
2487 | const gdb_byte *pattern, ULONGEST pattern_len, | |
2488 | CORE_ADDR *found_addrp) | |
2489 | { | |
a7068b60 TT |
2490 | return current_target.to_search_memory (&current_target, start_addr, |
2491 | search_space_len, | |
2492 | pattern, pattern_len, found_addrp); | |
08388c79 DE |
2493 | } |
2494 | ||
8edfe269 DJ |
2495 | /* Look through the currently pushed targets. If none of them will |
2496 | be able to restart the currently running process, issue an error | |
2497 | message. */ | |
2498 | ||
2499 | void | |
2500 | target_require_runnable (void) | |
2501 | { | |
2502 | struct target_ops *t; | |
2503 | ||
2504 | for (t = target_stack; t != NULL; t = t->beneath) | |
2505 | { | |
2506 | /* If this target knows how to create a new program, then | |
2507 | assume we will still be able to after killing the current | |
2508 | one. Either killing and mourning will not pop T, or else | |
2509 | find_default_run_target will find it again. */ | |
2510 | if (t->to_create_inferior != NULL) | |
2511 | return; | |
2512 | ||
548740d6 | 2513 | /* Do not worry about targets at certain strata that cannot |
8edfe269 DJ |
2514 | create inferiors. Assume they will be pushed again if |
2515 | necessary, and continue to the process_stratum. */ | |
85e747d2 | 2516 | if (t->to_stratum == thread_stratum |
548740d6 | 2517 | || t->to_stratum == record_stratum |
85e747d2 | 2518 | || t->to_stratum == arch_stratum) |
8edfe269 DJ |
2519 | continue; |
2520 | ||
3e43a32a MS |
2521 | error (_("The \"%s\" target does not support \"run\". " |
2522 | "Try \"help target\" or \"continue\"."), | |
8edfe269 DJ |
2523 | t->to_shortname); |
2524 | } | |
2525 | ||
2526 | /* This function is only called if the target is running. In that | |
2527 | case there should have been a process_stratum target and it | |
c378eb4e | 2528 | should either know how to create inferiors, or not... */ |
9b20d036 | 2529 | internal_error (__FILE__, __LINE__, _("No targets found")); |
8edfe269 DJ |
2530 | } |
2531 | ||
6a3cb8e8 PA |
2532 | /* Whether GDB is allowed to fall back to the default run target for |
2533 | "run", "attach", etc. when no target is connected yet. */ | |
2534 | static int auto_connect_native_target = 1; | |
2535 | ||
2536 | static void | |
2537 | show_auto_connect_native_target (struct ui_file *file, int from_tty, | |
2538 | struct cmd_list_element *c, const char *value) | |
2539 | { | |
2540 | fprintf_filtered (file, | |
2541 | _("Whether GDB may automatically connect to the " | |
2542 | "native target is %s.\n"), | |
2543 | value); | |
2544 | } | |
2545 | ||
c906108c SS |
2546 | /* Look through the list of possible targets for a target that can |
2547 | execute a run or attach command without any other data. This is | |
2548 | used to locate the default process stratum. | |
2549 | ||
5f667f2d PA |
2550 | If DO_MESG is not NULL, the result is always valid (error() is |
2551 | called for errors); else, return NULL on error. */ | |
c906108c SS |
2552 | |
2553 | static struct target_ops * | |
fba45db2 | 2554 | find_default_run_target (char *do_mesg) |
c906108c | 2555 | { |
c906108c | 2556 | struct target_ops *runable = NULL; |
c906108c | 2557 | |
6a3cb8e8 | 2558 | if (auto_connect_native_target) |
c906108c | 2559 | { |
89a1c21a | 2560 | struct target_ops *t; |
6a3cb8e8 | 2561 | int count = 0; |
89a1c21a | 2562 | int i; |
6a3cb8e8 | 2563 | |
89a1c21a | 2564 | for (i = 0; VEC_iterate (target_ops_p, target_structs, i, t); ++i) |
c906108c | 2565 | { |
89a1c21a | 2566 | if (t->to_can_run != delegate_can_run && target_can_run (t)) |
6a3cb8e8 | 2567 | { |
89a1c21a | 2568 | runable = t; |
6a3cb8e8 PA |
2569 | ++count; |
2570 | } | |
c906108c | 2571 | } |
6a3cb8e8 PA |
2572 | |
2573 | if (count != 1) | |
2574 | runable = NULL; | |
c906108c SS |
2575 | } |
2576 | ||
6a3cb8e8 | 2577 | if (runable == NULL) |
5f667f2d PA |
2578 | { |
2579 | if (do_mesg) | |
2580 | error (_("Don't know how to %s. Try \"help target\"."), do_mesg); | |
2581 | else | |
2582 | return NULL; | |
2583 | } | |
c906108c SS |
2584 | |
2585 | return runable; | |
2586 | } | |
2587 | ||
b3ccfe11 | 2588 | /* See target.h. */ |
c906108c | 2589 | |
b3ccfe11 TT |
2590 | struct target_ops * |
2591 | find_attach_target (void) | |
c906108c SS |
2592 | { |
2593 | struct target_ops *t; | |
2594 | ||
b3ccfe11 TT |
2595 | /* If a target on the current stack can attach, use it. */ |
2596 | for (t = current_target.beneath; t != NULL; t = t->beneath) | |
2597 | { | |
2598 | if (t->to_attach != NULL) | |
2599 | break; | |
2600 | } | |
c906108c | 2601 | |
b3ccfe11 TT |
2602 | /* Otherwise, use the default run target for attaching. */ |
2603 | if (t == NULL) | |
2604 | t = find_default_run_target ("attach"); | |
b84876c2 | 2605 | |
b3ccfe11 | 2606 | return t; |
b84876c2 PA |
2607 | } |
2608 | ||
b3ccfe11 | 2609 | /* See target.h. */ |
b84876c2 | 2610 | |
b3ccfe11 TT |
2611 | struct target_ops * |
2612 | find_run_target (void) | |
9908b566 VP |
2613 | { |
2614 | struct target_ops *t; | |
2615 | ||
b3ccfe11 TT |
2616 | /* If a target on the current stack can create an inferior, use it. */ |
2617 | for (t = current_target.beneath; t != NULL; t = t->beneath) | |
2618 | { | |
2619 | if (t->to_create_inferior != NULL) | |
2620 | break; | |
2621 | } | |
5d502164 | 2622 | |
b3ccfe11 TT |
2623 | /* Otherwise, use the default run target. */ |
2624 | if (t == NULL) | |
2625 | t = find_default_run_target ("run"); | |
9908b566 | 2626 | |
b3ccfe11 | 2627 | return t; |
9908b566 VP |
2628 | } |
2629 | ||
145b16a9 UW |
2630 | /* Implement the "info proc" command. */ |
2631 | ||
451b7c33 | 2632 | int |
7bc112c1 | 2633 | target_info_proc (const char *args, enum info_proc_what what) |
145b16a9 UW |
2634 | { |
2635 | struct target_ops *t; | |
2636 | ||
2637 | /* If we're already connected to something that can get us OS | |
2638 | related data, use it. Otherwise, try using the native | |
2639 | target. */ | |
2640 | if (current_target.to_stratum >= process_stratum) | |
2641 | t = current_target.beneath; | |
2642 | else | |
2643 | t = find_default_run_target (NULL); | |
2644 | ||
2645 | for (; t != NULL; t = t->beneath) | |
2646 | { | |
2647 | if (t->to_info_proc != NULL) | |
2648 | { | |
2649 | t->to_info_proc (t, args, what); | |
2650 | ||
2651 | if (targetdebug) | |
2652 | fprintf_unfiltered (gdb_stdlog, | |
2653 | "target_info_proc (\"%s\", %d)\n", args, what); | |
2654 | ||
451b7c33 | 2655 | return 1; |
145b16a9 UW |
2656 | } |
2657 | } | |
2658 | ||
451b7c33 | 2659 | return 0; |
145b16a9 UW |
2660 | } |
2661 | ||
03583c20 | 2662 | static int |
2bfc0540 | 2663 | find_default_supports_disable_randomization (struct target_ops *self) |
03583c20 UW |
2664 | { |
2665 | struct target_ops *t; | |
2666 | ||
2667 | t = find_default_run_target (NULL); | |
2668 | if (t && t->to_supports_disable_randomization) | |
2bfc0540 | 2669 | return (t->to_supports_disable_randomization) (t); |
03583c20 UW |
2670 | return 0; |
2671 | } | |
2672 | ||
2673 | int | |
2674 | target_supports_disable_randomization (void) | |
2675 | { | |
2676 | struct target_ops *t; | |
2677 | ||
2678 | for (t = &current_target; t != NULL; t = t->beneath) | |
2679 | if (t->to_supports_disable_randomization) | |
2bfc0540 | 2680 | return t->to_supports_disable_randomization (t); |
03583c20 UW |
2681 | |
2682 | return 0; | |
2683 | } | |
9908b566 | 2684 | |
07e059b5 VP |
2685 | char * |
2686 | target_get_osdata (const char *type) | |
2687 | { | |
07e059b5 VP |
2688 | struct target_ops *t; |
2689 | ||
739ef7fb PA |
2690 | /* If we're already connected to something that can get us OS |
2691 | related data, use it. Otherwise, try using the native | |
2692 | target. */ | |
2693 | if (current_target.to_stratum >= process_stratum) | |
6d097e65 | 2694 | t = current_target.beneath; |
739ef7fb PA |
2695 | else |
2696 | t = find_default_run_target ("get OS data"); | |
07e059b5 VP |
2697 | |
2698 | if (!t) | |
2699 | return NULL; | |
2700 | ||
6d097e65 | 2701 | return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type); |
07e059b5 VP |
2702 | } |
2703 | ||
8eaff7cd TT |
2704 | static struct address_space * |
2705 | default_thread_address_space (struct target_ops *self, ptid_t ptid) | |
6c95b8df PA |
2706 | { |
2707 | struct inferior *inf; | |
6c95b8df PA |
2708 | |
2709 | /* Fall-back to the "main" address space of the inferior. */ | |
c9657e70 | 2710 | inf = find_inferior_ptid (ptid); |
6c95b8df PA |
2711 | |
2712 | if (inf == NULL || inf->aspace == NULL) | |
3e43a32a | 2713 | internal_error (__FILE__, __LINE__, |
9b20d036 MS |
2714 | _("Can't determine the current " |
2715 | "address space of thread %s\n"), | |
6c95b8df PA |
2716 | target_pid_to_str (ptid)); |
2717 | ||
2718 | return inf->aspace; | |
2719 | } | |
2720 | ||
8eaff7cd TT |
2721 | /* Determine the current address space of thread PTID. */ |
2722 | ||
2723 | struct address_space * | |
2724 | target_thread_address_space (ptid_t ptid) | |
2725 | { | |
2726 | struct address_space *aspace; | |
2727 | ||
2728 | aspace = current_target.to_thread_address_space (&current_target, ptid); | |
2729 | gdb_assert (aspace != NULL); | |
2730 | ||
8eaff7cd TT |
2731 | return aspace; |
2732 | } | |
2733 | ||
7313baad UW |
2734 | |
2735 | /* Target file operations. */ | |
2736 | ||
2737 | static struct target_ops * | |
2738 | default_fileio_target (void) | |
2739 | { | |
2740 | /* If we're already connected to something that can perform | |
2741 | file I/O, use it. Otherwise, try using the native target. */ | |
2742 | if (current_target.to_stratum >= process_stratum) | |
2743 | return current_target.beneath; | |
2744 | else | |
2745 | return find_default_run_target ("file I/O"); | |
2746 | } | |
2747 | ||
1c4b552b GB |
2748 | /* File handle for target file operations. */ |
2749 | ||
2750 | typedef struct | |
2751 | { | |
2752 | /* The target on which this file is open. */ | |
2753 | struct target_ops *t; | |
2754 | ||
2755 | /* The file descriptor on the target. */ | |
2756 | int fd; | |
2757 | } fileio_fh_t; | |
2758 | ||
2759 | DEF_VEC_O (fileio_fh_t); | |
2760 | ||
2761 | /* Vector of currently open file handles. The value returned by | |
2762 | target_fileio_open and passed as the FD argument to other | |
2763 | target_fileio_* functions is an index into this vector. This | |
2764 | vector's entries are never freed; instead, files are marked as | |
2765 | closed, and the handle becomes available for reuse. */ | |
2766 | static VEC (fileio_fh_t) *fileio_fhandles; | |
2767 | ||
2768 | /* Macro to check whether a fileio_fh_t represents a closed file. */ | |
2769 | #define is_closed_fileio_fh(fd) ((fd) < 0) | |
2770 | ||
2771 | /* Index into fileio_fhandles of the lowest handle that might be | |
2772 | closed. This permits handle reuse without searching the whole | |
2773 | list each time a new file is opened. */ | |
2774 | static int lowest_closed_fd; | |
2775 | ||
2776 | /* Acquire a target fileio file descriptor. */ | |
2777 | ||
2778 | static int | |
2779 | acquire_fileio_fd (struct target_ops *t, int fd) | |
2780 | { | |
2781 | fileio_fh_t *fh, buf; | |
2782 | ||
2783 | gdb_assert (!is_closed_fileio_fh (fd)); | |
2784 | ||
2785 | /* Search for closed handles to reuse. */ | |
2786 | for (; | |
2787 | VEC_iterate (fileio_fh_t, fileio_fhandles, | |
2788 | lowest_closed_fd, fh); | |
2789 | lowest_closed_fd++) | |
2790 | if (is_closed_fileio_fh (fh->fd)) | |
2791 | break; | |
2792 | ||
2793 | /* Push a new handle if no closed handles were found. */ | |
2794 | if (lowest_closed_fd == VEC_length (fileio_fh_t, fileio_fhandles)) | |
2795 | fh = VEC_safe_push (fileio_fh_t, fileio_fhandles, NULL); | |
2796 | ||
2797 | /* Fill in the handle. */ | |
2798 | fh->t = t; | |
2799 | fh->fd = fd; | |
2800 | ||
2801 | /* Return its index, and start the next lookup at | |
2802 | the next index. */ | |
2803 | return lowest_closed_fd++; | |
2804 | } | |
2805 | ||
2806 | /* Release a target fileio file descriptor. */ | |
2807 | ||
2808 | static void | |
2809 | release_fileio_fd (int fd, fileio_fh_t *fh) | |
2810 | { | |
2811 | fh->fd = -1; | |
2812 | lowest_closed_fd = min (lowest_closed_fd, fd); | |
2813 | } | |
2814 | ||
2815 | /* Return a pointer to the fileio_fhandle_t corresponding to FD. */ | |
2816 | ||
2817 | #define fileio_fd_to_fh(fd) \ | |
2818 | VEC_index (fileio_fh_t, fileio_fhandles, (fd)) | |
2819 | ||
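A small illustration (values are only an example) of how the handle table above recycles slots:

  /* Starting with an empty vector and lowest_closed_fd == 0:
     acquire_fileio_fd (t, 7)   returns 0; slot 0 holds { t, 7 }.
     acquire_fileio_fd (t, 9)   returns 1; slot 1 holds { t, 9 }.
     release_fileio_fd (0, fh)  marks slot 0 as { t, -1 } and lowers
                                lowest_closed_fd back to 0.
     acquire_fileio_fd (t, 11)  reuses slot 0 and returns 0 again.  */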
4313b8c0 GB |
2820 | /* Helper for target_fileio_open and |
2821 | target_fileio_open_warn_if_slow. */ | |
12e2a5fd | 2822 | |
4313b8c0 GB |
2823 | static int |
2824 | target_fileio_open_1 (struct inferior *inf, const char *filename, | |
2825 | int flags, int mode, int warn_if_slow, | |
2826 | int *target_errno) | |
7313baad UW |
2827 | { |
2828 | struct target_ops *t; | |
2829 | ||
2830 | for (t = default_fileio_target (); t != NULL; t = t->beneath) | |
2831 | { | |
2832 | if (t->to_fileio_open != NULL) | |
2833 | { | |
07c138c8 | 2834 | int fd = t->to_fileio_open (t, inf, filename, flags, mode, |
4313b8c0 | 2835 | warn_if_slow, target_errno); |
7313baad | 2836 | |
1c4b552b GB |
2837 | if (fd < 0) |
2838 | fd = -1; | |
2839 | else | |
2840 | fd = acquire_fileio_fd (t, fd); | |
2841 | ||
7313baad UW |
2842 | if (targetdebug) |
2843 | fprintf_unfiltered (gdb_stdlog, | |
4313b8c0 | 2844 | "target_fileio_open (%d,%s,0x%x,0%o,%d)" |
07c138c8 GB |
2845 | " = %d (%d)\n", |
2846 | inf == NULL ? 0 : inf->num, | |
7313baad | 2847 | filename, flags, mode, |
4313b8c0 GB |
2848 | warn_if_slow, fd, |
2849 | fd != -1 ? 0 : *target_errno); | |
7313baad UW |
2850 | return fd; |
2851 | } | |
2852 | } | |
2853 | ||
2854 | *target_errno = FILEIO_ENOSYS; | |
2855 | return -1; | |
2856 | } | |
2857 | ||
12e2a5fd GB |
2858 | /* See target.h. */ |
2859 | ||
4313b8c0 GB |
2860 | int |
2861 | target_fileio_open (struct inferior *inf, const char *filename, | |
2862 | int flags, int mode, int *target_errno) | |
2863 | { | |
2864 | return target_fileio_open_1 (inf, filename, flags, mode, 0, | |
2865 | target_errno); | |
2866 | } | |
2867 | ||
2868 | /* See target.h. */ | |
2869 | ||
2870 | int | |
2871 | target_fileio_open_warn_if_slow (struct inferior *inf, | |
2872 | const char *filename, | |
2873 | int flags, int mode, int *target_errno) | |
2874 | { | |
2875 | return target_fileio_open_1 (inf, filename, flags, mode, 1, | |
2876 | target_errno); | |
2877 | } | |
2878 | ||
2879 | /* See target.h. */ | |
2880 | ||
7313baad UW |
2881 | int |
2882 | target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len, | |
2883 | ULONGEST offset, int *target_errno) | |
2884 | { | |
1c4b552b GB |
2885 | fileio_fh_t *fh = fileio_fd_to_fh (fd); |
2886 | int ret = -1; | |
7313baad | 2887 | |
1c4b552b GB |
2888 | if (is_closed_fileio_fh (fh->fd)) |
2889 | *target_errno = EBADF; | |
2890 | else | |
2891 | ret = fh->t->to_fileio_pwrite (fh->t, fh->fd, write_buf, | |
2892 | len, offset, target_errno); | |
7313baad | 2893 | |
1c4b552b GB |
2894 | if (targetdebug) |
2895 | fprintf_unfiltered (gdb_stdlog, | |
2896 | "target_fileio_pwrite (%d,...,%d,%s) " | |
2897 | "= %d (%d)\n", | |
2898 | fd, len, pulongest (offset), | |
2899 | ret, ret != -1 ? 0 : *target_errno); | |
2900 | return ret; | |
7313baad UW |
2901 | } |
2902 | ||
12e2a5fd GB |
2903 | /* See target.h. */ |
2904 | ||
7313baad UW |
2905 | int |
2906 | target_fileio_pread (int fd, gdb_byte *read_buf, int len, | |
2907 | ULONGEST offset, int *target_errno) | |
2908 | { | |
1c4b552b GB |
2909 | fileio_fh_t *fh = fileio_fd_to_fh (fd); |
2910 | int ret = -1; | |
7313baad | 2911 | |
1c4b552b GB |
2912 | if (is_closed_fileio_fh (fh->fd)) |
2913 | *target_errno = EBADF; | |
2914 | else | |
2915 | ret = fh->t->to_fileio_pread (fh->t, fh->fd, read_buf, | |
2916 | len, offset, target_errno); | |
7313baad | 2917 | |
1c4b552b GB |
2918 | if (targetdebug) |
2919 | fprintf_unfiltered (gdb_stdlog, | |
2920 | "target_fileio_pread (%d,...,%d,%s) " | |
2921 | "= %d (%d)\n", | |
2922 | fd, len, pulongest (offset), | |
2923 | ret, ret != -1 ? 0 : *target_errno); | |
9b15c1f0 GB |
2924 | return ret; |
2925 | } | |
2926 | ||
2927 | /* See target.h. */ | |
12e2a5fd | 2928 | |
9b15c1f0 GB |
2929 | int |
2930 | target_fileio_fstat (int fd, struct stat *sb, int *target_errno) | |
2931 | { | |
2932 | fileio_fh_t *fh = fileio_fd_to_fh (fd); | |
2933 | int ret = -1; | |
2934 | ||
2935 | if (is_closed_fileio_fh (fh->fd)) | |
2936 | *target_errno = EBADF; | |
2937 | else | |
2938 | ret = fh->t->to_fileio_fstat (fh->t, fh->fd, sb, target_errno); | |
2939 | ||
2940 | if (targetdebug) | |
2941 | fprintf_unfiltered (gdb_stdlog, | |
2942 | "target_fileio_fstat (%d) = %d (%d)\n", | |
2943 | fd, ret, ret != -1 ? 0 : *target_errno); | |
1c4b552b | 2944 | return ret; |
7313baad UW |
2945 | } |
2946 | ||
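/* Illustrative sketch (hypothetical helper): target_fileio_fstat can
   be used to size up a target file before deciding how to read it;
   only the size field is looked at here.  */

static void
example_log_target_file_size (int fd)
{
  struct stat st;
  int target_errno;

  if (target_fileio_fstat (fd, &st, &target_errno) != -1)
    fprintf_unfiltered (gdb_stdlog, "target file size: %s bytes\n",
                        plongest ((LONGEST) st.st_size));
  else
    fprintf_unfiltered (gdb_stdlog, "fstat failed (%d)\n", target_errno);
}
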
12e2a5fd GB |
2947 | /* See target.h. */ |
2948 | ||
7313baad UW |
2949 | int |
2950 | target_fileio_close (int fd, int *target_errno) | |
2951 | { | |
1c4b552b GB |
2952 | fileio_fh_t *fh = fileio_fd_to_fh (fd); |
2953 | int ret = -1; | |
7313baad | 2954 | |
1c4b552b GB |
2955 | if (is_closed_fileio_fh (fh->fd)) |
2956 | *target_errno = EBADF; | |
2957 | else | |
7313baad | 2958 | { |
1c4b552b GB |
2959 | ret = fh->t->to_fileio_close (fh->t, fh->fd, target_errno); |
2960 | release_fileio_fd (fd, fh); | |
7313baad UW |
2961 | } |
2962 | ||
1c4b552b GB |
2963 | if (targetdebug) |
2964 | fprintf_unfiltered (gdb_stdlog, | |
2965 | "target_fileio_close (%d) = %d (%d)\n", | |
2966 | fd, ret, ret != -1 ? 0 : *target_errno); | |
2967 | return ret; | |
7313baad UW |
2968 | } |
2969 | ||
12e2a5fd GB |
2970 | /* See target.h. */ |
2971 | ||
7313baad | 2972 | int |
07c138c8 GB |
2973 | target_fileio_unlink (struct inferior *inf, const char *filename, |
2974 | int *target_errno) | |
7313baad UW |
2975 | { |
2976 | struct target_ops *t; | |
2977 | ||
2978 | for (t = default_fileio_target (); t != NULL; t = t->beneath) | |
2979 | { | |
2980 | if (t->to_fileio_unlink != NULL) | |
2981 | { | |
07c138c8 GB |
2982 | int ret = t->to_fileio_unlink (t, inf, filename, |
2983 | target_errno); | |
7313baad UW |
2984 | |
2985 | if (targetdebug) | |
2986 | fprintf_unfiltered (gdb_stdlog, | |
07c138c8 GB |
2987 | "target_fileio_unlink (%d,%s)" |
2988 | " = %d (%d)\n", | |
2989 | inf == NULL ? 0 : inf->num, filename, | |
2990 | ret, ret != -1 ? 0 : *target_errno); | |
7313baad UW |
2991 | return ret; |
2992 | } | |
2993 | } | |
2994 | ||
2995 | *target_errno = FILEIO_ENOSYS; | |
2996 | return -1; | |
2997 | } | |
2998 | ||
12e2a5fd GB |
2999 | /* See target.h. */ |
3000 | ||
b9e7b9c3 | 3001 | char * |
07c138c8 GB |
3002 | target_fileio_readlink (struct inferior *inf, const char *filename, |
3003 | int *target_errno) | |
b9e7b9c3 UW |
3004 | { |
3005 | struct target_ops *t; | |
3006 | ||
3007 | for (t = default_fileio_target (); t != NULL; t = t->beneath) | |
3008 | { | |
3009 | if (t->to_fileio_readlink != NULL) | |
3010 | { | |
07c138c8 GB |
3011 | char *ret = t->to_fileio_readlink (t, inf, filename, |
3012 | target_errno); | |
b9e7b9c3 UW |
3013 | |
3014 | if (targetdebug) | |
3015 | fprintf_unfiltered (gdb_stdlog, | |
07c138c8 GB |
3016 | "target_fileio_readlink (%d,%s)" |
3017 | " = %s (%d)\n", | |
3018 | inf == NULL ? 0 : inf->num, | |
b9e7b9c3 UW |
3019 | filename, ret? ret : "(nil)", |
3020 | ret? 0 : *target_errno); | |
3021 | return ret; | |
3022 | } | |
3023 | } | |
3024 | ||
3025 | *target_errno = FILEIO_ENOSYS; | |
3026 | return NULL; | |
3027 | } | |
3028 | ||
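/* Illustrative sketch (hypothetical helper, made-up link name): the
   string returned by target_fileio_readlink is allocated for the
   caller and must be released with xfree.  */

static void
example_show_link_target (struct inferior *inf)
{
  int target_errno;
  char *resolved = target_fileio_readlink (inf, "/proc/self/exe",
                                           &target_errno);

  if (resolved != NULL)
    {
      fprintf_unfiltered (gdb_stdlog, "link resolves to %s\n", resolved);
      xfree (resolved);
    }
}
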
7313baad UW |
3029 | static void |
3030 | target_fileio_close_cleanup (void *opaque) | |
3031 | { | |
3032 | int fd = *(int *) opaque; | |
3033 | int target_errno; | |
3034 | ||
3035 | target_fileio_close (fd, &target_errno); | |
3036 | } | |
3037 | ||
07c138c8 GB |
3038 | /* Read target file FILENAME, in the filesystem as seen by INF. If |
3039 | INF is NULL, use the filesystem seen by the debugger (GDB or, for | |
3040 | remote targets, the remote stub). Store the result in *BUF_P and | |
3041 | return the size of the transferred data. PADDING additional bytes | |
3042 | are available in *BUF_P. This is a helper function for | |
3043 | target_fileio_read_alloc; see the declaration of that function for | |
3044 | more information. */ | |
7313baad | 3045 | |
f7af1fcd JK |
3046 | static LONGEST |
3047 | target_fileio_read_alloc_1 (struct inferior *inf, const char *filename, | |
3048 | gdb_byte **buf_p, int padding) | |
3049 | { | |
3050 | struct cleanup *close_cleanup; | |
db1ff28b JK |
3051 | size_t buf_alloc, buf_pos; |
3052 | gdb_byte *buf; | |
3053 | LONGEST n; | |
3054 | int fd; | |
3055 | int target_errno; | |
f7af1fcd | 3056 | |
db1ff28b JK |
3057 | fd = target_fileio_open (inf, filename, FILEIO_O_RDONLY, 0700, |
3058 | &target_errno); | |
f7af1fcd JK |
3059 | if (fd == -1) |
3060 | return -1; | |
3061 | ||
3062 | close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd); | |
db1ff28b JK |
3063 | |
3064 | /* Start by reading up to 4K at a time. The target will throttle | |
3065 | this number down if necessary. */ | |
3066 | buf_alloc = 4096; | |
224c3ddb | 3067 | buf = (gdb_byte *) xmalloc (buf_alloc); |
db1ff28b JK |
3068 | buf_pos = 0; |
3069 | while (1) | |
3070 | { | |
3071 | n = target_fileio_pread (fd, &buf[buf_pos], | |
3072 | buf_alloc - buf_pos - padding, buf_pos, | |
3073 | &target_errno); | |
3074 | if (n < 0) | |
3075 | { | |
3076 | /* An error occurred. */ | |
3077 | do_cleanups (close_cleanup); | |
3078 | xfree (buf); | |
3079 | return -1; | |
3080 | } | |
3081 | else if (n == 0) | |
3082 | { | |
3083 | /* Read all there was. */ | |
3084 | do_cleanups (close_cleanup); | |
3085 | if (buf_pos == 0) | |
3086 | xfree (buf); | |
3087 | else | |
3088 | *buf_p = buf; | |
3089 | return buf_pos; | |
3090 | } | |
3091 | ||
3092 | buf_pos += n; | |
3093 | ||
3094 | /* If the buffer is filling up, expand it. */ | |
3095 | if (buf_alloc < buf_pos * 2) | |
3096 | { | |
3097 | buf_alloc *= 2; | |
224c3ddb | 3098 | buf = (gdb_byte *) xrealloc (buf, buf_alloc); |
db1ff28b JK |
3099 | } |
3100 | ||
3101 | QUIT; | |
3102 | } | |
f7af1fcd JK |
3103 | } |
3104 | ||
12e2a5fd | 3105 | /* See target.h. */ |
7313baad UW |
3106 | |
3107 | LONGEST | |
07c138c8 GB |
3108 | target_fileio_read_alloc (struct inferior *inf, const char *filename, |
3109 | gdb_byte **buf_p) | |
7313baad | 3110 | { |
07c138c8 | 3111 | return target_fileio_read_alloc_1 (inf, filename, buf_p, 0); |
7313baad UW |
3112 | } |
3113 | ||
db1ff28b | 3114 | /* See target.h. */ |
f7af1fcd JK |
3115 | |
3116 | char * | |
3117 | target_fileio_read_stralloc (struct inferior *inf, const char *filename) | |
3118 | { | |
db1ff28b JK |
3119 | gdb_byte *buffer; |
3120 | char *bufstr; | |
3121 | LONGEST i, transferred; | |
3122 | ||
3123 | transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1); | |
3124 | bufstr = (char *) buffer; | |
3125 | ||
3126 | if (transferred < 0) | |
3127 | return NULL; | |
3128 | ||
3129 | if (transferred == 0) | |
3130 | return xstrdup (""); | |
3131 | ||
3132 | bufstr[transferred] = 0; | |
3133 | ||
3134 | /* Check for embedded NUL bytes; but allow trailing NULs. */ | |
3135 | for (i = strlen (bufstr); i < transferred; i++) | |
3136 | if (bufstr[i] != 0) | |
3137 | { | |
3138 | warning (_("target file %s " | |
3139 | "contained unexpected null characters"), | |
3140 | filename); | |
3141 | break; | |
3142 | } | |
3143 | ||
3144 | return bufstr; | |
f7af1fcd | 3145 | } |
7313baad | 3146 | |
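/* Illustrative sketch (hypothetical helper, made-up path): reading a
   small text file out of the inferior's filesystem as a NUL-terminated
   string; the result is heap-allocated and must be freed with xfree.  */

static void
example_dump_target_text_file (struct inferior *inf)
{
  char *text = target_fileio_read_stralloc (inf, "/etc/os-release");

  if (text != NULL)
    {
      fprintf_unfiltered (gdb_stdlog, "%s", text);
      xfree (text);
    }
}
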
db1ff28b | 3147 | |
e0d24f8d | 3148 | static int |
31568a15 TT |
3149 | default_region_ok_for_hw_watchpoint (struct target_ops *self, |
3150 | CORE_ADDR addr, int len) | |
e0d24f8d | 3151 | { |
f5656ead | 3152 | return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT); |
ccaa32c7 GS |
3153 | } |
3154 | ||
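/* Worked example for the default above: on a typical 64-bit target,
   gdbarch_ptr_bit is 64 and TARGET_CHAR_BIT is 8, so regions of up to
   64 / 8 = 8 bytes are considered OK for a hardware watchpoint; longer
   regions are refused unless the target provides its own method.  */
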
5009afc5 AS |
3155 | static int |
3156 | default_watchpoint_addr_within_range (struct target_ops *target, | |
3157 | CORE_ADDR addr, | |
3158 | CORE_ADDR start, int length) | |
3159 | { | |
3160 | return addr >= start && addr < start + length; | |
3161 | } | |
3162 | ||
c2250ad1 UW |
3163 | static struct gdbarch * |
3164 | default_thread_architecture (struct target_ops *ops, ptid_t ptid) | |
3165 | { | |
f5656ead | 3166 | return target_gdbarch (); |
c2250ad1 UW |
3167 | } |
3168 | ||
c906108c | 3169 | static int |
555bbdeb TT |
3170 | return_zero (struct target_ops *ignore) |
3171 | { | |
3172 | return 0; | |
3173 | } | |
3174 | ||
3175 | static int | |
3176 | return_zero_has_execution (struct target_ops *ignore, ptid_t ignore2) | |
c906108c SS |
3177 | { |
3178 | return 0; | |
3179 | } | |
3180 | ||
ed9a39eb JM |
3181 | /* Find the next target down the stack from the specified target.  */ |
3184 | ||
3185 | struct target_ops * | |
fba45db2 | 3186 | find_target_beneath (struct target_ops *t) |
ed9a39eb | 3187 | { |
258b763a | 3188 | return t->beneath; |
ed9a39eb JM |
3189 | } |
3190 | ||
8b06beed TT |
3191 | /* See target.h. */ |
3192 | ||
3193 | struct target_ops * | |
3194 | find_target_at (enum strata stratum) | |
3195 | { | |
3196 | struct target_ops *t; | |
3197 | ||
3198 | for (t = current_target.beneath; t != NULL; t = t->beneath) | |
3199 | if (t->to_stratum == stratum) | |
3200 | return t; | |
3201 | ||
3202 | return NULL; | |
3203 | } | |
3204 | ||
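/* Illustrative sketch (hypothetical helper): find_target_at is the
   usual way to ask whether a target of a given stratum is currently
   pushed, e.g. a live process layer.  */

static int
example_have_process_target (void)
{
  return find_target_at (process_stratum) != NULL;
}
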
c906108c SS |
3205 | \f |
3206 | /* The inferior process has died. Long live the inferior! */ | |
3207 | ||
3208 | void | |
fba45db2 | 3209 | generic_mourn_inferior (void) |
c906108c | 3210 | { |
7f9f62ba | 3211 | ptid_t ptid; |
c906108c | 3212 | |
7f9f62ba | 3213 | ptid = inferior_ptid; |
39f77062 | 3214 | inferior_ptid = null_ptid; |
7f9f62ba | 3215 | |
f59f708a PA |
3216 | /* Mark breakpoints uninserted in case something tries to delete a |
3217 | breakpoint while we delete the inferior's threads (which would | |
3218 | fail, since the inferior is long gone). */ | |
3219 | mark_breakpoints_out (); | |
3220 | ||
7f9f62ba PA |
3221 | if (!ptid_equal (ptid, null_ptid)) |
3222 | { | |
3223 | int pid = ptid_get_pid (ptid); | |
6c95b8df | 3224 | exit_inferior (pid); |
7f9f62ba PA |
3225 | } |
3226 | ||
f59f708a PA |
3227 | /* Note this wipes step-resume breakpoints, so needs to be done |
3228 | after exit_inferior, which ends up referencing the step-resume | |
3229 | breakpoints through clear_thread_inferior_resources. */ | |
c906108c | 3230 | breakpoint_init_inferior (inf_exited); |
f59f708a | 3231 | |
c906108c SS |
3232 | registers_changed (); |
3233 | ||
c906108c SS |
3234 | reopen_exec_file (); |
3235 | reinit_frame_cache (); | |
3236 | ||
9a4105ab AC |
3237 | if (deprecated_detach_hook) |
3238 | deprecated_detach_hook (); | |
c906108c SS |
3239 | } |
3240 | \f | |
fd0a2a6f MK |
3241 | /* Convert a normal process ID to a string. Returns the string in a |
3242 | static buffer. */ | |
c906108c SS |
3243 | |
3244 | char * | |
39f77062 | 3245 | normal_pid_to_str (ptid_t ptid) |
c906108c | 3246 | { |
fd0a2a6f | 3247 | static char buf[32]; |
c906108c | 3248 | |
5fff8fc0 | 3249 | xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid)); |
c906108c SS |
3250 | return buf; |
3251 | } | |
3252 | ||
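/* Illustrative sketch (hypothetical helper): because the result lives
   in a static buffer, it only stays valid until the next call; copy it
   (e.g. with xstrdup) if it has to survive longer.  */

static char *
example_save_pid_string (ptid_t ptid)
{
  return xstrdup (normal_pid_to_str (ptid));
}
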
2c0b251b | 3253 | static char * |
770234d3 | 3254 | default_pid_to_str (struct target_ops *ops, ptid_t ptid) |
117de6a9 PA |
3255 | { |
3256 | return normal_pid_to_str (ptid); | |
3257 | } | |
3258 | ||
9b4eba8e HZ |
3259 | /* Error-catcher for target_find_memory_regions. */ |
3260 | static int | |
2e73927c TT |
3261 | dummy_find_memory_regions (struct target_ops *self, |
3262 | find_memory_region_ftype ignore1, void *ignore2) | |
be4d1333 | 3263 | { |
9b4eba8e | 3264 | error (_("Command not implemented for this target.")); |
be4d1333 MS |
3265 | return 0; |
3266 | } | |
3267 | ||
9b4eba8e HZ |
3268 | /* Error-catcher for target_make_corefile_notes. */ |
3269 | static char * | |
fc6691b2 TT |
3270 | dummy_make_corefile_notes (struct target_ops *self, |
3271 | bfd *ignore1, int *ignore2) | |
be4d1333 | 3272 | { |
9b4eba8e | 3273 | error (_("Command not implemented for this target.")); |
be4d1333 MS |
3274 | return NULL; |
3275 | } | |
3276 | ||
c906108c SS |
3277 | /* Set up the handful of non-empty slots needed by the dummy target |
3278 | vector. */ | |
3279 | ||
3280 | static void | |
fba45db2 | 3281 | init_dummy_target (void) |
c906108c SS |
3282 | { |
3283 | dummy_target.to_shortname = "None"; | |
3284 | dummy_target.to_longname = "None"; | |
3285 | dummy_target.to_doc = ""; | |
03583c20 UW |
3286 | dummy_target.to_supports_disable_randomization |
3287 | = find_default_supports_disable_randomization; | |
c906108c | 3288 | dummy_target.to_stratum = dummy_stratum; |
555bbdeb TT |
3289 | dummy_target.to_has_all_memory = return_zero; |
3290 | dummy_target.to_has_memory = return_zero; | |
3291 | dummy_target.to_has_stack = return_zero; | |
3292 | dummy_target.to_has_registers = return_zero; | |
3293 | dummy_target.to_has_execution = return_zero_has_execution; | |
c906108c | 3294 | dummy_target.to_magic = OPS_MAGIC; |
1101cb7b TT |
3295 | |
3296 | install_dummy_methods (&dummy_target); | |
c906108c | 3297 | } |
c906108c | 3298 | \f |
c906108c | 3299 | |
f1c07ab0 | 3300 | void |
460014f5 | 3301 | target_close (struct target_ops *targ) |
f1c07ab0 | 3302 | { |
7fdc1521 TT |
3303 | gdb_assert (!target_is_pushed (targ)); |
3304 | ||
f1c07ab0 | 3305 | if (targ->to_xclose != NULL) |
460014f5 | 3306 | targ->to_xclose (targ); |
f1c07ab0 | 3307 | else if (targ->to_close != NULL) |
de90e03d | 3308 | targ->to_close (targ); |
947b8855 PA |
3309 | |
3310 | if (targetdebug) | |
460014f5 | 3311 | fprintf_unfiltered (gdb_stdlog, "target_close ()\n"); |
f1c07ab0 AC |
3312 | } |
3313 | ||
28439f5e PA |
3314 | int |
3315 | target_thread_alive (ptid_t ptid) | |
c906108c | 3316 | { |
a7068b60 | 3317 | return current_target.to_thread_alive (¤t_target, ptid); |
28439f5e PA |
3318 | } |
3319 | ||
3320 | void | |
e8032dde | 3321 | target_update_thread_list (void) |
28439f5e | 3322 | { |
e8032dde | 3323 | current_target.to_update_thread_list (¤t_target); |
c906108c SS |
3324 | } |
3325 | ||
d914c394 SS |
3326 | void |
3327 | target_stop (ptid_t ptid) | |
3328 | { | |
3329 | if (!may_stop) | |
3330 | { | |
3331 | warning (_("May not interrupt or stop the target, ignoring attempt")); | |
3332 | return; | |
3333 | } | |
3334 | ||
1eab8a48 | 3335 | (*current_target.to_stop) (¤t_target, ptid); |
d914c394 SS |
3336 | } |
3337 | ||
bfedc46a PA |
3338 | void |
3339 | target_interrupt (ptid_t ptid) | |
3340 | { | |
3341 | if (!may_stop) | |
3342 | { | |
3343 | warning (_("May not interrupt or stop the target, ignoring attempt")); | |
3344 | return; | |
3345 | } | |
3346 | ||
3347 | (*current_target.to_interrupt) (¤t_target, ptid); | |
3348 | } | |
3349 | ||
abc56d60 PA |
3350 | /* See target.h. */ |
3351 | ||
3352 | void | |
3353 | target_check_pending_interrupt (void) | |
3354 | { | |
3355 | (*current_target.to_check_pending_interrupt) (¤t_target); | |
3356 | } | |
3357 | ||
f8c1d06b GB |
3358 | /* See target/target.h. */ |
3359 | ||
3360 | void | |
03f4463b | 3361 | target_stop_and_wait (ptid_t ptid) |
f8c1d06b GB |
3362 | { |
3363 | struct target_waitstatus status; | |
3364 | int was_non_stop = non_stop; | |
3365 | ||
3366 | non_stop = 1; | |
3367 | target_stop (ptid); | |
3368 | ||
3369 | memset (&status, 0, sizeof (status)); | |
3370 | target_wait (ptid, &status, 0); | |
3371 | ||
3372 | non_stop = was_non_stop; | |
3373 | } | |
3374 | ||
3375 | /* See target/target.h. */ | |
3376 | ||
3377 | void | |
03f4463b | 3378 | target_continue_no_signal (ptid_t ptid) |
f8c1d06b GB |
3379 | { |
3380 | target_resume (ptid, 0, GDB_SIGNAL_0); | |
3381 | } | |
3382 | ||
09826ec5 PA |
3383 | /* Concatenate ELEM to LIST, a comma-separated list, and return the |
3384 | result. The LIST incoming argument is released. */ | |
3385 | ||
3386 | static char * | |
3387 | str_comma_list_concat_elem (char *list, const char *elem) | |
3388 | { | |
3389 | if (list == NULL) | |
3390 | return xstrdup (elem); | |
3391 | else | |
3392 | return reconcat (list, list, ", ", elem, (char *) NULL); | |
3393 | } | |
3394 | ||
3395 | /* Helper for target_options_to_string. If OPT is present in | |
3396 | TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET. | |
3397 | Returns the new resulting string. OPT is removed from | |
3398 | TARGET_OPTIONS. */ | |
3399 | ||
3400 | static char * | |
3401 | do_option (int *target_options, char *ret, | |
3402 | int opt, char *opt_str) | |
3403 | { | |
3404 | if ((*target_options & opt) != 0) | |
3405 | { | |
3406 | ret = str_comma_list_concat_elem (ret, opt_str); | |
3407 | *target_options &= ~opt; | |
3408 | } | |
3409 | ||
3410 | return ret; | |
3411 | } | |
3412 | ||
3413 | char * | |
3414 | target_options_to_string (int target_options) | |
3415 | { | |
3416 | char *ret = NULL; | |
3417 | ||
3418 | #define DO_TARG_OPTION(OPT) \ | |
3419 | ret = do_option (&target_options, ret, OPT, #OPT) | |
3420 | ||
3421 | DO_TARG_OPTION (TARGET_WNOHANG); | |
3422 | ||
3423 | if (target_options != 0) | |
3424 | ret = str_comma_list_concat_elem (ret, "unknown???"); | |
3425 | ||
3426 | if (ret == NULL) | |
3427 | ret = xstrdup (""); | |
3428 | return ret; | |
3429 | } | |
3430 | ||
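/* Illustrative sketch (hypothetical helper): the string returned by
   target_options_to_string is heap-allocated, so callers free it once
   logged; passing TARGET_WNOHANG yields "TARGET_WNOHANG".  */

static void
example_log_wait_options (int options)
{
  char *str = target_options_to_string (options);

  fprintf_unfiltered (gdb_stdlog, "wait options: [%s]\n", str);
  xfree (str);
}
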
bf0c5130 | 3431 | static void |
56be3814 UW |
3432 | debug_print_register (const char * func, |
3433 | struct regcache *regcache, int regno) | |
bf0c5130 | 3434 | { |
f8d29908 | 3435 | struct gdbarch *gdbarch = get_regcache_arch (regcache); |
5d502164 | 3436 | |
bf0c5130 | 3437 | fprintf_unfiltered (gdb_stdlog, "%s ", func); |
f8d29908 | 3438 | if (regno >= 0 && regno < gdbarch_num_regs (gdbarch) |
f8d29908 UW |
3439 | && gdbarch_register_name (gdbarch, regno) != NULL |
3440 | && gdbarch_register_name (gdbarch, regno)[0] != '\0') | |
3441 | fprintf_unfiltered (gdb_stdlog, "(%s)", | |
3442 | gdbarch_register_name (gdbarch, regno)); | |
bf0c5130 AC |
3443 | else |
3444 | fprintf_unfiltered (gdb_stdlog, "(%d)", regno); | |
0ff58721 | 3445 | if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)) |
bf0c5130 | 3446 | { |
e17a4113 | 3447 | enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); |
f8d29908 | 3448 | int i, size = register_size (gdbarch, regno); |
e362b510 | 3449 | gdb_byte buf[MAX_REGISTER_SIZE]; |
5d502164 | 3450 | |
0ff58721 | 3451 | regcache_raw_collect (regcache, regno, buf); |
bf0c5130 | 3452 | fprintf_unfiltered (gdb_stdlog, " = "); |
81c4a259 | 3453 | for (i = 0; i < size; i++) |
bf0c5130 AC |
3454 | { |
3455 | fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]); | |
3456 | } | |
81c4a259 | 3457 | if (size <= sizeof (LONGEST)) |
bf0c5130 | 3458 | { |
e17a4113 | 3459 | ULONGEST val = extract_unsigned_integer (buf, size, byte_order); |
5d502164 | 3460 | |
0b1553bc UW |
3461 | fprintf_unfiltered (gdb_stdlog, " %s %s", |
3462 | core_addr_to_string_nz (val), plongest (val)); | |
bf0c5130 AC |
3463 | } |
3464 | } | |
3465 | fprintf_unfiltered (gdb_stdlog, "\n"); | |
3466 | } | |
3467 | ||
28439f5e PA |
3468 | void |
3469 | target_fetch_registers (struct regcache *regcache, int regno) | |
c906108c | 3470 | { |
ad5989bd TT |
3471 | current_target.to_fetch_registers (¤t_target, regcache, regno); |
3472 | if (targetdebug) | |
3473 | debug_print_register ("target_fetch_registers", regcache, regno); | |
c906108c SS |
3474 | } |
3475 | ||
28439f5e PA |
3476 | void |
3477 | target_store_registers (struct regcache *regcache, int regno) | |
c906108c | 3478 | { |
28439f5e | 3479 | struct target_ops *t; |
5d502164 | 3480 | |
d914c394 SS |
3481 | if (!may_write_registers) |
3482 | error (_("Writing to registers is not allowed (regno %d)"), regno); | |
3483 | ||
6b84065d TT |
3484 | current_target.to_store_registers (¤t_target, regcache, regno); |
3485 | if (targetdebug) | |
28439f5e | 3486 | { |
6b84065d | 3487 | debug_print_register ("target_store_registers", regcache, regno); |
28439f5e | 3488 | } |
c906108c SS |
3489 | } |
3490 | ||
dc146f7c VP |
3491 | int |
3492 | target_core_of_thread (ptid_t ptid) | |
3493 | { | |
a7068b60 | 3494 | return current_target.to_core_of_thread (¤t_target, ptid); |
dc146f7c VP |
3495 | } |
3496 | ||
936d2992 PA |
3497 | int |
3498 | simple_verify_memory (struct target_ops *ops, | |
3499 | const gdb_byte *data, CORE_ADDR lma, ULONGEST size) | |
3500 | { | |
3501 | LONGEST total_xfered = 0; | |
3502 | ||
3503 | while (total_xfered < size) | |
3504 | { | |
3505 | ULONGEST xfered_len; | |
3506 | enum target_xfer_status status; | |
3507 | gdb_byte buf[1024]; | |
3508 | ULONGEST howmuch = min (sizeof (buf), size - total_xfered); | |
3509 | ||
3510 | status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL, | |
3511 | buf, NULL, lma + total_xfered, howmuch, | |
3512 | &xfered_len); | |
3513 | if (status == TARGET_XFER_OK | |
3514 | && memcmp (data + total_xfered, buf, xfered_len) == 0) | |
3515 | { | |
3516 | total_xfered += xfered_len; | |
3517 | QUIT; | |
3518 | } | |
3519 | else | |
3520 | return 0; | |
3521 | } | |
3522 | return 1; | |
3523 | } | |
3524 | ||
3525 | /* Default implementation of memory verification. */ | |
3526 | ||
3527 | static int | |
3528 | default_verify_memory (struct target_ops *self, | |
3529 | const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size) | |
3530 | { | |
3531 | /* Start over from the top of the target stack. */ | |
3532 | return simple_verify_memory (current_target.beneath, | |
3533 | data, memaddr, size); | |
3534 | } | |
3535 | ||
4a5e7a5b PA |
3536 | int |
3537 | target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size) | |
3538 | { | |
a7068b60 TT |
3539 | return current_target.to_verify_memory (¤t_target, |
3540 | data, memaddr, size); | |
4a5e7a5b PA |
3541 | } |
3542 | ||
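/* Illustrative sketch (hypothetical helper): a "compare-sections"-style
   check uses the method above to confirm that target memory still
   matches a host-side copy of an image.  */

static void
example_check_image (const gdb_byte *image, CORE_ADDR lma, ULONGEST size)
{
  if (!target_verify_memory (image, lma, size))
    warning (_("memory at %s does not match the expected contents"),
             paddress (target_gdbarch (), lma));
}
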
9c06b0b4 TJB |
3543 | /* The documentation for this function is in its prototype declaration in |
3544 | target.h. */ | |
3545 | ||
3546 | int | |
f4b0a671 SM |
3547 | target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, |
3548 | enum target_hw_bp_type rw) | |
9c06b0b4 | 3549 | { |
a7068b60 TT |
3550 | return current_target.to_insert_mask_watchpoint (¤t_target, |
3551 | addr, mask, rw); | |
9c06b0b4 TJB |
3552 | } |
3553 | ||
3554 | /* The documentation for this function is in its prototype declaration in | |
3555 | target.h. */ | |
3556 | ||
3557 | int | |
f4b0a671 SM |
3558 | target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, |
3559 | enum target_hw_bp_type rw) | |
9c06b0b4 | 3560 | { |
a7068b60 TT |
3561 | return current_target.to_remove_mask_watchpoint (¤t_target, |
3562 | addr, mask, rw); | |
9c06b0b4 TJB |
3563 | } |
3564 | ||
3565 | /* The documentation for this function is in its prototype declaration | |
3566 | in target.h. */ | |
3567 | ||
3568 | int | |
3569 | target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask) | |
3570 | { | |
6c7e5e5c TT |
3571 | return current_target.to_masked_watch_num_registers (¤t_target, |
3572 | addr, mask); | |
9c06b0b4 TJB |
3573 | } |
3574 | ||
f1310107 TJB |
3575 | /* The documentation for this function is in its prototype declaration |
3576 | in target.h. */ | |
3577 | ||
3578 | int | |
3579 | target_ranged_break_num_registers (void) | |
3580 | { | |
a134316b | 3581 | return current_target.to_ranged_break_num_registers (¤t_target); |
f1310107 TJB |
3582 | } |
3583 | ||
02d27625 MM |
3584 | /* See target.h. */ |
3585 | ||
043c3577 MM |
3586 | int |
3587 | target_supports_btrace (enum btrace_format format) | |
3588 | { | |
3589 | return current_target.to_supports_btrace (¤t_target, format); | |
3590 | } | |
3591 | ||
3592 | /* See target.h. */ | |
3593 | ||
02d27625 | 3594 | struct btrace_target_info * |
f4abbc16 | 3595 | target_enable_btrace (ptid_t ptid, const struct btrace_config *conf) |
02d27625 | 3596 | { |
f4abbc16 | 3597 | return current_target.to_enable_btrace (¤t_target, ptid, conf); |
02d27625 MM |
3598 | } |
3599 | ||
3600 | /* See target.h. */ | |
3601 | ||
3602 | void | |
3603 | target_disable_btrace (struct btrace_target_info *btinfo) | |
3604 | { | |
8dc292d3 | 3605 | current_target.to_disable_btrace (¤t_target, btinfo); |
02d27625 MM |
3606 | } |
3607 | ||
3608 | /* See target.h. */ | |
3609 | ||
3610 | void | |
3611 | target_teardown_btrace (struct btrace_target_info *btinfo) | |
3612 | { | |
9ace480d | 3613 | current_target.to_teardown_btrace (¤t_target, btinfo); |
02d27625 MM |
3614 | } |
3615 | ||
3616 | /* See target.h. */ | |
3617 | ||
969c39fb | 3618 | enum btrace_error |
734b0e4b | 3619 | target_read_btrace (struct btrace_data *btrace, |
969c39fb | 3620 | struct btrace_target_info *btinfo, |
02d27625 MM |
3621 | enum btrace_read_type type) |
3622 | { | |
eb5b20d4 | 3623 | return current_target.to_read_btrace (¤t_target, btrace, btinfo, type); |
02d27625 MM |
3624 | } |
3625 | ||
d02ed0bb MM |
3626 | /* See target.h. */ |
3627 | ||
f4abbc16 MM |
3628 | const struct btrace_config * |
3629 | target_btrace_conf (const struct btrace_target_info *btinfo) | |
3630 | { | |
3631 | return current_target.to_btrace_conf (¤t_target, btinfo); | |
3632 | } | |
3633 | ||
3634 | /* See target.h. */ | |
3635 | ||
7c1687a9 MM |
3636 | void |
3637 | target_stop_recording (void) | |
3638 | { | |
ee97f592 | 3639 | current_target.to_stop_recording (¤t_target); |
7c1687a9 MM |
3640 | } |
3641 | ||
3642 | /* See target.h. */ | |
3643 | ||
d02ed0bb | 3644 | void |
85e1311a | 3645 | target_save_record (const char *filename) |
d02ed0bb | 3646 | { |
f09e2107 | 3647 | current_target.to_save_record (¤t_target, filename); |
d02ed0bb MM |
3648 | } |
3649 | ||
3650 | /* See target.h. */ | |
3651 | ||
3652 | int | |
3653 | target_supports_delete_record (void) | |
3654 | { | |
3655 | struct target_ops *t; | |
3656 | ||
3657 | for (t = current_target.beneath; t != NULL; t = t->beneath) | |
b0ed115f TT |
3658 | if (t->to_delete_record != delegate_delete_record |
3659 | && t->to_delete_record != tdefault_delete_record) | |
d02ed0bb MM |
3660 | return 1; |
3661 | ||
3662 | return 0; | |
3663 | } | |
3664 | ||
3665 | /* See target.h. */ | |
3666 | ||
3667 | void | |
3668 | target_delete_record (void) | |
3669 | { | |
07366925 | 3670 | current_target.to_delete_record (¤t_target); |
d02ed0bb MM |
3671 | } |
3672 | ||
3673 | /* See target.h. */ | |
3674 | ||
3675 | int | |
a52eab48 | 3676 | target_record_is_replaying (ptid_t ptid) |
d02ed0bb | 3677 | { |
a52eab48 | 3678 | return current_target.to_record_is_replaying (¤t_target, ptid); |
d02ed0bb MM |
3679 | } |
3680 | ||
3681 | /* See target.h. */ | |
3682 | ||
7ff27e9b MM |
3683 | int |
3684 | target_record_will_replay (ptid_t ptid, int dir) | |
3685 | { | |
3686 | return current_target.to_record_will_replay (¤t_target, ptid, dir); | |
3687 | } | |
3688 | ||
3689 | /* See target.h. */ | |
3690 | ||
797094dd MM |
3691 | void |
3692 | target_record_stop_replaying (void) | |
3693 | { | |
3694 | current_target.to_record_stop_replaying (¤t_target); | |
3695 | } | |
3696 | ||
3697 | /* See target.h. */ | |
3698 | ||
d02ed0bb MM |
3699 | void |
3700 | target_goto_record_begin (void) | |
3701 | { | |
671e76cc | 3702 | current_target.to_goto_record_begin (¤t_target); |
d02ed0bb MM |
3703 | } |
3704 | ||
3705 | /* See target.h. */ | |
3706 | ||
3707 | void | |
3708 | target_goto_record_end (void) | |
3709 | { | |
e9179bb3 | 3710 | current_target.to_goto_record_end (¤t_target); |
d02ed0bb MM |
3711 | } |
3712 | ||
3713 | /* See target.h. */ | |
3714 | ||
3715 | void | |
3716 | target_goto_record (ULONGEST insn) | |
3717 | { | |
05969c84 | 3718 | current_target.to_goto_record (¤t_target, insn); |
d02ed0bb MM |
3719 | } |
3720 | ||
67c86d06 MM |
3721 | /* See target.h. */ |
3722 | ||
3723 | void | |
3724 | target_insn_history (int size, int flags) | |
3725 | { | |
3679abfa | 3726 | current_target.to_insn_history (¤t_target, size, flags); |
67c86d06 MM |
3727 | } |
3728 | ||
3729 | /* See target.h. */ | |
3730 | ||
3731 | void | |
3732 | target_insn_history_from (ULONGEST from, int size, int flags) | |
3733 | { | |
8444ab58 | 3734 | current_target.to_insn_history_from (¤t_target, from, size, flags); |
67c86d06 MM |
3735 | } |
3736 | ||
3737 | /* See target.h. */ | |
3738 | ||
3739 | void | |
3740 | target_insn_history_range (ULONGEST begin, ULONGEST end, int flags) | |
3741 | { | |
c29302cc | 3742 | current_target.to_insn_history_range (¤t_target, begin, end, flags); |
67c86d06 MM |
3743 | } |
3744 | ||
15984c13 MM |
3745 | /* See target.h. */ |
3746 | ||
3747 | void | |
3748 | target_call_history (int size, int flags) | |
3749 | { | |
170049d4 | 3750 | current_target.to_call_history (¤t_target, size, flags); |
15984c13 MM |
3751 | } |
3752 | ||
3753 | /* See target.h. */ | |
3754 | ||
3755 | void | |
3756 | target_call_history_from (ULONGEST begin, int size, int flags) | |
3757 | { | |
16fc27d6 | 3758 | current_target.to_call_history_from (¤t_target, begin, size, flags); |
15984c13 MM |
3759 | } |
3760 | ||
3761 | /* See target.h. */ | |
3762 | ||
3763 | void | |
3764 | target_call_history_range (ULONGEST begin, ULONGEST end, int flags) | |
3765 | { | |
115d9817 | 3766 | current_target.to_call_history_range (¤t_target, begin, end, flags); |
15984c13 MM |
3767 | } |
3768 | ||
ea001bdc MM |
3769 | /* See target.h. */ |
3770 | ||
3771 | const struct frame_unwind * | |
3772 | target_get_unwinder (void) | |
3773 | { | |
ac01945b | 3774 | return current_target.to_get_unwinder (¤t_target); |
ea001bdc MM |
3775 | } |
3776 | ||
3777 | /* See target.h. */ | |
3778 | ||
3779 | const struct frame_unwind * | |
3780 | target_get_tailcall_unwinder (void) | |
3781 | { | |
ac01945b | 3782 | return current_target.to_get_tailcall_unwinder (¤t_target); |
ea001bdc MM |
3783 | } |
3784 | ||
5fff78c4 MM |
3785 | /* See target.h. */ |
3786 | ||
3787 | void | |
3788 | target_prepare_to_generate_core (void) | |
3789 | { | |
3790 | current_target.to_prepare_to_generate_core (¤t_target); | |
3791 | } | |
3792 | ||
3793 | /* See target.h. */ | |
3794 | ||
3795 | void | |
3796 | target_done_generating_core (void) | |
3797 | { | |
3798 | current_target.to_done_generating_core (¤t_target); | |
3799 | } | |
3800 | ||
c906108c | 3801 | static void |
fba45db2 | 3802 | setup_target_debug (void) |
c906108c SS |
3803 | { |
3804 | memcpy (&debug_target, ¤t_target, sizeof debug_target); | |
3805 | ||
a7068b60 | 3806 | init_debug_target (¤t_target); |
c906108c | 3807 | } |
c906108c | 3808 | \f |
c5aa993b JM |
3809 | |
3810 | static char targ_desc[] = | |
3e43a32a MS |
3811 | "Names of targets and files being debugged.\nShows the entire \ |
3812 | stack of targets currently in use (including the exec-file,\n\ | |
c906108c SS |
3813 | core-file, and process, if any), as well as the symbol file name."; |
3814 | ||
a53f3625 | 3815 | static void |
a30bf1f1 TT |
3816 | default_rcmd (struct target_ops *self, const char *command, |
3817 | struct ui_file *output) | |
a53f3625 TT |
3818 | { |
3819 | error (_("\"monitor\" command not supported by this target.")); | |
3820 | } | |
3821 | ||
96baa820 JM |
3822 | static void |
3823 | do_monitor_command (char *cmd, | |
3824 | int from_tty) | |
3825 | { | |
96baa820 JM |
3826 | target_rcmd (cmd, gdb_stdtarg); |
3827 | } | |
3828 | ||
87680a14 JB |
3829 | /* Print the name of each layer of our target stack. */ |
3830 | ||
3831 | static void | |
3832 | maintenance_print_target_stack (char *cmd, int from_tty) | |
3833 | { | |
3834 | struct target_ops *t; | |
3835 | ||
3836 | printf_filtered (_("The current target stack is:\n")); | |
3837 | ||
3838 | for (t = target_stack; t != NULL; t = t->beneath) | |
3839 | { | |
3840 | printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname); | |
3841 | } | |
3842 | } | |
3843 | ||
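/* Illustrative output (hypothetical session): the command prints one
   " - shortname (longname)" line per pushed target, topmost first,
   e.g. a remote session stacked over an exec file and the dummy target:

     (gdb) maintenance print target-stack
     The current target stack is:
      - remote (Remote serial target in gdb-specific protocol)
      - exec (Local exec file)
      - None (None)
*/
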
372316f1 PA |
3844 | /* See target.h. */ |
3845 | ||
3846 | void | |
3847 | target_async (int enable) | |
3848 | { | |
3849 | infrun_async (enable); | |
3850 | current_target.to_async (¤t_target, enable); | |
3851 | } | |
3852 | ||
329ea579 PA |
3853 | /* Controls whether targets may report that they can be, or currently are, | |
3854 | asynchronous.  This is just for maintainers to use when debugging gdb. */ | |
3855 | int target_async_permitted = 1; | |
c6ebd6cf VP |
3856 | |
3857 | /* The set command writes to this variable. If the inferior is | |
b5419e49 | 3858 | executing, target_async_permitted is *not* updated. */ |
329ea579 | 3859 | static int target_async_permitted_1 = 1; |
c6ebd6cf VP |
3860 | |
3861 | static void | |
329ea579 PA |
3862 | maint_set_target_async_command (char *args, int from_tty, |
3863 | struct cmd_list_element *c) | |
c6ebd6cf | 3864 | { |
c35b1492 | 3865 | if (have_live_inferiors ()) |
c6ebd6cf VP |
3866 | { |
3867 | target_async_permitted_1 = target_async_permitted; | |
3868 | error (_("Cannot change this setting while the inferior is running.")); | |
3869 | } | |
3870 | ||
3871 | target_async_permitted = target_async_permitted_1; | |
3872 | } | |
3873 | ||
3874 | static void | |
329ea579 PA |
3875 | maint_show_target_async_command (struct ui_file *file, int from_tty, |
3876 | struct cmd_list_element *c, | |
3877 | const char *value) | |
c6ebd6cf | 3878 | { |
3e43a32a MS |
3879 | fprintf_filtered (file, |
3880 | _("Controlling the inferior in " | |
3881 | "asynchronous mode is %s.\n"), value); | |
c6ebd6cf VP |
3882 | } |
3883 | ||
fbea99ea PA |
3884 | /* Return true if the target operates in non-stop mode even with "set |
3885 | non-stop off". */ | |
3886 | ||
3887 | static int | |
3888 | target_always_non_stop_p (void) | |
3889 | { | |
3890 | return current_target.to_always_non_stop_p (¤t_target); | |
3891 | } | |
3892 | ||
3893 | /* See target.h. */ | |
3894 | ||
3895 | int | |
3896 | target_is_non_stop_p (void) | |
3897 | { | |
3898 | return (non_stop | |
3899 | || target_non_stop_enabled == AUTO_BOOLEAN_TRUE | |
3900 | || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO | |
3901 | && target_always_non_stop_p ())); | |
3902 | } | |
3903 | ||
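/* Worked example for the predicate above: with the default
   "maint set target-non-stop auto", target_is_non_stop_p () is already
   true for a target whose to_always_non_stop_p hook returns 1, even
   though the user-visible "set non-stop" setting is still off.  */
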
3904 | /* Controls whether targets can report that they always run in non-stop | |
3905 | mode.  This is just for maintainers to use when debugging gdb. */ | |
3906 | enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO; | |
3907 | ||
3908 | /* The set command writes to this variable. If the inferior is | |
3909 | executing, target_non_stop_enabled is *not* updated. */ | |
3910 | static enum auto_boolean target_non_stop_enabled_1 = AUTO_BOOLEAN_AUTO; | |
3911 | ||
3912 | /* Implementation of "maint set target-non-stop". */ | |
3913 | ||
3914 | static void | |
3915 | maint_set_target_non_stop_command (char *args, int from_tty, | |
3916 | struct cmd_list_element *c) | |
3917 | { | |
3918 | if (have_live_inferiors ()) | |
3919 | { | |
3920 | target_non_stop_enabled_1 = target_non_stop_enabled; | |
3921 | error (_("Cannot change this setting while the inferior is running.")); | |
3922 | } | |
3923 | ||
3924 | target_non_stop_enabled = target_non_stop_enabled_1; | |
3925 | } | |
3926 | ||
3927 | /* Implementation of "maint show target-non-stop". */ | |
3928 | ||
3929 | static void | |
3930 | maint_show_target_non_stop_command (struct ui_file *file, int from_tty, | |
3931 | struct cmd_list_element *c, | |
3932 | const char *value) | |
3933 | { | |
3934 | if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO) | |
3935 | fprintf_filtered (file, | |
3936 | _("Whether the target is always in non-stop mode " | |
3937 | "is %s (currently %s).\n"), value, | |
3938 | target_always_non_stop_p () ? "on" : "off"); | |
3939 | else | |
3940 | fprintf_filtered (file, | |
3941 | _("Whether the target is always in non-stop mode " | |
3942 | "is %s.\n"), value); | |
3943 | } | |
3944 | ||
d914c394 SS |
3945 | /* Temporary copies of permission settings. */ |
3946 | ||
3947 | static int may_write_registers_1 = 1; | |
3948 | static int may_write_memory_1 = 1; | |
3949 | static int may_insert_breakpoints_1 = 1; | |
3950 | static int may_insert_tracepoints_1 = 1; | |
3951 | static int may_insert_fast_tracepoints_1 = 1; | |
3952 | static int may_stop_1 = 1; | |
3953 | ||
3954 | /* Make the user-set values match the real values again. */ | |
3955 | ||
3956 | void | |
3957 | update_target_permissions (void) | |
3958 | { | |
3959 | may_write_registers_1 = may_write_registers; | |
3960 | may_write_memory_1 = may_write_memory; | |
3961 | may_insert_breakpoints_1 = may_insert_breakpoints; | |
3962 | may_insert_tracepoints_1 = may_insert_tracepoints; | |
3963 | may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints; | |
3964 | may_stop_1 = may_stop; | |
3965 | } | |
3966 | ||
3967 | /* The one function handles (most of) the permission flags in the same | |
3968 | way. */ | |
3969 | ||
3970 | static void | |
3971 | set_target_permissions (char *args, int from_tty, | |
3972 | struct cmd_list_element *c) | |
3973 | { | |
3974 | if (target_has_execution) | |
3975 | { | |
3976 | update_target_permissions (); | |
3977 | error (_("Cannot change this setting while the inferior is running.")); | |
3978 | } | |
3979 | ||
3980 | /* Make the real values match the user-changed values. */ | |
3981 | may_write_registers = may_write_registers_1; | |
3982 | may_insert_breakpoints = may_insert_breakpoints_1; | |
3983 | may_insert_tracepoints = may_insert_tracepoints_1; | |
3984 | may_insert_fast_tracepoints = may_insert_fast_tracepoints_1; | |
3985 | may_stop = may_stop_1; | |
3986 | update_observer_mode (); | |
3987 | } | |
3988 | ||
3989 | /* Set memory write permission independently of observer mode. */ | |
3990 | ||
3991 | static void | |
3992 | set_write_memory_permission (char *args, int from_tty, | |
3993 | struct cmd_list_element *c) | |
3994 | { | |
3995 | /* Make the real values match the user-changed values. */ | |
3996 | may_write_memory = may_write_memory_1; | |
3997 | update_observer_mode (); | |
3998 | } | |
3999 | ||
4000 | ||
c906108c | 4001 | void |
fba45db2 | 4002 | initialize_targets (void) |
c906108c SS |
4003 | { |
4004 | init_dummy_target (); | |
4005 | push_target (&dummy_target); | |
4006 | ||
4007 | add_info ("target", target_info, targ_desc); | |
4008 | add_info ("files", target_info, targ_desc); | |
4009 | ||
ccce17b0 | 4010 | add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\ |
85c07804 AC |
4011 | Set target debugging."), _("\ |
4012 | Show target debugging."), _("\ | |
333dabeb | 4013 | When non-zero, target debugging is enabled. Higher numbers are more\n\ |
3cecbbbe TT |
4014 | verbose."), |
4015 | set_targetdebug, | |
ccce17b0 YQ |
4016 | show_targetdebug, |
4017 | &setdebuglist, &showdebuglist); | |
3a11626d | 4018 | |
2bc416ba | 4019 | add_setshow_boolean_cmd ("trust-readonly-sections", class_support, |
7915a72c AC |
4020 | &trust_readonly, _("\ |
4021 | Set mode for reading from readonly sections."), _("\ | |
4022 | Show mode for reading from readonly sections."), _("\ | |
3a11626d MS |
4023 | When this mode is on, memory reads from readonly sections (such as .text)\n\ |
4024 | will be read from the object file instead of from the target. This will\n\ | |
7915a72c | 4025 | result in significant performance improvement for remote targets."), |
2c5b56ce | 4026 | NULL, |
920d2a44 | 4027 | show_trust_readonly, |
e707bbc2 | 4028 | &setlist, &showlist); |
96baa820 JM |
4029 | |
4030 | add_com ("monitor", class_obscure, do_monitor_command, | |
1bedd215 | 4031 | _("Send a command to the remote monitor (remote targets only).")); |
96baa820 | 4032 | |
87680a14 JB |
4033 | add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack, |
4034 | _("Print the name of each layer of the internal target stack."), | |
4035 | &maintenanceprintlist); | |
4036 | ||
c6ebd6cf VP |
4037 | add_setshow_boolean_cmd ("target-async", no_class, |
4038 | &target_async_permitted_1, _("\ | |
4039 | Set whether gdb controls the inferior in asynchronous mode."), _("\ | |
4040 | Show whether gdb controls the inferior in asynchronous mode."), _("\ | |
4041 | Tells gdb whether to control the inferior in asynchronous mode."), | |
329ea579 PA |
4042 | maint_set_target_async_command, |
4043 | maint_show_target_async_command, | |
4044 | &maintenance_set_cmdlist, | |
4045 | &maintenance_show_cmdlist); | |
c6ebd6cf | 4046 | |
fbea99ea PA |
4047 | add_setshow_auto_boolean_cmd ("target-non-stop", no_class, |
4048 | &target_non_stop_enabled_1, _("\ | |
4049 | Set whether gdb always controls the inferior in non-stop mode."), _("\ | |
4050 | Show whether gdb always controls the inferior in non-stop mode."), _("\ | |
4051 | Tells gdb whether to control the inferior in non-stop mode."), | |
4052 | maint_set_target_non_stop_command, | |
4053 | maint_show_target_non_stop_command, | |
4054 | &maintenance_set_cmdlist, | |
4055 | &maintenance_show_cmdlist); | |
4056 | ||
d914c394 SS |
4057 | add_setshow_boolean_cmd ("may-write-registers", class_support, |
4058 | &may_write_registers_1, _("\ | |
4059 | Set permission to write into registers."), _("\ | |
4060 | Show permission to write into registers."), _("\ | |
4061 | When this permission is on, GDB may write into the target's registers.\n\ | |
4062 | Otherwise, any sort of write attempt will result in an error."), | |
4063 | set_target_permissions, NULL, | |
4064 | &setlist, &showlist); | |
4065 | ||
4066 | add_setshow_boolean_cmd ("may-write-memory", class_support, | |
4067 | &may_write_memory_1, _("\ | |
4068 | Set permission to write into target memory."), _("\ | |
4069 | Show permission to write into target memory."), _("\ | |
4070 | When this permission is on, GDB may write into the target's memory.\n\ | |
4071 | Otherwise, any sort of write attempt will result in an error."), | |
4072 | set_write_memory_permission, NULL, | |
4073 | &setlist, &showlist); | |
4074 | ||
4075 | add_setshow_boolean_cmd ("may-insert-breakpoints", class_support, | |
4076 | &may_insert_breakpoints_1, _("\ | |
4077 | Set permission to insert breakpoints in the target."), _("\ | |
4078 | Show permission to insert breakpoints in the target."), _("\ | |
4079 | When this permission is on, GDB may insert breakpoints in the program.\n\ | |
4080 | Otherwise, any sort of insertion attempt will result in an error."), | |
4081 | set_target_permissions, NULL, | |
4082 | &setlist, &showlist); | |
4083 | ||
4084 | add_setshow_boolean_cmd ("may-insert-tracepoints", class_support, | |
4085 | &may_insert_tracepoints_1, _("\ | |
4086 | Set permission to insert tracepoints in the target."), _("\ | |
4087 | Show permission to insert tracepoints in the target."), _("\ | |
4088 | When this permission is on, GDB may insert tracepoints in the program.\n\ | |
4089 | Otherwise, any sort of insertion attempt will result in an error."), | |
4090 | set_target_permissions, NULL, | |
4091 | &setlist, &showlist); | |
4092 | ||
4093 | add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support, | |
4094 | &may_insert_fast_tracepoints_1, _("\ | |
4095 | Set permission to insert fast tracepoints in the target."), _("\ | |
4096 | Show permission to insert fast tracepoints in the target."), _("\ | |
4097 | When this permission is on, GDB may insert fast tracepoints.\n\ | |
4098 | Otherwise, any sort of insertion attempt will result in an error."), | |
4099 | set_target_permissions, NULL, | |
4100 | &setlist, &showlist); | |
4101 | ||
4102 | add_setshow_boolean_cmd ("may-interrupt", class_support, | |
4103 | &may_stop_1, _("\ | |
4104 | Set permission to interrupt or signal the target."), _("\ | |
4105 | Show permission to interrupt or signal the target."), _("\ | |
4106 | When this permission is on, GDB may interrupt/stop the target's execution.\n\ | |
4107 | Otherwise, any attempt to interrupt or stop will be ignored."), | |
4108 | set_target_permissions, NULL, | |
4109 | &setlist, &showlist); | |
6a3cb8e8 PA |
4110 | |
4111 | add_setshow_boolean_cmd ("auto-connect-native-target", class_support, | |
4112 | &auto_connect_native_target, _("\ | |
4113 | Set whether GDB may automatically connect to the native target."), _("\ | |
4114 | Show whether GDB may automatically connect to the native target."), _("\ | |
4115 | When on, and GDB is not connected to a target yet, GDB\n\ | |
4116 | attempts \"run\" and other commands with the native target."), | |
4117 | NULL, show_auto_connect_native_target, | |
4118 | &setlist, &showlist); | |
c906108c | 4119 | } |