/* SPU target-dependent code for GDB, the GNU debugger.
   Copyright (C) 2006-2015 Free Software Foundation, Inc.

   Contributed by Ulrich Weigand <[email protected]>.
   Based on a port by Sid Manning <[email protected]>.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "arch-utils.h"
#include "gdbtypes.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "frame.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "symtab.h"
#include "symfile.h"
#include "value.h"
#include "inferior.h"
#include "dis-asm.h"
#include "objfiles.h"
#include "language.h"
#include "regcache.h"
#include "reggroups.h"
#include "floatformat.h"
#include "block.h"
#include "observer.h"
#include "infcall.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "ax.h"
#include "spu-tdep.h"


/* The list of available "set spu " and "show spu " commands.  */
static struct cmd_list_element *setspucmdlist = NULL;
static struct cmd_list_element *showspucmdlist = NULL;

/* Whether to stop for new SPE contexts.  */
static int spu_stop_on_load_p = 0;
/* Whether to automatically flush the SW-managed cache.  */
static int spu_auto_flush_cache_p = 1;


/* The tdep structure.  */
struct gdbarch_tdep
{
  /* The spufs ID identifying our address space.  */
  int id;

  /* SPU-specific vector type.  */
  struct type *spu_builtin_type_vec128;
};


/* SPU-specific vector type.  */
static struct type *
spu_builtin_type_vec128 (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (!tdep->spu_builtin_type_vec128)
    {
      const struct builtin_type *bt = builtin_type (gdbarch);
      struct type *t;

      t = arch_composite_type (gdbarch,
                               "__spu_builtin_type_vec128", TYPE_CODE_UNION);
      append_composite_type_field (t, "uint128", bt->builtin_int128);
      append_composite_type_field (t, "v2_int64",
                                   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "v4_int32",
                                   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "v8_int16",
                                   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "v16_int8",
                                   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "v2_double",
                                   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (t, "v4_float",
                                   init_vector_type (bt->builtin_float, 4));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "spu_builtin_type_vec128";

      tdep->spu_builtin_type_vec128 = t;
    }

  return tdep->spu_builtin_type_vec128;
}


/* The list of available "info spu " commands.  */
static struct cmd_list_element *infospucmdlist = NULL;

/* Registers.  */

static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  static char *register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };

  if (reg_nr < 0)
    return NULL;
  if (reg_nr >= sizeof register_names / sizeof *register_names)
    return NULL;

  return register_names[reg_nr];
}

static struct type *
spu_register_type (struct gdbarch *gdbarch, int reg_nr)
{
  if (reg_nr < SPU_NUM_GPRS)
    return spu_builtin_type_vec128 (gdbarch);

  switch (reg_nr)
    {
    case SPU_ID_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_PC_REGNUM:
      return builtin_type (gdbarch)->builtin_func_ptr;

    case SPU_SP_REGNUM:
      return builtin_type (gdbarch)->builtin_data_ptr;

    case SPU_FPSCR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint128;

    case SPU_SRR0_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_LSLR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_DECR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_DECR_STATUS_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}

/* Pseudo registers for preferred slots - stack pointer.  */

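/* The srr0, lslr, decr and decr_status pseudo registers have no raw
   counterpart in the regcache; they are accessed through the
   TARGET_OBJECT_SPU interface, whose annex "<id>/<regname>" yields the
   current value as an ASCII hexadecimal string.  The two helpers below
   convert between that representation and the 32-bit binary form GDB
   uses.  */
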
static enum register_status
spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
                              gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum register_status status;
  gdb_byte reg[32];
  char annex[32];
  ULONGEST id;
  ULONGEST ul;

  status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  if (status != REG_VALID)
    return status;
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  memset (reg, 0, sizeof reg);
  target_read (&current_target, TARGET_OBJECT_SPU, annex,
               reg, 0, sizeof reg);

  ul = strtoulst ((char *) reg, NULL, 16);
  store_unsigned_integer (buf, 4, byte_order, ul);
  return REG_VALID;
}

static enum register_status
spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
                          int regnum, gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;
  enum register_status status;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      status = regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      if (status != REG_VALID)
        return status;
      memcpy (buf, reg, 4);
      return status;

    case SPU_FPSCR_REGNUM:
      status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      if (status != REG_VALID)
        return status;
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      return status;

    case SPU_SRR0_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "srr0", buf);

    case SPU_LSLR_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "lslr", buf);

    case SPU_DECR_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "decr", buf);

    case SPU_DECR_STATUS_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "decr_status", buf);

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}

static void
spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
                               const gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  char reg[32];
  char annex[32];
  ULONGEST id;

  regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  xsnprintf (reg, sizeof reg, "0x%s",
             phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
  target_write (&current_target, TARGET_OBJECT_SPU, annex,
                (gdb_byte *) reg, 0, strlen (reg));
}

static void
spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
                           int regnum, const gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (reg, buf, 4);
      regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
      break;

    case SPU_FPSCR_REGNUM:
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    case SPU_SRR0_REGNUM:
      spu_pseudo_register_write_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}

static int
spu_ax_pseudo_register_collect (struct gdbarch *gdbarch,
                                struct agent_expr *ax, int regnum)
{
  switch (regnum)
    {
    case SPU_SP_REGNUM:
      ax_reg_mask (ax, SPU_RAW_SP_REGNUM);
      return 0;

    case SPU_FPSCR_REGNUM:
    case SPU_SRR0_REGNUM:
    case SPU_LSLR_REGNUM:
    case SPU_DECR_REGNUM:
    case SPU_DECR_STATUS_REGNUM:
      return -1;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}

static int
spu_ax_pseudo_register_push_stack (struct gdbarch *gdbarch,
                                   struct agent_expr *ax, int regnum)
{
  switch (regnum)
    {
    case SPU_SP_REGNUM:
      ax_reg (ax, SPU_RAW_SP_REGNUM);
      return 0;

    case SPU_FPSCR_REGNUM:
    case SPU_SRR0_REGNUM:
    case SPU_LSLR_REGNUM:
    case SPU_DECR_REGNUM:
    case SPU_DECR_STATUS_REGNUM:
      return -1;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}


/* Value conversion -- access scalar values at the preferred slot.  */

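/* Scalars live in the "preferred slot" of a 128-bit SPU register, i.e.
   the left-most four bytes; values shorter than four bytes occupy the
   low-order bytes of that word.  Adjusting the value offset by 4 - LEN
   below makes GDB read and write scalars at that location.  */
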
361 | spu_value_from_register (struct gdbarch *gdbarch, struct type *type, | |
362 | int regnum, struct frame_id frame_id) | |
363 | { | |
364 | struct value *value = default_value_from_register (gdbarch, type, | |
365 | regnum, frame_id); | |
366 | int len = TYPE_LENGTH (type); | |
367 | ||
368 | if (regnum < SPU_NUM_GPRS && len < 16) | |
369 | { | |
370 | int preferred_slot = len < 4 ? 4 - len : 0; | |
371 | set_value_offset (value, preferred_slot); | |
372 | } | |
373 | ||
374 | return value; | |
375 | } | |
376 | ||
377 | /* Register groups. */ | |
378 | ||
379 | static int | |
380 | spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum, | |
381 | struct reggroup *group) | |
382 | { | |
383 | /* Registers displayed via 'info regs'. */ | |
384 | if (group == general_reggroup) | |
385 | return 1; | |
386 | ||
387 | /* Registers displayed via 'info float'. */ | |
388 | if (group == float_reggroup) | |
389 | return 0; | |
390 | ||
391 | /* Registers that need to be saved/restored in order to | |
392 | push or pop frames. */ | |
393 | if (group == save_reggroup || group == restore_reggroup) | |
394 | return 1; | |
395 | ||
396 | return default_register_reggroup_p (gdbarch, regnum, group); | |
397 | } | |
398 | ||
399 | /* DWARF-2 register numbers. */ | |
400 | ||
401 | static int | |
402 | spu_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg) | |
403 | { | |
404 | /* Use cooked instead of raw SP. */ | |
405 | return (reg == SPU_RAW_SP_REGNUM)? SPU_SP_REGNUM : reg; | |
406 | } | |
407 | ||
408 | ||
409 | /* Address handling. */ | |
410 | ||
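/* A CORE_ADDR referring to SPU local store carries the spufs context ID
   of the SPU it belongs to; the SPUADDR, SPUADDR_SPU and SPUADDR_ADDR
   macros from spu-tdep.h combine and split the two parts.  This keeps
   the local stores of multiple SPE contexts apart within GDB's single
   flat address space.  */
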
static int
spu_gdbarch_id (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int id = tdep->id;

  /* The objfile architecture of a standalone SPU executable does not
     provide an SPU ID.  Retrieve it from the objfile's relocated
     address range in this special case.  */
  if (id == -1
      && symfile_objfile && symfile_objfile->obfd
      && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu
      && symfile_objfile->sections != symfile_objfile->sections_end)
    id = SPUADDR_SPU (obj_section_addr (symfile_objfile->sections));

  return id;
}

static int
spu_address_class_type_flags (int byte_size, int dwarf2_addr_class)
{
  if (dwarf2_addr_class == 1)
    return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
  else
    return 0;
}

static const char *
spu_address_class_type_flags_to_name (struct gdbarch *gdbarch, int type_flags)
{
  if (type_flags & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1)
    return "__ea";
  else
    return NULL;
}

static int
spu_address_class_name_to_type_flags (struct gdbarch *gdbarch,
                                      const char *name, int *type_flags_ptr)
{
  if (strcmp (name, "__ea") == 0)
    {
      *type_flags_ptr = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
      return 1;
    }
  else
    return 0;
}

static void
spu_address_to_pointer (struct gdbarch *gdbarch,
                        struct type *type, gdb_byte *buf, CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  store_unsigned_integer (buf, TYPE_LENGTH (type), byte_order,
                          SPUADDR_ADDR (addr));
}

static CORE_ADDR
spu_pointer_to_address (struct gdbarch *gdbarch,
                        struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  ULONGEST addr
    = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);

  /* Do not convert __ea pointers.  */
  if (TYPE_ADDRESS_CLASS_1 (type))
    return addr;

  return addr ? SPUADDR (id, addr) : 0;
}

static CORE_ADDR
spu_integer_to_address (struct gdbarch *gdbarch,
                        struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  ULONGEST addr = unpack_long (type, buf);

  return SPUADDR (id, addr);
}


/* Decoding SPU instructions.  */

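/* Helper predicates for the instruction formats used below.  Counting
   bit 0 as the most significant bit of the 32-bit instruction word, the
   decoders extract their fields as follows (immediates are sign-extended):

     RR:   opcode 0-10, RB 11-17, RA 18-24, RT 25-31
     RRR:  opcode 0-3,  RT 4-10,  RB 11-17, RA 18-24, RC 25-31
     RI7:  opcode 0-10, I7 11-17, RA 18-24, RT 25-31
     RI10: opcode 0-7,  I10 8-17, RA 18-24, RT 25-31
     RI16: opcode 0-8,  I16 9-24, RT 25-31
     RI18: opcode 0-6,  I18 7-24, RT 25-31  */
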
enum
{
  op_lqd = 0x34,
  op_lqx = 0x3c4,
  op_lqa = 0x61,
  op_lqr = 0x67,
  op_stqd = 0x24,
  op_stqx = 0x144,
  op_stqa = 0x41,
  op_stqr = 0x47,

  op_il = 0x081,
  op_ila = 0x21,
  op_a = 0x0c0,
  op_ai = 0x1c,

  op_selb = 0x8,

  op_br = 0x64,
  op_bra = 0x60,
  op_brsl = 0x66,
  op_brasl = 0x62,
  op_brnz = 0x42,
  op_brz = 0x40,
  op_brhnz = 0x46,
  op_brhz = 0x44,
  op_bi = 0x1a8,
  op_bisl = 0x1a9,
  op_biz = 0x128,
  op_binz = 0x129,
  op_bihz = 0x12a,
  op_bihnz = 0x12b,
};

static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      return 1;
    }

  return 0;
}

static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) == op)
    {
      *rt = (insn >> 21) & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      *rc = insn & 127;
      return 1;
    }

  return 0;
}

static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
      return 1;
    }

  return 0;
}

static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
      return 1;
    }

  return 0;
}

static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) == op)
    {
      *rt = insn & 127;
      *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
      return 1;
    }

  return 0;
}

static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) == op)
    {
      *rt = insn & 127;
      *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
      return 1;
    }

  return 0;
}

static int
is_branch (unsigned int insn, int *offset, int *reg)
{
  int rt, i7, i16;

  if (is_ri16 (insn, op_br, &rt, &i16)
      || is_ri16 (insn, op_brsl, &rt, &i16)
      || is_ri16 (insn, op_brnz, &rt, &i16)
      || is_ri16 (insn, op_brz, &rt, &i16)
      || is_ri16 (insn, op_brhnz, &rt, &i16)
      || is_ri16 (insn, op_brhz, &rt, &i16))
    {
      *reg = SPU_PC_REGNUM;
      *offset = i16 << 2;
      return 1;
    }

  if (is_ri16 (insn, op_bra, &rt, &i16)
      || is_ri16 (insn, op_brasl, &rt, &i16))
    {
      *reg = -1;
      *offset = i16 << 2;
      return 1;
    }

  if (is_ri7 (insn, op_bi, &rt, reg, &i7)
      || is_ri7 (insn, op_bisl, &rt, reg, &i7)
      || is_ri7 (insn, op_biz, &rt, reg, &i7)
      || is_ri7 (insn, op_binz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
    {
      *offset = 0;
      return 1;
    }

  return 0;
}


/* Prolog parsing.  */

struct spu_prologue_data
{
  /* Stack frame size.  -1 if analysis was unsuccessful.  */
  int size;

  /* How to find the CFA.  The CFA is equal to SP at function entry.  */
  int cfa_reg;
  int cfa_offset;

  /* Offset relative to CFA where a register is saved.  -1 if invalid.  */
  int reg_offset[SPU_NUM_GPRS];
};

static CORE_ADDR
spu_analyze_prologue (struct gdbarch *gdbarch,
                      CORE_ADDR start_pc, CORE_ADDR end_pc,
                      struct spu_prologue_data *data)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int found_sp = 0;
  int found_fp = 0;
  int found_lr = 0;
  int found_bc = 0;
  int reg_immed[SPU_NUM_GPRS];
  gdb_byte buf[16];
  CORE_ADDR prolog_pc = start_pc;
  CORE_ADDR pc;
  int i;


  /* Initialize DATA to default values.  */
  data->size = -1;

  data->cfa_reg = SPU_RAW_SP_REGNUM;
  data->cfa_offset = 0;

  for (i = 0; i < SPU_NUM_GPRS; i++)
    data->reg_offset[i] = -1;

  /* Set up REG_IMMED array.  This is non-zero for a register if we know its
     preferred slot currently holds this immediate value.  */
  for (i = 0; i < SPU_NUM_GPRS; i++)
    reg_immed[i] = 0;

  /* Scan instructions until the first branch.

     The following instructions are important prolog components:

       - The first instruction to set up the stack pointer.
       - The first instruction to set up the frame pointer.
       - The first instruction to save the link register.
       - The first instruction to save the backchain.

     We return the instruction after the latest of these four,
     or the incoming PC if none is found.  The first instruction
     to set up the stack pointer also defines the frame size.

     Note that instructions saving incoming arguments to their stack
     slots are not counted as important, because they are hard to
     identify with certainty.  This should not matter much, because
     arguments are relevant only in code compiled with debug data,
     and in such code the GDB core will advance until the first source
     line anyway, using SAL data.

     For purposes of stack unwinding, we analyze the following types
     of instructions in addition:

       - Any instruction adding to the current frame pointer.
       - Any instruction loading an immediate constant into a register.
       - Any instruction storing a register onto the stack.

     These are used to compute the CFA and REG_OFFSET output.  */
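
  /* For illustration: a typical frame-establishing sequence that this
     scan is meant to recognize could look like

	 stqd  $0, 16($1)    # save link register
	 stqd  $1, -FS($1)   # save back chain
	 ai    $1, $1, -FS   # allocate frame of FS bytes

     where $0 is the link register, $1 the raw stack pointer, and FS a
     placeholder frame size; actual register usage and instruction order
     vary with the compiler.  */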

  for (pc = start_pc; pc < end_pc; pc += 4)
    {
      unsigned int insn;
      int rt, ra, rb, rc, immed;

      if (target_read_memory (pc, buf, 4))
        break;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      /* AI is the typical instruction to set up a stack frame.
         It is also used to initialize the frame pointer.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
        {
          if (rt == data->cfa_reg && ra == data->cfa_reg)
            data->cfa_offset -= immed;

          if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
              && !found_sp)
            {
              found_sp = 1;
              prolog_pc = pc + 4;

              data->size = -immed;
            }
          else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
                   && !found_fp)
            {
              found_fp = 1;
              prolog_pc = pc + 4;

              data->cfa_reg = SPU_FP_REGNUM;
              data->cfa_offset -= immed;
            }
        }

      /* A is used to set up stack frames of size >= 512 bytes.
         If we have tracked the contents of the addend register,
         we can handle this as well.  */
      else if (is_rr (insn, op_a, &rt, &ra, &rb))
        {
          if (rt == data->cfa_reg && ra == data->cfa_reg)
            {
              if (reg_immed[rb] != 0)
                data->cfa_offset -= reg_immed[rb];
              else
                data->cfa_reg = -1;  /* We don't know the CFA any more.  */
            }

          if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
              && !found_sp)
            {
              found_sp = 1;
              prolog_pc = pc + 4;

              if (reg_immed[rb] != 0)
                data->size = -reg_immed[rb];
            }
        }

      /* We need to track IL and ILA used to load immediate constants
         in case they are later used as input to an A instruction.  */
      else if (is_ri16 (insn, op_il, &rt, &immed))
        {
          reg_immed[rt] = immed;

          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      else if (is_ri18 (insn, op_ila, &rt, &immed))
        {
          reg_immed[rt] = immed & 0x3ffff;

          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      /* STQD is used to save registers to the stack.  */
      else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
        {
          if (ra == data->cfa_reg)
            data->reg_offset[rt] = data->cfa_offset - (immed << 4);

          if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
              && !found_lr)
            {
              found_lr = 1;
              prolog_pc = pc + 4;
            }

          if (ra == SPU_RAW_SP_REGNUM
              && (found_sp ? immed == 0 : rt == SPU_RAW_SP_REGNUM)
              && !found_bc)
            {
              found_bc = 1;
              prolog_pc = pc + 4;
            }
        }

      /* _start uses SELB to set up the stack pointer.  */
      else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
        {
          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      /* We terminate if we find a branch.  */
      else if (is_branch (insn, &immed, &ra))
        break;
    }


  /* If we successfully parsed until here, and didn't find any instruction
     modifying SP, we assume we have a frameless function.  */
  if (!found_sp)
    data->size = 0;

  /* Return cooked instead of raw SP.  */
  if (data->cfa_reg == SPU_RAW_SP_REGNUM)
    data->cfa_reg = SPU_SP_REGNUM;

  return prolog_pc;
}

/* Return the first instruction after the prologue starting at PC.  */
static CORE_ADDR
spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  struct spu_prologue_data data;
  return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
}

/* Return the frame pointer in use at address PC.  */
static void
spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
                           int *reg, LONGEST *offset)
{
  struct spu_prologue_data data;
  spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);

  if (data.size != -1 && data.cfa_reg != -1)
    {
      /* The 'frame pointer' address is CFA minus frame size.  */
      *reg = data.cfa_reg;
      *offset = data.cfa_offset - data.size;
    }
  else
    {
      /* ??? We don't really know ...  */
      *reg = SPU_SP_REGNUM;
      *offset = 0;
    }
}

/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.

   1) scan forward from the point of execution:
      a) If you find an instruction that modifies the stack pointer
         or transfers control (except a return), execution is not in
         an epilogue, return.
      b) Stop scanning if you find a return instruction or reach the
         end of the function or reach the hard limit for the size of
         an epilogue.
   2) scan backward from the point of execution:
      a) If you find an instruction that modifies the stack pointer,
         execution *is* in an epilogue, return.
      b) Stop scanning if you reach an instruction that transfers
         control or the beginning of the function or reach the hard
         limit for the size of an epilogue.  */

static int
spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
  bfd_byte buf[4];
  unsigned int insn;
  int rt, ra, rb, immed;

  /* Find the search limits based on function boundaries and hard limit.
     We assume the epilogue can be up to 64 instructions long.  */

  const int spu_max_epilogue_size = 64 * 4;

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  if (pc - func_start < spu_max_epilogue_size)
    epilogue_start = func_start;
  else
    epilogue_start = pc - spu_max_epilogue_size;

  if (func_end - pc < spu_max_epilogue_size)
    epilogue_end = func_end;
  else
    epilogue_end = pc + spu_max_epilogue_size;

  /* Scan forward until next 'bi $0'.  */

  for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
        return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
        {
          if (immed == 0 && ra == SPU_LR_REGNUM)
            break;

          return 0;
        }

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
          || is_rr (insn, op_a, &rt, &ra, &rb)
          || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
        {
          if (rt == SPU_RAW_SP_REGNUM)
            return 0;
        }
    }

  if (scan_pc >= epilogue_end)
    return 0;

  /* Scan backward until adjustment to stack pointer (R1).  */

  for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
        return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
        return 0;

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
          || is_rr (insn, op_a, &rt, &ra, &rb)
          || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
        {
          if (rt == SPU_RAW_SP_REGNUM)
            return 1;
        }
    }

  return 0;
}


/* Normal stack frames.  */

struct spu_unwind_cache
{
  CORE_ADDR func;
  CORE_ADDR frame_base;
  CORE_ADDR local_base;

  struct trad_frame_saved_reg *saved_regs;
};

static struct spu_unwind_cache *
spu_frame_unwind_cache (struct frame_info *this_frame,
                        void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct spu_unwind_cache *info;
  struct spu_prologue_data data;
  CORE_ADDR id = tdep->id;
  gdb_byte buf[16];

  if (*this_prologue_cache)
    return *this_prologue_cache;

  info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
  *this_prologue_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  info->frame_base = 0;
  info->local_base = 0;

  /* Find the start of the current function, and analyze its prologue.  */
  info->func = get_frame_func (this_frame);
  if (info->func == 0)
    {
      /* Fall back to using the current PC as frame ID.  */
      info->func = get_frame_pc (this_frame);
      data.size = -1;
    }
  else
    spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
                          &data);

  /* If successful, use prologue analysis data.  */
  if (data.size != -1 && data.cfa_reg != -1)
    {
      CORE_ADDR cfa;
      int i;

      /* Determine CFA via unwound CFA_REG plus CFA_OFFSET.  */
      get_frame_register (this_frame, data.cfa_reg, buf);
      cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
      cfa = SPUADDR (id, cfa);

      /* Call-saved register slots.  */
      for (i = 0; i < SPU_NUM_GPRS; i++)
        if (i == SPU_LR_REGNUM
            || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
          if (data.reg_offset[i] != -1)
            info->saved_regs[i].addr = cfa - data.reg_offset[i];

      /* Frame bases.  */
      info->frame_base = cfa;
      info->local_base = cfa - data.size;
    }

  /* Otherwise, fall back to reading the backchain link.  */
  else
    {
      CORE_ADDR reg;
      LONGEST backchain;
      ULONGEST lslr;
      int status;

      /* Get local store limit.  */
      lslr = get_frame_register_unsigned (this_frame, SPU_LSLR_REGNUM);
      if (!lslr)
        lslr = (ULONGEST) -1;

      /* Get the backchain.  */
      reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
      status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
                                         &backchain);

      /* A zero backchain terminates the frame chain.  Also, sanity
         check against the local store size limit.  */
      if (status && backchain > 0 && backchain <= lslr)
        {
          /* Assume the link register is saved into its slot.  */
          if (backchain + 16 <= lslr)
            info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id,
                                                            backchain + 16);

          /* Frame bases.  */
          info->frame_base = SPUADDR (id, backchain);
          info->local_base = SPUADDR (id, reg);
        }
    }

  /* If we didn't find a frame, we cannot determine SP / return address.  */
  if (info->frame_base == 0)
    return info;

  /* The previous SP is equal to the CFA.  */
  trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM,
                        SPUADDR_ADDR (info->frame_base));

  /* Read full contents of the unwound link register in order to
     be able to determine the return address.  */
  if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
    target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
  else
    get_frame_register (this_frame, SPU_LR_REGNUM, buf);

  /* Normally, the return address is contained in the slot 0 of the
     link register, and slots 1-3 are zero.  For an overlay return,
     slot 0 contains the address of the overlay manager return stub,
     slot 1 contains the partition number of the overlay section to
     be returned to, and slot 2 contains the return address within
     that section.  Return the latter address in that case.  */
  if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
                          extract_unsigned_integer (buf + 8, 4, byte_order));
  else
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
                          extract_unsigned_integer (buf, 4, byte_order));

  return info;
}

static void
spu_frame_this_id (struct frame_info *this_frame,
                   void **this_prologue_cache, struct frame_id *this_id)
{
  struct spu_unwind_cache *info =
    spu_frame_unwind_cache (this_frame, this_prologue_cache);

  if (info->frame_base == 0)
    return;

  *this_id = frame_id_build (info->frame_base, info->func);
}

static struct value *
spu_frame_prev_register (struct frame_info *this_frame,
                         void **this_prologue_cache, int regnum)
{
  struct spu_unwind_cache *info
    = spu_frame_unwind_cache (this_frame, this_prologue_cache);

  /* Special-case the stack pointer.  */
  if (regnum == SPU_RAW_SP_REGNUM)
    regnum = SPU_SP_REGNUM;

  return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
}

static const struct frame_unwind spu_frame_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  spu_frame_this_id,
  spu_frame_prev_register,
  NULL,
  default_frame_sniffer
};

static CORE_ADDR
spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
{
  struct spu_unwind_cache *info
    = spu_frame_unwind_cache (this_frame, this_cache);
  return info->local_base;
}

static const struct frame_base spu_frame_base = {
  &spu_frame_unwind,
  spu_frame_base_address,
  spu_frame_base_address,
  spu_frame_base_address
};

static CORE_ADDR
spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
  /* Mask off interrupt enable bit.  */
  return SPUADDR (tdep->id, pc & -4);
}

static CORE_ADDR
spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR sp = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
  return SPUADDR (tdep->id, sp);
}

static CORE_ADDR
spu_read_pc (struct regcache *regcache)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_regcache_arch (regcache));
  ULONGEST pc;
  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
  /* Mask off interrupt enable bit.  */
  return SPUADDR (tdep->id, pc & -4);
}

static void
spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  /* Keep interrupt enabled state unchanged.  */
  ULONGEST old_pc;

  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
  regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
                                  (SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
}


/* Cell/B.E. cross-architecture unwinder support.  */

struct spu2ppu_cache
{
  struct frame_id frame_id;
  struct regcache *regcache;
};

static struct gdbarch *
spu2ppu_prev_arch (struct frame_info *this_frame, void **this_cache)
{
  struct spu2ppu_cache *cache = *this_cache;
  return get_regcache_arch (cache->regcache);
}

static void
spu2ppu_this_id (struct frame_info *this_frame,
                 void **this_cache, struct frame_id *this_id)
{
  struct spu2ppu_cache *cache = *this_cache;
  *this_id = cache->frame_id;
}

static struct value *
spu2ppu_prev_register (struct frame_info *this_frame,
                       void **this_cache, int regnum)
{
  struct spu2ppu_cache *cache = *this_cache;
  struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
  gdb_byte *buf;

  buf = alloca (register_size (gdbarch, regnum));
  regcache_cooked_read (cache->regcache, regnum, buf);
  return frame_unwind_got_bytes (this_frame, regnum, buf);
}

static int
spu2ppu_sniffer (const struct frame_unwind *self,
                 struct frame_info *this_frame, void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR base, func, backchain;
  gdb_byte buf[4];

  if (gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_spu)
    return 0;

  base = get_frame_sp (this_frame);
  func = get_frame_pc (this_frame);
  if (target_read_memory (base, buf, 4))
    return 0;
  backchain = extract_unsigned_integer (buf, 4, byte_order);

  if (!backchain)
    {
      struct frame_info *fi;

      struct spu2ppu_cache *cache
        = FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache);

      cache->frame_id = frame_id_build (base + 16, func);

      for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
        if (gdbarch_bfd_arch_info (get_frame_arch (fi))->arch != bfd_arch_spu)
          break;

      if (fi)
        {
          cache->regcache = frame_save_as_regcache (fi);
          *this_prologue_cache = cache;
          return 1;
        }
      else
        {
          struct regcache *regcache;
          regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
          cache->regcache = regcache_dup (regcache);
          *this_prologue_cache = cache;
          return 1;
        }
    }

  return 0;
}

static void
spu2ppu_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct spu2ppu_cache *cache = this_cache;
  regcache_xfree (cache->regcache);
}

static const struct frame_unwind spu2ppu_unwind = {
  ARCH_FRAME,
  default_frame_unwind_stop_reason,
  spu2ppu_this_id,
  spu2ppu_prev_register,
  NULL,
  spu2ppu_sniffer,
  spu2ppu_dealloc_cache,
  spu2ppu_prev_arch,
};


/* Function calling convention.  */

static CORE_ADDR
spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & ~15;
}

static CORE_ADDR
spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
                     struct value **args, int nargs, struct type *value_type,
                     CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
                     struct regcache *regcache)
{
  /* Allocate space sufficient for a breakpoint, keeping the stack aligned.  */
  sp = (sp - 4) & ~15;
  /* Store the address of that breakpoint.  */
  *bp_addr = sp;
  /* The call starts at the callee's entry point.  */
  *real_pc = funaddr;

  return sp;
}

static int
spu_scalar_value_p (struct type *type)
{
  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
      return TYPE_LENGTH (type) <= 16;

    default:
      return 0;
    }
}

static void
spu_value_to_regcache (struct regcache *regcache, int regnum,
                       struct type *type, const gdb_byte *in)
{
  int len = TYPE_LENGTH (type);

  if (spu_scalar_value_p (type))
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
    }
  else
    {
      while (len >= 16)
        {
          regcache_cooked_write (regcache, regnum++, in);
          in += 16;
          len -= 16;
        }

      if (len > 0)
        regcache_cooked_write_part (regcache, regnum, 0, len, in);
    }
}

static void
spu_regcache_to_value (struct regcache *regcache, int regnum,
                       struct type *type, gdb_byte *out)
{
  int len = TYPE_LENGTH (type);

  if (spu_scalar_value_p (type))
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
    }
  else
    {
      while (len >= 16)
        {
          regcache_cooked_read (regcache, regnum++, out);
          out += 16;
          len -= 16;
        }

      if (len > 0)
        regcache_cooked_read_part (regcache, regnum, 0, len, out);
    }
}

static CORE_ADDR
spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                     struct regcache *regcache, CORE_ADDR bp_addr,
                     int nargs, struct value **args, CORE_ADDR sp,
                     int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR sp_delta;
  int i;
  int regnum = SPU_ARG1_REGNUM;
  int stack_arg = -1;
  gdb_byte buf[16];

  /* Set the return address.  */
  memset (buf, 0, sizeof buf);
  store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (bp_addr));
  regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);

  /* If STRUCT_RETURN is true, then the struct return address (in
     STRUCT_ADDR) will consume the first argument-passing register.
     Both adjust the register count and store that value.  */
  if (struct_return)
    {
      memset (buf, 0, sizeof buf);
      store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (struct_addr));
      regcache_cooked_write (regcache, regnum++, buf);
    }

  /* Fill in argument registers.  */
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = check_typedef (value_type (arg));
      const gdb_byte *contents = value_contents (arg);
      int n_regs = align_up (TYPE_LENGTH (type), 16) / 16;

      /* If the argument doesn't wholly fit into registers, it and
         all subsequent arguments go to the stack.  */
      if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
        {
          stack_arg = i;
          break;
        }

      spu_value_to_regcache (regcache, regnum, type, contents);
      regnum += n_regs;
    }

  /* Overflow arguments go to the stack.  */
  if (stack_arg != -1)
    {
      CORE_ADDR ap;

      /* Allocate all required stack size.  */
      for (i = stack_arg; i < nargs; i++)
        {
          struct type *type = check_typedef (value_type (args[i]));
          sp -= align_up (TYPE_LENGTH (type), 16);
        }

      /* Fill in stack arguments.  */
      ap = sp;
      for (i = stack_arg; i < nargs; i++)
        {
          struct value *arg = args[i];
          struct type *type = check_typedef (value_type (arg));
          int len = TYPE_LENGTH (type);
          int preferred_slot;

          if (spu_scalar_value_p (type))
            preferred_slot = len < 4 ? 4 - len : 0;
          else
            preferred_slot = 0;

          target_write_memory (ap + preferred_slot, value_contents (arg), len);
          ap += align_up (TYPE_LENGTH (type), 16);
        }
    }

  /* Allocate stack frame header.  */
  sp -= 32;

  /* Store stack back chain.  */
  regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
  target_write_memory (sp, buf, 16);

  /* Finally, update all slots of the SP register.  */
  sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
  for (i = 0; i < 4; i++)
    {
      CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
    }
  regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);

  return sp;
}

static struct frame_id
spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
  CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
  return frame_id_build (SPUADDR (tdep->id, sp), SPUADDR (tdep->id, pc & -4));
}

/* Function return value access.  */

static enum return_value_convention
spu_return_value (struct gdbarch *gdbarch, struct value *function,
                  struct type *type, struct regcache *regcache,
                  gdb_byte *out, const gdb_byte *in)
{
  struct type *func_type = function ? value_type (function) : NULL;
  enum return_value_convention rvc;
  int opencl_vector = 0;

  if (func_type)
    {
      func_type = check_typedef (func_type);

      if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
        func_type = check_typedef (TYPE_TARGET_TYPE (func_type));

      if (TYPE_CODE (func_type) == TYPE_CODE_FUNC
          && TYPE_CALLING_CONVENTION (func_type) == DW_CC_GDB_IBM_OpenCL
          && TYPE_CODE (type) == TYPE_CODE_ARRAY
          && TYPE_VECTOR (type))
        opencl_vector = 1;
    }

  if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
    rvc = RETURN_VALUE_REGISTER_CONVENTION;
  else
    rvc = RETURN_VALUE_STRUCT_CONVENTION;

  if (in)
    {
      switch (rvc)
        {
        case RETURN_VALUE_REGISTER_CONVENTION:
          if (opencl_vector && TYPE_LENGTH (type) == 2)
            regcache_cooked_write_part (regcache, SPU_ARG1_REGNUM, 2, 2, in);
          else
            spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
          break;

        case RETURN_VALUE_STRUCT_CONVENTION:
          error (_("Cannot set function return value."));
          break;
        }
    }
  else if (out)
    {
      switch (rvc)
        {
        case RETURN_VALUE_REGISTER_CONVENTION:
          if (opencl_vector && TYPE_LENGTH (type) == 2)
            regcache_cooked_read_part (regcache, SPU_ARG1_REGNUM, 2, 2, out);
          else
            spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
          break;

        case RETURN_VALUE_STRUCT_CONVENTION:
          error (_("Function return value unknown."));
          break;
        }
    }

  return rvc;
}


/* Breakpoints.  */

static const gdb_byte *
spu_breakpoint_from_pc (struct gdbarch *gdbarch,
                        CORE_ADDR *pcptr, int *lenptr)
{
  static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };

  *lenptr = sizeof breakpoint;
  return breakpoint;
}

static int
spu_memory_remove_breakpoint (struct gdbarch *gdbarch,
                              struct bp_target_info *bp_tgt)
{
  /* We work around a problem in combined Cell/B.E. debugging here.  Consider
     that in a combined application, we have some breakpoints inserted in SPU
     code, and now the application forks (on the PPU side).  GDB common code
     will assume that the fork system call copied all breakpoints into the new
     process' address space, and that all those copies now need to be removed
     (see breakpoint.c:detach_breakpoints).

     While this is certainly true for PPU side breakpoints, it is not true
     for SPU side breakpoints.  fork will clone the SPU context file
     descriptors, so that all the existing SPU contexts are accessible
     in the new process.  However, the contents of the SPU contexts themselves
     are *not* cloned.  Therefore the effect of detach_breakpoints is to
     remove SPU breakpoints from the *original* SPU context's local store
     -- this is not the correct behaviour.

     The workaround is to check whether the PID we are asked to remove this
     breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
     PID of the current inferior (i.e. current_inferior ()->pid).  This is only
     true in the context of detach_breakpoints.  If so, we simply do nothing.
     [ Note that for the fork child process, it does not matter if breakpoints
     remain inserted, because those SPU contexts are not runnable anyway --
     the Linux kernel allows only the original process to invoke spu_run.  ]  */

  if (ptid_get_pid (inferior_ptid) != current_inferior ()->pid)
    return 0;

  return default_memory_remove_breakpoint (gdbarch, bp_tgt);
}


/* Software single-stepping support.  */

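/* Single-stepping is implemented purely in software: a breakpoint is
   planted on the sequential successor of the current instruction and,
   if that instruction is a branch, on the branch target as well.  */
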
1621 | static int | |
1622 | spu_software_single_step (struct frame_info *frame) | |
1623 | { | |
1624 | struct gdbarch *gdbarch = get_frame_arch (frame); | |
1625 | struct address_space *aspace = get_frame_address_space (frame); | |
1626 | enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); | |
1627 | CORE_ADDR pc, next_pc; | |
1628 | unsigned int insn; | |
1629 | int offset, reg; | |
1630 | gdb_byte buf[4]; | |
1631 | ULONGEST lslr; | |
1632 | ||
1633 | pc = get_frame_pc (frame); | |
1634 | ||
1635 | if (target_read_memory (pc, buf, 4)) | |
1636 | return 1; | |
1637 | insn = extract_unsigned_integer (buf, 4, byte_order); | |
1638 | ||
1639 | /* Get local store limit. */ | |
1640 | lslr = get_frame_register_unsigned (frame, SPU_LSLR_REGNUM); | |
1641 | if (!lslr) | |
1642 | lslr = (ULONGEST) -1; | |
1643 | ||
1644 | /* Next sequential instruction is at PC + 4, except if the current | |
1645 | instruction is a PPE-assisted call, in which case it is at PC + 8. | |
1646 | Wrap around LS limit to be on the safe side. */ | |
1647 | if ((insn & 0xffffff00) == 0x00002100) | |
1648 | next_pc = (SPUADDR_ADDR (pc) + 8) & lslr; | |
1649 | else | |
1650 | next_pc = (SPUADDR_ADDR (pc) + 4) & lslr; | |
1651 | ||
1652 | insert_single_step_breakpoint (gdbarch, | |
1653 | aspace, SPUADDR (SPUADDR_SPU (pc), next_pc)); | |
1654 | ||
1655 | if (is_branch (insn, &offset, ®)) | |
1656 | { | |
1657 | CORE_ADDR target = offset; | |
1658 | ||
1659 | if (reg == SPU_PC_REGNUM) | |
1660 | target += SPUADDR_ADDR (pc); | |
1661 | else if (reg != -1) | |
1662 | { | |
1663 | int optim, unavail; | |
1664 | ||
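| /* The branch target comes from a register.  SPU instructions are | |
| four bytes long, so the low two bits are masked off ("& -4") | |
| below to yield a word-aligned target address. */ | |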
1665 | if (get_frame_register_bytes (frame, reg, 0, 4, buf, | |
1666 | &optim, &unavail)) | |
1667 | target += extract_unsigned_integer (buf, 4, byte_order) & -4; | |
1668 | else | |
1669 | { | |
1670 | if (optim) | |
1671 | throw_error (OPTIMIZED_OUT_ERROR, | |
1672 | _("Could not determine address of " | |
1673 | "single-step breakpoint.")); | |
1674 | if (unavail) | |
1675 | throw_error (NOT_AVAILABLE_ERROR, | |
1676 | _("Could not determine address of " | |
1677 | "single-step breakpoint.")); | |
1678 | } | |
1679 | } | |
1680 | ||
1681 | target = target & lslr; | |
1682 | if (target != next_pc) | |
1683 | insert_single_step_breakpoint (gdbarch, aspace, | |
1684 | SPUADDR (SPUADDR_SPU (pc), target)); | |
1685 | } | |
1686 | ||
1687 | return 1; | |
1688 | } | |
1689 | ||
1690 | ||
1691 | /* Longjmp support. */ | |
1692 | ||
1693 | static int | |
1694 | spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc) | |
1695 | { | |
1696 | struct gdbarch *gdbarch = get_frame_arch (frame); | |
1697 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
1698 | enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); | |
1699 | gdb_byte buf[4]; | |
1700 | CORE_ADDR jb_addr; | |
1701 | int optim, unavail; | |
1702 | ||
1703 | /* Jump buffer is pointed to by the argument register $r3. */ | |
1704 | if (!get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf, | |
1705 | &optim, &unavail)) | |
1706 | return 0; | |
1707 | ||
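| /* The saved PC is presumably the first word of the jump buffer; | |
| read it through this SPU context's local-store address space. */ | |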
1708 | jb_addr = extract_unsigned_integer (buf, 4, byte_order); | |
1709 | if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4)) | |
1710 | return 0; | |
1711 | ||
1712 | *pc = extract_unsigned_integer (buf, 4, byte_order); | |
1713 | *pc = SPUADDR (tdep->id, *pc); | |
1714 | return 1; | |
1715 | } | |
1716 | ||
1717 | ||
1718 | /* Disassembler. */ | |
1719 | ||
1720 | struct spu_dis_asm_data | |
1721 | { | |
1722 | struct gdbarch *gdbarch; | |
1723 | int id; | |
1724 | }; | |
1725 | ||
1726 | static void | |
1727 | spu_dis_asm_print_address (bfd_vma addr, struct disassemble_info *info) | |
1728 | { | |
1729 | struct spu_dis_asm_data *data = info->application_data; | |
1730 | print_address (data->gdbarch, SPUADDR (data->id, addr), info->stream); | |
1731 | } | |
1732 | ||
1733 | static int | |
1734 | gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info) | |
1735 | { | |
1736 | /* The opcodes disassembler does 18-bit address arithmetic. Make | |
1737 | sure the SPU ID encoded in the high bits is added back when we | |
1738 | call print_address. */ | |
1739 | struct disassemble_info spu_info = *info; | |
1740 | struct spu_dis_asm_data data; | |
1741 | data.gdbarch = info->application_data; | |
1742 | data.id = SPUADDR_SPU (memaddr); | |
1743 | ||
1744 | spu_info.application_data = &data; | |
1745 | spu_info.print_address_func = spu_dis_asm_print_address; | |
1746 | return print_insn_spu (memaddr, &spu_info); | |
1747 | } | |
1748 | ||
1749 | ||
1750 | /* Target overlays for the SPU overlay manager. | |
1751 | ||
1752 | See the documentation of simple_overlay_update for how the | |
1753 | interface is supposed to work. | |
1754 | ||
1755 | Data structures used by the overlay manager: | |
1756 | ||
1757 | struct ovly_table | |
1758 | { | |
1759 | u32 vma; | |
1760 | u32 size; | |
1761 | u32 pos; | |
1762 | u32 buf; | |
1763 | } _ovly_table[]; -- one entry per overlay section | |
1764 | ||
1765 | struct ovly_buf_table | |
1766 | { | |
1767 | u32 mapped; | |
1768 | } _ovly_buf_table[]; -- one entry per overlay buffer | |
1769 | ||
1770 | _ovly_table should never change. | |
1771 | ||
1772 | Both tables are aligned to a 16-byte boundary; the symbols | |
1773 | _ovly_table and _ovly_buf_table are of type STT_OBJECT and their | |
1774 | size is set to the size of the respective array. buf in _ovly_table is | |
1775 | an index into _ovly_buf_table. | |
1776 | ||
1777 | mapped is an index into _ovly_table. Both the mapped and buf indices start | |
1778 | from one to reference the first entry in their respective tables. */ | |
1779 | ||
1780 | /* Using the per-objfile private data mechanism, we store for each | |
1781 | objfile an array of "struct spu_overlay_table" structures, one | |
1782 | for each obj_section of the objfile. This structure holds two | |
1783 | fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this | |
1784 | is *not* an overlay section. If it is non-zero, it represents | |
1785 | a target address. The overlay section is mapped iff the target | |
1786 | integer at this location equals MAPPED_VAL. */ | |
1787 | ||
1788 | static const struct objfile_data *spu_overlay_data; | |
1789 | ||
1790 | struct spu_overlay_table | |
1791 | { | |
1792 | CORE_ADDR mapped_ptr; | |
1793 | CORE_ADDR mapped_val; | |
1794 | }; | |
1795 | ||
1796 | /* Retrieve the overlay table for OBJFILE. If not already cached, read | |
1797 | the _ovly_table data structure from the target and initialize the | |
1798 | spu_overlay_table data structure from it. */ | |
1799 | static struct spu_overlay_table * | |
1800 | spu_get_overlay_table (struct objfile *objfile) | |
1801 | { | |
1802 | enum bfd_endian byte_order = bfd_big_endian (objfile->obfd) ? | |
1803 | BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE; | |
1804 | struct bound_minimal_symbol ovly_table_msym, ovly_buf_table_msym; | |
1805 | CORE_ADDR ovly_table_base, ovly_buf_table_base; | |
1806 | unsigned ovly_table_size, ovly_buf_table_size; | |
1807 | struct spu_overlay_table *tbl; | |
1808 | struct obj_section *osect; | |
1809 | gdb_byte *ovly_table; | |
1810 | int i; | |
1811 | ||
1812 | tbl = objfile_data (objfile, spu_overlay_data); | |
1813 | if (tbl) | |
1814 | return tbl; | |
1815 | ||
1816 | ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile); | |
1817 | if (!ovly_table_msym.minsym) | |
1818 | return NULL; | |
1819 | ||
1820 | ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", | |
1821 | NULL, objfile); | |
1822 | if (!ovly_buf_table_msym.minsym) | |
1823 | return NULL; | |
1824 | ||
1825 | ovly_table_base = BMSYMBOL_VALUE_ADDRESS (ovly_table_msym); | |
1826 | ovly_table_size = MSYMBOL_SIZE (ovly_table_msym.minsym); | |
1827 | ||
1828 | ovly_buf_table_base = BMSYMBOL_VALUE_ADDRESS (ovly_buf_table_msym); | |
1829 | ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym.minsym); | |
1830 | ||
1831 | ovly_table = xmalloc (ovly_table_size); | |
1832 | read_memory (ovly_table_base, ovly_table, ovly_table_size); | |
1833 | ||
1834 | tbl = OBSTACK_CALLOC (&objfile->objfile_obstack, | |
1835 | objfile->sections_end - objfile->sections, | |
1836 | struct spu_overlay_table); | |
1837 | ||
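| /* Each _ovly_table entry consists of four u32 fields (vma, size, | |
| pos, buf), i.e. 16 bytes; see the layout in the comment above. */ | |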
1838 | for (i = 0; i < ovly_table_size / 16; i++) | |
1839 | { | |
1840 | CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0, | |
1841 | 4, byte_order); | |
1842 | CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4, | |
1843 | 4, byte_order); | |
1844 | CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8, | |
1845 | 4, byte_order); | |
1846 | CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12, | |
1847 | 4, byte_order); | |
1848 | ||
1849 | if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size) | |
1850 | continue; | |
1851 | ||
1852 | ALL_OBJFILE_OSECTIONS (objfile, osect) | |
1853 | if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section) | |
1854 | && pos == osect->the_bfd_section->filepos) | |
1855 | { | |
1856 | int ndx = osect - objfile->sections; | |
1857 | tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4; | |
1858 | tbl[ndx].mapped_val = i + 1; | |
1859 | break; | |
1860 | } | |
1861 | } | |
1862 | ||
1863 | xfree (ovly_table); | |
1864 | set_objfile_data (objfile, spu_overlay_data, tbl); | |
1865 | return tbl; | |
1866 | } | |
1867 | ||
1868 | /* Read the _ovly_buf_table entry from the target to determine whether | |
1869 | OSECT is currently mapped, and update the mapped state. */ | |
1870 | static void | |
1871 | spu_overlay_update_osect (struct obj_section *osect) | |
1872 | { | |
1873 | enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd) ? | |
1874 | BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE; | |
1875 | struct spu_overlay_table *ovly_table; | |
1876 | CORE_ADDR id, val; | |
1877 | ||
1878 | ovly_table = spu_get_overlay_table (osect->objfile); | |
1879 | if (!ovly_table) | |
1880 | return; | |
1881 | ||
1882 | ovly_table += osect - osect->objfile->sections; | |
1883 | if (ovly_table->mapped_ptr == 0) | |
1884 | return; | |
1885 | ||
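| /* MAPPED_PTR is a local-store address, so qualify it with the SPU | |
| ID of this section's context before reading the target word. */ | |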
1886 | id = SPUADDR_SPU (obj_section_addr (osect)); | |
1887 | val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr), | |
1888 | 4, byte_order); | |
1889 | osect->ovly_mapped = (val == ovly_table->mapped_val); | |
1890 | } | |
1891 | ||
1892 | /* If OSECT is NULL, then update all sections' mapped state. | |
1893 | If OSECT is non-NULL, then update only OSECT's mapped state. */ | |
1894 | static void | |
1895 | spu_overlay_update (struct obj_section *osect) | |
1896 | { | |
1897 | /* Just one section. */ | |
1898 | if (osect) | |
1899 | spu_overlay_update_osect (osect); | |
1900 | ||
1901 | /* All sections. */ | |
1902 | else | |
1903 | { | |
1904 | struct objfile *objfile; | |
1905 | ||
1906 | ALL_OBJSECTIONS (objfile, osect) | |
1907 | if (section_is_overlay (osect)) | |
1908 | spu_overlay_update_osect (osect); | |
1909 | } | |
1910 | } | |
1911 | ||
1912 | /* Whenever a new objfile is loaded, read the target's _ovly_table. | |
1913 | If there is one, go through all sections and make sure that for | |
1914 | non-overlay sections LMA equals VMA, while for overlay sections | |
1915 | LMA is set to SPU_OVERLAY_LMA plus the section's file position. */ | |
1916 | static void | |
1917 | spu_overlay_new_objfile (struct objfile *objfile) | |
1918 | { | |
1919 | struct spu_overlay_table *ovly_table; | |
1920 | struct obj_section *osect; | |
1921 | ||
1922 | /* If we've already touched this file, do nothing. */ | |
1923 | if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL) | |
1924 | return; | |
1925 | ||
1926 | /* Consider only SPU objfiles. */ | |
1927 | if (bfd_get_arch (objfile->obfd) != bfd_arch_spu) | |
1928 | return; | |
1929 | ||
1930 | /* Check if this objfile has overlays. */ | |
1931 | ovly_table = spu_get_overlay_table (objfile); | |
1932 | if (!ovly_table) | |
1933 | return; | |
1934 | ||
1935 | /* Now go and fiddle with all the LMAs. */ | |
1936 | ALL_OBJFILE_OSECTIONS (objfile, osect) | |
1937 | { | |
1938 | bfd *obfd = objfile->obfd; | |
1939 | asection *bsect = osect->the_bfd_section; | |
1940 | int ndx = osect - objfile->sections; | |
1941 | ||
1942 | if (ovly_table[ndx].mapped_ptr == 0) | |
1943 | bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect); | |
1944 | else | |
1945 | bfd_section_lma (obfd, bsect) = SPU_OVERLAY_LMA + bsect->filepos; | |
1946 | } | |
1947 | } | |
1948 | ||
1949 | ||
1950 | /* Insert temporary breakpoint on "main" function of newly loaded | |
1951 | SPE context OBJFILE. */ | |
1952 | static void | |
1953 | spu_catch_start (struct objfile *objfile) | |
1954 | { | |
1955 | struct bound_minimal_symbol minsym; | |
1956 | struct compunit_symtab *cust; | |
1957 | CORE_ADDR pc; | |
1958 | char buf[32]; | |
1959 | ||
1960 | /* Do this only if requested by "set spu stop-on-load on". */ | |
1961 | if (!spu_stop_on_load_p) | |
1962 | return; | |
1963 | ||
1964 | /* Consider only SPU objfiles. */ | |
1965 | if (!objfile || bfd_get_arch (objfile->obfd) != bfd_arch_spu) | |
1966 | return; | |
1967 | ||
1968 | /* The main objfile is handled differently. */ | |
1969 | if (objfile == symfile_objfile) | |
1970 | return; | |
1971 | ||
1972 | /* There can be multiple symbols named "main".  Search for the | |
1973 | "main" symbol in *this* objfile. */ | |
1974 | minsym = lookup_minimal_symbol ("main", NULL, objfile); | |
1975 | if (!minsym.minsym) | |
1976 | return; | |
1977 | ||
1978 | /* If we have debugging information, try to use it -- this | |
1979 | will allow us to properly skip the prologue. */ | |
1980 | pc = BMSYMBOL_VALUE_ADDRESS (minsym); | |
1981 | cust | |
1982 | = find_pc_sect_compunit_symtab (pc, MSYMBOL_OBJ_SECTION (minsym.objfile, | |
1983 | minsym.minsym)); | |
1984 | if (cust != NULL) | |
1985 | { | |
1986 | const struct blockvector *bv = COMPUNIT_BLOCKVECTOR (cust); | |
1987 | struct block *block = BLOCKVECTOR_BLOCK (bv, GLOBAL_BLOCK); | |
1988 | struct symbol *sym; | |
1989 | struct symtab_and_line sal; | |
1990 | ||
1991 | sym = block_lookup_symbol (block, "main", VAR_DOMAIN); | |
1992 | if (sym) | |
1993 | { | |
1994 | fixup_symbol_section (sym, objfile); | |
1995 | sal = find_function_start_sal (sym, 1); | |
1996 | pc = sal.pc; | |
1997 | } | |
1998 | } | |
1999 | ||
2000 | /* Use a numerical address for the create_breakpoint call to avoid having | |
2001 | the breakpoint re-set incorrectly. */ | |
2002 | xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc)); | |
2003 | create_breakpoint (get_objfile_arch (objfile), buf /* arg */, | |
2004 | NULL /* cond_string */, -1 /* thread */, | |
2005 | NULL /* extra_string */, | |
2006 | 0 /* parse_condition_and_thread */, 1 /* tempflag */, | |
2007 | bp_breakpoint /* type_wanted */, | |
2008 | 0 /* ignore_count */, | |
2009 | AUTO_BOOLEAN_FALSE /* pending_break_support */, | |
2010 | &bkpt_breakpoint_ops /* ops */, 0 /* from_tty */, | |
2011 | 1 /* enabled */, 0 /* internal */, 0); | |
2012 | } | |
2013 | ||
2014 | ||
2015 | /* Look up OBJFILE loaded into FRAME's SPU context. */ | |
2016 | static struct objfile * | |
2017 | spu_objfile_from_frame (struct frame_info *frame) | |
2018 | { | |
2019 | struct gdbarch *gdbarch = get_frame_arch (frame); | |
2020 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
2021 | struct objfile *obj; | |
2022 | ||
2023 | if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu) | |
2024 | return NULL; | |
2025 | ||
2026 | ALL_OBJFILES (obj) | |
2027 | { | |
2028 | if (obj->sections != obj->sections_end | |
2029 | && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id) | |
2030 | return obj; | |
2031 | } | |
2032 | ||
2033 | return NULL; | |
2034 | } | |
2035 | ||
2036 | /* Flush the software-managed cache used for EA pointer accesses, if present. */ | |
2037 | static void | |
2038 | flush_ea_cache (void) | |
2039 | { | |
2040 | struct bound_minimal_symbol msymbol; | |
2041 | struct objfile *obj; | |
2042 | ||
2043 | if (!has_stack_frames ()) | |
2044 | return; | |
2045 | ||
2046 | obj = spu_objfile_from_frame (get_current_frame ()); | |
2047 | if (obj == NULL) | |
2048 | return; | |
2049 | ||
2050 | /* Lookup inferior function __cache_flush. */ | |
2051 | msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj); | |
2052 | if (msymbol.minsym != NULL) | |
2053 | { | |
2054 | struct type *type; | |
2055 | CORE_ADDR addr; | |
2056 | ||
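| /* Build a pointer-to-function-returning-void type for | |
| __cache_flush and call it in the inferior with no arguments. */ | |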
2057 | type = objfile_type (obj)->builtin_void; | |
2058 | type = lookup_function_type (type); | |
2059 | type = lookup_pointer_type (type); | |
2060 | addr = BMSYMBOL_VALUE_ADDRESS (msymbol); | |
2061 | ||
2062 | call_function_by_hand (value_from_pointer (type, addr), 0, NULL); | |
2063 | } | |
2064 | } | |
2065 | ||
2066 | /* This handler is called when the inferior has stopped.  If it stopped | |
2067 | while executing SPU code, flush the EA cache if one is in use. */ | |
2068 | static void | |
2069 | spu_attach_normal_stop (struct bpstats *bs, int print_frame) | |
2070 | { | |
2071 | if (!spu_auto_flush_cache_p) | |
2072 | return; | |
2073 | ||
2074 | /* Temporarily reset spu_auto_flush_cache_p to avoid recursively | |
2075 | re-entering this function when __cache_flush stops. */ | |
2076 | spu_auto_flush_cache_p = 0; | |
2077 | flush_ea_cache (); | |
2078 | spu_auto_flush_cache_p = 1; | |
2079 | } | |
2080 | ||
2081 | ||
2082 | /* "info spu" commands. */ | |
2083 | ||
2084 | static void | |
2085 | info_spu_event_command (char *args, int from_tty) | |
2086 | { | |
2087 | struct frame_info *frame = get_selected_frame (NULL); | |
2088 | ULONGEST event_status = 0; | |
2089 | ULONGEST event_mask = 0; | |
2090 | struct cleanup *chain; | |
2091 | gdb_byte buf[100]; | |
2092 | char annex[32]; | |
2093 | LONGEST len; | |
2094 | int id; | |
2095 | ||
2096 | if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu) | |
2097 | error (_("\"info spu\" is only supported on the SPU architecture.")); | |
2098 | ||
2099 | id = get_frame_register_unsigned (frame, SPU_ID_REGNUM); | |
2100 | ||
2101 | xsnprintf (annex, sizeof annex, "%d/event_status", id); | |
2102 | len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, | |
2103 | buf, 0, (sizeof (buf) - 1)); | |
2104 | if (len <= 0) | |
2105 | error (_("Could not read event_status.")); | |
2106 | buf[len] = '\0'; | |
2107 | event_status = strtoulst ((char *) buf, NULL, 16); | |
2108 | ||
2109 | xsnprintf (annex, sizeof annex, "%d/event_mask", id); | |
2110 | len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, | |
2111 | buf, 0, (sizeof (buf) - 1)); | |
2112 | if (len <= 0) | |
2113 | error (_("Could not read event_mask.")); | |
2114 | buf[len] = '\0'; | |
2115 | event_mask = strtoulst ((char *) buf, NULL, 16); | |
2116 | ||
2117 | chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoEvent"); | |
2118 | ||
2119 | if (ui_out_is_mi_like_p (current_uiout)) | |
2120 | { | |
2121 | ui_out_field_fmt (current_uiout, "event_status", | |
2122 | "0x%s", phex_nz (event_status, 4)); | |
2123 | ui_out_field_fmt (current_uiout, "event_mask", | |
2124 | "0x%s", phex_nz (event_mask, 4)); | |
2125 | } | |
2126 | else | |
2127 | { | |
2128 | printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4)); | |
2129 | printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4)); | |
2130 | } | |
2131 | ||
2132 | do_cleanups (chain); | |
2133 | } | |
2134 | ||
2135 | static void | |
2136 | info_spu_signal_command (char *args, int from_tty) | |
2137 | { | |
2138 | struct frame_info *frame = get_selected_frame (NULL); | |
2139 | struct gdbarch *gdbarch = get_frame_arch (frame); | |
2140 | enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); | |
2141 | ULONGEST signal1 = 0; | |
2142 | ULONGEST signal1_type = 0; | |
2143 | int signal1_pending = 0; | |
2144 | ULONGEST signal2 = 0; | |
2145 | ULONGEST signal2_type = 0; | |
2146 | int signal2_pending = 0; | |
2147 | struct cleanup *chain; | |
2148 | char annex[32]; | |
2149 | gdb_byte buf[100]; | |
2150 | LONGEST len; | |
2151 | int id; | |
2152 | ||
2153 | if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu) | |
2154 | error (_("\"info spu\" is only supported on the SPU architecture.")); | |
2155 | ||
2156 | id = get_frame_register_unsigned (frame, SPU_ID_REGNUM); | |
2157 | ||
2158 | xsnprintf (annex, sizeof annex, "%d/signal1", id); | |
2159 | len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, buf, 0, 4); | |
2160 | if (len < 0) | |
2161 | error (_("Could not read signal1.")); | |
2162 | else if (len == 4) | |
2163 | { | |
2164 | signal1 = extract_unsigned_integer (buf, 4, byte_order); | |
2165 | signal1_pending = 1; | |
2166 | } | |
2167 | ||
2168 | xsnprintf (annex, sizeof annex, "%d/signal1_type", id); | |
2169 | len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, | |
2170 | buf, 0, (sizeof (buf) - 1)); | |
2171 | if (len <= 0) | |
2172 | error (_("Could not read signal1_type.")); | |
2173 | buf[len] = '\0'; | |
2174 | signal1_type = strtoulst ((char *) buf, NULL, 16); | |
2175 | ||
2176 | xsnprintf (annex, sizeof annex, "%d/signal2", id); | |
2177 | len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, buf, 0, 4); | |
2178 | if (len < 0) | |
2179 | error (_("Could not read signal2.")); | |
2180 | else if (len == 4) | |
2181 | { | |
2182 | signal2 = extract_unsigned_integer (buf, 4, byte_order); | |
2183 | signal2_pending = 1; | |
2184 | } | |
2185 | ||
2186 | xsnprintf (annex, sizeof annex, "%d/signal2_type", id); | |
2187 | len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, | |
2188 | buf, 0, (sizeof (buf) - 1)); | |
2189 | if (len <= 0) | |
2190 | error (_("Could not read signal2_type.")); | |
2191 | buf[len] = '\0'; | |
2192 | signal2_type = strtoulst ((char *) buf, NULL, 16); | |
2193 | ||
2194 | chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoSignal"); | |
2195 | ||
2196 | if (ui_out_is_mi_like_p (current_uiout)) | |
2197 | { | |
2198 | ui_out_field_int (current_uiout, "signal1_pending", signal1_pending); | |
2199 | ui_out_field_fmt (current_uiout, "signal1", "0x%s", phex_nz (signal1, 4)); | |
2200 | ui_out_field_int (current_uiout, "signal1_type", signal1_type); | |
2201 | ui_out_field_int (current_uiout, "signal2_pending", signal2_pending); | |
2202 | ui_out_field_fmt (current_uiout, "signal2", "0x%s", phex_nz (signal2, 4)); | |
2203 | ui_out_field_int (current_uiout, "signal2_type", signal2_type); | |
2204 | } | |
2205 | else | |
2206 | { | |
2207 | if (signal1_pending) | |
2208 | printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4)); | |
2209 | else | |
2210 | printf_filtered (_("Signal 1 not pending ")); | |
2211 | ||
2212 | if (signal1_type) | |
2213 | printf_filtered (_("(Type Or)\n")); | |
2214 | else | |
2215 | printf_filtered (_("(Type Overwrite)\n")); | |
2216 | ||
2217 | if (signal2_pending) | |
2218 | printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4)); | |
2219 | else | |
2220 | printf_filtered (_("Signal 2 not pending ")); | |
2221 | ||
2222 | if (signal2_type) | |
2223 | printf_filtered (_("(Type Or)\n")); | |
2224 | else | |
2225 | printf_filtered (_("(Type Overwrite)\n")); | |
2226 | } | |
2227 | ||
2228 | do_cleanups (chain); | |
2229 | } | |
2230 | ||
2231 | static void | |
2232 | info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order, | |
2233 | const char *field, const char *msg) | |
2234 | { | |
2235 | struct cleanup *chain; | |
2236 | int i; | |
2237 | ||
2238 | if (nr <= 0) | |
2239 | return; | |
2240 | ||
2241 | chain = make_cleanup_ui_out_table_begin_end (current_uiout, 1, nr, "mbox"); | |
2242 | ||
2243 | ui_out_table_header (current_uiout, 32, ui_left, field, msg); | |
2244 | ui_out_table_body (current_uiout); | |
2245 | ||
2246 | for (i = 0; i < nr; i++) | |
2247 | { | |
2248 | struct cleanup *val_chain; | |
2249 | ULONGEST val; | |
2250 | val_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "mbox"); | |
2251 | val = extract_unsigned_integer (buf + 4*i, 4, byte_order); | |
2252 | ui_out_field_fmt (current_uiout, field, "0x%s", phex (val, 4)); | |
2253 | do_cleanups (val_chain); | |
2254 | ||
2255 | if (!ui_out_is_mi_like_p (current_uiout)) | |
2256 | printf_filtered ("\n"); | |
2257 | } | |
2258 | ||
2259 | do_cleanups (chain); | |
2260 | } | |
2261 | ||
2262 | static void | |
2263 | info_spu_mailbox_command (char *args, int from_tty) | |
2264 | { | |
2265 | struct frame_info *frame = get_selected_frame (NULL); | |
2266 | struct gdbarch *gdbarch = get_frame_arch (frame); | |
2267 | enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); | |
2268 | struct cleanup *chain; | |
2269 | char annex[32]; | |
2270 | gdb_byte buf[1024]; | |
2271 | LONGEST len; | |
2272 | int id; | |
2273 | ||
2274 | if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu) | |
2275 | error (_("\"info spu\" is only supported on the SPU architecture.")); | |
2276 | ||
2277 | id = get_frame_register_unsigned (frame, SPU_ID_REGNUM); | |
2278 | ||
2279 | chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoMailbox"); | |
2280 | ||
2281 | xsnprintf (annex, sizeof annex, "%d/mbox_info", id); | |
2282 | len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, | |
2283 | buf, 0, sizeof buf); | |
2284 | if (len < 0) | |
2285 | error (_("Could not read mbox_info.")); | |
2286 | ||
2287 | info_spu_mailbox_list (buf, len / 4, byte_order, | |
2288 | "mbox", "SPU Outbound Mailbox"); | |
2289 | ||
2290 | xsnprintf (annex, sizeof annex, "%d/ibox_info", id); | |
2291 | len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, | |
2292 | buf, 0, sizeof buf); | |
2293 | if (len < 0) | |
2294 | error (_("Could not read ibox_info.")); | |
2295 | ||
2296 | info_spu_mailbox_list (buf, len / 4, byte_order, | |
2297 | "ibox", "SPU Outbound Interrupt Mailbox"); | |
2298 | ||
2299 | xsnprintf (annex, sizeof annex, "%d/wbox_info", id); | |
2300 | len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, | |
2301 | buf, 0, sizeof buf); | |
2302 | if (len < 0) | |
2303 | error (_("Could not read wbox_info.")); | |
2304 | ||
2305 | info_spu_mailbox_list (buf, len / 4, byte_order, | |
2306 | "wbox", "SPU Inbound Mailbox"); | |
2307 | ||
2308 | do_cleanups (chain); | |
2309 | } | |
2310 | ||
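| /* Extract bits FIRST..LAST (inclusive) from the 64-bit WORD, using | |
| the big-endian bit numbering of the Cell register documentation, | |
| where bit 0 is the most significant bit.  For example, | |
| spu_mfc_get_bitfield (w, 0, 51) yields the upper 52 bits of W, and | |
| spu_mfc_get_bitfield (w, 63, 63) yields the least significant bit. */ | |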
2311 | static ULONGEST | |
2312 | spu_mfc_get_bitfield (ULONGEST word, int first, int last) | |
2313 | { | |
2314 | ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1)); | |
2315 | return (word >> (63 - last)) & mask; | |
2316 | } | |
2317 | ||
2318 | static void | |
2319 | info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order) | |
2320 | { | |
2321 | static char *spu_mfc_opcode[256] = | |
2322 | { | |
2323 | /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2324 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2325 | /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2326 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2327 | /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL, | |
2328 | "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL, | |
2329 | /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL, | |
2330 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2331 | /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL, | |
2332 | "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL, | |
2333 | /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2334 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2335 | /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2336 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2337 | /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2338 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2339 | /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL, | |
2340 | NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf", | |
2341 | /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2342 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2343 | /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL, | |
2344 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2345 | /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL, | |
2346 | "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2347 | /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2348 | "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL, | |
2349 | /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2350 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2351 | /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2352 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2353 | /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2354 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2355 | }; | |
2356 | ||
2357 | int *seq = alloca (nr * sizeof (int)); | |
2358 | int done = 0; | |
2359 | struct cleanup *chain; | |
2360 | int i, j; | |
2361 | ||
2362 | ||
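| /* The entries are emitted in dependency order: the DONE bitmask | |
| records already-listed entries (bit nr-1-j for entry J), and an | |
| entry is listed only after every entry named in its dependency | |
| bits (MFC_CQ_DW3 bits 0..nr-1) has been listed. */ | |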
2363 | /* Determine sequence in which to display (valid) entries. */ | |
2364 | for (i = 0; i < nr; i++) | |
2365 | { | |
2366 | /* Search for the first valid entry all of whose | |
2367 | dependencies are met. */ | |
2368 | for (j = 0; j < nr; j++) | |
2369 | { | |
2370 | ULONGEST mfc_cq_dw3; | |
2371 | ULONGEST dependencies; | |
2372 | ||
2373 | if (done & (1 << (nr - 1 - j))) | |
2374 | continue; | |
2375 | ||
2376 | mfc_cq_dw3 | |
2377 | = extract_unsigned_integer (buf + 32*j + 24, 8, byte_order); | |
2378 | if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16)) | |
2379 | continue; | |
2380 | ||
2381 | dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1); | |
2382 | if ((dependencies & done) != dependencies) | |
2383 | continue; | |
2384 | ||
2385 | seq[i] = j; | |
2386 | done |= 1 << (nr - 1 - j); | |
2387 | break; | |
2388 | } | |
2389 | ||
2390 | if (j == nr) | |
2391 | break; | |
2392 | } | |
2393 | ||
2394 | nr = i; | |
2395 | ||
2396 | ||
2397 | chain = make_cleanup_ui_out_table_begin_end (current_uiout, 10, nr, | |
2398 | "dma_cmd"); | |
2399 | ||
2400 | ui_out_table_header (current_uiout, 7, ui_left, "opcode", "Opcode"); | |
2401 | ui_out_table_header (current_uiout, 3, ui_left, "tag", "Tag"); | |
2402 | ui_out_table_header (current_uiout, 3, ui_left, "tid", "TId"); | |
2403 | ui_out_table_header (current_uiout, 3, ui_left, "rid", "RId"); | |
2404 | ui_out_table_header (current_uiout, 18, ui_left, "ea", "EA"); | |
2405 | ui_out_table_header (current_uiout, 7, ui_left, "lsa", "LSA"); | |
2406 | ui_out_table_header (current_uiout, 7, ui_left, "size", "Size"); | |
2407 | ui_out_table_header (current_uiout, 7, ui_left, "lstaddr", "LstAddr"); | |
2408 | ui_out_table_header (current_uiout, 7, ui_left, "lstsize", "LstSize"); | |
2409 | ui_out_table_header (current_uiout, 1, ui_left, "error_p", "E"); | |
2410 | ||
2411 | ui_out_table_body (current_uiout); | |
2412 | ||
2413 | for (i = 0; i < nr; i++) | |
2414 | { | |
2415 | struct cleanup *cmd_chain; | |
2416 | ULONGEST mfc_cq_dw0; | |
2417 | ULONGEST mfc_cq_dw1; | |
2418 | ULONGEST mfc_cq_dw2; | |
2419 | int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id; | |
2420 | int list_lsa, list_size, mfc_lsa, mfc_size; | |
2421 | ULONGEST mfc_ea; | |
2422 | int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p; | |
2423 | ||
2424 | /* Decode contents of MFC Command Queue Context Save/Restore Registers. | |
2425 | See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */ | |
2426 | ||
2427 | mfc_cq_dw0 | |
2428 | = extract_unsigned_integer (buf + 32*seq[i], 8, byte_order); | |
2429 | mfc_cq_dw1 | |
2430 | = extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order); | |
2431 | mfc_cq_dw2 | |
2432 | = extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order); | |
2433 | ||
2434 | list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14); | |
2435 | list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26); | |
2436 | mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34); | |
2437 | mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39); | |
2438 | list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40); | |
2439 | rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43); | |
2440 | tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46); | |
2441 | ||
2442 | mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12 | |
2443 | | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36); | |
2444 | ||
2445 | mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13); | |
2446 | mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24); | |
2447 | noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37); | |
2448 | qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38); | |
2449 | ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39); | |
2450 | cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40); | |
2451 | ||
2452 | cmd_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "cmd"); | |
2453 | ||
2454 | if (spu_mfc_opcode[mfc_cmd_opcode]) | |
2455 | ui_out_field_string (current_uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]); | |
2456 | else | |
2457 | ui_out_field_int (current_uiout, "opcode", mfc_cmd_opcode); | |
2458 | ||
2459 | ui_out_field_int (current_uiout, "tag", mfc_cmd_tag); | |
2460 | ui_out_field_int (current_uiout, "tid", tclass_id); | |
2461 | ui_out_field_int (current_uiout, "rid", rclass_id); | |
2462 | ||
2463 | if (ea_valid_p) | |
2464 | ui_out_field_fmt (current_uiout, "ea", "0x%s", phex (mfc_ea, 8)); | |
2465 | else | |
2466 | ui_out_field_skip (current_uiout, "ea"); | |
2467 | ||
2468 | ui_out_field_fmt (current_uiout, "lsa", "0x%05x", mfc_lsa << 4); | |
2469 | if (qw_valid_p) | |
2470 | ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size << 4); | |
2471 | else | |
2472 | ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size); | |
2473 | ||
2474 | if (list_valid_p) | |
2475 | { | |
2476 | ui_out_field_fmt (current_uiout, "lstaddr", "0x%05x", list_lsa << 3); | |
2477 | ui_out_field_fmt (current_uiout, "lstsize", "0x%05x", list_size << 3); | |
2478 | } | |
2479 | else | |
2480 | { | |
2481 | ui_out_field_skip (current_uiout, "lstaddr"); | |
2482 | ui_out_field_skip (current_uiout, "lstsize"); | |
2483 | } | |
2484 | ||
2485 | if (cmd_error_p) | |
2486 | ui_out_field_string (current_uiout, "error_p", "*"); | |
2487 | else | |
2488 | ui_out_field_skip (current_uiout, "error_p"); | |
2489 | ||
2490 | do_cleanups (cmd_chain); | |
2491 | ||
2492 | if (!ui_out_is_mi_like_p (current_uiout)) | |
2493 | printf_filtered ("\n"); | |
2494 | } | |
2495 | ||
2496 | do_cleanups (chain); | |
2497 | } | |
2498 | ||
2499 | static void | |
2500 | info_spu_dma_command (char *args, int from_tty) | |
2501 | { | |
2502 | struct frame_info *frame = get_selected_frame (NULL); | |
2503 | struct gdbarch *gdbarch = get_frame_arch (frame); | |
2504 | enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); | |
2505 | ULONGEST dma_info_type; | |
2506 | ULONGEST dma_info_mask; | |
2507 | ULONGEST dma_info_status; | |
2508 | ULONGEST dma_info_stall_and_notify; | |
2509 | ULONGEST dma_info_atomic_command_status; | |
2510 | struct cleanup *chain; | |
2511 | char annex[32]; | |
2512 | gdb_byte buf[1024]; | |
2513 | LONGEST len; | |
2514 | int id; | |
2515 | ||
2516 | if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu) | |
2517 | error (_("\"info spu\" is only supported on the SPU architecture.")); | |
2518 | ||
2519 | id = get_frame_register_unsigned (frame, SPU_ID_REGNUM); | |
2520 | ||
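| /* The dma_info annex provides five 8-byte status words (40 bytes) | |
| followed by 16 MFC command-queue entries of 32 bytes each. */ | |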
2521 | xsnprintf (annex, sizeof annex, "%d/dma_info", id); | |
2522 | len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, | |
2523 | buf, 0, 40 + 16 * 32); | |
2524 | if (len <= 0) | |
2525 | error (_("Could not read dma_info.")); | |
2526 | ||
2527 | dma_info_type | |
2528 | = extract_unsigned_integer (buf, 8, byte_order); | |
2529 | dma_info_mask | |
2530 | = extract_unsigned_integer (buf + 8, 8, byte_order); | |
2531 | dma_info_status | |
2532 | = extract_unsigned_integer (buf + 16, 8, byte_order); | |
2533 | dma_info_stall_and_notify | |
2534 | = extract_unsigned_integer (buf + 24, 8, byte_order); | |
2535 | dma_info_atomic_command_status | |
2536 | = extract_unsigned_integer (buf + 32, 8, byte_order); | |
2537 | ||
2538 | chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoDMA"); | |
2539 | ||
2540 | if (ui_out_is_mi_like_p (current_uiout)) | |
2541 | { | |
2542 | ui_out_field_fmt (current_uiout, "dma_info_type", "0x%s", | |
2543 | phex_nz (dma_info_type, 4)); | |
2544 | ui_out_field_fmt (current_uiout, "dma_info_mask", "0x%s", | |
2545 | phex_nz (dma_info_mask, 4)); | |
2546 | ui_out_field_fmt (current_uiout, "dma_info_status", "0x%s", | |
2547 | phex_nz (dma_info_status, 4)); | |
2548 | ui_out_field_fmt (current_uiout, "dma_info_stall_and_notify", "0x%s", | |
2549 | phex_nz (dma_info_stall_and_notify, 4)); | |
2550 | ui_out_field_fmt (current_uiout, "dma_info_atomic_command_status", "0x%s", | |
2551 | phex_nz (dma_info_atomic_command_status, 4)); | |
2552 | } | |
2553 | else | |
2554 | { | |
2555 | const char *query_msg = _("no query pending"); | |
2556 | ||
2557 | if (dma_info_type & 4) | |
2558 | switch (dma_info_type & 3) | |
2559 | { | |
2560 | case 1: query_msg = _("'any' query pending"); break; | |
2561 | case 2: query_msg = _("'all' query pending"); break; | |
2562 | default: query_msg = _("undefined query type"); break; | |
2563 | } | |
2564 | ||
2565 | printf_filtered (_("Tag-Group Status 0x%s\n"), | |
2566 | phex (dma_info_status, 4)); | |
2567 | printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"), | |
2568 | phex (dma_info_mask, 4), query_msg); | |
2569 | printf_filtered (_("Stall-and-Notify 0x%s\n"), | |
2570 | phex (dma_info_stall_and_notify, 4)); | |
2571 | printf_filtered (_("Atomic Cmd Status 0x%s\n"), | |
2572 | phex (dma_info_atomic_command_status, 4)); | |
2573 | printf_filtered ("\n"); | |
2574 | } | |
2575 | ||
2576 | info_spu_dma_cmdlist (buf + 40, 16, byte_order); | |
2577 | do_cleanups (chain); | |
2578 | } | |
2579 | ||
2580 | static void | |
2581 | info_spu_proxydma_command (char *args, int from_tty) | |
2582 | { | |
2583 | struct frame_info *frame = get_selected_frame (NULL); | |
2584 | struct gdbarch *gdbarch = get_frame_arch (frame); | |
2585 | enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); | |
2586 | ULONGEST dma_info_type; | |
2587 | ULONGEST dma_info_mask; | |
2588 | ULONGEST dma_info_status; | |
2589 | struct cleanup *chain; | |
2590 | char annex[32]; | |
2591 | gdb_byte buf[1024]; | |
2592 | LONGEST len; | |
2593 | int id; | |
2594 | ||
2595 | if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu) | |
2596 | error (_("\"info spu\" is only supported on the SPU architecture.")); | |
2597 | ||
2598 | id = get_frame_register_unsigned (frame, SPU_ID_REGNUM); | |
2599 | ||
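| /* The proxydma_info annex provides three 8-byte status words | |
| (24 bytes) followed by 8 proxy command-queue entries of 32 bytes | |
| each. */ | |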
2600 | xsnprintf (annex, sizeof annex, "%d/proxydma_info", id); | |
2601 | len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, | |
2602 | buf, 0, 24 + 8 * 32); | |
2603 | if (len <= 0) | |
2604 | error (_("Could not read proxydma_info.")); | |
2605 | ||
2606 | dma_info_type = extract_unsigned_integer (buf, 8, byte_order); | |
2607 | dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order); | |
2608 | dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order); | |
2609 | ||
2610 | chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, | |
2611 | "SPUInfoProxyDMA"); | |
2612 | ||
2613 | if (ui_out_is_mi_like_p (current_uiout)) | |
2614 | { | |
2615 | ui_out_field_fmt (current_uiout, "proxydma_info_type", "0x%s", | |
2616 | phex_nz (dma_info_type, 4)); | |
2617 | ui_out_field_fmt (current_uiout, "proxydma_info_mask", "0x%s", | |
2618 | phex_nz (dma_info_mask, 4)); | |
2619 | ui_out_field_fmt (current_uiout, "proxydma_info_status", "0x%s", | |
2620 | phex_nz (dma_info_status, 4)); | |
2621 | } | |
2622 | else | |
2623 | { | |
2624 | const char *query_msg; | |
2625 | ||
2626 | switch (dma_info_type & 3) | |
2627 | { | |
2628 | case 0: query_msg = _("no query pending"); break; | |
2629 | case 1: query_msg = _("'any' query pending"); break; | |
2630 | case 2: query_msg = _("'all' query pending"); break; | |
2631 | default: query_msg = _("undefined query type"); break; | |
2632 | } | |
2633 | ||
2634 | printf_filtered (_("Tag-Group Status 0x%s\n"), | |
2635 | phex (dma_info_status, 4)); | |
2636 | printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"), | |
2637 | phex (dma_info_mask, 4), query_msg); | |
2638 | printf_filtered ("\n"); | |
2639 | } | |
2640 | ||
2641 | info_spu_dma_cmdlist (buf + 24, 8, byte_order); | |
2642 | do_cleanups (chain); | |
2643 | } | |
2644 | ||
2645 | static void | |
2646 | info_spu_command (char *args, int from_tty) | |
2647 | { | |
2648 | printf_unfiltered (_("\"info spu\" must be followed by " | |
2649 | "the name of an SPU facility.\n")); | |
2650 | help_list (infospucmdlist, "info spu ", all_commands, gdb_stdout); | |
2651 | } | |
2652 | ||
2653 | ||
2654 | /* Root of all "set spu "/"show spu " commands. */ | |
2655 | ||
2656 | static void | |
2657 | show_spu_command (char *args, int from_tty) | |
2658 | { | |
2659 | help_list (showspucmdlist, "show spu ", all_commands, gdb_stdout); | |
2660 | } | |
2661 | ||
2662 | static void | |
2663 | set_spu_command (char *args, int from_tty) | |
2664 | { | |
2665 | help_list (setspucmdlist, "set spu ", all_commands, gdb_stdout); | |
2666 | } | |
2667 | ||
2668 | static void | |
2669 | show_spu_stop_on_load (struct ui_file *file, int from_tty, | |
2670 | struct cmd_list_element *c, const char *value) | |
2671 | { | |
2672 | fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"), | |
2673 | value); | |
2674 | } | |
2675 | ||
2676 | static void | |
2677 | show_spu_auto_flush_cache (struct ui_file *file, int from_tty, | |
2678 | struct cmd_list_element *c, const char *value) | |
2679 | { | |
2680 | fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"), | |
2681 | value); | |
2682 | } | |
2683 | ||
2684 | ||
2685 | /* Set up gdbarch struct. */ | |
2686 | ||
2687 | static struct gdbarch * | |
2688 | spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) | |
2689 | { | |
2690 | struct gdbarch *gdbarch; | |
2691 | struct gdbarch_tdep *tdep; | |
2692 | int id = -1; | |
2693 | ||
2694 | /* Which spufs ID was requested as address space? */ | |
2695 | if (info.tdep_info) | |
2696 | id = *(int *)info.tdep_info; | |
2697 | /* For objfile architectures of SPU solibs, decode the ID from the name. | |
2698 | This assumes the filename convention employed by solib-spu.c. */ | |
2699 | else if (info.abfd) | |
2700 | { | |
2701 | char *name = strrchr (info.abfd->filename, '@'); | |
2702 | if (name) | |
2703 | sscanf (name, "@0x%*x <%d>", &id); | |
2704 | } | |
2705 | ||
2706 | /* Find a candidate among extant architectures. */ | |
2707 | for (arches = gdbarch_list_lookup_by_info (arches, &info); | |
2708 | arches != NULL; | |
2709 | arches = gdbarch_list_lookup_by_info (arches->next, &info)) | |
2710 | { | |
2711 | tdep = gdbarch_tdep (arches->gdbarch); | |
2712 | if (tdep && tdep->id == id) | |
2713 | return arches->gdbarch; | |
2714 | } | |
2715 | ||
2716 | /* None found, so create a new architecture. */ | |
2717 | tdep = XCNEW (struct gdbarch_tdep); | |
2718 | tdep->id = id; | |
2719 | gdbarch = gdbarch_alloc (&info, tdep); | |
2720 | ||
2721 | /* Disassembler. */ | |
2722 | set_gdbarch_print_insn (gdbarch, gdb_print_insn_spu); | |
2723 | ||
2724 | /* Registers. */ | |
2725 | set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS); | |
2726 | set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS); | |
2727 | set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM); | |
2728 | set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM); | |
2729 | set_gdbarch_read_pc (gdbarch, spu_read_pc); | |
2730 | set_gdbarch_write_pc (gdbarch, spu_write_pc); | |
2731 | set_gdbarch_register_name (gdbarch, spu_register_name); | |
2732 | set_gdbarch_register_type (gdbarch, spu_register_type); | |
2733 | set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read); | |
2734 | set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write); | |
2735 | set_gdbarch_value_from_register (gdbarch, spu_value_from_register); | |
2736 | set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p); | |
2737 | set_gdbarch_dwarf2_reg_to_regnum (gdbarch, spu_dwarf_reg_to_regnum); | |
2738 | set_gdbarch_ax_pseudo_register_collect | |
2739 | (gdbarch, spu_ax_pseudo_register_collect); | |
2740 | set_gdbarch_ax_pseudo_register_push_stack | |
2741 | (gdbarch, spu_ax_pseudo_register_push_stack); | |
2742 | ||
2743 | /* Data types. */ | |
2744 | set_gdbarch_char_signed (gdbarch, 0); | |
2745 | set_gdbarch_ptr_bit (gdbarch, 32); | |
2746 | set_gdbarch_addr_bit (gdbarch, 32); | |
2747 | set_gdbarch_short_bit (gdbarch, 16); | |
2748 | set_gdbarch_int_bit (gdbarch, 32); | |
2749 | set_gdbarch_long_bit (gdbarch, 32); | |
2750 | set_gdbarch_long_long_bit (gdbarch, 64); | |
2751 | set_gdbarch_float_bit (gdbarch, 32); | |
2752 | set_gdbarch_double_bit (gdbarch, 64); | |
2753 | set_gdbarch_long_double_bit (gdbarch, 64); | |
2754 | set_gdbarch_float_format (gdbarch, floatformats_ieee_single); | |
2755 | set_gdbarch_double_format (gdbarch, floatformats_ieee_double); | |
2756 | set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double); | |
2757 | ||
2758 | /* Address handling. */ | |
2759 | set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer); | |
2760 | set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address); | |
2761 | set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address); | |
2762 | set_gdbarch_address_class_type_flags (gdbarch, spu_address_class_type_flags); | |
2763 | set_gdbarch_address_class_type_flags_to_name | |
2764 | (gdbarch, spu_address_class_type_flags_to_name); | |
2765 | set_gdbarch_address_class_name_to_type_flags | |
2766 | (gdbarch, spu_address_class_name_to_type_flags); | |
2767 | ||
2768 | ||
2769 | /* Inferior function calls. */ | |
2770 | set_gdbarch_call_dummy_location (gdbarch, ON_STACK); | |
2771 | set_gdbarch_frame_align (gdbarch, spu_frame_align); | |
2772 | set_gdbarch_frame_red_zone_size (gdbarch, 2000); | |
2773 | set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code); | |
2774 | set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call); | |
2775 | set_gdbarch_dummy_id (gdbarch, spu_dummy_id); | |
2776 | set_gdbarch_return_value (gdbarch, spu_return_value); | |
2777 | ||
2778 | /* Frame handling. */ | |
2779 | set_gdbarch_inner_than (gdbarch, core_addr_lessthan); | |
2780 | dwarf2_append_unwinders (gdbarch); | |
2781 | frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind); | |
2782 | frame_base_set_default (gdbarch, &spu_frame_base); | |
2783 | set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc); | |
2784 | set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp); | |
2785 | set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer); | |
2786 | set_gdbarch_frame_args_skip (gdbarch, 0); | |
2787 | set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue); | |
2788 | set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p); | |
2789 | ||
2790 | /* Cell/B.E. cross-architecture unwinder support. */ | |
2791 | frame_unwind_prepend_unwinder (gdbarch, &spu2ppu_unwind); | |
2792 | ||
2793 | /* Breakpoints. */ | |
2794 | set_gdbarch_decr_pc_after_break (gdbarch, 4); | |
2795 | set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc); | |
2796 | set_gdbarch_memory_remove_breakpoint (gdbarch, spu_memory_remove_breakpoint); | |
2797 | set_gdbarch_cannot_step_breakpoint (gdbarch, 1); | |
2798 | set_gdbarch_software_single_step (gdbarch, spu_software_single_step); | |
2799 | set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target); | |
2800 | ||
2801 | /* Overlays. */ | |
2802 | set_gdbarch_overlay_update (gdbarch, spu_overlay_update); | |
2803 | ||
2804 | return gdbarch; | |
2805 | } | |
2806 | ||
2807 | /* Provide a prototype to silence -Wmissing-prototypes. */ | |
2808 | extern initialize_file_ftype _initialize_spu_tdep; | |
2809 | ||
2810 | void | |
2811 | _initialize_spu_tdep (void) | |
2812 | { | |
2813 | register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init); | |
2814 | ||
2815 | /* Add ourselves to objfile event chain. */ | |
2816 | observer_attach_new_objfile (spu_overlay_new_objfile); | |
2817 | spu_overlay_data = register_objfile_data (); | |
2818 | ||
2819 | /* Install spu stop-on-load handler. */ | |
2820 | observer_attach_new_objfile (spu_catch_start); | |
2821 | ||
2822 | /* Add ourselves to normal_stop event chain. */ | |
2823 | observer_attach_normal_stop (spu_attach_normal_stop); | |
2824 | ||
2825 | /* Add root prefix command for all "set spu"/"show spu" commands. */ | |
2826 | add_prefix_cmd ("spu", no_class, set_spu_command, | |
2827 | _("Various SPU specific commands."), | |
2828 | &setspucmdlist, "set spu ", 0, &setlist); | |
2829 | add_prefix_cmd ("spu", no_class, show_spu_command, | |
2830 | _("Various SPU specific commands."), | |
2831 | &showspucmdlist, "show spu ", 0, &showlist); | |
2832 | ||
2833 | /* Toggle whether or not to add a temporary breakpoint at the "main" | |
2834 | function of new SPE contexts. */ | |
2835 | add_setshow_boolean_cmd ("stop-on-load", class_support, | |
2836 | &spu_stop_on_load_p, _("\ | |
2837 | Set whether to stop for new SPE threads."), | |
2838 | _("\ | |
2839 | Show whether to stop for new SPE threads."), | |
2840 | _("\ | |
2841 | Use \"on\" to give control to the user when a new SPE thread\n\ | |
2842 | enters its \"main\" function.\n\ | |
2843 | Use \"off\" to disable stopping for new SPE threads."), | |
2844 | NULL, | |
2845 | show_spu_stop_on_load, | |
2846 | &setspucmdlist, &showspucmdlist); | |
2847 | ||
2848 | /* Toggle whether or not to automatically flush the software-managed | |
2849 | cache whenever SPE execution stops. */ | |
2850 | add_setshow_boolean_cmd ("auto-flush-cache", class_support, | |
2851 | &spu_auto_flush_cache_p, _("\ | |
2852 | Set whether to automatically flush the software-managed cache."), | |
2853 | _("\ | |
2854 | Show whether to automatically flush the software-managed cache."), | |
2855 | _("\ | |
2856 | Use \"on\" to automatically flush the software-managed cache\n\ | |
2857 | whenever SPE execution stops.\n\ | |
2858 | Use \"off\" to never automatically flush the software-managed cache."), | |
2859 | NULL, | |
2860 | show_spu_auto_flush_cache, | |
2861 | &setspucmdlist, &showspucmdlist); | |
2862 | ||
2863 | /* Add root prefix command for all "info spu" commands. */ | |
2864 | add_prefix_cmd ("spu", class_info, info_spu_command, | |
2865 | _("Various SPU specific commands."), | |
2866 | &infospucmdlist, "info spu ", 0, &infolist); | |
2867 | ||
2868 | /* Add various "info spu" commands. */ | |
2869 | add_cmd ("event", class_info, info_spu_event_command, | |
2870 | _("Display SPU event facility status.\n"), | |
2871 | &infospucmdlist); | |
2872 | add_cmd ("signal", class_info, info_spu_signal_command, | |
2873 | _("Display SPU signal notification facility status.\n"), | |
2874 | &infospucmdlist); | |
2875 | add_cmd ("mailbox", class_info, info_spu_mailbox_command, | |
2876 | _("Display SPU mailbox facility status.\n"), | |
2877 | &infospucmdlist); | |
2878 | add_cmd ("dma", class_info, info_spu_dma_command, | |
2879 | _("Display MFC DMA status.\n"), | |
2880 | &infospucmdlist); | |
2881 | add_cmd ("proxydma", class_info, info_spu_proxydma_command, | |
2882 | _("Display MFC Proxy-DMA status.\n"), | |
2883 | &infospucmdlist); | |
2884 | } |