1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000,
4 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "elf/external.h"
25 #include "elf/common.h"
36 #include "gdbthread.h"
39 #include "gdb_assert.h"
43 #include "solib-svr4.h"
45 #include "bfd-target.h"
49 #include "exceptions.h"
51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52 static int svr4_have_link_map_offsets (void);
53 static void svr4_relocate_main_executable (void);
55 /* Link map info to include in an allocated so_list entry. */
59 /* Pointer to copy of link map from inferior. The type is char *
60 rather than void *, so that we may use byte offsets to find the
61 various fields without the need for a cast. */
64 /* Amount by which addresses in the binary should be relocated to
65 match the inferior. This could most often be taken directly
66 from lm, but when prelinking is involved and the prelink base
67 address changes, we may need a different offset, we want to
68 warn about the difference and compute it only once. */
71 /* The target location of lm. */
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
83 static const char * const solib_break_names[] =
89 "__dl_rtld_db_dlactivity",
95 static const char * const bkpt_names[] =
103 static const char * const main_name_list[] =
109 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110 the same shared library. */
113 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
115 if (strcmp (gdb_so_name, inferior_so_name) == 0)
118 /* On Solaris, when starting inferior we think that dynamic linker is
119 /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
120 contains /lib/ld.so.1. Sometimes one file is a link to another, but
121 sometimes they have identical content, but are not linked to each
122 other. We don't restrict this check for Solaris, but the chances
123 of running into this situation elsewhere are very low. */
124 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
128 /* Similarly, we observed the same issue with sparc64, but with
129 different locations. */
130 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
138 svr4_same (struct so_list *gdb, struct so_list *inferior)
140 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
143 /* link map access functions. */
146 lm_addr_from_link_map (struct so_list *so)
148 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
149 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
151 return extract_typed_address (so->lm_info->lm + lmo->l_addr_offset,
156 has_lm_dynamic_from_link_map (void)
158 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
160 return lmo->l_ld_offset >= 0;
164 lm_dynamic_from_link_map (struct so_list *so)
166 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
167 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
169 return extract_typed_address (so->lm_info->lm + lmo->l_ld_offset,
174 lm_addr_check (struct so_list *so, bfd *abfd)
176 if (so->lm_info->l_addr == (CORE_ADDR)-1)
178 struct bfd_section *dyninfo_sect;
179 CORE_ADDR l_addr, l_dynaddr, dynaddr;
181 l_addr = lm_addr_from_link_map (so);
183 if (! abfd || ! has_lm_dynamic_from_link_map ())
186 l_dynaddr = lm_dynamic_from_link_map (so);
188 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
189 if (dyninfo_sect == NULL)
192 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
194 if (dynaddr + l_addr != l_dynaddr)
196 CORE_ADDR align = 0x1000;
197 CORE_ADDR minpagesize = align;
199 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
201 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
202 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
207 for (i = 0; i < ehdr->e_phnum; i++)
208 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
209 align = phdr[i].p_align;
211 minpagesize = get_elf_backend_data (abfd)->minpagesize;
214 /* Turn it into a mask. */
217 /* If the changes match the alignment requirements, we
218 assume we're using a core file that was generated by the
219 same binary, just prelinked with a different base offset.
220 If it doesn't match, we may have a different binary, the
221 same binary with the dynamic table loaded at an unrelated
222 location, or anything, really. To avoid regressions,
223 don't adjust the base offset in the latter case, although
224 odds are that, if things really changed, debugging won't
227 One could expect more the condition
228 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
229 but the one below is relaxed for PPC. The PPC kernel supports
230 either 4k or 64k page sizes. To be prepared for 64k pages,
231 PPC ELF files are built using an alignment requirement of 64k.
232 However, when running on a kernel supporting 4k pages, the memory
233 mapping of the library may not actually happen on a 64k boundary!
235 (In the usual case where (l_addr & align) == 0, this check is
236 equivalent to the possibly expected check above.)
238 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
240 l_addr = l_dynaddr - dynaddr;
242 if ((l_addr & (minpagesize - 1)) == 0
243 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
246 printf_unfiltered (_("Using PIC (Position Independent Code) "
247 "prelink displacement %s for \"%s\".\n"),
248 paddress (target_gdbarch, l_addr),
253 /* There is no way to verify the library file matches. prelink
254 can during prelinking of an unprelinked file (or unprelinking
255 of a prelinked file) shift the DYNAMIC segment by arbitrary
256 offset without any page size alignment. There is no way to
257 find out the ELF header and/or Program Headers for a limited
258 verification if it they match. One could do a verification
259 of the DYNAMIC segment. Still the found address is the best
260 one GDB could find. */
262 warning (_(".dynamic section for \"%s\" "
263 "is not at the expected address "
264 "(wrong library or version mismatch?)"), so->so_name);
269 so->lm_info->l_addr = l_addr;
272 return so->lm_info->l_addr;
276 lm_next (struct so_list *so)
278 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
279 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
281 return extract_typed_address (so->lm_info->lm + lmo->l_next_offset,
286 lm_prev (struct so_list *so)
288 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
289 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
291 return extract_typed_address (so->lm_info->lm + lmo->l_prev_offset,
296 lm_name (struct so_list *so)
298 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
299 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
301 return extract_typed_address (so->lm_info->lm + lmo->l_name_offset,
306 ignore_first_link_map_entry (struct so_list *so)
308 /* Assume that everything is a library if the dynamic loader was loaded
309 late by a static executable. */
310 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
313 return lm_prev (so) == 0;
316 /* Per pspace SVR4 specific data. */
320 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
322 /* Validity flag for debug_loader_offset. */
323 int debug_loader_offset_p;
325 /* Load address for the dynamic linker, inferred. */
326 CORE_ADDR debug_loader_offset;
328 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
329 char *debug_loader_name;
331 /* Load map address for the main executable. */
332 CORE_ADDR main_lm_addr;
334 CORE_ADDR interp_text_sect_low;
335 CORE_ADDR interp_text_sect_high;
336 CORE_ADDR interp_plt_sect_low;
337 CORE_ADDR interp_plt_sect_high;
340 /* Per-program-space data key. */
341 static const struct program_space_data *solib_svr4_pspace_data;
344 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
346 struct svr4_info *info;
348 info = program_space_data (pspace, solib_svr4_pspace_data);
352 /* Get the current svr4 data. If none is found yet, add it now. This
353 function always returns a valid object. */
355 static struct svr4_info *
358 struct svr4_info *info;
360 info = program_space_data (current_program_space, solib_svr4_pspace_data);
364 info = XZALLOC (struct svr4_info);
365 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
369 /* Local function prototypes */
371 static int match_main (const char *);
377 bfd_lookup_symbol -- lookup the value for a specific symbol
381 CORE_ADDR bfd_lookup_symbol (bfd *abfd, char *symname)
385 An expensive way to lookup the value of a single symbol for
386 bfd's that are only temporary anyway. This is used by the
387 shared library support to find the address of the debugger
388 notification routine in the shared library.
390 The returned symbol may be in a code or data section; functions
391 will normally be in a code section, but may be in a data section
392 if this architecture uses function descriptors.
394 Note that 0 is specifically allowed as an error return (no
399 bfd_lookup_symbol (bfd *abfd, const char *symname)
403 asymbol **symbol_table;
404 unsigned int number_of_symbols;
406 struct cleanup *back_to;
407 CORE_ADDR symaddr = 0;
409 storage_needed = bfd_get_symtab_upper_bound (abfd);
411 if (storage_needed > 0)
413 symbol_table = (asymbol **) xmalloc (storage_needed);
414 back_to = make_cleanup (xfree, symbol_table);
415 number_of_symbols = bfd_canonicalize_symtab (abfd, symbol_table);
417 for (i = 0; i < number_of_symbols; i++)
419 sym = *symbol_table++;
420 if (strcmp (sym->name, symname) == 0
421 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
423 /* BFD symbols are section relative. */
424 symaddr = sym->value + sym->section->vma;
428 do_cleanups (back_to);
434 /* On FreeBSD, the dynamic linker is stripped by default. So we'll
435 have to check the dynamic string table too. */
437 storage_needed = bfd_get_dynamic_symtab_upper_bound (abfd);
439 if (storage_needed > 0)
441 symbol_table = (asymbol **) xmalloc (storage_needed);
442 back_to = make_cleanup (xfree, symbol_table);
443 number_of_symbols = bfd_canonicalize_dynamic_symtab (abfd, symbol_table);
445 for (i = 0; i < number_of_symbols; i++)
447 sym = *symbol_table++;
449 if (strcmp (sym->name, symname) == 0
450 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
452 /* BFD symbols are section relative. */
453 symaddr = sym->value + sym->section->vma;
457 do_cleanups (back_to);
464 /* Read program header TYPE from inferior memory. The header is found
465 by scanning the OS auxillary vector.
467 If TYPE == -1, return the program headers instead of the contents of
470 Return a pointer to allocated memory holding the program header contents,
471 or NULL on failure. If sucessful, and unless P_SECT_SIZE is NULL, the
472 size of those contents is returned to P_SECT_SIZE. Likewise, the target
473 architecture size (32-bit or 64-bit) is returned to P_ARCH_SIZE. */
476 read_program_header (int type, int *p_sect_size, int *p_arch_size)
478 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
479 CORE_ADDR at_phdr, at_phent, at_phnum;
480 int arch_size, sect_size;
484 /* Get required auxv elements from target. */
485 if (target_auxv_search (¤t_target, AT_PHDR, &at_phdr) <= 0)
487 if (target_auxv_search (¤t_target, AT_PHENT, &at_phent) <= 0)
489 if (target_auxv_search (¤t_target, AT_PHNUM, &at_phnum) <= 0)
491 if (!at_phdr || !at_phnum)
494 /* Determine ELF architecture type. */
495 if (at_phent == sizeof (Elf32_External_Phdr))
497 else if (at_phent == sizeof (Elf64_External_Phdr))
502 /* Find the requested segment. */
506 sect_size = at_phent * at_phnum;
508 else if (arch_size == 32)
510 Elf32_External_Phdr phdr;
513 /* Search for requested PHDR. */
514 for (i = 0; i < at_phnum; i++)
516 if (target_read_memory (at_phdr + i * sizeof (phdr),
517 (gdb_byte *)&phdr, sizeof (phdr)))
520 if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
521 4, byte_order) == type)
528 /* Retrieve address and size. */
529 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
531 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
536 Elf64_External_Phdr phdr;
539 /* Search for requested PHDR. */
540 for (i = 0; i < at_phnum; i++)
542 if (target_read_memory (at_phdr + i * sizeof (phdr),
543 (gdb_byte *)&phdr, sizeof (phdr)))
546 if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
547 4, byte_order) == type)
554 /* Retrieve address and size. */
555 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
557 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
561 /* Read in requested program header. */
562 buf = xmalloc (sect_size);
563 if (target_read_memory (sect_addr, buf, sect_size))
570 *p_arch_size = arch_size;
572 *p_sect_size = sect_size;
578 /* Return program interpreter string. */
580 find_program_interpreter (void)
582 gdb_byte *buf = NULL;
584 /* If we have an exec_bfd, use its section table. */
586 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
588 struct bfd_section *interp_sect;
590 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
591 if (interp_sect != NULL)
593 int sect_size = bfd_section_size (exec_bfd, interp_sect);
595 buf = xmalloc (sect_size);
596 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
600 /* If we didn't find it, use the target auxillary vector. */
602 buf = read_program_header (PT_INTERP, NULL, NULL);
608 /* Scan for DYNTAG in .dynamic section of ABFD. If DYNTAG is found 1 is
609 returned and the corresponding PTR is set. */
612 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
614 int arch_size, step, sect_size;
616 CORE_ADDR dyn_ptr, dyn_addr;
617 gdb_byte *bufend, *bufstart, *buf;
618 Elf32_External_Dyn *x_dynp_32;
619 Elf64_External_Dyn *x_dynp_64;
620 struct bfd_section *sect;
621 struct target_section *target_section;
626 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
629 arch_size = bfd_get_arch_size (abfd);
633 /* Find the start address of the .dynamic section. */
634 sect = bfd_get_section_by_name (abfd, ".dynamic");
638 for (target_section = current_target_sections->sections;
639 target_section < current_target_sections->sections_end;
641 if (sect == target_section->the_bfd_section)
643 if (target_section < current_target_sections->sections_end)
644 dyn_addr = target_section->addr;
647 /* ABFD may come from OBJFILE acting only as a symbol file without being
648 loaded into the target (see add_symbol_file_command). This case is
649 such fallback to the file VMA address without the possibility of
650 having the section relocated to its actual in-memory address. */
652 dyn_addr = bfd_section_vma (abfd, sect);
655 /* Read in .dynamic from the BFD. We will get the actual value
656 from memory later. */
657 sect_size = bfd_section_size (abfd, sect);
658 buf = bufstart = alloca (sect_size);
659 if (!bfd_get_section_contents (abfd, sect,
663 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
664 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
665 : sizeof (Elf64_External_Dyn);
666 for (bufend = buf + sect_size;
672 x_dynp_32 = (Elf32_External_Dyn *) buf;
673 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
674 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
678 x_dynp_64 = (Elf64_External_Dyn *) buf;
679 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
680 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
682 if (dyn_tag == DT_NULL)
684 if (dyn_tag == dyntag)
686 /* If requested, try to read the runtime value of this .dynamic
690 struct type *ptr_type;
694 ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
695 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
696 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
697 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
707 /* Scan for DYNTAG in .dynamic section of the target's main executable,
708 found by consulting the OS auxillary vector. If DYNTAG is found 1 is
709 returned and the corresponding PTR is set. */
712 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
714 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
715 int sect_size, arch_size, step;
718 gdb_byte *bufend, *bufstart, *buf;
720 /* Read in .dynamic section. */
721 buf = bufstart = read_program_header (PT_DYNAMIC, §_size, &arch_size);
725 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
726 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
727 : sizeof (Elf64_External_Dyn);
728 for (bufend = buf + sect_size;
734 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
736 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
738 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
743 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
745 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
747 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
750 if (dyn_tag == DT_NULL)
753 if (dyn_tag == dyntag)
772 elf_locate_base -- locate the base address of dynamic linker structs
773 for SVR4 elf targets.
777 CORE_ADDR elf_locate_base (void)
781 For SVR4 elf targets the address of the dynamic linker's runtime
782 structure is contained within the dynamic info section in the
783 executable file. The dynamic section is also mapped into the
784 inferior address space. Because the runtime loader fills in the
785 real address before starting the inferior, we have to read in the
786 dynamic info section from the inferior address space.
787 If there are any errors while trying to find the address, we
788 silently return 0, otherwise the found address is returned.
793 elf_locate_base (void)
795 struct minimal_symbol *msymbol;
798 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
799 instead of DT_DEBUG, although they sometimes contain an unused
801 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
802 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
804 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
806 int pbuf_size = TYPE_LENGTH (ptr_type);
808 pbuf = alloca (pbuf_size);
809 /* DT_MIPS_RLD_MAP contains a pointer to the address
810 of the dynamic link structure. */
811 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
813 return extract_typed_address (pbuf, ptr_type);
817 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
818 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
821 /* This may be a static executable. Look for the symbol
822 conventionally named _r_debug, as a last resort. */
823 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
825 return SYMBOL_VALUE_ADDRESS (msymbol);
827 /* DT_DEBUG entry not found. */
835 locate_base -- locate the base address of dynamic linker structs
839 CORE_ADDR locate_base (struct svr4_info *)
843 For both the SunOS and SVR4 shared library implementations, if the
844 inferior executable has been linked dynamically, there is a single
845 address somewhere in the inferior's data space which is the key to
846 locating all of the dynamic linker's runtime structures. This
847 address is the value of the debug base symbol. The job of this
848 function is to find and return that address, or to return 0 if there
849 is no such address (the executable is statically linked for example).
851 For SunOS, the job is almost trivial, since the dynamic linker and
852 all of it's structures are statically linked to the executable at
853 link time. Thus the symbol for the address we are looking for has
854 already been added to the minimal symbol table for the executable's
855 objfile at the time the symbol file's symbols were read, and all we
856 have to do is look it up there. Note that we explicitly do NOT want
857 to find the copies in the shared library.
859 The SVR4 version is a bit more complicated because the address
860 is contained somewhere in the dynamic info section. We have to go
861 to a lot more work to discover the address of the debug base symbol.
862 Because of this complexity, we cache the value we find and return that
863 value on subsequent invocations. Note there is no copy in the
864 executable symbol tables.
869 locate_base (struct svr4_info *info)
871 /* Check to see if we have a currently valid address, and if so, avoid
872 doing all this work again and just return the cached address. If
873 we have no cached address, try to locate it in the dynamic info
874 section for ELF executables. There's no point in doing any of this
875 though if we don't have some link map offsets to work with. */
877 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
878 info->debug_base = elf_locate_base ();
879 return info->debug_base;
882 /* Find the first element in the inferior's dynamic link map, and
883 return its address in the inferior. Return zero if the address
884 could not be determined.
886 FIXME: Perhaps we should validate the info somehow, perhaps by
887 checking r_version for a known version number, or r_state for
891 solib_svr4_r_map (struct svr4_info *info)
893 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
894 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
896 volatile struct gdb_exception ex;
898 TRY_CATCH (ex, RETURN_MASK_ERROR)
900 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
903 exception_print (gdb_stderr, ex);
907 /* Find r_brk from the inferior's debug base. */
910 solib_svr4_r_brk (struct svr4_info *info)
912 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
913 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
915 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
919 /* Find the link map for the dynamic linker (if it is not in the
920 normal list of loaded shared objects). */
923 solib_svr4_r_ldsomap (struct svr4_info *info)
925 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
926 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
927 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
930 /* Check version, and return zero if `struct r_debug' doesn't have
931 the r_ldsomap member. */
933 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
934 lmo->r_version_size, byte_order);
935 if (version < 2 || lmo->r_ldsomap_offset == -1)
938 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
942 /* On Solaris systems with some versions of the dynamic linker,
943 ld.so's l_name pointer points to the SONAME in the string table
944 rather than into writable memory. So that GDB can find shared
945 libraries when loading a core file generated by gcore, ensure that
946 memory areas containing the l_name string are saved in the core
950 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
952 struct svr4_info *info;
955 struct cleanup *old_chain;
956 struct link_map_offsets *lmo;
959 info = get_svr4_info ();
961 info->debug_base = 0;
963 if (!info->debug_base)
966 ldsomap = solib_svr4_r_ldsomap (info);
970 lmo = svr4_fetch_link_map_offsets ();
971 new = XZALLOC (struct so_list);
972 old_chain = make_cleanup (xfree, new);
973 new->lm_info = xmalloc (sizeof (struct lm_info));
974 make_cleanup (xfree, new->lm_info);
975 new->lm_info->l_addr = (CORE_ADDR)-1;
976 new->lm_info->lm_addr = ldsomap;
977 new->lm_info->lm = xzalloc (lmo->link_map_size);
978 make_cleanup (xfree, new->lm_info->lm);
979 read_memory (ldsomap, new->lm_info->lm, lmo->link_map_size);
980 name_lm = lm_name (new);
981 do_cleanups (old_chain);
983 return (name_lm >= vaddr && name_lm < vaddr + size);
990 open_symbol_file_object
994 void open_symbol_file_object (void *from_tty)
998 If no open symbol file, attempt to locate and open the main symbol
999 file. On SVR4 systems, this is the first link map entry. If its
1000 name is here, we can open it. Useful when attaching to a process
1001 without first loading its symbol file.
1003 If FROM_TTYP dereferences to a non-zero integer, allow messages to
1004 be printed. This parameter is a pointer rather than an int because
1005 open_symbol_file_object() is called via catch_errors() and
1006 catch_errors() requires a pointer argument. */
1009 open_symbol_file_object (void *from_ttyp)
1011 CORE_ADDR lm, l_name;
1014 int from_tty = *(int *)from_ttyp;
1015 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1016 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
1017 int l_name_size = TYPE_LENGTH (ptr_type);
1018 gdb_byte *l_name_buf = xmalloc (l_name_size);
1019 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
1020 struct svr4_info *info = get_svr4_info ();
1022 if (symfile_objfile)
1023 if (!query (_("Attempt to reload symbols from process? ")))
1026 /* Always locate the debug struct, in case it has moved. */
1027 info->debug_base = 0;
1028 if (locate_base (info) == 0)
1029 return 0; /* failed somehow... */
1031 /* First link map member should be the executable. */
1032 lm = solib_svr4_r_map (info);
1034 return 0; /* failed somehow... */
1036 /* Read address of name from target memory to GDB. */
1037 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1039 /* Convert the address to host format. */
1040 l_name = extract_typed_address (l_name_buf, ptr_type);
1042 /* Free l_name_buf. */
1043 do_cleanups (cleanups);
1046 return 0; /* No filename. */
1048 /* Now fetch the filename from target memory. */
1049 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1050 make_cleanup (xfree, filename);
1054 warning (_("failed to read exec filename from attached file: %s"),
1055 safe_strerror (errcode));
1059 /* Have a pathname: read the symbol file. */
1060 symbol_file_add_main (filename, from_tty);
1065 /* If no shared library information is available from the dynamic
1066 linker, build a fallback list from other sources. */
1068 static struct so_list *
1069 svr4_default_sos (void)
1071 struct svr4_info *info = get_svr4_info ();
1073 struct so_list *head = NULL;
1074 struct so_list **link_ptr = &head;
1076 if (info->debug_loader_offset_p)
1078 struct so_list *new = XZALLOC (struct so_list);
1080 new->lm_info = xmalloc (sizeof (struct lm_info));
1082 /* Nothing will ever check the cached copy of the link
1083 map if we set l_addr. */
1084 new->lm_info->l_addr = info->debug_loader_offset;
1085 new->lm_info->lm_addr = 0;
1086 new->lm_info->lm = NULL;
1088 strncpy (new->so_name, info->debug_loader_name,
1089 SO_NAME_MAX_PATH_SIZE - 1);
1090 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1091 strcpy (new->so_original_name, new->so_name);
1094 link_ptr = &new->next;
1102 current_sos -- build a list of currently loaded shared objects
1106 struct so_list *current_sos ()
1110 Build a list of `struct so_list' objects describing the shared
1111 objects currently loaded in the inferior. This list does not
1112 include an entry for the main executable file.
1114 Note that we only gather information directly available from the
1115 inferior --- we don't examine any of the shared library files
1116 themselves. The declaration of `struct so_list' says which fields
1117 we provide values for. */
1119 static struct so_list *
1120 svr4_current_sos (void)
1122 CORE_ADDR lm, prev_lm;
1123 struct so_list *head = 0;
1124 struct so_list **link_ptr = &head;
1125 CORE_ADDR ldsomap = 0;
1126 struct svr4_info *info;
1128 info = get_svr4_info ();
1130 /* Always locate the debug struct, in case it has moved. */
1131 info->debug_base = 0;
1134 /* If we can't find the dynamic linker's base structure, this
1135 must not be a dynamically linked executable. Hmm. */
1136 if (! info->debug_base)
1137 return svr4_default_sos ();
1139 /* Walk the inferior's link map list, and build our list of
1140 `struct so_list' nodes. */
1142 lm = solib_svr4_r_map (info);
1146 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1147 struct so_list *new = XZALLOC (struct so_list);
1148 struct cleanup *old_chain = make_cleanup (xfree, new);
1151 new->lm_info = xmalloc (sizeof (struct lm_info));
1152 make_cleanup (xfree, new->lm_info);
1154 new->lm_info->l_addr = (CORE_ADDR)-1;
1155 new->lm_info->lm_addr = lm;
1156 new->lm_info->lm = xzalloc (lmo->link_map_size);
1157 make_cleanup (xfree, new->lm_info->lm);
1159 read_memory (lm, new->lm_info->lm, lmo->link_map_size);
1161 next_lm = lm_next (new);
1163 if (lm_prev (new) != prev_lm)
1165 warning (_("Corrupted shared library list"));
1170 /* For SVR4 versions, the first entry in the link map is for the
1171 inferior executable, so we must ignore it. For some versions of
1172 SVR4, it has no name. For others (Solaris 2.3 for example), it
1173 does have a name, so we can no longer use a missing name to
1174 decide when to ignore it. */
1175 else if (ignore_first_link_map_entry (new) && ldsomap == 0)
1177 info->main_lm_addr = new->lm_info->lm_addr;
1185 /* Extract this shared object's name. */
1186 target_read_string (lm_name (new), &buffer,
1187 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1189 warning (_("Can't read pathname for load map: %s."),
1190 safe_strerror (errcode));
1193 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1194 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1195 strcpy (new->so_original_name, new->so_name);
1199 /* If this entry has no name, or its name matches the name
1200 for the main executable, don't include it in the list. */
1201 if (! new->so_name[0]
1202 || match_main (new->so_name))
1208 link_ptr = &new->next;
1215 /* On Solaris, the dynamic linker is not in the normal list of
1216 shared objects, so make sure we pick it up too. Having
1217 symbol information for the dynamic linker is quite crucial
1218 for skipping dynamic linker resolver code. */
1219 if (lm == 0 && ldsomap == 0)
1221 lm = ldsomap = solib_svr4_r_ldsomap (info);
1225 discard_cleanups (old_chain);
1229 return svr4_default_sos ();
1234 /* Get the address of the link_map for a given OBJFILE. */
1237 svr4_fetch_objfile_link_map (struct objfile *objfile)
1240 struct svr4_info *info = get_svr4_info ();
1242 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1243 if (info->main_lm_addr == 0)
1244 solib_add (NULL, 0, ¤t_target, auto_solib_add);
1246 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1247 if (objfile == symfile_objfile)
1248 return info->main_lm_addr;
1250 /* The other link map addresses may be found by examining the list
1251 of shared libraries. */
1252 for (so = master_so_list (); so; so = so->next)
1253 if (so->objfile == objfile)
1254 return so->lm_info->lm_addr;
1260 /* On some systems, the only way to recognize the link map entry for
1261 the main executable file is by looking at its name. Return
1262 non-zero iff SONAME matches one of the known main executable names. */
1265 match_main (const char *soname)
1267 const char * const *mainp;
1269 for (mainp = main_name_list; *mainp != NULL; mainp++)
1271 if (strcmp (soname, *mainp) == 0)
1278 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1279 SVR4 run time loader. */
1282 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1284 struct svr4_info *info = get_svr4_info ();
1286 return ((pc >= info->interp_text_sect_low
1287 && pc < info->interp_text_sect_high)
1288 || (pc >= info->interp_plt_sect_low
1289 && pc < info->interp_plt_sect_high)
1290 || in_plt_section (pc, NULL)
1291 || in_gnu_ifunc_stub (pc));
1294 /* Given an executable's ABFD and target, compute the entry-point
1298 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1300 /* KevinB wrote ... for most targets, the address returned by
1301 bfd_get_start_address() is the entry point for the start
1302 function. But, for some targets, bfd_get_start_address() returns
1303 the address of a function descriptor from which the entry point
1304 address may be extracted. This address is extracted by
1305 gdbarch_convert_from_func_ptr_addr(). The method
1306 gdbarch_convert_from_func_ptr_addr() is the merely the identify
1307 function for targets which don't use function descriptors. */
1308 return gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1309 bfd_get_start_address (abfd),
1317 enable_break -- arrange for dynamic linker to hit breakpoint
1321 int enable_break (void)
1325 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1326 debugger interface, support for arranging for the inferior to hit
1327 a breakpoint after mapping in the shared libraries. This function
1328 enables that breakpoint.
1330 For SunOS, there is a special flag location (in_debugger) which we
1331 set to 1. When the dynamic linker sees this flag set, it will set
1332 a breakpoint at a location known only to itself, after saving the
1333 original contents of that place and the breakpoint address itself,
1334 in it's own internal structures. When we resume the inferior, it
1335 will eventually take a SIGTRAP when it runs into the breakpoint.
1336 We handle this (in a different place) by restoring the contents of
1337 the breakpointed location (which is only known after it stops),
1338 chasing around to locate the shared libraries that have been
1339 loaded, then resuming.
1341 For SVR4, the debugger interface structure contains a member (r_brk)
1342 which is statically initialized at the time the shared library is
1343 built, to the offset of a function (_r_debug_state) which is guaran-
1344 teed to be called once before mapping in a library, and again when
1345 the mapping is complete. At the time we are examining this member,
1346 it contains only the unrelocated offset of the function, so we have
1347 to do our own relocation. Later, when the dynamic linker actually
1348 runs, it relocates r_brk to be the actual address of _r_debug_state().
1350 The debugger interface structure also contains an enumeration which
1351 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1352 depending upon whether or not the library is being mapped or unmapped,
1353 and then set to RT_CONSISTENT after the library is mapped/unmapped.
1357 enable_break (struct svr4_info *info, int from_tty)
1359 struct minimal_symbol *msymbol;
1360 const char * const *bkpt_namep;
1361 asection *interp_sect;
1362 gdb_byte *interp_name;
1365 info->interp_text_sect_low = info->interp_text_sect_high = 0;
1366 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1368 /* If we already have a shared library list in the target, and
1369 r_debug contains r_brk, set the breakpoint there - this should
1370 mean r_brk has already been relocated. Assume the dynamic linker
1371 is the object containing r_brk. */
1373 solib_add (NULL, from_tty, ¤t_target, auto_solib_add);
1375 if (info->debug_base && solib_svr4_r_map (info) != 0)
1376 sym_addr = solib_svr4_r_brk (info);
1380 struct obj_section *os;
1382 sym_addr = gdbarch_addr_bits_remove
1383 (target_gdbarch, gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1387 /* On at least some versions of Solaris there's a dynamic relocation
1388 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1389 we get control before the dynamic linker has self-relocated.
1390 Check if SYM_ADDR is in a known section, if it is assume we can
1391 trust its value. This is just a heuristic though, it could go away
1392 or be replaced if it's getting in the way.
1394 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1395 however it's spelled in your particular system) is ARM or Thumb.
1396 That knowledge is encoded in the address, if it's Thumb the low bit
1397 is 1. However, we've stripped that info above and it's not clear
1398 what all the consequences are of passing a non-addr_bits_remove'd
1399 address to create_solib_event_breakpoint. The call to
1400 find_pc_section verifies we know about the address and have some
1401 hope of computing the right kind of breakpoint to use (via
1402 symbol info). It does mean that GDB needs to be pointed at a
1403 non-stripped version of the dynamic linker in order to obtain
1404 information it already knows about. Sigh. */
1406 os = find_pc_section (sym_addr);
1409 /* Record the relocated start and end address of the dynamic linker
1410 text and plt section for svr4_in_dynsym_resolve_code. */
1412 CORE_ADDR load_addr;
1414 tmp_bfd = os->objfile->obfd;
1415 load_addr = ANOFFSET (os->objfile->section_offsets,
1416 os->objfile->sect_index_text);
1418 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1421 info->interp_text_sect_low =
1422 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1423 info->interp_text_sect_high =
1424 info->interp_text_sect_low
1425 + bfd_section_size (tmp_bfd, interp_sect);
1427 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1430 info->interp_plt_sect_low =
1431 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1432 info->interp_plt_sect_high =
1433 info->interp_plt_sect_low
1434 + bfd_section_size (tmp_bfd, interp_sect);
1437 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1442 /* Find the program interpreter; if not found, warn the user and drop
1443 into the old breakpoint at symbol code. */
1444 interp_name = find_program_interpreter ();
1447 CORE_ADDR load_addr = 0;
1448 int load_addr_found = 0;
1449 int loader_found_in_list = 0;
1451 bfd *tmp_bfd = NULL;
1452 struct target_ops *tmp_bfd_target;
1453 volatile struct gdb_exception ex;
1457 /* Now we need to figure out where the dynamic linker was
1458 loaded so that we can load its symbols and place a breakpoint
1459 in the dynamic linker itself.
1461 This address is stored on the stack. However, I've been unable
1462 to find any magic formula to find it for Solaris (appears to
1463 be trivial on GNU/Linux). Therefore, we have to try an alternate
1464 mechanism to find the dynamic linker's base address. */
1466 TRY_CATCH (ex, RETURN_MASK_ALL)
1468 tmp_bfd = solib_bfd_open (interp_name);
1470 if (tmp_bfd == NULL)
1471 goto bkpt_at_symbol;
1473 /* Now convert the TMP_BFD into a target. That way target, as
1474 well as BFD operations can be used. Note that closing the
1475 target will also close the underlying bfd. */
1476 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1478 /* On a running target, we can get the dynamic linker's base
1479 address from the shared library table. */
1480 so = master_so_list ();
1483 if (svr4_same_1 (interp_name, so->so_original_name))
1485 load_addr_found = 1;
1486 loader_found_in_list = 1;
1487 load_addr = lm_addr_check (so, tmp_bfd);
1493 /* If we were not able to find the base address of the loader
1494 from our so_list, then try using the AT_BASE auxilliary entry. */
1495 if (!load_addr_found)
1496 if (target_auxv_search (¤t_target, AT_BASE, &load_addr) > 0)
1498 int addr_bit = gdbarch_addr_bit (target_gdbarch);
1500 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
1501 that `+ load_addr' will overflow CORE_ADDR width not creating
1502 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1505 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1507 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1508 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1511 gdb_assert (load_addr < space_size);
1513 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
1514 64bit ld.so with 32bit executable, it should not happen. */
1516 if (tmp_entry_point < space_size
1517 && tmp_entry_point + load_addr >= space_size)
1518 load_addr -= space_size;
1521 load_addr_found = 1;
1524 /* Otherwise we find the dynamic linker's base address by examining
1525 the current pc (which should point at the entry point for the
1526 dynamic linker) and subtracting the offset of the entry point.
1528 This is more fragile than the previous approaches, but is a good
1529 fallback method because it has actually been working well in
1531 if (!load_addr_found)
1533 struct regcache *regcache
1534 = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
1536 load_addr = (regcache_read_pc (regcache)
1537 - exec_entry_point (tmp_bfd, tmp_bfd_target));
1540 if (!loader_found_in_list)
1542 info->debug_loader_name = xstrdup (interp_name);
1543 info->debug_loader_offset_p = 1;
1544 info->debug_loader_offset = load_addr;
1545 solib_add (NULL, from_tty, ¤t_target, auto_solib_add);
1548 /* Record the relocated start and end address of the dynamic linker
1549 text and plt section for svr4_in_dynsym_resolve_code. */
1550 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1553 info->interp_text_sect_low =
1554 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1555 info->interp_text_sect_high =
1556 info->interp_text_sect_low
1557 + bfd_section_size (tmp_bfd, interp_sect);
1559 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1562 info->interp_plt_sect_low =
1563 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1564 info->interp_plt_sect_high =
1565 info->interp_plt_sect_low
1566 + bfd_section_size (tmp_bfd, interp_sect);
1569 /* Now try to set a breakpoint in the dynamic linker. */
1570 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1572 sym_addr = bfd_lookup_symbol (tmp_bfd, *bkpt_namep);
1578 /* Convert 'sym_addr' from a function pointer to an address.
1579 Because we pass tmp_bfd_target instead of the current
1580 target, this will always produce an unrelocated value. */
1581 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1585 /* We're done with both the temporary bfd and target. Remember,
1586 closing the target closes the underlying bfd. */
1587 target_close (tmp_bfd_target, 0);
1591 create_solib_event_breakpoint (target_gdbarch, load_addr + sym_addr);
1592 xfree (interp_name);
1596 /* For whatever reason we couldn't set a breakpoint in the dynamic
1597 linker. Warn and drop into the old code. */
1599 xfree (interp_name);
1600 warning (_("Unable to find dynamic linker breakpoint function.\n"
1601 "GDB will be unable to debug shared library initializers\n"
1602 "and track explicitly loaded dynamic code."));
1605 /* Scan through the lists of symbols, trying to look up the symbol and
1606 set a breakpoint there. Terminate loop when we/if we succeed. */
1608 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1610 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1611 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1613 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1614 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1617 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1622 if (!current_inferior ()->attach_flag)
1624 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1626 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1627 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1629 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1630 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1633 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1645 special_symbol_handling -- additional shared library symbol handling
1649 void special_symbol_handling ()
1653 Once the symbols from a shared object have been loaded in the usual
1654 way, we are called to do any system specific symbol handling that
1657 For SunOS4, this consisted of grunging around in the dynamic
1658 linkers structures to find symbol definitions for "common" symbols
1659 and adding them to the minimal symbol table for the runtime common
1662 However, for SVR4, there's nothing to do.
1667 svr4_special_symbol_handling (void)
1671 /* Read the ELF program headers from ABFD. Return the contents and
1672 set *PHDRS_SIZE to the size of the program headers. */
1675 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1677 Elf_Internal_Ehdr *ehdr;
1680 ehdr = elf_elfheader (abfd);
1682 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1683 if (*phdrs_size == 0)
1686 buf = xmalloc (*phdrs_size);
1687 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1688 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1697 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
1698 exec_bfd. Otherwise return 0.
1700 We relocate all of the sections by the same amount. This
1701 behavior is mandated by recent editions of the System V ABI.
1702 According to the System V Application Binary Interface,
1703 Edition 4.1, page 5-5:
1705 ... Though the system chooses virtual addresses for
1706 individual processes, it maintains the segments' relative
1707 positions. Because position-independent code uses relative
1708 addressesing between segments, the difference between
1709 virtual addresses in memory must match the difference
1710 between virtual addresses in the file. The difference
1711 between the virtual address of any segment in memory and
1712 the corresponding virtual address in the file is thus a
1713 single constant value for any one executable or shared
1714 object in a given process. This difference is the base
1715 address. One use of the base address is to relocate the
1716 memory image of the program during dynamic linking.
1718 The same language also appears in Edition 4.0 of the System V
1719 ABI and is left unspecified in some of the earlier editions.
1721 Decide if the objfile needs to be relocated. As indicated above, we will
1722 only be here when execution is stopped. But during attachment PC can be at
1723 arbitrary address therefore regcache_read_pc can be misleading (contrary to
1724 the auxv AT_ENTRY value). Moreover for executable with interpreter section
1725 regcache_read_pc would point to the interpreter and not the main executable.
1727 So, to summarize, relocations are necessary when the start address obtained
1728 from the executable is different from the address in auxv AT_ENTRY entry.
1730 [ The astute reader will note that we also test to make sure that
1731 the executable in question has the DYNAMIC flag set. It is my
1732 opinion that this test is unnecessary (undesirable even). It
1733 was added to avoid inadvertent relocation of an executable
1734 whose e_type member in the ELF header is not ET_DYN. There may
1735 be a time in the future when it is desirable to do relocations
1736 on other types of files as well in which case this condition
1737 should either be removed or modified to accomodate the new file
1738 type. - Kevin, Nov 2000. ] */
1741 svr4_exec_displacement (CORE_ADDR *displacementp)
1743 /* ENTRY_POINT is a possible function descriptor - before
1744 a call to gdbarch_convert_from_func_ptr_addr. */
1745 CORE_ADDR entry_point, displacement;
1747 if (exec_bfd == NULL)
1750 /* Therefore for ELF it is ET_EXEC and not ET_DYN. Both shared libraries
1751 being executed themselves and PIE (Position Independent Executable)
1752 executables are ET_DYN. */
1754 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1757 if (target_auxv_search (¤t_target, AT_ENTRY, &entry_point) <= 0)
1760 displacement = entry_point - bfd_get_start_address (exec_bfd);
1762 /* Verify the DISPLACEMENT candidate complies with the required page
1763 alignment. It is cheaper than the program headers comparison below. */
1765 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1767 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1769 /* p_align of PT_LOAD segments does not specify any alignment but
1770 only congruency of addresses:
1771 p_offset % p_align == p_vaddr % p_align
1772 Kernel is free to load the executable with lower alignment. */
1774 if ((displacement & (elf->minpagesize - 1)) != 0)
1778 /* Verify that the auxilliary vector describes the same file as exec_bfd, by
1779 comparing their program headers. If the program headers in the auxilliary
1780 vector do not match the program headers in the executable, then we are
1781 looking at a different file than the one used by the kernel - for
1782 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
1784 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1786 /* Be optimistic and clear OK only if GDB was able to verify the headers
1787 really do not match. */
1788 int phdrs_size, phdrs2_size, ok = 1;
1789 gdb_byte *buf, *buf2;
1792 buf = read_program_header (-1, &phdrs_size, &arch_size);
1793 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1794 if (buf != NULL && buf2 != NULL)
1796 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
1798 /* We are dealing with three different addresses. EXEC_BFD
1799 represents current address in on-disk file. target memory content
1800 may be different from EXEC_BFD as the file may have been prelinked
1801 to a different address after the executable has been loaded.
1802 Moreover the address of placement in target memory can be
1803 different from what the program headers in target memory say -
1804 this is the goal of PIE.
1806 Detected DISPLACEMENT covers both the offsets of PIE placement and
1807 possible new prelink performed after start of the program. Here
1808 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
1809 content offset for the verification purpose. */
1811 if (phdrs_size != phdrs2_size
1812 || bfd_get_arch_size (exec_bfd) != arch_size)
1814 else if (arch_size == 32
1815 && phdrs_size >= sizeof (Elf32_External_Phdr)
1816 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1818 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1819 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1820 CORE_ADDR displacement = 0;
1823 /* DISPLACEMENT could be found more easily by the difference of
1824 ehdr2->e_entry. But we haven't read the ehdr yet, and we
1825 already have enough information to compute that displacement
1826 with what we've read. */
1828 for (i = 0; i < ehdr2->e_phnum; i++)
1829 if (phdr2[i].p_type == PT_LOAD)
1831 Elf32_External_Phdr *phdrp;
1832 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1833 CORE_ADDR vaddr, paddr;
1834 CORE_ADDR displacement_vaddr = 0;
1835 CORE_ADDR displacement_paddr = 0;
1837 phdrp = &((Elf32_External_Phdr *) buf)[i];
1838 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1839 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1841 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1843 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1845 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1847 displacement_paddr = paddr - phdr2[i].p_paddr;
1849 if (displacement_vaddr == displacement_paddr)
1850 displacement = displacement_vaddr;
1855 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1857 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1859 Elf32_External_Phdr *phdrp;
1860 Elf32_External_Phdr *phdr2p;
1861 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1862 CORE_ADDR vaddr, paddr;
1863 asection *plt2_asect;
1865 phdrp = &((Elf32_External_Phdr *) buf)[i];
1866 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1867 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1868 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1870 /* PT_GNU_STACK is an exception by being never relocated by
1871 prelink as its addresses are always zero. */
1873 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1876 /* Check also other adjustment combinations - PR 11786. */
1878 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1880 vaddr -= displacement;
1881 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1883 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1885 paddr -= displacement;
1886 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1888 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1891 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
1892 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1896 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1899 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1900 & SEC_HAS_CONTENTS) != 0;
1902 filesz = extract_unsigned_integer (buf_filesz_p, 4,
1905 /* PLT2_ASECT is from on-disk file (exec_bfd) while
1906 FILESZ is from the in-memory image. */
1908 filesz += bfd_get_section_size (plt2_asect);
1910 filesz -= bfd_get_section_size (plt2_asect);
1912 store_unsigned_integer (buf_filesz_p, 4, byte_order,
1915 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1923 else if (arch_size == 64
1924 && phdrs_size >= sizeof (Elf64_External_Phdr)
1925 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
1927 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1928 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1929 CORE_ADDR displacement = 0;
1932 /* DISPLACEMENT could be found more easily by the difference of
1933 ehdr2->e_entry. But we haven't read the ehdr yet, and we
1934 already have enough information to compute that displacement
1935 with what we've read. */
1937 for (i = 0; i < ehdr2->e_phnum; i++)
1938 if (phdr2[i].p_type == PT_LOAD)
1940 Elf64_External_Phdr *phdrp;
1941 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1942 CORE_ADDR vaddr, paddr;
1943 CORE_ADDR displacement_vaddr = 0;
1944 CORE_ADDR displacement_paddr = 0;
1946 phdrp = &((Elf64_External_Phdr *) buf)[i];
1947 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1948 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1950 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
1952 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1954 paddr = extract_unsigned_integer (buf_paddr_p, 8,
1956 displacement_paddr = paddr - phdr2[i].p_paddr;
1958 if (displacement_vaddr == displacement_paddr)
1959 displacement = displacement_vaddr;
1964 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1966 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
1968 Elf64_External_Phdr *phdrp;
1969 Elf64_External_Phdr *phdr2p;
1970 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1971 CORE_ADDR vaddr, paddr;
1972 asection *plt2_asect;
1974 phdrp = &((Elf64_External_Phdr *) buf)[i];
1975 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1976 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1977 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
1979 /* PT_GNU_STACK is an exception by being never relocated by
1980 prelink as its addresses are always zero. */
1982 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1985 /* Check also other adjustment combinations - PR 11786. */
1987 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
1989 vaddr -= displacement;
1990 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
1992 paddr = extract_unsigned_integer (buf_paddr_p, 8,
1994 paddr -= displacement;
1995 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
1997 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2000 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2001 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2005 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2008 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2009 & SEC_HAS_CONTENTS) != 0;
2011 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2014 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2015 FILESZ is from the in-memory image. */
2017 filesz += bfd_get_section_size (plt2_asect);
2019 filesz -= bfd_get_section_size (plt2_asect);
2021 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2024 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2045 /* It can be printed repeatedly as there is no easy way to check
2046 the executable symbols/file has been already relocated to
2049 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2050 "displacement %s for \"%s\".\n"),
2051 paddress (target_gdbarch, displacement),
2052 bfd_get_filename (exec_bfd));
2055 *displacementp = displacement;
2059 /* Relocate the main executable. This function should be called upon
2060 stopping the inferior process at the entry point to the program.
2061 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2062 different, the main executable is relocated by the proper amount. */
2065 svr4_relocate_main_executable (void)
2067 CORE_ADDR displacement;
2069 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2070 probably contains the offsets computed using the PIE displacement
2071 from the previous run, which of course are irrelevant for this run.
2072 So we need to determine the new PIE displacement and recompute the
2073 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2074 already contains pre-computed offsets.
2076 If we cannot compute the PIE displacement, either:
2078 - The executable is not PIE.
2080 - SYMFILE_OBJFILE does not match the executable started in the target.
2081 This can happen for main executable symbols loaded at the host while
2082 `ld.so --ld-args main-executable' is loaded in the target.
2084 Then we leave the section offsets untouched and use them as is for
2087 - These section offsets were properly reset earlier, and thus
2088 already contain the correct values. This can happen for instance
2089 when reconnecting via the remote protocol to a target that supports
2090 the `qOffsets' packet.
2092 - The section offsets were not reset earlier, and the best we can
2093 hope is that the old offsets are still applicable to the new run. */
2095 if (! svr4_exec_displacement (&displacement))
2098 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2101 if (symfile_objfile)
2103 struct section_offsets *new_offsets;
2106 new_offsets = alloca (symfile_objfile->num_sections
2107 * sizeof (*new_offsets));
2109 for (i = 0; i < symfile_objfile->num_sections; i++)
2110 new_offsets->offsets[i] = displacement;
2112 objfile_relocate (symfile_objfile, new_offsets);
2118 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2119 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2120 (bfd_section_vma (exec_bfd, asect)
2129 svr4_solib_create_inferior_hook -- shared library startup support
2133 void svr4_solib_create_inferior_hook (int from_tty)
2137 When gdb starts up the inferior, it nurses it along (through the
2138 shell) until it is ready to execute it's first instruction. At this
2139 point, this function gets called via expansion of the macro
2140 SOLIB_CREATE_INFERIOR_HOOK.
2142 For SunOS executables, this first instruction is typically the
2143 one at "_start", or a similar text label, regardless of whether
2144 the executable is statically or dynamically linked. The runtime
2145 startup code takes care of dynamically linking in any shared
2146 libraries, once gdb allows the inferior to continue.
2148 For SVR4 executables, this first instruction is either the first
2149 instruction in the dynamic linker (for dynamically linked
2150 executables) or the instruction at "start" for statically linked
2151 executables. For dynamically linked executables, the system
2152 first exec's /lib/libc.so.N, which contains the dynamic linker,
2153 and starts it running. The dynamic linker maps in any needed
2154 shared libraries, maps in the actual user executable, and then
2155 jumps to "start" in the user executable.
2157 For both SunOS shared libraries, and SVR4 shared libraries, we
2158 can arrange to cooperate with the dynamic linker to discover the
2159 names of shared libraries that are dynamically linked, and the
2160 base addresses to which they are linked.
2162 This function is responsible for discovering those names and
2163 addresses, and saving sufficient information about them to allow
2164 their symbols to be read at a later time.
2168 Between enable_break() and disable_break(), this code does not
2169 properly handle hitting breakpoints which the user might have
2170 set in the startup code or in the dynamic linker itself. Proper
2171 handling will probably have to wait until the implementation is
2172 changed to use the "breakpoint handler function" method.
2174 Also, what if child has exit()ed? Must exit loop somehow.
2178 svr4_solib_create_inferior_hook (int from_tty)
2180 #if defined(_SCO_DS)
2181 struct inferior *inf;
2182 struct thread_info *tp;
2183 #endif /* defined(_SCO_DS) */
2184 struct svr4_info *info;
2186 info = get_svr4_info ();
2188 /* Relocate the main executable if necessary. */
2189 svr4_relocate_main_executable ();
2191 if (!svr4_have_link_map_offsets ())
2194 if (!enable_break (info, from_tty))
2197 #if defined(_SCO_DS)
2198 /* SCO needs the loop below, other systems should be using the
2199 special shared library breakpoints and the shared library breakpoint
2202 Now run the target. It will eventually hit the breakpoint, at
2203 which point all of the libraries will have been mapped in and we
2204 can go groveling around in the dynamic linker structures to find
2205 out what we need to know about them. */
2207 inf = current_inferior ();
2208 tp = inferior_thread ();
2210 clear_proceed_status ();
2211 inf->control.stop_soon = STOP_QUIETLY;
2212 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2215 target_resume (pid_to_ptid (-1), 0, tp->suspend.stop_signal);
2216 wait_for_inferior ();
2218 while (tp->suspend.stop_signal != TARGET_SIGNAL_TRAP);
2219 inf->control.stop_soon = NO_STOP_QUIETLY;
2220 #endif /* defined(_SCO_DS) */
2224 svr4_clear_solib (void)
2226 struct svr4_info *info;
2228 info = get_svr4_info ();
2229 info->debug_base = 0;
2230 info->debug_loader_offset_p = 0;
2231 info->debug_loader_offset = 0;
2232 xfree (info->debug_loader_name);
2233 info->debug_loader_name = NULL;
2237 svr4_free_so (struct so_list *so)
2239 xfree (so->lm_info->lm);
2240 xfree (so->lm_info);
2244 /* Clear any bits of ADDR that wouldn't fit in a target-format
2245 data pointer. "Data pointer" here refers to whatever sort of
2246 address the dynamic linker uses to manage its sections. At the
2247 moment, we don't support shared libraries on any processors where
2248 code and data pointers are different sizes.
2250 This isn't really the right solution. What we really need here is
2251 a way to do arithmetic on CORE_ADDR values that respects the
2252 natural pointer/address correspondence. (For example, on the MIPS,
2253 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2254 sign-extend the value. There, simply truncating the bits above
2255 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2256 be a new gdbarch method or something. */
2258 svr4_truncate_ptr (CORE_ADDR addr)
2260 if (gdbarch_ptr_bit (target_gdbarch) == sizeof (CORE_ADDR) * 8)
2261 /* We don't need to truncate anything, and the bit twiddling below
2262 will fail due to overflow problems. */
2265 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch)) - 1);
2270 svr4_relocate_section_addresses (struct so_list *so,
2271 struct target_section *sec)
2273 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so,
2275 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2280 /* Architecture-specific operations. */
2282 /* Per-architecture data key. */
2283 static struct gdbarch_data *solib_svr4_data;
2285 struct solib_svr4_ops
2287 /* Return a description of the layout of `struct link_map'. */
2288 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2291 /* Return a default for the architecture-specific operations. */
2294 solib_svr4_init (struct obstack *obstack)
2296 struct solib_svr4_ops *ops;
2298 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2299 ops->fetch_link_map_offsets = NULL;
2303 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2304 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2307 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2308 struct link_map_offsets *(*flmo) (void))
2310 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2312 ops->fetch_link_map_offsets = flmo;
2314 set_solib_ops (gdbarch, &svr4_so_ops);
2317 /* Fetch a link_map_offsets structure using the architecture-specific
2318 `struct link_map_offsets' fetcher. */
2320 static struct link_map_offsets *
2321 svr4_fetch_link_map_offsets (void)
2323 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2325 gdb_assert (ops->fetch_link_map_offsets);
2326 return ops->fetch_link_map_offsets ();
2329 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2332 svr4_have_link_map_offsets (void)
2334 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2336 return (ops->fetch_link_map_offsets != NULL);
2340 /* Most OS'es that have SVR4-style ELF dynamic libraries define a
2341 `struct r_debug' and a `struct link_map' that are binary compatible
2342 with the origional SVR4 implementation. */
2344 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2345 for an ILP32 SVR4 system. */
2347 struct link_map_offsets *
2348 svr4_ilp32_fetch_link_map_offsets (void)
2350 static struct link_map_offsets lmo;
2351 static struct link_map_offsets *lmp = NULL;
2357 lmo.r_version_offset = 0;
2358 lmo.r_version_size = 4;
2359 lmo.r_map_offset = 4;
2360 lmo.r_brk_offset = 8;
2361 lmo.r_ldsomap_offset = 20;
2363 /* Everything we need is in the first 20 bytes. */
2364 lmo.link_map_size = 20;
2365 lmo.l_addr_offset = 0;
2366 lmo.l_name_offset = 4;
2367 lmo.l_ld_offset = 8;
2368 lmo.l_next_offset = 12;
2369 lmo.l_prev_offset = 16;
2375 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2376 for an LP64 SVR4 system. */
2378 struct link_map_offsets *
2379 svr4_lp64_fetch_link_map_offsets (void)
2381 static struct link_map_offsets lmo;
2382 static struct link_map_offsets *lmp = NULL;
2388 lmo.r_version_offset = 0;
2389 lmo.r_version_size = 4;
2390 lmo.r_map_offset = 8;
2391 lmo.r_brk_offset = 16;
2392 lmo.r_ldsomap_offset = 40;
2394 /* Everything we need is in the first 40 bytes. */
2395 lmo.link_map_size = 40;
2396 lmo.l_addr_offset = 0;
2397 lmo.l_name_offset = 8;
2398 lmo.l_ld_offset = 16;
2399 lmo.l_next_offset = 24;
2400 lmo.l_prev_offset = 32;
2407 struct target_so_ops svr4_so_ops;
2409 /* Lookup global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
2410 different rule for symbol lookup. The lookup begins here in the DSO, not in
2411 the main executable. */
2413 static struct symbol *
2414 elf_lookup_lib_symbol (const struct objfile *objfile,
2416 const domain_enum domain)
2420 if (objfile == symfile_objfile)
2424 /* OBJFILE should have been passed as the non-debug one. */
2425 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2427 abfd = objfile->obfd;
2430 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2433 return lookup_global_symbol_from_objfile (objfile, name, domain);
2436 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2439 _initialize_svr4_solib (void)
2441 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2442 solib_svr4_pspace_data
2443 = register_program_space_data_with_cleanup (svr4_pspace_data_cleanup);
2445 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2446 svr4_so_ops.free_so = svr4_free_so;
2447 svr4_so_ops.clear_solib = svr4_clear_solib;
2448 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2449 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2450 svr4_so_ops.current_sos = svr4_current_sos;
2451 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2452 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2453 svr4_so_ops.bfd_open = solib_bfd_open;
2454 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2455 svr4_so_ops.same = svr4_same;
2456 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;