/* GNU/Linux native-dependent code common to multiple platforms.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of GDB.
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
+#include "solib.h"
#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
if (!detach_fork)
linux_enable_event_reporting (pid_to_ptid (child_pid));
+ if (has_vforked
+ && !non_stop /* Non-stop always resumes both branches. */
+ && (!target_is_async_p () || sync_execution)
+ && !(follow_child || detach_fork || sched_multi))
+ {
+ /* The parent stays blocked inside the vfork syscall until the
+ child execs or exits. If we don't let the child run, then
+ the parent stays blocked. If we're telling the parent to run
+ in the foreground, the user will not be able to ctrl-c to get
+ back the terminal, effectively hanging the debug session. */
+ fprintf_filtered (gdb_stderr, _("\
+Can not resume the parent process over vfork in the foreground while \n\
+holding the child stopped. Try \"set detach-on-fork\" or \
+\"set schedule-multiple\".\n"));
+ return 1;
+ }
+
if (! follow_child)
{
- /* We're already attached to the parent, by default. */
+ struct lwp_info *child_lp = NULL;
- /* Before detaching from the child, remove all breakpoints from
- it. If we forked, then this has already been taken care of
- by infrun.c. If we vforked however, any breakpoint inserted
- in the parent is visible in the child, even those added while
- stopped in a vfork catchpoint. This won't actually modify
- the breakpoint list, but will physically remove the
- breakpoints from the child. This will remove the breakpoints
- from the parent also, but they'll be reinserted below. */
- if (has_vforked)
- detach_breakpoints (child_pid);
+ /* We're already attached to the parent, by default. */
/* Detach new forked process? */
if (detach_fork)
{
+ /* Before detaching from the child, remove all breakpoints
+ from it. If we forked, then this has already been taken
+ care of by infrun.c. If we vforked, however, any
+ breakpoint inserted in the parent is visible in the
+ child, even those added while stopped in a vfork
+ catchpoint. This will remove the breakpoints from the
+ parent also, but they'll be reinserted below. */
+ if (has_vforked)
+ {
+ /* Keep the breakpoint list in sync. */
+ remove_breakpoints_pid (GET_PID (inferior_ptid));
+ }
+
if (info_verbose || debug_linux_nat)
{
target_terminal_ours ();
else
{
struct inferior *parent_inf, *child_inf;
- struct lwp_info *lp;
struct cleanup *old_chain;
/* Add process to GDB's tables. */
copy_terminal_info (child_inf, parent_inf);
old_chain = save_inferior_ptid ();
+ save_current_program_space ();
inferior_ptid = ptid_build (child_pid, child_pid, 0);
add_thread (inferior_ptid);
- lp = add_lwp (inferior_ptid);
- lp->stopped = 1;
+ child_lp = add_lwp (inferior_ptid);
+ child_lp->stopped = 1;
+ child_lp->resumed = 1;
+ /* If this is a vfork child, then the address-space is
+ shared with the parent. */
+ if (has_vforked)
+ {
+ child_inf->pspace = parent_inf->pspace;
+ child_inf->aspace = parent_inf->aspace;
+
+ /* The parent will be frozen until the child is done
+ with the shared region. Keep track of the
+ parent. */
+ child_inf->vfork_parent = parent_inf;
+ child_inf->pending_detach = 0;
+ parent_inf->vfork_child = child_inf;
+ parent_inf->pending_detach = 0;
+ }
+ else
+ {
+ child_inf->aspace = new_address_space ();
+ child_inf->pspace = add_program_space (child_inf->aspace);
+ child_inf->removable = 1;
+ set_current_program_space (child_inf->pspace);
+ clone_program_space (child_inf->pspace, parent_inf->pspace);
+
+ /* Let the shared library layer (solib-svr4) learn about
+ this new process, relocate the cloned exec, pull in
+ shared libraries, and install the solib event
+ breakpoint. If a "cloned-VM" event were propagated
+ better throughout the core, this wouldn't be
+ required. */
+ solib_create_inferior_hook (0);
+ }
+
+ /* Let the thread_db layer learn about this new process. */
check_for_thread_db ();
do_cleanups (old_chain);
if (has_vforked)
{
+ struct lwp_info *lp;
+ struct inferior *parent_inf;
+
+ parent_inf = current_inferior ();
+
+ /* If we detached from the child, then we have to be careful
+ to not insert breakpoints in the parent until the child
+ is done with the shared memory region. However, if we're
+ staying attached to the child, then we can and should
+ insert breakpoints, so that we can debug it. A
+ subsequent child exec or exit is enough to know when the child
+ stops using the parent's address space. */
+ parent_inf->waiting_for_vfork_done = detach_fork;
+ parent_inf->pspace->breakpoints_not_allowed = detach_fork;
+
+ lp = find_lwp_pid (pid_to_ptid (parent_pid));
gdb_assert (linux_supports_tracefork_flag >= 0);
if (linux_supports_tracevforkdone (0))
{
- int status;
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LCFF: waiting for VFORK_DONE on %d\n",
+ parent_pid);
+
+ lp->stopped = 1;
+ lp->resumed = 1;
- ptrace (PTRACE_CONT, parent_pid, 0, 0);
- my_waitpid (parent_pid, &status, __WALL);
- if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
- warning (_("Unexpected waitpid result %06x when waiting for "
- "vfork-done"), status);
+ /* We'll handle the VFORK_DONE event like any other
+ event, in target_wait. */
}
else
{
is only the single-step breakpoint at vfork's return
point. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LCFF: no VFORK_DONE support, sleeping a bit\n");
+
usleep (10000);
- }
- /* Since we vforked, breakpoints were removed in the parent
- too. Put them back. */
- reattach_breakpoints (parent_pid);
+ /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
+ and leave it pending. The next linux_nat_resume call
+ will notice a pending event, and bypass actually
+ resuming the inferior. */
+ lp->status = 0;
+ lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
+ lp->stopped = 0;
+ lp->resumed = 1;
+
+ /* If we're in async mode, we need to tell the event loop
+ there's something here to process. */
+ if (target_can_async_p ())
+ async_file_mark ();
+ }
}
}
else
struct thread_info *tp;
struct inferior *parent_inf, *child_inf;
struct lwp_info *lp;
-
- /* Before detaching from the parent, remove all breakpoints from it. */
- remove_breakpoints ();
+ struct program_space *parent_pspace;
if (info_verbose || debug_linux_nat)
{
target_terminal_ours ();
- fprintf_filtered (gdb_stdlog,
- "Attaching after fork to child process %d.\n",
- child_pid);
+ if (has_vforked)
+ fprintf_filtered (gdb_stdlog, _("\
+Attaching after process %d vfork to child process %d.\n"),
+ parent_pid, child_pid);
+ else
+ fprintf_filtered (gdb_stdlog, _("\
+Attaching after process %d fork to child process %d.\n"),
+ parent_pid, child_pid);
}
/* Add the new inferior first, so that the target_detach below
child_inf->attach_flag = parent_inf->attach_flag;
copy_terminal_info (child_inf, parent_inf);
- /* If we're vforking, we may want to hold on to the parent until
- the child exits or execs. At exec time we can remove the old
- breakpoints from the parent and detach it; at exit time we
- could do the same (or even, sneakily, resume debugging it - the
- child's exec has failed, or something similar).
-
- This doesn't clean up "properly", because we can't call
- target_detach, but that's OK; if the current target is "child",
- then it doesn't need any further cleanups, and lin_lwp will
- generally not encounter vfork (vfork is defined to fork
- in libpthread.so).
+ parent_pspace = parent_inf->pspace;
- The holding part is very easy if we have VFORKDONE events;
- but keeping track of both processes is beyond GDB at the
- moment. So we don't expose the parent to the rest of GDB.
- Instead we quietly hold onto it until such time as we can
- safely resume it. */
+ /* If we're vforking, we want to hold on to the parent until the
+ child exits or execs. At child exec or exit time we can
+ remove the old breakpoints from the parent and detach or
+ resume debugging it. Otherwise, detach the parent now; we'll
+ want to reuse its program/address spaces, but we can't set
+ them to the child before removing breakpoints from the
+ parent; otherwise, the breakpoints module could decide to
+ remove breakpoints from the wrong process (since they'd be
+ assigned to the same address space). */
if (has_vforked)
{
- struct lwp_info *parent_lwp;
-
- linux_parent_pid = parent_pid;
-
- /* Get rid of the inferior on the core side as well. */
- inferior_ptid = null_ptid;
- detach_inferior (parent_pid);
-
- /* Also get rid of all its lwps. We will detach from this
- inferior soon-ish, but, we will still get an exit event
- reported through waitpid when it exits. If we didn't get
- rid of the lwps from our list, we would end up reporting
- the inferior exit to the core, which would then try to
- mourn a non-existing (from the core's perspective)
- inferior. */
- parent_lwp = find_lwp_pid (pid_to_ptid (parent_pid));
- purge_lwp_list (GET_PID (parent_lwp->ptid));
- linux_parent_pid = parent_pid;
+ gdb_assert (child_inf->vfork_parent == NULL);
+ gdb_assert (parent_inf->vfork_child == NULL);
+ child_inf->vfork_parent = parent_inf;
+ child_inf->pending_detach = 0;
+ parent_inf->vfork_child = child_inf;
+ parent_inf->pending_detach = detach_fork;
+ parent_inf->waiting_for_vfork_done = 0;
}
else if (detach_fork)
target_detach (NULL, 0);
+ /* Note that the detach above leaves PARENT_INF dangling. */
+
+ /* Add the child thread to the appropriate lists, and switch to
+ this new thread, before cloning the program space, and
+ informing the solib layer about this new process. */
+
inferior_ptid = ptid_build (child_pid, child_pid, 0);
add_thread (inferior_ptid);
lp = add_lwp (inferior_ptid);
lp->stopped = 1;
+ lp->resumed = 1;
+ /* If this is a vfork child, then the address-space is shared
+ with the parent. If we detached from the parent, then we can
+ reuse the parent's program/address spaces. */
+ if (has_vforked || detach_fork)
+ {
+ child_inf->pspace = parent_pspace;
+ child_inf->aspace = child_inf->pspace->aspace;
+ }
+ else
+ {
+ child_inf->aspace = new_address_space ();
+ child_inf->pspace = add_program_space (child_inf->aspace);
+ child_inf->removable = 1;
+ set_current_program_space (child_inf->pspace);
+ clone_program_space (child_inf->pspace, parent_pspace);
+
+ /* Let the shared library layer (solib-svr4) learn about
+ this new process, relocate the cloned exec, pull in
+ shared libraries, and install the solib event breakpoint.
+ If a "cloned-VM" event were propagated better throughout
+ the core, this wouldn't be required. */
+ solib_create_inferior_hook (0);
+ }
+
+ /* Let the thread_db layer learn about this new process. */
check_for_thread_db ();
}
return buf;
}
-/* Initialize the list of LWPs. Note that this module, contrary to
- what GDB's generic threads layer does for its thread list,
- re-initializes the LWP lists whenever we mourn or detach (which
- doesn't involve mourning) the inferior. */
-
-static void
-init_lwp_list (void)
-{
- struct lwp_info *lp, *lpnext;
-
- for (lp = lwp_list; lp; lp = lpnext)
- {
- lpnext = lp->next;
- xfree (lp);
- }
-
- lwp_list = NULL;
-}
-
/* Remove all LWPs belong to PID from the lwp list. */
static void
lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
lp->ptid = ptid;
+ lp->core = -1;
lp->next = lwp_list;
lwp_list = lp;
*cloned = 1;
}
- gdb_assert (pid == new_pid && WIFSTOPPED (status));
+ gdb_assert (pid == new_pid);
+
+ if (!WIFSTOPPED (status))
+ {
+ /* The pid we tried to attach to has apparently just exited. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
+ pid, status_to_str (status));
+ return status;
+ }
if (WSTOPSIG (status) != SIGSTOP)
{
target_pid_to_str (ptid));
status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
+ if (!WIFSTOPPED (status))
+ return -1;
+
lp = add_lwp (ptid);
lp->stopped = 1;
lp->cloned = cloned;
status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
&lp->signalled);
+ if (!WIFSTOPPED (status))
+ {
+ if (WIFEXITED (status))
+ {
+ int exit_code = WEXITSTATUS (status);
+
+ target_terminal_ours ();
+ target_mourn_inferior ();
+ if (exit_code == 0)
+ error (_("Unable to attach: program exited normally."));
+ else
+ error (_("Unable to attach: program exited with code %d."),
+ exit_code);
+ }
+ else if (WIFSIGNALED (status))
+ {
+ enum target_signal signo;
+
+ target_terminal_ours ();
+ target_mourn_inferior ();
+
+ signo = target_signal_from_host (WTERMSIG (status));
+ error (_("Unable to attach: program terminated with signal "
+ "%s, %s."),
+ target_signal_to_name (signo),
+ target_signal_to_string (signo));
+ }
+
+ internal_error (__FILE__, __LINE__,
+ _("unexpected status %d for PID %ld"),
+ status, (long) GET_LWP (ptid));
+ }
+
lp->stopped = 1;
/* Save the wait status to report later. */
static int
resume_callback (struct lwp_info *lp, void *data)
{
- if (lp->stopped && lp->status == 0)
+ struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
+
+ if (lp->stopped && inf->vfork_child != NULL)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "RC: Not resuming %s (vfork parent)\n",
+ target_pid_to_str (lp->ptid));
+ }
+ else if (lp->stopped && lp->status == 0)
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
lp->stopped = 0;
lp->step = 0;
memset (&lp->siginfo, 0, sizeof (lp->siginfo));
+ lp->stopped_by_watchpoint = 0;
}
else if (lp->stopped && debug_linux_nat)
fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
}
}
- if (lp->status)
+ if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
{
/* FIXME: What should we do if we are supposed to continue
this thread with a signal? */
linux_ops->to_resume (linux_ops, ptid, step, signo);
memset (&lp->siginfo, 0, sizeof (lp->siginfo));
+ lp->stopped_by_watchpoint = 0;
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
target_async (inferior_event_handler, 0);
}
-/* Issue kill to specified lwp. */
-
-static int tkill_failed;
+/* Send a signal to an LWP. */
static int
kill_lwp (int lwpid, int signo)
{
- errno = 0;
-
-/* Use tkill, if possible, in case we are using nptl threads. If tkill
- fails, then we are not using nptl threads and we should be using kill. */
+ /* Use tkill, if possible, in case we are using nptl threads. If tkill
+ fails, then we are not using nptl threads and we should be using kill. */
#ifdef HAVE_TKILL_SYSCALL
- if (!tkill_failed)
- {
- int ret = syscall (__NR_tkill, lwpid, signo);
- if (errno != ENOSYS)
- return ret;
- errno = 0;
- tkill_failed = 1;
- }
+ {
+ static int tkill_failed;
+
+ if (!tkill_failed)
+ {
+ int ret;
+
+ errno = 0;
+ ret = syscall (__NR_tkill, lwpid, signo);
+ if (errno != ENOSYS)
+ return ret;
+ tkill_failed = 1;
+ }
+ }
#endif
return kill (lwpid, signo);
ourstatus->value.execd_pathname
= xstrdup (linux_child_pid_to_exec_file (pid));
- if (linux_parent_pid)
+ return 0;
+ }
+
+ if (event == PTRACE_EVENT_VFORK_DONE)
+ {
+ if (current_inferior ()->waiting_for_vfork_done)
{
- detach_breakpoints (linux_parent_pid);
- ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "\
+LHEW: Got expected PTRACE_EVENT_VFORK_DONE from LWP %ld: stopping\n",
+ GET_LWP (lp->ptid));
- linux_parent_pid = 0;
+ ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
+ return 0;
}
- /* At this point, all inserted breakpoints are gone. Doing this
- as soon as we detect an exec prevents the badness of deleting
- a breakpoint writing the current "shadow contents" to lift
- the bp. That shadow is NOT valid after an exec.
-
- Note that we have to do this after the detach_breakpoints
- call above, otherwise breakpoints wouldn't be lifted from the
- parent on a vfork, because detach_breakpoints would think
- that breakpoints are not inserted. */
- mark_breakpoints_out ();
- return 0;
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "\
+LHEW: Got PTRACE_EVENT_VFORK_DONE from LWP %ld: resuming\n",
+ GET_LWP (lp->ptid));
+ ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
+ return 1;
}
internal_error (__FILE__, __LINE__,
}
}
+/* Fetch the possibly triggered data watchpoint info and store it in
+ LP.
+
+ On some archs, like x86, which use debug registers to set
+ watchpoints, the way to know which watched address trapped is to
+ check the register that selects which address to watch. The
+ problem is that between setting the watchpoint and reading back
+ which data address trapped, the user may change the set of
+ watchpoints and, as a consequence, GDB changes the debug
+ registers in the inferior. To avoid reading back a stale
+ stopped-data-address when that happens, we cache in LP the fact
+ that a watchpoint trapped, and the corresponding data address, as
+ soon as we see LP stop with a SIGTRAP. If GDB changes the debug
+ registers meanwhile, we have the cached data we can rely on. */
+
+static void
+save_sigtrap (struct lwp_info *lp)
+{
+ struct cleanup *old_chain;
+
+ if (linux_ops->to_stopped_by_watchpoint == NULL)
+ {
+ lp->stopped_by_watchpoint = 0;
+ return;
+ }
+
+ old_chain = save_inferior_ptid ();
+ inferior_ptid = lp->ptid;
+
+ lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
+
+ if (lp->stopped_by_watchpoint)
+ {
+ if (linux_ops->to_stopped_data_address != NULL)
+ lp->stopped_data_address_p =
+ linux_ops->to_stopped_data_address (&current_target,
+ &lp->stopped_data_address);
+ else
+ lp->stopped_data_address_p = 0;
+ }
+
+ do_cleanups (old_chain);
+}
+
+/* See save_sigtrap. */
+
+static int
+linux_nat_stopped_by_watchpoint (void)
+{
+ struct lwp_info *lp = find_lwp_pid (inferior_ptid);
+
+ gdb_assert (lp != NULL);
+
+ return lp->stopped_by_watchpoint;
+}
+
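+/* Store in *ADDR_P the data address that caused the watchpoint trap
+ for the current thread, as cached by save_sigtrap, and return
+ whether such an address is available. See save_sigtrap. */
+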
+static int
+linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
+{
+ struct lwp_info *lp = find_lwp_pid (inferior_ptid);
+
+ gdb_assert (lp != NULL);
+
+ *addr_p = lp->stopped_data_address;
+
+ return lp->stopped_data_address_p;
+}
+
/* Wait until LP is stopped. */
static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
+ struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
+
+ /* If this is a vfork parent, bail out; it is not going to report
+ any SIGSTOP until the vfork is done. */
+ if (inf->vfork_child != NULL)
+ return 0;
+
if (!lp->stopped)
{
int status;
/* Save the trap's siginfo in case we need it later. */
save_siginfo (lp);
+ save_sigtrap (lp);
+
/* Now resume this LWP and get the SIGSTOP event. */
errno = 0;
ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
CORE_ADDR pc;
pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
- if (breakpoint_inserted_here_p (pc))
+ if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
return NULL;
}
- /* Save the trap's siginfo in case we need it later. */
if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
- save_siginfo (lp);
+ {
+ /* Save the trap's siginfo in case we need it later. */
+ save_siginfo (lp);
+
+ save_sigtrap (lp);
+ }
/* Check if the thread has exited. */
if ((WIFEXITED (status) || WIFSIGNALED (status))
fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
restore_child_signals_mask (&prev_mask);
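+ /* Cache the core this event was reported on; it is read back
+ later via linux_nat_core_of_thread. */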
+ lp->core = linux_nat_core_of_thread_1 (lp->ptid);
return lp->ptid;
}
LONGEST xfer;
if (object == TARGET_OBJECT_AUXV)
- return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
+ return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
offset, len);
if (object == TARGET_OBJECT_OSDATA)
linux_ops->to_close (quitting);
}
+/* When requests are passed down from the linux-nat layer to the
+ single-threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
+ used. The address space pointer is stored in the inferior object,
+ but the common code that is passed such a ptid can't tell whether
+ lwpid is a "main" process id or not (it assumes so). We reverse
+ look up the "main" process id from the lwp here. */
+
+struct address_space *
+linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
+{
+ struct lwp_info *lwp;
+ struct inferior *inf;
+ int pid;
+
+ pid = GET_LWP (ptid);
+ if (GET_LWP (ptid) == 0)
+ {
+ /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
+ tgid. */
+ lwp = find_lwp_pid (ptid);
+ pid = GET_PID (lwp->ptid);
+ }
+ else
+ {
+ /* A (pid,lwpid,0) ptid. */
+ pid = GET_PID (ptid);
+ }
+
+ inf = find_inferior_pid (pid);
+ gdb_assert (inf != NULL);
+ return inf->aspace;
+}
+
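+/* Return the processor core that thread PTID was last seen running
+ on, by parsing /proc/PID/task/LWP/stat, or -1 if it can't be
+ determined. */
+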
+int
+linux_nat_core_of_thread_1 (ptid_t ptid)
+{
+ struct cleanup *back_to;
+ char *filename;
+ FILE *f;
+ char *content = NULL;
+ char *p;
+ char *ts = 0;
+ int content_read = 0;
+ int i;
+ int core;
+
+ filename = xstrprintf ("/proc/%d/task/%ld/stat",
+ GET_PID (ptid), GET_LWP (ptid));
+ back_to = make_cleanup (xfree, filename);
+
+ f = fopen (filename, "r");
+ if (!f)
+ {
+ do_cleanups (back_to);
+ return -1;
+ }
+
+ make_cleanup_fclose (f);
+
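+ /* Read the whole stat file into CONTENT, growing the buffer in 1k
+ chunks; stop at the first short read. */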
+ for (;;)
+ {
+ int n;
+ content = xrealloc (content, content_read + 1024);
+ n = fread (content + content_read, 1, 1024, f);
+ content_read += n;
+ if (n < 1024)
+ {
+ content[content_read] = '\0';
+ break;
+ }
+ }
+
+ make_cleanup (xfree, content);
+
+ p = strchr (content, '(');
+ p = strchr (p, ')') + 2; /* Skip ")" and the whitespace after it. */
+
+ /* If the first field after the program name has index 0, then the
+ core number is the field with index 36. There's no constant for
+ that anywhere. */
+ p = strtok_r (p, " ", &ts);
+ for (i = 0; i != 36; ++i)
+ p = strtok_r (NULL, " ", &ts);
+
+ if (sscanf (p, "%d", &core) == 0)
+ core = -1;
+
+ do_cleanups (back_to);
+
+ return core;
+}
+
+/* Return the cached value of the processor core for thread PTID. */
+
+int
+linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
+{
+ struct lwp_info *info = find_lwp_pid (ptid);
+ if (info)
+ return info->core;
+ return -1;
+}
+
void
linux_nat_add_target (struct target_ops *t)
{
t->to_thread_alive = linux_nat_thread_alive;
t->to_pid_to_str = linux_nat_pid_to_str;
t->to_has_thread_control = tc_schedlock;
+ t->to_thread_address_space = linux_nat_thread_address_space;
+ t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
+ t->to_stopped_data_address = linux_nat_stopped_data_address;
t->to_can_async_p = linux_nat_can_async_p;
t->to_is_async_p = linux_nat_is_async_p;
t->to_supports_multi_process = linux_nat_supports_multi_process;
+ t->to_core_of_thread = linux_nat_core_of_thread;
+
/* We don't change the stratum; this target will sit at
process_stratum and thread_db will set at thread_stratum. This
is a little strange, since this is a multi-threaded-capable