libpthread/nptl/sysdeps/unix/sysv/linux/fork.c
/* Copyright (C) 2002, 2003, 2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <[email protected]>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sysdep.h>
#include <tls.h>
#include "fork.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <errno.h>

unsigned long int *__fork_generation_pointer;


/* The singly-linked list of all currently registered fork handlers.  */
struct fork_handler *__fork_handlers;
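
/* For orientation only: the members of struct fork_handler used in this
   file are declared in "fork.h"; the sketch below lists just those members
   (member order and any further members are defined there, not here):

     struct fork_handler
     {
       struct fork_handler *next;
       void (*prepare_handler) (void);
       void (*parent_handler) (void);
       void (*child_handler) (void);
       unsigned int refcntr;
       int need_signal;
     };  */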


static void
fresetlockfiles (void)
{
  FILE *fp;
#ifdef __USE_STDIO_FUTEXES__
  for (fp = _stdio_openlist; fp != NULL; fp = fp->__nextopen)
    STDIO_INIT_MUTEX(fp->__lock);
#else
  pthread_mutexattr_t attr;

  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);

  for (fp = _stdio_openlist; fp != NULL; fp = fp->__nextopen)
    pthread_mutex_init(&fp->__lock, &attr);

  pthread_mutexattr_destroy(&attr);
#endif
}
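
/* Note: fresetlockfiles() is only ever called in the child after fork()
   (see below), where the new process has exactly one thread.  Simply
   re-initializing every FILE lock is therefore safe and also discards any
   lock state left behind by parent threads that held a stream locked at
   the time of the fork.  */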


pid_t
__libc_fork (void)
{
  pid_t pid;
  struct used_handler
  {
    struct fork_handler *handler;
    struct used_handler *next;
  } *allp = NULL;

  /* Run all the registered preparation handlers.  In reverse order.
     While doing this we build up a list of all the entries.  */
  struct fork_handler *runp;
  while ((runp = __fork_handlers) != NULL)
    {
      /* Make sure we read from the current RUNP pointer.  */
      atomic_full_barrier ();

      unsigned int oldval = runp->refcntr;

      if (oldval == 0)
        /* This means some other thread removed the list just after
           the pointer has been loaded.  Try again.  Either the list
           is empty or we can retry it.  */
        continue;

      /* Bump the reference counter.  */
      if (atomic_compare_and_exchange_bool_acq (&__fork_handlers->refcntr,
                                                oldval + 1, oldval))
        /* The value changed, try again.  */
        continue;

      /* We bumped the reference counter for the first entry in the
         list.  That means that none of the following entries will
         just go away.  The unloading code works in the order of the
         list.

         While executing the registered handlers we are building a
         list of all the entries so that we can go backward later on.  */
      while (1)
        {
          /* Execute the handler if there is one.  */
          if (runp->prepare_handler != NULL)
            runp->prepare_handler ();

          /* Create a new element for the list.  */
          struct used_handler *newp
            = (struct used_handler *) alloca (sizeof (*newp));
          newp->handler = runp;
          newp->next = allp;
          allp = newp;

          /* Advance to the next handler.  */
          runp = runp->next;
          if (runp == NULL)
            break;

          /* Bump the reference counter for the next entry.  */
          atomic_increment (&runp->refcntr);
        }

      /* We are done.  */
      break;
    }
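
  /* Illustrative sketch (not part of this file): handlers reach
     __fork_handlers via pthread_atfork(), whose companion register-atfork
     code pushes new entries onto the head of the list.  The walk above
     therefore runs prepare handlers in reverse order of registration,
     while ALLP ends up in registration order for the parent/child
     handlers below, as POSIX requires:

         pthread_atfork (prep1, parent1, child1);
         pthread_atfork (prep2, parent2, child2);
         pid_t p = fork ();
         // prepare: prep2, prep1
         // parent:  parent1, parent2   (in the parent)
         // child:   child1, child2     (in the child)
  */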

  __UCLIBC_IO_MUTEX_LOCK_CANCEL_UNSAFE(_stdio_openlist_add_lock);

#ifndef NDEBUG
  pid_t ppid = THREAD_GETMEM (THREAD_SELF, tid);
#endif

  /* We need to prevent the getpid() code from updating the PID field so
     that, if a signal arrives in the child very early and the signal
     handler uses getpid(), the value returned is correct.  */
  pid_t parentpid = THREAD_GETMEM (THREAD_SELF, pid);
  THREAD_SETMEM (THREAD_SELF, pid, -parentpid);
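
  /* Assumption about the companion getpid() code (not shown here): it is
     expected to fall back to the raw getpid syscall whenever the cached
     PID in the TCB is not a positive value, which is what makes the
     negation above safe against early signal handlers in the child.  */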

#ifdef ARCH_FORK
  pid = ARCH_FORK ();
#else
# error "ARCH_FORK must be defined so that the CLONE_SETTID flag is used"
  pid = INLINE_SYSCALL (fork, 0);
#endif
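
  /* Illustrative only: on a typical port ARCH_FORK expands to a clone call
     that makes the kernel store the child's TID into the TCB, roughly

       INLINE_SYSCALL (clone, 5,
                       CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD,
                       0, NULL, NULL, &THREAD_SELF->tid)

     The exact flag set and argument order are architecture-specific; the
     authoritative definition lives in the per-architecture headers.  */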


  if (pid == 0)
    {
      struct pthread *self = THREAD_SELF;

      assert (THREAD_GETMEM (self, tid) != ppid);

      if (__fork_generation_pointer != NULL)
        *__fork_generation_pointer += 4;
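
      /* The increment of 4 is deliberate: the low two bits of a
         pthread_once control word are status bits, so the fork generation
         occupies the remaining bits.  Bumping it lets pthread_once in the
         child notice that a fork interrupted an in-progress initialization
         (background on the NPTL pthread_once scheme; not implemented in
         this file).  */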

      /* Adjust the PID field for the new process.  */
      THREAD_SETMEM (self, pid, THREAD_GETMEM (self, tid));

#if HP_TIMING_AVAIL
      /* The CPU clock of the thread and process have to be set to zero.  */
      hp_timing_t now;
      HP_TIMING_NOW (now);
      THREAD_SETMEM (self, cpuclock_offset, now);
      GL(dl_cpuclock_offset) = now;
#endif

      /* Reset the file list.  These are recursive mutexes.  */
      fresetlockfiles ();

      /* Reset locks in the I/O code.  */
      STDIO_INIT_MUTEX(_stdio_openlist_add_lock);

      /* XXX reset any locks in dynamic loader */

      /* Run the handlers registered for the child.  */
      while (allp != NULL)
        {
          if (allp->handler->child_handler != NULL)
            allp->handler->child_handler ();

          /* Note that we do not have to wake any possible waiter.
             This is the only thread in the new process.  The count
             may have been bumped up by other threads doing a fork.
             We reset it to 1, to avoid waiting for non-existing
             thread(s) to release the count.  */
          allp->handler->refcntr = 1;

          /* XXX We could at this point look through the object pool
             and mark all objects not on the __fork_handlers list as
             unused.  This is necessary in case the fork() happened
             while another thread called dlclose() and that call had
             to create a new list.  */

          allp = allp->next;
        }

      /* Initialize the fork lock.  */
      __fork_lock = LLL_LOCK_INITIALIZER;
    }
  else
    {
      assert (THREAD_GETMEM (THREAD_SELF, tid) == ppid);

      /* Restore the PID value.  */
      THREAD_SETMEM (THREAD_SELF, pid, parentpid);

      /* We execute this even if the 'fork' call failed.  */
      __UCLIBC_IO_MUTEX_UNLOCK_CANCEL_UNSAFE(_stdio_openlist_add_lock);

      /* Run the handlers registered for the parent.  */
      while (allp != NULL)
        {
          if (allp->handler->parent_handler != NULL)
            allp->handler->parent_handler ();

          if (atomic_decrement_and_test (&allp->handler->refcntr)
              && allp->handler->need_signal)
            lll_futex_wake (allp->handler->refcntr, 1, LLL_PRIVATE);

          allp = allp->next;
        }
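
      /* The futex wake above pairs with the companion unregister-atfork
         code: a thread removing a handler sets need_signal and waits for
         refcntr to drain, so the last forking thread to drop its reference
         must wake it (background only; the waiting side is not part of
         this file).  */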
    }

  return pid;
}
weak_alias(__libc_fork,__fork)
libc_hidden_proto(fork)
weak_alias(__libc_fork,fork)
libc_hidden_weak(fork)
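
/* Usage sketch (illustrative, not part of this file): a minimal program
   exercising the machinery above through the public interfaces.  The
   handler names and messages are made up for the example.

     #include <pthread.h>
     #include <stdio.h>
     #include <sys/types.h>
     #include <sys/wait.h>
     #include <unistd.h>

     static void prep (void)   { puts ("prepare: before fork, in caller"); }
     static void parent (void) { puts ("parent: after fork, in parent"); }
     static void child (void)  { puts ("child: after fork, in new process"); }

     int
     main (void)
     {
       pthread_atfork (prep, parent, child);
       pid_t pid = fork ();           // ends up in __libc_fork above
       if (pid == 0)
         _exit (0);
       waitpid (pid, NULL, 0);
       return 0;
     }
*/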