/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

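/* Sockets with at least one in-flight SCM_RIGHTS reference sit on
 * gc_inflight_list; gc_candidates is scratch space used only while a
 * collection runs.  Both lists and unix_tot_inflight are protected by
 * unix_gc_lock, and unix_gc_wait lets senders sleep until a running
 * collection has finished.
 */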
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;

struct sock *unix_get_socket(struct file *filp)
{
        struct sock *u_sock = NULL;
        struct inode *inode = file_inode(filp);

        /* Socket ? */
        if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
                struct socket *sock = SOCKET_I(inode);
                struct sock *s = sock->sk;

                /* PF_UNIX ? */
                if (s && sock->ops && sock->ops->family == PF_UNIX)
                        u_sock = s;
        }
        return u_sock;
}

/* Keep the number of times in flight count for the file
 * descriptor if it is for an AF_UNIX socket.
 */

void unix_inflight(struct user_struct *user, struct file *fp)
{
        struct sock *s = unix_get_socket(fp);

        spin_lock(&unix_gc_lock);

        if (s) {
                struct unix_sock *u = unix_sk(s);

                if (atomic_long_inc_return(&u->inflight) == 1) {
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &gc_inflight_list);
                } else {
                        BUG_ON(list_empty(&u->link));
                }
                unix_tot_inflight++;
        }
        user->unix_inflight++;
        spin_unlock(&unix_gc_lock);
}

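/* Drop the in-flight reference taken by unix_inflight() once the
 * SCM_RIGHTS reference to fp has been released, i.e. when the skb
 * carrying it has been received or destroyed.
 */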
void unix_notinflight(struct user_struct *user, struct file *fp)
{
        struct sock *s = unix_get_socket(fp);

        spin_lock(&unix_gc_lock);

        if (s) {
                struct unix_sock *u = unix_sk(s);

                BUG_ON(list_empty(&u->link));

                if (atomic_long_dec_and_test(&u->inflight))
                        list_del_init(&u->link);
                unix_tot_inflight--;
        }
        user->unix_inflight--;
        spin_unlock(&unix_gc_lock);
}

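/* Walk the SCM_RIGHTS descriptors queued on x's receive queue and call
 * func() on every AF_UNIX socket found there that is a GC candidate.
 * If hitlist is non-NULL, skbs referencing at least one candidate are
 * unlinked from the queue and collected on hitlist.
 */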
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        struct sk_buff *skb;
        struct sk_buff *next;

        spin_lock(&x->sk_receive_queue.lock);
        skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                /* Do we have file descriptors ? */
                if (UNIXCB(skb).fp) {
                        bool hit = false;
                        /* Process the descriptors of this socket */
                        int nfd = UNIXCB(skb).fp->count;
                        struct file **fp = UNIXCB(skb).fp->fp;

                        while (nfd--) {
                                /* Get the socket the fd matches if it indeed does so */
                                struct sock *sk = unix_get_socket(*fp++);

                                if (sk) {
                                        struct unix_sock *u = unix_sk(sk);

                                        /* Ignore non-candidates, they could
                                         * have been added to the queues after
                                         * starting the garbage collection
                                         */
                                        if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
                                                hit = true;

                                                func(u);
                                        }
                                }
                        }
                        if (hit && hitlist != NULL) {
                                __skb_unlink(skb, &x->sk_receive_queue);
                                __skb_queue_tail(hitlist, skb);
                        }
                }
        }
        spin_unlock(&x->sk_receive_queue.lock);
}

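/* Apply scan_inflight() to x or, for a listening socket, to every
 * not-yet-accepted (embryo) socket sitting on its receive queue.
 */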
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        if (x->sk_state != TCP_LISTEN) {
                scan_inflight(x, func, hitlist);
        } else {
                struct sk_buff *skb;
                struct sk_buff *next;
                struct unix_sock *u;
                LIST_HEAD(embryos);

                /* For a listening socket collect the queued embryos
                 * and perform a scan on them as well.
                 */
                spin_lock(&x->sk_receive_queue.lock);
                skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                        u = unix_sk(skb->sk);

                        /* An embryo cannot be in-flight, so it's safe
                         * to use the list link.
                         */
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &embryos);
                }
                spin_unlock(&x->sk_receive_queue.lock);

                while (!list_empty(&embryos)) {
                        u = list_entry(embryos.next, struct unix_sock, link);
                        scan_inflight(&u->sk, func, hitlist);
                        list_del_init(&u->link);
                }
        }
}

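/* Callbacks handed to scan_children() by unix_gc() below: dec_inflight()
 * and inc_inflight() adjust a child's in-flight count, while
 * inc_inflight_move_tail() additionally requeues possible cycle members
 * so that they get rescanned.
 */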
static void dec_inflight(struct unix_sock *usk)
{
        atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
        atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
        atomic_long_inc(&u->inflight);
        /* If this still might be part of a cycle, move it to the end
         * of the list, so that it's checked even if it was already
         * passed over
         */
        if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
                list_move_tail(&u->link, &gc_candidates);
}

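/* Senders call wait_for_unix_gc() (from the unix_*_sendmsg() paths) so
 * that a flood of descriptor passing cannot outrun the collector: once
 * more than UNIX_INFLIGHT_TRIGGER_GC descriptors are in flight a
 * collection is forced, and any collection already in progress is
 * waited for before more descriptors are queued.
 */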
static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
        /* If number of inflight sockets is insane,
         * force a garbage collect right now.
         */
        if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
                unix_gc();
        wait_event(unix_gc_wait, gc_in_progress == false);
}

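/* Overview of the cycle collecting algorithm (Miklos Szeredi, 2007)
 * implemented below:
 *
 *  1. Candidate selection: an in-flight socket whose file reference
 *     count equals its in-flight count is reachable only through
 *     descriptors sitting in socket receive queues, never through an
 *     open file descriptor in user space.
 *  2. For every candidate, decrement the in-flight count of each
 *     candidate reachable from its receive queue.  A candidate that
 *     still has a non-zero count afterwards is referenced from outside
 *     the candidate set.
 *  3. Such externally reachable candidates are moved to not_cycle_list
 *     and the counts of their children restored; iterating this to a
 *     fixed point leaves in gc_candidates only sockets that keep each
 *     other alive through cycles of queued descriptors (for example two
 *     sockets that were sent to each other with SCM_RIGHTS and then
 *     closed).
 *  4. The skbs carrying those descriptors are collected on a hitlist
 *     and purged, which drops the cyclic references and lets the
 *     sockets be freed normally.
 */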
/* The external entry point: unix_gc() */
void unix_gc(void)
{
        struct unix_sock *u;
        struct unix_sock *next;
        struct sk_buff_head hitlist;
        struct list_head cursor;
        LIST_HEAD(not_cycle_list);

        spin_lock(&unix_gc_lock);

        /* Avoid a recursive GC. */
        if (gc_in_progress)
                goto out;

        gc_in_progress = true;
        /* First, select candidates for garbage collection.  Only
         * in-flight sockets are considered, and from those only ones
         * which don't have any external reference.
         *
         * Holding unix_gc_lock will protect these candidates from
         * being detached, and hence from gaining an external
         * reference.  Since there are no possible receivers, all
         * buffers currently on the candidates' queues stay there
         * during the garbage collection.
         *
         * We also know that no new candidate can be added onto the
         * receive queues.  Other, non candidate sockets _can_ be
         * added to queue, so we must make sure only to touch
         * candidates.
         */
        list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
                long total_refs;
                long inflight_refs;

                total_refs = file_count(u->sk.sk_socket->file);
                inflight_refs = atomic_long_read(&u->inflight);

                BUG_ON(inflight_refs < 1);
                BUG_ON(total_refs < inflight_refs);
                if (total_refs == inflight_refs) {
                        list_move_tail(&u->link, &gc_candidates);
                        __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
                        __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
                }
        }

        /* Now remove all internal in-flight reference to children of
         * the candidates.
         */
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, dec_inflight, NULL);

        /* Restore the references for children of all candidates,
         * which have remaining references.  Do this recursively, so
         * only those remain, which form cyclic references.
         *
         * Use a "cursor" link, to make the list traversal safe, even
         * though elements might be moved about.
         */
        list_add(&cursor, &gc_candidates);
        while (cursor.next != &gc_candidates) {
                u = list_entry(cursor.next, struct unix_sock, link);

                /* Move cursor to after the current position. */
                list_move(&cursor, &u->link);

                if (atomic_long_read(&u->inflight) > 0) {
                        list_move_tail(&u->link, &not_cycle_list);
                        __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
                        scan_children(&u->sk, inc_inflight_move_tail, NULL);
                }
        }
        list_del(&cursor);

        /* not_cycle_list contains those sockets which do not make up a
         * cycle.  Restore these to the inflight list.
         */
        while (!list_empty(&not_cycle_list)) {
                u = list_entry(not_cycle_list.next, struct unix_sock, link);
                __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
                list_move_tail(&u->link, &gc_inflight_list);
        }

        /* Now gc_candidates contains only garbage.  Restore original
         * inflight counters for these as well, and remove the skbuffs
         * which are creating the cycle(s).
         */
        skb_queue_head_init(&hitlist);
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, inc_inflight, &hitlist);

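        /* The hitlist skbs are freed outside of unix_gc_lock: their
         * destructors drop the passed descriptors again via
         * unix_notinflight(), which takes unix_gc_lock itself, and the
         * final fput() may block.
         */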
        spin_unlock(&unix_gc_lock);

        /* Here we are. Hitlist is filled. Die. */
        __skb_queue_purge(&hitlist);

        spin_lock(&unix_gc_lock);

        /* All candidates should have been detached by now. */
        BUG_ON(!list_empty(&gc_candidates));
        gc_in_progress = false;
        wake_up(&unix_gc_wait);

 out:
        spin_unlock(&unix_gc_lock);
}