/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
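
/*
 * Illustrative sketch (not part of the original sources): the kind of
 * unreachable cycle this collector reclaims can be created entirely from
 * userspace, roughly like this:
 *
 *	int a[2], b[2];
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, a);
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, b);
 *	send_fd(a[0], b[0]);	(pass b into a via an SCM_RIGHTS message)
 *	send_fd(b[0], a[0]);	(pass a into b via an SCM_RIGHTS message)
 *	close(a[0]); close(a[1]);
 *	close(b[0]); close(b[1]);
 *
 * where send_fd() is a hypothetical helper wrapping sendmsg() with an
 * SCM_RIGHTS control message.  After the close() calls no file
 * descriptor refers to either socket pair, yet each still holds an
 * in-flight reference to the other through the skb queued on its
 * receive queue, so plain reference counting alone can never free the
 * files.  unix_gc() below detects and breaks exactly this situation.
 */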

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;


struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = file_inode(filp);

	/*
	 * Socket ?
	 */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/*
		 * PF_UNIX ?
		 */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/*
 * Keep the number of times in flight count for the file
 * descriptor if it is for an AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		unix_tot_inflight++;
		spin_unlock(&unix_gc_lock);
	}
}

void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));
		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
		spin_unlock(&unix_gc_lock);
	}
}
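
/*
 * Orientation note (the call sites live outside this file): the two
 * counters above are expected to be driven from the SCM_RIGHTS handling
 * in af_unix.c - roughly, unix_attach_fds() calls unix_inflight() once
 * for every file reference copied into an skb, and a matching
 * unix_notinflight() call is made when that reference is detached again
 * (on delivery to the receiver or on destruction of the skb).
 */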

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/*
		 * Do we have file descriptors ?
		 */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 * Process the descriptors of this socket
			 */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;
			while (nfd--) {
				/*
				 * Get the socket the fd matches
				 * if it indeed does so
				 */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/*
					 * Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;
						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN)
		scan_inflight(x, func, hitlist);
	else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/*
		 * For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/*
			 * An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

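/*
 * The three helpers below are the callbacks passed to scan_children()
 * by unix_gc(): dec_inflight() subtracts the internal (in-flight)
 * references during candidate analysis, inc_inflight_move_tail()
 * restores counts for children of sockets found to be externally
 * reachable, and inc_inflight() restores counts for the remaining
 * garbage candidates while their skbs are moved onto the hitlist.
 */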
static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/*
	 * If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}
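
/*
 * Moving a still-maybe-cyclic child to the tail of gc_candidates is what
 * lets the cursor loop in unix_gc() below converge: once a socket turns
 * out to have external references, each candidate it holds in flight
 * gets a reference restored and is queued behind the cursor again, so
 * reachability propagates transitively along chains of passed
 * descriptors.
 */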

static bool gc_in_progress = false;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
	/*
	 * If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 */
	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}
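
/*
 * Presumably (the call sites are outside this file) wait_for_unix_gc()
 * is invoked from the af_unix sendmsg paths before queueing new
 * SCM_RIGHTS data, so a sender cannot outrun the collector: once more
 * than UNIX_INFLIGHT_TRIGGER_GC files are in flight, senders trigger a
 * collection themselves and then wait for it to finish.
 */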

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/*
	 * First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 */
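	/*
	 * A concrete reading of the test below: a socket whose last user
	 * descriptor was closed after being passed over another AF_UNIX
	 * socket has file_count() == 1 and an inflight count of 1 - every
	 * remaining reference to its struct file is an in-flight one, so
	 * it becomes a candidate.  A socket that is still open somewhere
	 * has total_refs > inflight_refs and is left alone.
	 */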
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/*
	 * Now remove all internal in-flight reference to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/*
	 * Restore the references for children of all candidates,
	 * which have remaining references.  Do this recursively, so
	 * only those remain, which form cyclic references.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/*
	 * not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	/*
	 * Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

out:
	spin_unlock(&unix_gc_lock);
}