// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:        Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *        Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *        Alan Cox        07 Sept 1997        Vmalloc internal stack as needed.
 *                                            Cope with changing max_files.
 *        Al Viro         11 Oct 1998
 *                Graph may have cycles. That is, we can send the descriptor
 *                of foo to bar and vice versa. Current code chokes on that.
 *                Fix: move SCM_RIGHTS ones into the separate list and then
 *                skb_free() them all instead of doing explicit fput's.
 *                Another problem: since fput() may block somebody may
 *                create a new unix_socket when we are in the middle of sweep
 *                phase. Fix: revert the logic wrt MARKED. Mark everything
 *                upon the beginning and unmark non-junk ones.
 *
 *                [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *                sent to connect()'ed but still not accept()'ed sockets.
 *                Fixed. Old code had slightly different problem here:
 *                extra fput() in situation when we passed the descriptor via
 *                such socket and closed it (descriptor). That would happen on
 *                each unix_gc() until the accept(). Since the struct file in
 *                question would go to the free list and might be reused...
 *                That might be the reason of random oopses on filp_close()
 *                in unrelated processes.
 *
 *        AV              28 Feb 1999
 *                Kill the explicit allocation of stack. Now we keep the tree
 *                with root in dummy + pointer (gc_current) to one of the nodes.
 *                Stack is represented as path from gc_current to dummy. Unmark
 *                now means "add to tree". Push == "make it a son of gc_current".
 *                Pop == "move gc_current to parent". We keep only pointers to
 *                parents (->gc_tree).
 *        AV              1 Mar 1999
 *                Damn. Added missing check for ->dead in listen queues
 *                scanning.
 *
 *        Miklos Szeredi 25 Jun 2007
 *                Reimplement with a cycle collecting algorithm. This should
 *                solve several problems with the previous code, like being racy
 *                wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

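/* Map a struct file back to its unix_sock, or return NULL if the file is
 * not an AF_UNIX socket or was opened via O_PATH (FMODE_PATH) and so
 * cannot carry data.  This decides whether an fd passed with SCM_RIGHTS
 * contributes an edge to the inflight graph.
 */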
struct unix_sock *unix_get_socket(struct file *filp)
{
        struct inode *inode = file_inode(filp);

        /* Socket ? */
        if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
                struct socket *sock = SOCKET_I(inode);
                const struct proto_ops *ops;
                struct sock *sk = sock->sk;

                ops = READ_ONCE(sock->ops);

                /* PF_UNIX ? */
                if (sk && ops && ops->family == PF_UNIX)
                        return unix_sk(sk);
        }

        return NULL;
}

static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
{
        /* If an embryo socket has a fd,
         * the listener indirectly holds the fd's refcnt.
         */
        if (edge->successor->listener)
                return unix_sk(edge->successor->listener)->vertex;

        return edge->successor->vertex;
}

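/* unix_graph_maybe_cyclic is a conservative hint: adding or removing an
 * edge towards an inflight socket sets it, and a GC walk clears it only
 * when no potentially cyclic SCC remains, so __unix_gc() can return early
 * while it is false.  unix_graph_grouped records whether the SCC grouping
 * from the last full walk is still valid, enabling unix_walk_scc_fast().
 */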
77e5593a | 115 | static bool unix_graph_maybe_cyclic; |
ad081928 | 116 | static bool unix_graph_grouped; |
77e5593a KI |
117 | |
118 | static void unix_update_graph(struct unix_vertex *vertex) | |
119 | { | |
120 | /* If the receiver socket is not inflight, no cyclic | |
121 | * reference could be formed. | |
122 | */ | |
123 | if (!vertex) | |
124 | return; | |
125 | ||
126 | unix_graph_maybe_cyclic = true; | |
ad081928 | 127 | unix_graph_grouped = false; |
77e5593a KI |
128 | } |
129 | ||
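/* Vertices not yet grouped into an SCC sit on unix_unvisited_vertices.
 * MARK1/MARK2 serve as the "unvisited" and "grouped" values for
 * vertex->index; their roles are swapped after each full walk (see
 * unix_walk_scc()) so vertices need no re-initialisation between GC runs.
 * Indices of vertices currently on the DFS stack start from
 * UNIX_VERTEX_INDEX_START.
 */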
static LIST_HEAD(unix_unvisited_vertices);

enum unix_vertex_index {
        UNIX_VERTEX_INDEX_MARK1,
        UNIX_VERTEX_INDEX_MARK2,
        UNIX_VERTEX_INDEX_START,
};

static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1;

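/* Link one inflight AF_UNIX socket (predecessor) to its receiver
 * (successor).  The first outgoing edge takes a vertex preallocated on
 * fpl->vertices; unix_del_edge() hands the vertex back once the last
 * edge is gone so unix_free_vertices() can release it.
 */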
static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
        struct unix_vertex *vertex = edge->predecessor->vertex;

        if (!vertex) {
                vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
                vertex->index = unix_vertex_unvisited_index;
                vertex->out_degree = 0;
                INIT_LIST_HEAD(&vertex->edges);
                INIT_LIST_HEAD(&vertex->scc_entry);

                list_move_tail(&vertex->entry, &unix_unvisited_vertices);
                edge->predecessor->vertex = vertex;
        }

        vertex->out_degree++;
        list_add_tail(&edge->vertex_entry, &vertex->edges);

        unix_update_graph(unix_edge_successor(edge));
}

static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
        struct unix_vertex *vertex = edge->predecessor->vertex;

        if (!fpl->dead)
                unix_update_graph(unix_edge_successor(edge));

        list_del(&edge->vertex_entry);
        vertex->out_degree--;

        if (!vertex->out_degree) {
                edge->predecessor->vertex = NULL;
                list_move_tail(&vertex->entry, &fpl->vertices);
        }
}

static void unix_free_vertices(struct scm_fp_list *fpl)
{
        struct unix_vertex *vertex, *next_vertex;

        list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
                list_del(&vertex->entry);
                kfree(vertex);
        }
}

static DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;

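/* Register every AF_UNIX fd in @fpl as inflight towards @receiver: one
 * edge per fd, plus the per-receiver, global and per-user counters used
 * by unix_update_edges() and wait_for_unix_gc().  Preallocated vertices
 * that were not consumed are freed at the end.
 */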
void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{
        int i = 0, j = 0;

        spin_lock(&unix_gc_lock);

        if (!fpl->count_unix)
                goto out;

        do {
                struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
                struct unix_edge *edge;

                if (!inflight)
                        continue;

                edge = fpl->edges + i++;
                edge->predecessor = inflight;
                edge->successor = receiver;

                unix_add_edge(fpl, edge);
        } while (i < fpl->count_unix);

        receiver->scm_stat.nr_unix_fds += fpl->count_unix;
        WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
out:
        WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);

        spin_unlock(&unix_gc_lock);

        fpl->inflight = true;

        unix_free_vertices(fpl);
}

void unix_del_edges(struct scm_fp_list *fpl)
{
        struct unix_sock *receiver;
        int i = 0;

        spin_lock(&unix_gc_lock);

        if (!fpl->count_unix)
                goto out;

        do {
                struct unix_edge *edge = fpl->edges + i++;

                unix_del_edge(fpl, edge);
        } while (i < fpl->count_unix);

        if (!fpl->dead) {
                receiver = fpl->edges[0].successor;
                receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
        }
        WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
out:
        WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);

        spin_unlock(&unix_gc_lock);

        fpl->inflight = false;
}

void unix_update_edges(struct unix_sock *receiver)
{
        /* nr_unix_fds is only updated under unix_state_lock().
         * If it's 0 here, the embryo socket is not part of the
         * inflight graph, and GC will not see it, so no lock needed.
         */
        if (!receiver->scm_stat.nr_unix_fds) {
                receiver->listener = NULL;
        } else {
                spin_lock(&unix_gc_lock);
                unix_update_graph(unix_sk(receiver->listener)->vertex);
                receiver->listener = NULL;
                spin_unlock(&unix_gc_lock);
        }
}

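/* Preallocate one vertex per AF_UNIX fd and the edge array with sleeping
 * allocations, so unix_add_edges() never needs to allocate while holding
 * unix_gc_lock.
 */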
int unix_prepare_fpl(struct scm_fp_list *fpl)
{
        struct unix_vertex *vertex;
        int i;

        if (!fpl->count_unix)
                return 0;

        for (i = 0; i < fpl->count_unix; i++) {
                vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
                if (!vertex)
                        goto err;

                list_add(&vertex->entry, &fpl->vertices);
        }

        fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
                                    GFP_KERNEL_ACCOUNT);
        if (!fpl->edges)
                goto err;

        return 0;

err:
        unix_free_vertices(fpl);
        return -ENOMEM;
}

void unix_destroy_fpl(struct scm_fp_list *fpl)
{
        if (fpl->inflight)
                unix_del_edges(fpl);

        kvfree(fpl->edges);
        unix_free_vertices(fpl);
}

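/* A vertex (an inflight AF_UNIX socket) is dead when every receiver of
 * its fd lies in the same SCC and the file's refcount is fully accounted
 * for by those inflight references (total_ref == out_degree): no user
 * space descriptor remains that could ever receive the data.
 *
 * Illustration (user space, not part of this file): after
 * socketpair(AF_UNIX, SOCK_STREAM, 0, sv), send sv[0] over sv[1] and
 * sv[1] over sv[0] via SCM_RIGHTS, then close() both.  Each socket is
 * then referenced only by the skb queued on the other, forming a
 * two-vertex SCC whose refcounts equal the out_degrees.
 */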
static bool unix_vertex_dead(struct unix_vertex *vertex)
{
        struct unix_edge *edge;
        struct unix_sock *u;
        long total_ref;

        list_for_each_entry(edge, &vertex->edges, vertex_entry) {
                struct unix_vertex *next_vertex = unix_edge_successor(edge);

                /* The vertex's fd can be received by a non-inflight socket. */
                if (!next_vertex)
                        return false;

                /* The vertex's fd can be received by an inflight socket in
                 * another SCC.
                 */
                if (next_vertex->scc_index != vertex->scc_index)
                        return false;
        }

        /* No receiver exists out of the same SCC. */

        edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
        u = edge->predecessor;
        total_ref = file_count(u->sk.sk_socket->file);

        /* If not close()d, total_ref > out_degree. */
        if (total_ref != vertex->out_degree)
                return false;

        return true;
}

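/* Splice the receive queue of every socket in a dead SCC onto @hitlist.
 * For a listener, the receive queues of its not-yet-accept()ed embryo
 * sockets are spliced instead, since that is where the inflight fds sit.
 */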
static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
{
        struct unix_vertex *vertex;

        list_for_each_entry_reverse(vertex, scc, scc_entry) {
                struct sk_buff_head *queue;
                struct unix_edge *edge;
                struct unix_sock *u;

                edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
                u = edge->predecessor;
                queue = &u->sk.sk_receive_queue;

                spin_lock(&queue->lock);

                if (u->sk.sk_state == TCP_LISTEN) {
                        struct sk_buff *skb;

                        skb_queue_walk(queue, skb) {
                                struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;

                                spin_lock(&embryo_queue->lock);
                                skb_queue_splice_init(embryo_queue, hitlist);
                                spin_unlock(&embryo_queue->lock);
                        }
                } else {
                        skb_queue_splice_init(queue, hitlist);
                }

                spin_unlock(&queue->lock);
        }
}

static bool unix_scc_cyclic(struct list_head *scc)
{
        struct unix_vertex *vertex;
        struct unix_edge *edge;

        /* SCC containing multiple vertices ? */
        if (!list_is_singular(scc))
                return true;

        vertex = list_first_entry(scc, typeof(*vertex), scc_entry);

        /* Self-reference or an embryo-listener circle ? */
        list_for_each_entry(edge, &vertex->edges, vertex_entry) {
                if (unix_edge_successor(edge) == vertex)
                        return true;
        }

        return false;
}

static LIST_HEAD(unix_visited_vertices);
static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;

static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
                            struct sk_buff_head *hitlist)
{
        LIST_HEAD(vertex_stack);
        struct unix_edge *edge;
        LIST_HEAD(edge_stack);

next_vertex:
        /* Push vertex to vertex_stack and mark it as on-stack
         * (index >= UNIX_VERTEX_INDEX_START).
         * The vertex will be popped when finalising SCC later.
         */
        list_add(&vertex->scc_entry, &vertex_stack);

        vertex->index = *last_index;
        vertex->scc_index = *last_index;
        (*last_index)++;

        /* Explore neighbour vertices (receivers of the current vertex's fd). */
        list_for_each_entry(edge, &vertex->edges, vertex_entry) {
                struct unix_vertex *next_vertex = unix_edge_successor(edge);

                if (!next_vertex)
                        continue;

                if (next_vertex->index == unix_vertex_unvisited_index) {
                        /* Iterative depth first search
                         *
                         *   1. Push a forward edge to edge_stack and set
                         *      the successor to vertex for the next iteration.
                         */
                        list_add(&edge->stack_entry, &edge_stack);

                        vertex = next_vertex;
                        goto next_vertex;

                        /*   2. Pop the edge directed to the current vertex
                         *      and restore the ancestor for backtracking.
                         */
prev_vertex:
                        edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
                        list_del_init(&edge->stack_entry);

                        next_vertex = vertex;
                        vertex = edge->predecessor->vertex;

                        /* If the successor has a smaller scc_index, two vertices
                         * are in the same SCC, so propagate the smaller scc_index
                         * to skip SCC finalisation.
                         */
                        vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
                } else if (next_vertex->index != unix_vertex_grouped_index) {
                        /* Loop detected by a back/cross edge.
                         *
                         * The successor is on vertex_stack, so two vertices are in
                         * the same SCC. If the successor has a smaller *scc_index*,
                         * propagate it to skip SCC finalisation.
                         */
                        vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
                } else {
                        /* The successor was already grouped as another SCC */
                }
        }

        if (vertex->index == vertex->scc_index) {
                struct unix_vertex *v;
                struct list_head scc;
                bool scc_dead = true;

                /* SCC finalised.
                 *
                 * If the scc_index was not updated, all the vertices above on
                 * vertex_stack are in the same SCC. Group them using scc_entry.
                 */
                __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);

                list_for_each_entry_reverse(v, &scc, scc_entry) {
                        /* Don't restart DFS from this vertex in unix_walk_scc(). */
                        list_move_tail(&v->entry, &unix_visited_vertices);

                        /* Mark vertex as off-stack. */
                        v->index = unix_vertex_grouped_index;

                        if (scc_dead)
                                scc_dead = unix_vertex_dead(v);
                }

                if (scc_dead)
                        unix_collect_skb(&scc, hitlist);
                else if (!unix_graph_maybe_cyclic)
                        unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);

                list_del(&scc);
        }

        /* Need backtracking ? */
        if (!list_empty(&edge_stack))
                goto prev_vertex;
}

static void unix_walk_scc(struct sk_buff_head *hitlist)
{
        unsigned long last_index = UNIX_VERTEX_INDEX_START;

        unix_graph_maybe_cyclic = false;

        /* Visit every vertex exactly once.
         * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
         */
        while (!list_empty(&unix_unvisited_vertices)) {
                struct unix_vertex *vertex;

                vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
                __unix_walk_scc(vertex, &last_index, hitlist);
        }

        list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
        swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);

        unix_graph_grouped = true;
}

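/* Fast path used while unix_graph_grouped is true: nothing that could
 * form a new cycle has changed since the last full walk, so the SCC
 * grouping kept in scc_entry is reused and each group is only re-checked
 * for deadness.
 */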
static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
{
        unix_graph_maybe_cyclic = false;

        while (!list_empty(&unix_unvisited_vertices)) {
                struct unix_vertex *vertex;
                struct list_head scc;
                bool scc_dead = true;

                vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
                list_add(&scc, &vertex->scc_entry);

                list_for_each_entry_reverse(vertex, &scc, scc_entry) {
                        list_move_tail(&vertex->entry, &unix_visited_vertices);

                        if (scc_dead)
                                scc_dead = unix_vertex_dead(vertex);
                }

                if (scc_dead)
                        unix_collect_skb(&scc, hitlist);
                else if (!unix_graph_maybe_cyclic)
                        unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);

                list_del(&scc);
        }

        list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
}

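/* gc_in_progress is set in unix_gc() and cleared at the end of
 * __unix_gc(); wait_for_unix_gc() reads it locklessly.
 *
 * __unix_gc() skips the walk entirely while the graph cannot be cyclic.
 * Collected fp lists are flagged dead before the purge so that
 * unix_del_edges()/unix_del_edge(), invoked as the purged skbs are freed,
 * neither dereference the dying receivers nor dirty the graph state again.
 */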
static bool gc_in_progress;

static void __unix_gc(struct work_struct *work)
{
        struct sk_buff_head hitlist;
        struct sk_buff *skb;

        spin_lock(&unix_gc_lock);

        if (!unix_graph_maybe_cyclic) {
                spin_unlock(&unix_gc_lock);
                goto skip_gc;
        }

        __skb_queue_head_init(&hitlist);

        if (unix_graph_grouped)
                unix_walk_scc_fast(&hitlist);
        else
                unix_walk_scc(&hitlist);

        spin_unlock(&unix_gc_lock);

        skb_queue_walk(&hitlist, skb) {
                if (UNIXCB(skb).fp)
                        UNIXCB(skb).fp->dead = true;
        }

        __skb_queue_purge_reason(&hitlist, SKB_DROP_REASON_SOCKET_CLOSE);
skip_gc:
        WRITE_ONCE(gc_in_progress, false);
}

static DECLARE_WORK(unix_gc_work, __unix_gc);

void unix_gc(void)
{
        WRITE_ONCE(gc_in_progress, true);
        queue_work(system_unbound_wq, &unix_gc_work);
}

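/* Sender-side throttling: once unix_tot_inflight exceeds
 * UNIX_INFLIGHT_TRIGGER_GC an asynchronous GC is kicked off, and a user
 * holding more than UNIX_INFLIGHT_SANE_USER not-yet-received fds must
 * wait for any in-progress GC to finish before queueing more.
 */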
#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

void wait_for_unix_gc(struct scm_fp_list *fpl)
{
        /* If number of inflight sockets is insane,
         * force a garbage collect right now.
         *
         * Paired with the WRITE_ONCE() in unix_add_edges(),
         * unix_del_edges(), and __unix_gc().
         */
        if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
            !READ_ONCE(gc_in_progress))
                unix_gc();

        /* Penalise users who want to send AF_UNIX sockets
         * but whose sockets have not been received yet.
         */
        if (!fpl || !fpl->count_unix ||
            READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
                return;

        if (READ_ONCE(gc_in_progress))
                flush_work(&unix_gc_work);
}