1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * NET4: Implementation of BSD Unix domain sockets.
8 * Linus Torvalds : Assorted bug cures.
9 * Niibe Yutaka : async I/O support.
10 * Carsten Paeth : PF_UNIX check, address fixes.
11 * Alan Cox : Limit size of allocated blocks.
12 * Alan Cox : Fixed the stupid socketpair bug.
13 * Alan Cox : BSD compatibility fine tuning.
14 * Alan Cox : Fixed a bug in connect when interrupted.
15 * Alan Cox : Sorted out a proper draft version of
16 * file descriptor passing hacked up from
17 * Mike Shaver's work.
18 * Marty Leisner : Fixes to fd passing
19 * Nick Nevin : recvmsg bugfix.
20 * Alan Cox : Started proper garbage collector
21 * Heiko Eißfeldt : Missing verify_area check
22 * Alan Cox : Started POSIXisms
23 * Andreas Schwab : Replace inode by dentry for proper
24 * reference counting.
25 * Kirk Petersen : Made this a module
26 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
27 * Lots of bug fixes.
28 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
29 * by the above two patches.
30 * Andrea Arcangeli : If possible, we block in connect(2)
31 * if the max backlog of the listening socket
32 * has been reached. This won't break
33 * old apps and it avoids a huge number
34 * of sockets being hashed (for unix_gc()
35 * performance reasons).
36 * Security fix that limits the max
37 * number of socks to 2*max_files and
38 * the number of skbs queueable in the
39 * dgram receiver.
40 * Artur Skawina : Hash function optimizations
41 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
42 * Malcolm Beattie : Set peercred for socketpair
43 * Michal Ostrowski : Module initialization cleanup.
44 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
45 * the core infrastructure is doing that
46 * for all net proto families now (2.5.69+)
48 * Known differences from reference BSD that was tested:
51 * ECONNREFUSED is not returned from one end of a connected() socket to the
52 * other the moment one end closes.
53 * fstat() doesn't return st_dev=0, and gives the blksize as high water mark
54 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
56 * accept() returns a path name even if the connecting socket has closed
57 * in the meantime (BSD loses the path and gives up).
58 * accept() returns a 0-length path for an unbound connector. BSD returns 16
59 * and a null first byte in the path (but not for gethost/peername - BSD bug??)
60 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
61 * BSD af_unix's connect apparently forgets to block properly.
62 * (need to check this with the POSIX spec in detail)
64 * Differences from 2.0.0-11-... (ANK)
65 * Bug fixes and improvements.
66 * - client shutdown killed server socket.
67 * - removed all useless cli/sti pairs.
69 * Semantic changes/extensions.
70 * - generic control message passing.
71 * - SCM_CREDENTIALS control message.
72 * - "Abstract" (not FS based) socket bindings.
73 * Abstract names are sequences of bytes (not zero terminated)
74 * started by 0, so that this name space does not intersect
75 * with BSD names.
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
80 #include <linux/module.h>
81 #include <linux/kernel.h>
82 #include <linux/signal.h>
83 #include <linux/sched/signal.h>
84 #include <linux/errno.h>
85 #include <linux/string.h>
86 #include <linux/stat.h>
87 #include <linux/dcache.h>
88 #include <linux/namei.h>
89 #include <linux/socket.h>
91 #include <linux/fcntl.h>
92 #include <linux/filter.h>
93 #include <linux/termios.h>
94 #include <linux/sockios.h>
95 #include <linux/net.h>
98 #include <linux/slab.h>
99 #include <linux/uaccess.h>
100 #include <linux/skbuff.h>
101 #include <linux/netdevice.h>
102 #include <net/net_namespace.h>
103 #include <net/sock.h>
104 #include <net/tcp_states.h>
105 #include <net/af_unix.h>
106 #include <linux/proc_fs.h>
107 #include <linux/seq_file.h>
109 #include <linux/init.h>
110 #include <linux/poll.h>
111 #include <linux/rtnetlink.h>
112 #include <linux/mount.h>
113 #include <net/checksum.h>
114 #include <linux/security.h>
115 #include <linux/splice.h>
116 #include <linux/freezer.h>
117 #include <linux/file.h>
118 #include <linux/btf_ids.h>
119 #include <linux/bpf-cgroup.h>
121 static atomic_long_t unix_nr_socks;
122 static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
123 static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
125 /* SMP locking strategy:
126 * hash table is protected with spinlock.
127 * each socket state is protected by separate spinlock.
130 static unsigned int unix_unbound_hash(struct sock *sk)
132 unsigned long hash = (unsigned long)sk;
134 hash ^= hash >> 16;
135 hash ^= hash >> 8;
136 hash ^= sk->sk_type;
138 return hash & UNIX_HASH_MOD;
141 static unsigned int unix_bsd_hash(struct inode *i)
143 return i->i_ino & UNIX_HASH_MOD;
146 static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
147 int addr_len, int type)
149 __wsum csum = csum_partial(sunaddr, addr_len, 0);
150 unsigned int hash;
152 hash = (__force unsigned int)csum_fold(csum);
153 hash ^= hash >> 8;
154 hash ^= type;
156 return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
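/* Hash layout implied by the three helpers above: unbound sockets
 * (unix_unbound_hash) and pathname sockets (unix_bsd_hash) fall into
 * [0, UNIX_HASH_MOD] of net->unx.table, while unix_abstract_hash()
 * adds UNIX_HASH_MOD + 1 so abstract names occupy the upper half and
 * never share buckets with the other two classes. Pathname sockets are
 * additionally linked into bsd_socket_buckets, keyed by inode number,
 * so unix_find_socket_byinode() can resolve a filesystem inode back to
 * its socket.
 */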
159 static void unix_table_double_lock(struct net *net,
160 unsigned int hash1, unsigned int hash2)
162 if (hash1 == hash2) {
163 spin_lock(&net->unx.table.locks[hash1]);
164 return;
165 }
167 if (hash1 > hash2)
168 swap(hash1, hash2);
170 spin_lock(&net->unx.table.locks[hash1]);
171 spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
174 static void unix_table_double_unlock(struct net *net,
175 unsigned int hash1, unsigned int hash2)
177 if (hash1 == hash2) {
178 spin_unlock(&net->unx.table.locks[hash1]);
179 return;
180 }
182 spin_unlock(&net->unx.table.locks[hash1]);
183 spin_unlock(&net->unx.table.locks[hash2]);
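/* Taking the two bucket locks in ascending hash order (the swap()
 * above) imposes a global order on them, so two concurrent rehashes in
 * opposite directions cannot ABBA-deadlock; spin_lock_nested() merely
 * tells lockdep that taking a second lock of the same class here is
 * intentional.
 */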
186 #ifdef CONFIG_SECURITY_NETWORK
187 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
189 UNIXCB(skb).secid = scm->secid;
192 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
194 scm->secid = UNIXCB(skb).secid;
197 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
199 return (scm->secid == UNIXCB(skb).secid);
202 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
205 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
208 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
212 #endif /* CONFIG_SECURITY_NETWORK */
214 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
216 return unix_peer(osk) == sk;
219 static inline int unix_may_send(struct sock *sk, struct sock *osk)
221 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
224 static inline int unix_recvq_full(const struct sock *sk)
226 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
229 static inline int unix_recvq_full_lockless(const struct sock *sk)
231 return skb_queue_len_lockless(&sk->sk_receive_queue) >
232 READ_ONCE(sk->sk_max_ack_backlog);
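/* The _lockless variant is for paths that do not hold the queue lock
 * (poll, wait loops): skb_queue_len_lockless() and READ_ONCE() accept
 * a racy snapshot because the result is only a backpressure hint, not
 * a correctness invariant.
 */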
235 struct sock *unix_peer_get(struct sock *s)
243 unix_state_unlock(s);
246 EXPORT_SYMBOL_GPL(unix_peer_get);
248 static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
251 struct unix_address *addr;
253 addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
257 refcount_set(&addr->refcnt, 1);
258 addr->len = addr_len;
259 memcpy(addr->name, sunaddr, addr_len);
264 static inline void unix_release_addr(struct unix_address *addr)
266 if (refcount_dec_and_test(&addr->refcnt))
271 * Check unix socket name:
272 * - it should not be zero length.
273 * - if it does not start with a zero byte, it must be NUL-terminated (FS object)
274 * - if it starts with a zero byte, it is an abstract name.
277 static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
279 if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
280 addr_len > sizeof(*sunaddr))
283 if (sunaddr->sun_family != AF_UNIX)
289 static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
291 struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
292 short offset = offsetof(struct sockaddr_storage, __data);
294 BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
296 /* This may look like an off by one error but it is a bit more
297 * subtle. 108 is the longest valid AF_UNIX path for a binding.
298 * sun_path[108] doesn't as such exist. However in kernel space
299 * we are guaranteed that it is a valid memory location in our
300 * kernel address buffer because syscall functions always pass
301 * a pointer of struct sockaddr_storage which has a bigger buffer
302 * than 108. Also, we must terminate sun_path for strlen() in
303 * getname_kernel().
305 addr->__data[addr_len - offset] = 0;
307 /* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will
308 * cause panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen()
309 * know the actual buffer.
311 return strlen(addr->__data) + offset + 1;
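/* Worked example for the code above: a caller binding to "/tmp/x" may
 * pass addr_len == offset + 6 with no terminating NUL. The store above
 * writes the NUL at __data[6], strlen() then returns 6, and the
 * function reports offset + 6 + 1: family header, path bytes and
 * terminator.
 */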
314 static void __unix_remove_socket(struct sock *sk)
316 sk_del_node_init(sk);
319 static void __unix_insert_socket(struct net *net, struct sock *sk)
321 DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
322 sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
325 static void __unix_set_addr_hash(struct net *net, struct sock *sk,
326 struct unix_address *addr, unsigned int hash)
328 __unix_remove_socket(sk);
329 smp_store_release(&unix_sk(sk)->addr, addr);
332 __unix_insert_socket(net, sk);
335 static void unix_remove_socket(struct net *net, struct sock *sk)
337 spin_lock(&net->unx.table.locks[sk->sk_hash]);
338 __unix_remove_socket(sk);
339 spin_unlock(&net->unx.table.locks[sk->sk_hash]);
342 static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
344 spin_lock(&net->unx.table.locks[sk->sk_hash]);
345 __unix_insert_socket(net, sk);
346 spin_unlock(&net->unx.table.locks[sk->sk_hash]);
349 static void unix_insert_bsd_socket(struct sock *sk)
351 spin_lock(&bsd_socket_locks[sk->sk_hash]);
352 sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
353 spin_unlock(&bsd_socket_locks[sk->sk_hash]);
356 static void unix_remove_bsd_socket(struct sock *sk)
358 if (!hlist_unhashed(&sk->sk_bind_node)) {
359 spin_lock(&bsd_socket_locks[sk->sk_hash]);
360 __sk_del_bind_node(sk);
361 spin_unlock(&bsd_socket_locks[sk->sk_hash]);
363 sk_node_init(&sk->sk_bind_node);
367 static struct sock *__unix_find_socket_byname(struct net *net,
368 struct sockaddr_un *sunname,
369 int len, unsigned int hash)
373 sk_for_each(s, &net->unx.table.buckets[hash]) {
374 struct unix_sock *u = unix_sk(s);
376 if (u->addr->len == len &&
377 !memcmp(u->addr->name, sunname, len))
383 static inline struct sock *unix_find_socket_byname(struct net *net,
384 struct sockaddr_un *sunname,
385 int len, unsigned int hash)
389 spin_lock(&net->unx.table.locks[hash]);
390 s = __unix_find_socket_byname(net, sunname, len, hash);
393 spin_unlock(&net->unx.table.locks[hash]);
397 static struct sock *unix_find_socket_byinode(struct inode *i)
399 unsigned int hash = unix_bsd_hash(i);
402 spin_lock(&bsd_socket_locks[hash]);
403 sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
404 struct dentry *dentry = unix_sk(s)->path.dentry;
406 if (dentry && d_backing_inode(dentry) == i) {
408 spin_unlock(&bsd_socket_locks[hash]);
412 spin_unlock(&bsd_socket_locks[hash]);
416 /* Support code for asymmetrically connected dgram sockets
418 * If a datagram socket is connected to a socket not itself connected
419 * to the first socket (eg, /dev/log), clients may only enqueue more
420 * messages if the present receive queue of the server socket is not
421 * "too large". This means there's a second writeability condition
422 * poll and sendmsg need to test. The dgram recv code will do a wake
423 * up on the peer_wait wait queue of a socket upon reception of a
424 * datagram which needs to be propagated to sleeping would-be writers
425 * since these might not have sent anything so far. This can't be
426 * accomplished via poll_wait because the lifetime of the server
427 * socket might be less than that of its clients if these break their
428 * association with it or if the server socket is closed while clients
429 * are still connected to it and there's no way to inform "a polling
430 * implementation" that it should let go of a certain wait queue
432 * In order to propagate a wake up, a wait_queue_entry_t of the client
433 * socket is enqueued on the peer_wait queue of the server socket
434 * whose wake function does a wake_up on the ordinary client socket
435 * wait queue. This connection is established whenever a write (or
436 * poll for write) hits the flow control condition and is broken when
437 * the association to the server socket is dissolved or after a wake
438 * up was relayed.
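/* The three helpers below implement this: unix_dgram_peer_wake_relay()
 * is the wake function that forwards a wake-up from the peer's
 * peer_wait queue to our own sleepers, unix_dgram_peer_wake_connect()
 * enqueues our wait entry on the peer, and
 * unix_dgram_peer_wake_disconnect() tears the association down again.
 */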
441 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
445 wait_queue_head_t *u_sleep;
447 u = container_of(q, struct unix_sock, peer_wake);
449 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
451 u->peer_wake.private = NULL;
453 /* relaying can only happen while the wq still exists */
454 u_sleep = sk_sleep(&u->sk);
456 wake_up_interruptible_poll(u_sleep, key_to_poll(key));
461 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
463 struct unix_sock *u, *u_other;
467 u_other = unix_sk(other);
469 spin_lock(&u_other->peer_wait.lock);
471 if (!u->peer_wake.private) {
472 u->peer_wake.private = other;
473 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
478 spin_unlock(&u_other->peer_wait.lock);
482 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
485 struct unix_sock *u, *u_other;
488 u_other = unix_sk(other);
489 spin_lock(&u_other->peer_wait.lock);
491 if (u->peer_wake.private == other) {
492 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
493 u->peer_wake.private = NULL;
496 spin_unlock(&u_other->peer_wait.lock);
499 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
502 unix_dgram_peer_wake_disconnect(sk, other);
503 wake_up_interruptible_poll(sk_sleep(sk),
509 /* preconditions:
510 * - unix_peer(sk) == other
511 * - association is stable
511 * - association is stable
513 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
517 connected = unix_dgram_peer_wake_connect(sk, other);
519 /* If other is SOCK_DEAD, we want to make sure we signal
520 * POLLOUT, such that a subsequent write() can get a
521 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
522 * to other and its full, we will hang waiting for POLLOUT.
524 if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
528 unix_dgram_peer_wake_disconnect(sk, other);
533 static int unix_writable(const struct sock *sk)
535 return sk->sk_state != TCP_LISTEN &&
536 (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
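/* A socket counts as writable while it is not listening and its write
 * allocation is at most a quarter of sk_sndbuf (wmem_alloc * 4 <=
 * sndbuf), so writers are only woken once a substantial share of the
 * buffer has drained rather than on every freed byte.
 */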
539 static void unix_write_space(struct sock *sk)
541 struct socket_wq *wq;
544 if (unix_writable(sk)) {
545 wq = rcu_dereference(sk->sk_wq);
546 if (skwq_has_sleeper(wq))
547 wake_up_interruptible_sync_poll(&wq->wait,
548 EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
549 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
554 /* When a dgram socket disconnects (or changes its peer), we clear its
555 * receive queue of packets arrived from the previous peer. First, this
556 * allows flow control based only on wmem_alloc; second, an sk connected
557 * to a peer may receive messages only from that peer. */
558 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
560 if (!skb_queue_empty(&sk->sk_receive_queue)) {
561 skb_queue_purge(&sk->sk_receive_queue);
562 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
564 /* If one link of a bidirectional dgram pipe is disconnected,
565 * we signal an error. Messages are lost. Do not do this
566 * when the peer was not connected to us.
568 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
569 WRITE_ONCE(other->sk_err, ECONNRESET);
570 sk_error_report(other);
573 other->sk_state = TCP_CLOSE;
576 static void unix_sock_destructor(struct sock *sk)
578 struct unix_sock *u = unix_sk(sk);
580 skb_queue_purge(&sk->sk_receive_queue);
582 DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
583 DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
584 DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
585 if (!sock_flag(sk, SOCK_DEAD)) {
586 pr_info("Attempt to release alive unix socket: %p\n", sk);
591 unix_release_addr(u->addr);
593 atomic_long_dec(&unix_nr_socks);
594 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
595 #ifdef UNIX_REFCNT_DEBUG
596 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
597 atomic_long_read(&unix_nr_socks));
601 static void unix_release_sock(struct sock *sk, int embrion)
603 struct unix_sock *u = unix_sk(sk);
609 unix_remove_socket(sock_net(sk), sk);
610 unix_remove_bsd_socket(sk);
615 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
617 u->path.dentry = NULL;
619 state = sk->sk_state;
620 sk->sk_state = TCP_CLOSE;
622 skpair = unix_peer(sk);
623 unix_peer(sk) = NULL;
625 unix_state_unlock(sk);
627 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
629 kfree_skb(u->oob_skb);
634 wake_up_interruptible_all(&u->peer_wait);
636 if (skpair != NULL) {
637 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
638 unix_state_lock(skpair);
640 WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
641 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
642 WRITE_ONCE(skpair->sk_err, ECONNRESET);
643 unix_state_unlock(skpair);
644 skpair->sk_state_change(skpair);
645 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
648 unix_dgram_peer_wake_disconnect(sk, skpair);
649 sock_put(skpair); /* It may now die */
652 /* Try to flush out this socket. Throw out buffers at least */
654 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
655 if (state == TCP_LISTEN)
656 unix_release_sock(skb->sk, 1);
657 /* passed fds are erased in the kfree_skb hook */
658 UNIXCB(skb).consumed = skb->len;
667 /* ---- Socket is dead now and most probably destroyed ---- */
670 * Fixme: BSD difference: In BSD all sockets connected to us get
671 * ECONNRESET and we die on the spot. In Linux we behave
672 * like files and pipes do and wait for the last
673 * dereference.
675 * Can't we simply set sock->err?
677 * What is the above comment talking about? --ANK(980817)
680 if (READ_ONCE(unix_tot_inflight))
681 unix_gc(); /* Garbage collect fds */
684 static void init_peercred(struct sock *sk)
686 const struct cred *old_cred;
689 spin_lock(&sk->sk_peer_lock);
690 old_pid = sk->sk_peer_pid;
691 old_cred = sk->sk_peer_cred;
692 sk->sk_peer_pid = get_pid(task_tgid(current));
693 sk->sk_peer_cred = get_current_cred();
694 spin_unlock(&sk->sk_peer_lock);
700 static void copy_peercred(struct sock *sk, struct sock *peersk)
702 const struct cred *old_cred;
703 struct pid *old_pid;
705 if (sk < peersk) {
706 spin_lock(&sk->sk_peer_lock);
707 spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
708 } else {
709 spin_lock(&peersk->sk_peer_lock);
710 spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
711 }
712 old_pid = sk->sk_peer_pid;
713 old_cred = sk->sk_peer_cred;
714 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
715 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
717 spin_unlock(&sk->sk_peer_lock);
718 spin_unlock(&peersk->sk_peer_lock);
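/* copy_peercred() runs when a stream connection is made: the
 * connecting socket inherits the listener's pid and credentials (see
 * the copy_peercred(sk, other) call in unix_stream_connect() below).
 * The address-ordered double locking above, lower pointer first,
 * prevents deadlock when two sockets exchange credentials
 * concurrently.
 */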
724 static int unix_listen(struct socket *sock, int backlog)
727 struct sock *sk = sock->sk;
728 struct unix_sock *u = unix_sk(sk);
731 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
732 goto out; /* Only stream/seqpacket sockets accept */
735 goto out; /* No listens on an unbound socket */
737 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
739 if (backlog > sk->sk_max_ack_backlog)
740 wake_up_interruptible_all(&u->peer_wait);
741 sk->sk_max_ack_backlog = backlog;
742 sk->sk_state = TCP_LISTEN;
743 /* set credentials so connect can copy them */
748 unix_state_unlock(sk);
753 static int unix_release(struct socket *);
754 static int unix_bind(struct socket *, struct sockaddr *, int);
755 static int unix_stream_connect(struct socket *, struct sockaddr *,
756 int addr_len, int flags);
757 static int unix_socketpair(struct socket *, struct socket *);
758 static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg);
759 static int unix_getname(struct socket *, struct sockaddr *, int);
760 static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
761 static __poll_t unix_dgram_poll(struct file *, struct socket *,
763 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
765 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
767 static int unix_shutdown(struct socket *, int);
768 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
769 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
770 static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
771 struct pipe_inode_info *, size_t size,
773 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
774 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
775 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
776 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
777 static int unix_dgram_connect(struct socket *, struct sockaddr *,
779 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
780 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
783 #ifdef CONFIG_PROC_FS
784 static int unix_count_nr_fds(struct sock *sk)
790 spin_lock(&sk->sk_receive_queue.lock);
791 skb = skb_peek(&sk->sk_receive_queue);
793 u = unix_sk(skb->sk);
794 nr_fds += atomic_read(&u->scm_stat.nr_fds);
795 skb = skb_peek_next(skb, &sk->sk_receive_queue);
797 spin_unlock(&sk->sk_receive_queue.lock);
802 static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
804 struct sock *sk = sock->sk;
805 unsigned char s_state;
810 s_state = READ_ONCE(sk->sk_state);
813 /* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
814 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
815 * SOCK_DGRAM is ordinary. So, no lock is needed.
817 if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
818 nr_fds = atomic_read(&u->scm_stat.nr_fds);
819 else if (s_state == TCP_LISTEN)
820 nr_fds = unix_count_nr_fds(sk);
822 seq_printf(m, "scm_fds: %u\n", nr_fds);
826 #define unix_show_fdinfo NULL
829 static const struct proto_ops unix_stream_ops = {
831 .owner = THIS_MODULE,
832 .release = unix_release,
834 .connect = unix_stream_connect,
835 .socketpair = unix_socketpair,
836 .accept = unix_accept,
837 .getname = unix_getname,
841 .compat_ioctl = unix_compat_ioctl,
843 .listen = unix_listen,
844 .shutdown = unix_shutdown,
845 .sendmsg = unix_stream_sendmsg,
846 .recvmsg = unix_stream_recvmsg,
847 .read_skb = unix_stream_read_skb,
848 .mmap = sock_no_mmap,
849 .splice_read = unix_stream_splice_read,
850 .set_peek_off = sk_set_peek_off,
851 .show_fdinfo = unix_show_fdinfo,
854 static const struct proto_ops unix_dgram_ops = {
856 .owner = THIS_MODULE,
857 .release = unix_release,
859 .connect = unix_dgram_connect,
860 .socketpair = unix_socketpair,
861 .accept = sock_no_accept,
862 .getname = unix_getname,
863 .poll = unix_dgram_poll,
866 .compat_ioctl = unix_compat_ioctl,
868 .listen = sock_no_listen,
869 .shutdown = unix_shutdown,
870 .sendmsg = unix_dgram_sendmsg,
871 .read_skb = unix_read_skb,
872 .recvmsg = unix_dgram_recvmsg,
873 .mmap = sock_no_mmap,
874 .set_peek_off = sk_set_peek_off,
875 .show_fdinfo = unix_show_fdinfo,
878 static const struct proto_ops unix_seqpacket_ops = {
880 .owner = THIS_MODULE,
881 .release = unix_release,
883 .connect = unix_stream_connect,
884 .socketpair = unix_socketpair,
885 .accept = unix_accept,
886 .getname = unix_getname,
887 .poll = unix_dgram_poll,
890 .compat_ioctl = unix_compat_ioctl,
892 .listen = unix_listen,
893 .shutdown = unix_shutdown,
894 .sendmsg = unix_seqpacket_sendmsg,
895 .recvmsg = unix_seqpacket_recvmsg,
896 .mmap = sock_no_mmap,
897 .set_peek_off = sk_set_peek_off,
898 .show_fdinfo = unix_show_fdinfo,
901 static void unix_close(struct sock *sk, long timeout)
903 /* Nothing to do here, unix socket does not need a ->close().
904 * This is merely for sockmap.
908 static void unix_unhash(struct sock *sk)
910 /* Nothing to do here, unix socket does not need a ->unhash().
911 * This is merely for sockmap.
915 static bool unix_bpf_bypass_getsockopt(int level, int optname)
917 if (level == SOL_SOCKET) {
929 struct proto unix_dgram_proto = {
931 .owner = THIS_MODULE,
932 .obj_size = sizeof(struct unix_sock),
934 .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt,
935 #ifdef CONFIG_BPF_SYSCALL
936 .psock_update_sk_prot = unix_dgram_bpf_update_proto,
940 struct proto unix_stream_proto = {
941 .name = "UNIX-STREAM",
942 .owner = THIS_MODULE,
943 .obj_size = sizeof(struct unix_sock),
945 .unhash = unix_unhash,
946 .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt,
947 #ifdef CONFIG_BPF_SYSCALL
948 .psock_update_sk_prot = unix_stream_bpf_update_proto,
952 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
958 atomic_long_inc(&unix_nr_socks);
959 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
964 if (type == SOCK_STREAM)
965 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
966 else /*dgram and seqpacket */
967 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
974 sock_init_data(sock, sk);
976 sk->sk_hash = unix_unbound_hash(sk);
977 sk->sk_allocation = GFP_KERNEL_ACCOUNT;
978 sk->sk_write_space = unix_write_space;
979 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
980 sk->sk_destruct = unix_sock_destructor;
984 u->path.dentry = NULL;
986 spin_lock_init(&u->lock);
987 mutex_init(&u->iolock); /* single task reading lock */
988 mutex_init(&u->bindlock); /* single task binding lock */
989 init_waitqueue_head(&u->peer_wait);
990 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
991 memset(&u->scm_stat, 0, sizeof(struct scm_stat));
992 unix_insert_unbound_socket(net, sk);
994 sock_prot_inuse_add(net, sk->sk_prot, 1);
999 atomic_long_dec(&unix_nr_socks);
1000 return ERR_PTR(err);
1003 static int unix_create(struct net *net, struct socket *sock, int protocol,
1008 if (protocol && protocol != PF_UNIX)
1009 return -EPROTONOSUPPORT;
1011 sock->state = SS_UNCONNECTED;
1013 switch (sock->type) {
1015 sock->ops = &unix_stream_ops;
1018 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
1022 sock->type = SOCK_DGRAM;
1025 sock->ops = &unix_dgram_ops;
1027 case SOCK_SEQPACKET:
1028 sock->ops = &unix_seqpacket_ops;
1031 return -ESOCKTNOSUPPORT;
1034 sk = unix_create1(net, sock, kern, sock->type);
1041 static int unix_release(struct socket *sock)
1043 struct sock *sk = sock->sk;
1048 sk->sk_prot->close(sk, 0);
1049 unix_release_sock(sk, 0);
1055 static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1058 struct inode *inode;
1063 unix_mkname_bsd(sunaddr, addr_len);
1064 err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1068 err = path_permission(&path, MAY_WRITE);
1072 err = -ECONNREFUSED;
1073 inode = d_backing_inode(path.dentry);
1074 if (!S_ISSOCK(inode->i_mode))
1077 sk = unix_find_socket_byinode(inode);
1082 if (sk->sk_type == type)
1096 return ERR_PTR(err);
1099 static struct sock *unix_find_abstract(struct net *net,
1100 struct sockaddr_un *sunaddr,
1101 int addr_len, int type)
1103 unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1104 struct dentry *dentry;
1107 sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1109 return ERR_PTR(-ECONNREFUSED);
1111 dentry = unix_sk(sk)->path.dentry;
1113 touch_atime(&unix_sk(sk)->path);
1118 static struct sock *unix_find_other(struct net *net,
1119 struct sockaddr_un *sunaddr,
1120 int addr_len, int type)
1124 if (sunaddr->sun_path[0])
1125 sk = unix_find_bsd(sunaddr, addr_len, type);
1127 sk = unix_find_abstract(net, sunaddr, addr_len, type);
1132 static int unix_autobind(struct sock *sk)
1134 unsigned int new_hash, old_hash = sk->sk_hash;
1135 struct unix_sock *u = unix_sk(sk);
1136 struct net *net = sock_net(sk);
1137 struct unix_address *addr;
1138 u32 lastnum, ordernum;
1141 err = mutex_lock_interruptible(&u->bindlock);
1149 addr = kzalloc(sizeof(*addr) +
1150 offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1154 addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1155 addr->name->sun_family = AF_UNIX;
1156 refcount_set(&addr->refcnt, 1);
1158 ordernum = get_random_u32();
1159 lastnum = ordernum & 0xFFFFF;
1161 ordernum = (ordernum + 1) & 0xFFFFF;
1162 sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1164 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1165 unix_table_double_lock(net, old_hash, new_hash);
1167 if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1168 unix_table_double_unlock(net, old_hash, new_hash);
1170 /* __unix_find_socket_byname() may take a long time if many names
1171 * are already in use.
1175 if (ordernum == lastnum) {
1176 /* Give up if all names seem to be in use. */
1178 unix_release_addr(addr);
1185 __unix_set_addr_hash(net, sk, addr, new_hash);
1186 unix_table_double_unlock(net, old_hash, new_hash);
1189 out: mutex_unlock(&u->bindlock);
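/* A minimal userspace sketch of what triggers the autobind above
 * (illustrative only): binding with just the address family and no
 * path bytes makes the kernel pick an abstract name of the form
 * "\0xxxxx" with five hex digits:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
 *
 *	bind(fd, (struct sockaddr *)&a, offsetof(struct sockaddr_un, sun_path));
 *
 * See the addr_len check at the top of unix_bind() below; connect(2)
 * and sendmsg(2) on an unbound SOCK_PASSCRED/SOCK_PASSPIDFD socket
 * reach the same path.
 */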
1193 static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1196 umode_t mode = S_IFSOCK |
1197 (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1198 unsigned int new_hash, old_hash = sk->sk_hash;
1199 struct unix_sock *u = unix_sk(sk);
1200 struct net *net = sock_net(sk);
1201 struct mnt_idmap *idmap;
1202 struct unix_address *addr;
1203 struct dentry *dentry;
1207 addr_len = unix_mkname_bsd(sunaddr, addr_len);
1208 addr = unix_create_addr(sunaddr, addr_len);
1213 * Get the parent directory, calculate the hash for last
1214 * component.
1216 dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1217 if (IS_ERR(dentry)) {
1218 err = PTR_ERR(dentry);
1223 * All right, let's create it.
1225 idmap = mnt_idmap(parent.mnt);
1226 err = security_path_mknod(&parent, dentry, mode, 0);
1228 err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1231 err = mutex_lock_interruptible(&u->bindlock);
1237 new_hash = unix_bsd_hash(d_backing_inode(dentry));
1238 unix_table_double_lock(net, old_hash, new_hash);
1239 u->path.mnt = mntget(parent.mnt);
1240 u->path.dentry = dget(dentry);
1241 __unix_set_addr_hash(net, sk, addr, new_hash);
1242 unix_table_double_unlock(net, old_hash, new_hash);
1243 unix_insert_bsd_socket(sk);
1244 mutex_unlock(&u->bindlock);
1245 done_path_create(&parent, dentry);
1249 mutex_unlock(&u->bindlock);
1252 /* failed after successful mknod? unlink what we'd created... */
1253 vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1255 done_path_create(&parent, dentry);
1257 unix_release_addr(addr);
1258 return err == -EEXIST ? -EADDRINUSE : err;
1261 static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1264 unsigned int new_hash, old_hash = sk->sk_hash;
1265 struct unix_sock *u = unix_sk(sk);
1266 struct net *net = sock_net(sk);
1267 struct unix_address *addr;
1270 addr = unix_create_addr(sunaddr, addr_len);
1274 err = mutex_lock_interruptible(&u->bindlock);
1283 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1284 unix_table_double_lock(net, old_hash, new_hash);
1286 if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1289 __unix_set_addr_hash(net, sk, addr, new_hash);
1290 unix_table_double_unlock(net, old_hash, new_hash);
1291 mutex_unlock(&u->bindlock);
1295 unix_table_double_unlock(net, old_hash, new_hash);
1298 mutex_unlock(&u->bindlock);
1300 unix_release_addr(addr);
1304 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1306 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1307 struct sock *sk = sock->sk;
1310 if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1311 sunaddr->sun_family == AF_UNIX)
1312 return unix_autobind(sk);
1314 err = unix_validate_addr(sunaddr, addr_len);
1318 if (sunaddr->sun_path[0])
1319 err = unix_bind_bsd(sk, sunaddr, addr_len);
1321 err = unix_bind_abstract(sk, sunaddr, addr_len);
1326 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1328 if (unlikely(sk1 == sk2) || !sk2) {
1329 unix_state_lock(sk1);
1335 unix_state_lock(sk1);
1336 unix_state_lock_nested(sk2, U_LOCK_SECOND);
1339 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1341 if (unlikely(sk1 == sk2) || !sk2) {
1342 unix_state_unlock(sk1);
1345 unix_state_unlock(sk1);
1346 unix_state_unlock(sk2);
1349 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1350 int alen, int flags)
1352 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1353 struct sock *sk = sock->sk;
1358 if (alen < offsetofend(struct sockaddr, sa_family))
1361 if (addr->sa_family != AF_UNSPEC) {
1362 err = unix_validate_addr(sunaddr, alen);
1366 err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
1370 if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1371 test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1372 !unix_sk(sk)->addr) {
1373 err = unix_autobind(sk);
1379 other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1380 if (IS_ERR(other)) {
1381 err = PTR_ERR(other);
1385 unix_state_double_lock(sk, other);
1387 /* Apparently VFS overslept socket death. Retry. */
1388 if (sock_flag(other, SOCK_DEAD)) {
1389 unix_state_double_unlock(sk, other);
1395 if (!unix_may_send(sk, other))
1398 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1402 sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1405 * 1003.1g breaking connected state with AF_UNSPEC
1408 unix_state_double_lock(sk, other);
1412 * If it was connected, reconnect.
1414 if (unix_peer(sk)) {
1415 struct sock *old_peer = unix_peer(sk);
1417 unix_peer(sk) = other;
1419 sk->sk_state = TCP_CLOSE;
1420 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1422 unix_state_double_unlock(sk, other);
1424 if (other != old_peer)
1425 unix_dgram_disconnected(sk, old_peer);
1428 unix_peer(sk) = other;
1429 unix_state_double_unlock(sk, other);
1435 unix_state_double_unlock(sk, other);
1441 static long unix_wait_for_peer(struct sock *other, long timeo)
1442 __releases(&unix_sk(other)->lock)
1444 struct unix_sock *u = unix_sk(other);
1448 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1450 sched = !sock_flag(other, SOCK_DEAD) &&
1451 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1452 unix_recvq_full_lockless(other);
1454 unix_state_unlock(other);
1457 timeo = schedule_timeout(timeo);
1459 finish_wait(&u->peer_wait, &wait);
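/* Waiters queue exclusively here: the per-datagram wake-up in the
 * receive path (wake_up_interruptible_sync_poll() on peer_wait)
 * releases one of them at a time, while teardown paths use
 * wake_up_interruptible_all() to release everyone at once.
 */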
1463 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1464 int addr_len, int flags)
1466 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1467 struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1468 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1469 struct net *net = sock_net(sk);
1470 struct sk_buff *skb = NULL;
1475 err = unix_validate_addr(sunaddr, addr_len);
1479 err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
1483 if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1484 test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
1485 err = unix_autobind(sk);
1490 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1492 /* First of all allocate resources.
1493 If we were to do it after the state is locked,
1494 we would have to recheck everything again in any case.
1497 /* create new sock for complete connection */
1498 newsk = unix_create1(net, NULL, 0, sock->type);
1499 if (IS_ERR(newsk)) {
1500 err = PTR_ERR(newsk);
1507 /* Allocate skb for sending to listening sock */
1508 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1513 /* Find listening sock. */
1514 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1515 if (IS_ERR(other)) {
1516 err = PTR_ERR(other);
1521 /* Latch state of peer */
1522 unix_state_lock(other);
1524 /* Apparently VFS overslept socket death. Retry. */
1525 if (sock_flag(other, SOCK_DEAD)) {
1526 unix_state_unlock(other);
1531 err = -ECONNREFUSED;
1532 if (other->sk_state != TCP_LISTEN)
1534 if (other->sk_shutdown & RCV_SHUTDOWN)
1537 if (unix_recvq_full(other)) {
1542 timeo = unix_wait_for_peer(other, timeo);
1544 err = sock_intr_errno(timeo);
1545 if (signal_pending(current))
1553 It is a tricky place. We need to grab our state lock and cannot
1554 drop the lock on the peer. It is dangerous because deadlock is
1555 possible. Connecting to self and simultaneous
1556 attempts to connect are eliminated by checking socket
1557 state. other is TCP_LISTEN; if sk is TCP_LISTEN, we
1558 check this before attempting to grab the lock.
1560 Well, and we have to recheck the state after the socket is locked.
1566 /* This is ok... continue with connect */
1568 case TCP_ESTABLISHED:
1569 /* Socket is already connected */
1577 unix_state_lock_nested(sk, U_LOCK_SECOND);
1579 if (sk->sk_state != st) {
1580 unix_state_unlock(sk);
1581 unix_state_unlock(other);
1586 err = security_unix_stream_connect(sk, other, newsk);
1588 unix_state_unlock(sk);
1592 /* The way is open! Quickly set all the necessary fields... */
1595 unix_peer(newsk) = sk;
1596 newsk->sk_state = TCP_ESTABLISHED;
1597 newsk->sk_type = sk->sk_type;
1598 init_peercred(newsk);
1599 newu = unix_sk(newsk);
1600 newu->listener = other;
1601 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1602 otheru = unix_sk(other);
1604 /* copy address information from listening to new sock
1606 * The contents of *(otheru->addr) and otheru->path
1607 * are seen fully set up here, since we have found
1608 * otheru in hash under its lock. Insertion into the
1609 * hash chain we'd found it in had been done in an
1610 * earlier critical area protected by the chain's lock,
1611 * the same one where we'd set *(otheru->addr) contents,
1612 * as well as otheru->path and otheru->addr itself.
1614 * Using smp_store_release() here to set newu->addr
1615 * is enough to make those stores, as well as stores
1616 * to newu->path visible to anyone who gets newu->addr
1617 * by smp_load_acquire(). IOW, the same guarantees
1618 * as for unix_sock instances bound in unix_bind() or
1619 * in unix_autobind().
1621 if (otheru->path.dentry) {
1622 path_get(&otheru->path);
1623 newu->path = otheru->path;
1625 refcount_inc(&otheru->addr->refcnt);
1626 smp_store_release(&newu->addr, otheru->addr);
1628 /* Set credentials */
1629 copy_peercred(sk, other);
1631 sock->state = SS_CONNECTED;
1632 sk->sk_state = TCP_ESTABLISHED;
1635 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1636 unix_peer(sk) = newsk;
1638 unix_state_unlock(sk);
1640 /* take ten and send info to listening sock */
1641 spin_lock(&other->sk_receive_queue.lock);
1642 __skb_queue_tail(&other->sk_receive_queue, skb);
1643 spin_unlock(&other->sk_receive_queue.lock);
1644 unix_state_unlock(other);
1645 other->sk_data_ready(other);
1651 unix_state_unlock(other);
1656 unix_release_sock(newsk, 0);
1662 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1664 struct sock *ska = socka->sk, *skb = sockb->sk;
1666 /* Join our sockets back to back */
1669 unix_peer(ska) = skb;
1670 unix_peer(skb) = ska;
1671 init_peercred(ska);
1672 init_peercred(skb);
1674 ska->sk_state = TCP_ESTABLISHED;
1675 skb->sk_state = TCP_ESTABLISHED;
1676 socka->state = SS_CONNECTED;
1677 sockb->state = SS_CONNECTED;
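/* Unlike a stream connect there is no listen/accept handshake here:
 * both sockets are born connected in TCP_ESTABLISHED with each other
 * as peer, and each carries its creator's peercred, so either end can
 * send immediately.
 */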
1681 static void unix_sock_inherit_flags(const struct socket *old,
1684 if (test_bit(SOCK_PASSCRED, &old->flags))
1685 set_bit(SOCK_PASSCRED, &new->flags);
1686 if (test_bit(SOCK_PASSPIDFD, &old->flags))
1687 set_bit(SOCK_PASSPIDFD, &new->flags);
1688 if (test_bit(SOCK_PASSSEC, &old->flags))
1689 set_bit(SOCK_PASSSEC, &new->flags);
1692 static int unix_accept(struct socket *sock, struct socket *newsock,
1693 struct proto_accept_arg *arg)
1695 struct sock *sk = sock->sk;
1696 struct sk_buff *skb;
1699 arg->err = -EOPNOTSUPP;
1700 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1704 if (sk->sk_state != TCP_LISTEN)
1707 /* If socket state is TCP_LISTEN it cannot change (for now...),
1708 * so that no locks are necessary.
1711 skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1714 /* This means receive shutdown. */
1721 skb_free_datagram(sk, skb);
1722 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1724 /* attach accepted sock to socket */
1725 unix_state_lock(tsk);
1726 unix_update_edges(unix_sk(tsk));
1727 newsock->state = SS_CONNECTED;
1728 unix_sock_inherit_flags(sock, newsock);
1729 sock_graft(tsk, newsock);
1730 unix_state_unlock(tsk);
1738 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1740 struct sock *sk = sock->sk;
1741 struct unix_address *addr;
1742 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1746 sk = unix_peer_get(sk);
1756 addr = smp_load_acquire(&unix_sk(sk)->addr);
1758 sunaddr->sun_family = AF_UNIX;
1759 sunaddr->sun_path[0] = 0;
1760 err = offsetof(struct sockaddr_un, sun_path);
1763 memcpy(sunaddr, addr->name, addr->len);
1766 BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1767 CGROUP_UNIX_GETPEERNAME);
1769 BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1770 CGROUP_UNIX_GETSOCKNAME);
1777 /* The "user->unix_inflight" variable is protected by the garbage
1778 * collection lock, and we just read it locklessly here. If you go
1779 * over the limit, there might be a tiny race in actually noticing
1780 * it across threads. Tough.
1782 static inline bool too_many_unix_fds(struct task_struct *p)
1784 struct user_struct *user = current_user();
1786 if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
1787 return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
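/* In effect a user may keep roughly RLIMIT_NOFILE descriptors in
 * flight via SCM_RIGHTS across all sockets, unless privileged; this
 * bounds how much memory an unprivileged sender can pin with
 * fd-carrying messages that were never received.
 */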
1791 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1793 if (too_many_unix_fds(current))
1794 return -ETOOMANYREFS;
1796 UNIXCB(skb).fp = scm->fp;
1799 if (unix_prepare_fpl(UNIXCB(skb).fp))
1805 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1807 scm->fp = UNIXCB(skb).fp;
1808 UNIXCB(skb).fp = NULL;
1810 unix_destroy_fpl(scm->fp);
1813 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1815 scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1818 static void unix_destruct_scm(struct sk_buff *skb)
1820 struct scm_cookie scm;
1822 memset(&scm, 0, sizeof(scm));
1823 scm.pid = UNIXCB(skb).pid;
1825 unix_detach_fds(&scm, skb);
1827 /* Alas, it calls VFS */
1828 /* So fscking what? fput() had been SMP-safe since the last Summer */
1833 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1837 UNIXCB(skb).pid = get_pid(scm->pid);
1838 UNIXCB(skb).uid = scm->creds.uid;
1839 UNIXCB(skb).gid = scm->creds.gid;
1840 UNIXCB(skb).fp = NULL;
1841 unix_get_secdata(scm, skb);
1842 if (scm->fp && send_fds)
1843 err = unix_attach_fds(scm, skb);
1845 skb->destructor = unix_destruct_scm;
1849 static bool unix_passcred_enabled(const struct socket *sock,
1850 const struct sock *other)
1852 return test_bit(SOCK_PASSCRED, &sock->flags) ||
1853 test_bit(SOCK_PASSPIDFD, &sock->flags) ||
1854 !other->sk_socket ||
1855 test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
1856 test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
1860 * Some apps rely on write() giving SCM_CREDENTIALS
1861 * We include credentials if source or destination socket
1862 * asserted SOCK_PASSCRED.
1864 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1865 const struct sock *other)
1867 if (UNIXCB(skb).pid)
1869 if (unix_passcred_enabled(sock, other)) {
1870 UNIXCB(skb).pid = get_pid(task_tgid(current));
1871 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1875 static bool unix_skb_scm_eq(struct sk_buff *skb,
1876 struct scm_cookie *scm)
1878 return UNIXCB(skb).pid == scm->pid &&
1879 uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1880 gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1881 unix_secdata_eq(scm, skb);
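/* unix_skb_scm_eq() is what keeps stream reads from blending data
 * written under different credentials: unix_stream_read_generic()
 * stops merging skbs as soon as the attached pid, uid/gid or security
 * data changes ("Never glue messages from different writers" below).
 */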
1884 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1886 struct scm_fp_list *fp = UNIXCB(skb).fp;
1887 struct unix_sock *u = unix_sk(sk);
1889 if (unlikely(fp && fp->count)) {
1890 atomic_add(fp->count, &u->scm_stat.nr_fds);
1891 unix_add_edges(fp, u);
1895 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1897 struct scm_fp_list *fp = UNIXCB(skb).fp;
1898 struct unix_sock *u = unix_sk(sk);
1900 if (unlikely(fp && fp->count)) {
1901 atomic_sub(fp->count, &u->scm_stat.nr_fds);
1907 * Send AF_UNIX data.
1910 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1913 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1914 struct sock *sk = sock->sk, *other = NULL;
1915 struct unix_sock *u = unix_sk(sk);
1916 struct scm_cookie scm;
1917 struct sk_buff *skb;
1923 err = scm_send(sock, msg, &scm, false);
1927 wait_for_unix_gc(scm.fp);
1930 if (msg->msg_flags&MSG_OOB)
1933 if (msg->msg_namelen) {
1934 err = unix_validate_addr(sunaddr, msg->msg_namelen);
1938 err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
1947 other = unix_peer_get(sk);
1952 if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1953 test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
1954 err = unix_autobind(sk);
1960 if (len > sk->sk_sndbuf - 32)
1963 if (len > SKB_MAX_ALLOC) {
1964 data_len = min_t(size_t,
1965 len - SKB_MAX_ALLOC,
1966 MAX_SKB_FRAGS * PAGE_SIZE);
1967 data_len = PAGE_ALIGN(data_len);
1969 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1972 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1973 msg->msg_flags & MSG_DONTWAIT, &err,
1974 PAGE_ALLOC_COSTLY_ORDER);
1978 err = unix_scm_to_skb(&scm, skb, true);
1982 skb_put(skb, len - data_len);
1983 skb->data_len = data_len;
1985 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1989 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1994 if (sunaddr == NULL)
1997 other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
1999 if (IS_ERR(other)) {
2000 err = PTR_ERR(other);
2006 if (sk_filter(other, skb) < 0) {
2007 /* Toss the packet but do not return any error to the sender */
2013 unix_state_lock(other);
2016 if (!unix_may_send(sk, other))
2019 if (unlikely(sock_flag(other, SOCK_DEAD))) {
2021 * Check with 1003.1g - what should
2024 unix_state_unlock(other);
2028 unix_state_lock(sk);
2031 if (sk->sk_type == SOCK_SEQPACKET) {
2032 /* We are here only when racing with unix_release_sock(),
2033 * which is clearing @other. Unlike SOCK_DGRAM, never
2034 * change the state to TCP_CLOSE.
2036 unix_state_unlock(sk);
2038 } else if (unix_peer(sk) == other) {
2039 unix_peer(sk) = NULL;
2040 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2042 sk->sk_state = TCP_CLOSE;
2043 unix_state_unlock(sk);
2045 unix_dgram_disconnected(sk, other);
2047 err = -ECONNREFUSED;
2049 unix_state_unlock(sk);
2059 if (other->sk_shutdown & RCV_SHUTDOWN)
2062 if (sk->sk_type != SOCK_SEQPACKET) {
2063 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2068 /* other == sk && unix_peer(other) != sk if
2069 * - unix_peer(sk) == NULL, destination address bound to sk
2070 * - unix_peer(sk) == sk by time of get but disconnected before lock
2073 unlikely(unix_peer(other) != sk &&
2074 unix_recvq_full_lockless(other))) {
2076 timeo = unix_wait_for_peer(other, timeo);
2078 err = sock_intr_errno(timeo);
2079 if (signal_pending(current))
2086 unix_state_unlock(other);
2087 unix_state_double_lock(sk, other);
2090 if (unix_peer(sk) != other ||
2091 unix_dgram_peer_wake_me(sk, other)) {
2099 goto restart_locked;
2103 if (unlikely(sk_locked))
2104 unix_state_unlock(sk);
2106 if (sock_flag(other, SOCK_RCVTSTAMP))
2107 __net_timestamp(skb);
2108 maybe_add_creds(skb, sock, other);
2109 scm_stat_add(other, skb);
2110 skb_queue_tail(&other->sk_receive_queue, skb);
2111 unix_state_unlock(other);
2112 other->sk_data_ready(other);
2119 unix_state_unlock(sk);
2120 unix_state_unlock(other);
2130 /* We use paged skbs for stream sockets, and limit occupancy to 32768
2131 * bytes, with a minimum of a full page.
2133 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
2135 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
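/* MSG_OOB on AF_UNIX streams carries exactly one byte, mirroring TCP's
 * urgent-pointer semantics: queue_oob() below allocates a 1-byte skb,
 * and a new urgent byte replaces any previously queued one (the old
 * oob_skb is consumed before u->oob_skb is repointed).
 */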
2136 static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2137 struct scm_cookie *scm, bool fds_sent)
2139 struct unix_sock *ousk = unix_sk(other);
2140 struct sk_buff *skb;
2143 skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2148 err = unix_scm_to_skb(scm, skb, !fds_sent);
2154 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2161 unix_state_lock(other);
2163 if (sock_flag(other, SOCK_DEAD) ||
2164 (other->sk_shutdown & RCV_SHUTDOWN)) {
2165 unix_state_unlock(other);
2170 maybe_add_creds(skb, sock, other);
2173 scm_stat_add(other, skb);
2175 spin_lock(&other->sk_receive_queue.lock);
2177 consume_skb(ousk->oob_skb);
2178 WRITE_ONCE(ousk->oob_skb, skb);
2179 __skb_queue_tail(&other->sk_receive_queue, skb);
2180 spin_unlock(&other->sk_receive_queue.lock);
2182 sk_send_sigurg(other);
2183 unix_state_unlock(other);
2184 other->sk_data_ready(other);
2190 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2193 struct sock *sk = sock->sk;
2194 struct sock *other = NULL;
2196 struct sk_buff *skb;
2198 struct scm_cookie scm;
2199 bool fds_sent = false;
2202 err = scm_send(sock, msg, &scm, false);
2206 wait_for_unix_gc(scm.fp);
2209 if (msg->msg_flags & MSG_OOB) {
2210 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2218 if (msg->msg_namelen) {
2219 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2223 other = unix_peer(sk);
2228 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2231 while (sent < len) {
2234 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2235 skb = sock_alloc_send_pskb(sk, 0, 0,
2236 msg->msg_flags & MSG_DONTWAIT,
2239 /* Keep two messages in the pipe so it schedules better */
2240 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2242 /* allow fallback to order-0 allocations */
2243 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2245 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2247 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2249 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2250 msg->msg_flags & MSG_DONTWAIT, &err,
2251 get_order(UNIX_SKB_FRAGS_SZ));
2256 /* Only send the fds in the first buffer */
2257 err = unix_scm_to_skb(&scm, skb, !fds_sent);
2264 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2265 err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2272 refcount_add(size, &sk->sk_wmem_alloc);
2274 skb_put(skb, size - data_len);
2275 skb->data_len = data_len;
2277 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2284 unix_state_lock(other);
2286 if (sock_flag(other, SOCK_DEAD) ||
2287 (other->sk_shutdown & RCV_SHUTDOWN))
2290 maybe_add_creds(skb, sock, other);
2291 scm_stat_add(other, skb);
2292 skb_queue_tail(&other->sk_receive_queue, skb);
2293 unix_state_unlock(other);
2294 other->sk_data_ready(other);
2298 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2299 if (msg->msg_flags & MSG_OOB) {
2300 err = queue_oob(sock, msg, other, &scm, fds_sent);
2312 unix_state_unlock(other);
2315 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2316 send_sig(SIGPIPE, current, 0);
2320 return sent ? : err;
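/* Stream semantics mirror pipes: if some bytes were queued before the
 * failure, the partial count is returned and the error is left for the
 * next call; SIGPIPE/EPIPE (via the send_sig() above) fire only when
 * nothing at all was sent.
 */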
2323 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2327 struct sock *sk = sock->sk;
2329 err = sock_error(sk);
2333 if (sk->sk_state != TCP_ESTABLISHED)
2336 if (msg->msg_namelen)
2337 msg->msg_namelen = 0;
2339 return unix_dgram_sendmsg(sock, msg, len);
2342 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2343 size_t size, int flags)
2345 struct sock *sk = sock->sk;
2347 if (sk->sk_state != TCP_ESTABLISHED)
2350 return unix_dgram_recvmsg(sock, msg, size, flags);
2353 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2355 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2358 msg->msg_namelen = addr->len;
2359 memcpy(msg->msg_name, addr->name, addr->len);
2363 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2366 struct scm_cookie scm;
2367 struct socket *sock = sk->sk_socket;
2368 struct unix_sock *u = unix_sk(sk);
2369 struct sk_buff *skb, *last;
2378 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2381 mutex_lock(&u->iolock);
2383 skip = sk_peek_offset(sk, flags);
2384 skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2385 &skip, &err, &last);
2387 if (!(flags & MSG_PEEK))
2388 scm_stat_del(sk, skb);
2392 mutex_unlock(&u->iolock);
2397 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2398 &err, &timeo, last));
2400 if (!skb) { /* implies iolock unlocked */
2401 unix_state_lock(sk);
2402 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2403 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2404 (sk->sk_shutdown & RCV_SHUTDOWN))
2406 unix_state_unlock(sk);
2410 if (wq_has_sleeper(&u->peer_wait))
2411 wake_up_interruptible_sync_poll(&u->peer_wait,
2412 EPOLLOUT | EPOLLWRNORM |
2415 if (msg->msg_name) {
2416 unix_copy_addr(msg, skb->sk);
2418 BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2423 if (size > skb->len - skip)
2424 size = skb->len - skip;
2425 else if (size < skb->len - skip)
2426 msg->msg_flags |= MSG_TRUNC;
2428 err = skb_copy_datagram_msg(skb, skip, msg, size);
2432 if (sock_flag(sk, SOCK_RCVTSTAMP))
2433 __sock_recv_timestamp(msg, sk, skb);
2435 memset(&scm, 0, sizeof(scm));
2437 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2438 unix_set_secdata(&scm, skb);
2440 if (!(flags & MSG_PEEK)) {
2442 unix_detach_fds(&scm, skb);
2444 sk_peek_offset_bwd(sk, skb->len);
2446 /* It is questionable: on PEEK we could:
2447 - do not return fds - good, but too simple 8)
2448 - return fds, and do not return them on read (old strategy,
2449 apparently wrong)
2450 - clone fds (I chose it for now, it is the most universal
2451 solution)
2453 POSIX 1003.1g does not actually define this clearly
2454 at all. POSIX 1003.1g doesn't define a lot of things
2455 clearly however!
2459 sk_peek_offset_fwd(sk, size);
2462 unix_peek_fds(&scm, skb);
2464 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2466 scm_recv_unix(sock, msg, &scm, flags);
2469 skb_free_datagram(sk, skb);
2470 mutex_unlock(&u->iolock);
2475 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2478 struct sock *sk = sock->sk;
2480 #ifdef CONFIG_BPF_SYSCALL
2481 const struct proto *prot = READ_ONCE(sk->sk_prot);
2483 if (prot != &unix_dgram_proto)
2484 return prot->recvmsg(sk, msg, size, flags, NULL);
2486 return __unix_dgram_recvmsg(sk, msg, size, flags);
2489 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2491 struct unix_sock *u = unix_sk(sk);
2492 struct sk_buff *skb;
2495 mutex_lock(&u->iolock);
2496 skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2497 mutex_unlock(&u->iolock);
2501 return recv_actor(sk, skb);
2505 * Sleep until more data has arrived. But check for races.
2507 static long unix_stream_data_wait(struct sock *sk, long timeo,
2508 struct sk_buff *last, unsigned int last_len,
2511 unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2512 struct sk_buff *tail;
2515 unix_state_lock(sk);
2518 prepare_to_wait(sk_sleep(sk), &wait, state);
2520 tail = skb_peek_tail(&sk->sk_receive_queue);
2522 (tail && tail->len != last_len) ||
2524 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2525 signal_pending(current) ||
2529 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2530 unix_state_unlock(sk);
2531 timeo = schedule_timeout(timeo);
2532 unix_state_lock(sk);
2534 if (sock_flag(sk, SOCK_DEAD))
2537 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2540 finish_wait(sk_sleep(sk), &wait);
2541 unix_state_unlock(sk);
2545 static unsigned int unix_skb_len(const struct sk_buff *skb)
2547 return skb->len - UNIXCB(skb).consumed;
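/* UNIXCB(skb).consumed records how much of this skb earlier stream
 * reads already took, so unix_skb_len() is the payload still unread;
 * an skb is only unlinked from the receive queue once this reaches
 * zero.
 */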
2550 struct unix_stream_read_state {
2551 int (*recv_actor)(struct sk_buff *, int, int,
2552 struct unix_stream_read_state *);
2553 struct socket *socket;
2555 struct pipe_inode_info *pipe;
2558 unsigned int splice_flags;
2561 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2562 static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2564 struct socket *sock = state->socket;
2565 struct sock *sk = sock->sk;
2566 struct unix_sock *u = unix_sk(sk);
2568 struct sk_buff *oob_skb;
2570 mutex_lock(&u->iolock);
2571 unix_state_lock(sk);
2572 spin_lock(&sk->sk_receive_queue.lock);
2574 if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2575 spin_unlock(&sk->sk_receive_queue.lock);
2576 unix_state_unlock(sk);
2577 mutex_unlock(&u->iolock);
2581 oob_skb = u->oob_skb;
2583 if (!(state->flags & MSG_PEEK))
2584 WRITE_ONCE(u->oob_skb, NULL);
2588 spin_unlock(&sk->sk_receive_queue.lock);
2589 unix_state_unlock(sk);
2591 chunk = state->recv_actor(oob_skb, 0, chunk, state);
2593 if (!(state->flags & MSG_PEEK))
2594 UNIXCB(oob_skb).consumed += 1;
2596 consume_skb(oob_skb);
2598 mutex_unlock(&u->iolock);
2603 state->msg->msg_flags |= MSG_OOB;
static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
				  int flags, int copied)
{
	struct unix_sock *u = unix_sk(sk);

	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
		skb_unlink(skb, &sk->sk_receive_queue);
		consume_skb(skb);
		skb = NULL;
	} else {
		struct sk_buff *unlinked_skb = NULL;

		spin_lock(&sk->sk_receive_queue.lock);

		if (skb == u->oob_skb) {
			if (copied) {
				skb = NULL;
			} else if (sock_flag(sk, SOCK_URGINLINE)) {
				if (!(flags & MSG_PEEK)) {
					WRITE_ONCE(u->oob_skb, NULL);
					consume_skb(skb);
				}
			} else if (flags & MSG_PEEK) {
				skb = NULL;
			} else {
				__skb_unlink(skb, &sk->sk_receive_queue);
				WRITE_ONCE(u->oob_skb, NULL);
				unlinked_skb = skb;
				skb = skb_peek(&sk->sk_receive_queue);
			}
		}

		spin_unlock(&sk->sk_receive_queue.lock);

		if (unlinked_skb) {
			WARN_ON_ONCE(skb_unref(unlinked_skb));
			kfree_skb(unlinked_skb);
		}
	}
	return skb;
}

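/* manage_oob() keeps an in-band read from silently consuming the
 * pending OOB byte: depending on MSG_PEEK and SO_OOBINLINE it either
 * ends the read just before the OOB skb, skips past it, or unlinks it
 * from the receive queue once it has been consumed.
 */
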
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
		return -ENOTCONN;

	return unix_read_skb(sk, recv_actor);
}

static int unix_stream_read_generic(struct unix_stream_read_state *state,
				    bool freezable)
{
	struct scm_cookie scm;
	struct socket *sock = state->socket;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int copied = 0;
	int flags = state->flags;
	int noblock = flags & MSG_DONTWAIT;
	bool check_creds = false;
	int target;
	int err = 0;
	long timeo;
	int skip;
	size_t size = state->size;
	unsigned int last_len;

	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
		err = -EINVAL;
		goto out;
	}

	if (unlikely(flags & MSG_OOB)) {
		err = -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		err = unix_stream_recv_urg(state);
#endif
		goto out;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, noblock);

	memset(&scm, 0, sizeof(scm));

	/* Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */
	mutex_lock(&u->iolock);

	skip = max(sk_peek_offset(sk, flags), 0);

	do {
		int chunk;
		bool drop_skb;
		struct sk_buff *skb, *last;

redo:
		unix_state_lock(sk);
		if (sock_flag(sk, SOCK_DEAD)) {
			err = -ECONNRESET;
			goto unlock;
		}
		last = skb = skb_peek(&sk->sk_receive_queue);
		last_len = last ? last->len : 0;

again:
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (skb) {
			skb = manage_oob(skb, sk, flags, copied);
			if (!skb && copied) {
				unix_state_unlock(sk);
				break;
			}
		}
#endif
		if (skb == NULL) {
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			if (!timeo) {
				err = -EAGAIN;
				break;
			}

			mutex_unlock(&u->iolock);

			timeo = unix_stream_data_wait(sk, timeo, last,
						      last_len, freezable);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				scm_destroy(&scm);
				goto out;
			}

			mutex_lock(&u->iolock);
			goto redo;
unlock:
			unix_state_unlock(sk);
			break;
		}

		while (skip >= unix_skb_len(skb)) {
			skip -= unix_skb_len(skb);
			last = skb;
			last_len = skb->len;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (!skb)
				goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if (!unix_skb_scm_eq(skb, &scm))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
			   test_bit(SOCK_PASSPIDFD, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			unix_set_secdata(&scm, skb);
			check_creds = true;
		}

		/* Copy address just once */
		if (state->msg && state->msg->msg_name) {
			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
					 state->msg->msg_name);

			unix_copy_addr(state->msg, skb->sk);

			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
							      state->msg->msg_name,
							      &state->msg->msg_namelen);

			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
		skb_get(skb);
		chunk = state->recv_actor(skb, skip, chunk, state);
		drop_skb = !unix_skb_len(skb);
		/* skb is only safe to use if !drop_skb */
		consume_skb(skb);
		if (chunk < 0) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		if (drop_skb) {
			/* the skb was touched by a concurrent reader;
			 * we should not expect anything from this skb
			 * anymore and assume it invalid - we can be
			 * sure it was dropped from the socket queue
			 *
			 * let's report a short read
			 */
			err = 0;
			break;
		}

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			UNIXCB(skb).consumed += chunk;

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp) {
				scm_stat_del(sk, skb);
				unix_detach_fds(&scm, skb);
			}

			if (unix_skb_len(skb))
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			if (scm.fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				unix_peek_fds(&scm, skb);

			sk_peek_offset_fwd(sk, chunk);

			if (scm.fp)
				break;

			skip = 0;
			last = skb;
			last_len = skb->len;
			unix_state_lock(sk);
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (skb)
				goto again;
			unix_state_unlock(sk);
			break;
		}
	} while (size);

	mutex_unlock(&u->iolock);
	if (state->msg)
		scm_recv_unix(sock, state->msg, &scm, flags);
	else
		scm_destroy(&scm);
out:
	return copied ? : err;
}

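/* Default recv_actor for recvmsg(): copy the not-yet-consumed part of
 * the skb into the user-supplied msghdr.  Returns the error from the
 * copy, or else the number of bytes copied.
 */
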
static int unix_stream_read_actor(struct sk_buff *skb,
				  int skip, int chunk,
				  struct unix_stream_read_state *state)
{
	int ret;

	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
				    state->msg, chunk);
	return ret ?: chunk;
}

int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
			  size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sk->sk_socket,
		.msg = msg,
		.size = size,
		.flags = flags
	};

	return unix_stream_read_generic(&state, true);
}

static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sock,
		.msg = msg,
		.size = size,
		.flags = flags
	};

#ifdef CONFIG_BPF_SYSCALL
	struct sock *sk = sock->sk;
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot != &unix_stream_proto)
		return prot->recvmsg(sk, msg, size, flags, NULL);
#endif

	return unix_stream_read_generic(&state, true);
}

static int unix_stream_splice_actor(struct sk_buff *skb,
				    int skip, int chunk,
				    struct unix_stream_read_state *state)
{
	return skb_splice_bits(skb, state->socket->sk,
			       UNIXCB(skb).consumed + skip,
			       state->pipe, chunk, state->splice_flags);
}

static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
				       struct pipe_inode_info *pipe,
				       size_t size, unsigned int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_splice_actor,
		.socket = sock,
		.pipe = pipe,
		.size = size,
		.splice_flags = flags,
	};

	if (unlikely(*ppos))
		return -ESPIPE;

	if (sock->file->f_flags & O_NONBLOCK ||
	    flags & SPLICE_F_NONBLOCK)
		state.flags = MSG_DONTWAIT;

	return unix_stream_read_generic(&state, false);
}

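/* The splice path reuses the generic reader with a pipe-filling actor,
 * so credential matching and fd handling follow the same rules as
 * recvmsg().  Illustrative userspace use (not part of this file):
 *
 *	splice(unix_fd, NULL, pipe_wr, NULL, 4096, SPLICE_F_NONBLOCK);
 */
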
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
		int peer_mode = 0;
		const struct proto *prot = READ_ONCE(other->sk_prot);

		if (prot->unhash)
			prot->unhash(other);
		if (mode & RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode & SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}

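/* unix_inq_len() backs the SIOCINQ ioctl: for stream and seqpacket
 * sockets it returns the unread payload bytes across the whole receive
 * queue; for datagram sockets, the size of the next datagram only.
 * Illustrative userspace use (not part of this file):
 *
 *	int unread;
 *	ioctl(fd, SIOCINQ, &unread);	// SIOCINQ is FIONREAD on Linux
 */
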
long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += unix_skb_len(skb);
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);

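/* SIOCUNIXFILE support: re-open the filesystem object a socket is
 * bound to as an O_PATH descriptor, letting privileged tools reference
 * the bound path without racing against rename() or unlink().
 * CAP_NET_ADMIN in the socket's user namespace is required.
 */
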
static int unix_open_file(struct sock *sk)
{
	struct path path;
	struct file *f;
	int fd;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (!smp_load_acquire(&unix_sk(sk)->addr))
		return -ENOENT;

	path = unix_sk(sk)->path;
	if (!path.dentry)
		return -ENOENT;

	path_get(&path);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out;

	f = dentry_open(&path, O_PATH, current_cred());
	if (IS_ERR(f)) {
		put_unused_fd(fd);
		fd = PTR_ERR(f);
		goto out;
	}

	fd_install(fd, f);
out:
	path_put(&path);

	return fd;
}

static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	case SIOCUNIXFILE:
		err = unix_open_file(sk);
		break;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	case SIOCATMARK:
		{
			struct sk_buff *skb;
			int answ = 0;

			skb = skb_peek(&sk->sk_receive_queue);
			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
				answ = 1;
			err = put_user(answ, (int __user *)arg);
		}
		break;
#endif
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}

#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;
	u8 shutdown;

	sock_poll_wait(file, sock, wait);
	mask = 0;
	shutdown = READ_ONCE(sk->sk_shutdown);

	/* exceptional events? */
	if (READ_ONCE(sk->sk_err))
		mask |= EPOLLERR;
	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (READ_ONCE(unix_sk(sk)->oob_skb))
		mask |= EPOLLPRI;
#endif

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= EPOLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}

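/* For datagram and seqpacket sockets, writability also depends on the
 * *peer's* receive queue: when it is full, unix_dgram_peer_wake_me()
 * registers this socket on the peer's wakeup list so the poller is
 * notified once the peer reader frees space.
 */
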
static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int writable;
	__poll_t mask;
	u8 shutdown;

	sock_poll_wait(file, sock, wait);
	mask = 0;
	shutdown = READ_ONCE(sk->sk_shutdown);

	/* exceptional events? */
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
		return mask;

	writable = unix_writable(sk);
	if (writable) {
		unix_state_lock(sk);

		other = unix_peer(sk);
		if (other && unix_peer(other) != sk &&
		    unix_recvq_full_lockless(other) &&
		    unix_dgram_peer_wake_me(sk, other))
			writable = 0;

		unix_state_unlock(sk);
	}

	if (writable)
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}

#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

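/* The seq_file position encodes (bucket, offset) in one long: e.g.
 * with UNIX_HASH_BITS == 8 on a 64-bit kernel, BUCKET_SPACE is
 * 64 - 9 - 1 = 54, so pos = (bucket << 54) | offset.  Offset 0 is
 * reserved for SEQ_START_TOKEN, which is why iteration of a fresh
 * bucket starts at offset 1.
 */
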
static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
	unsigned long offset = get_offset(*pos);
	unsigned long bucket = get_bucket(*pos);
	unsigned long count = 0;
	struct sock *sk;

	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
	     sk; sk = sk_next(sk)) {
		if (++count == offset)
			break;
	}

	return sk;
}

static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
{
	unsigned long bucket = get_bucket(*pos);
	struct net *net = seq_file_net(seq);
	struct sock *sk;

	while (bucket < UNIX_HASH_SIZE) {
		spin_lock(&net->unx.table.locks[bucket]);

		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

		spin_unlock(&net->unx.table.locks[bucket]);

		*pos = set_bucket_offset(++bucket, 1);
	}

	return NULL;
}

static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
				  loff_t *pos)
{
	unsigned long bucket = get_bucket(*pos);

	sk = sk_next(sk);
	if (sk)
		return sk;

	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);

	*pos = set_bucket_offset(++bucket, 1);

	return unix_get_first(seq, pos);
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;

	return unix_get_first(seq, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	if (v == SEQ_START_TOKEN)
		return unix_get_first(seq, pos);

	return unix_get_next(seq, v, pos);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
{
	struct sock *sk = v;

	if (sk)
		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			refcount_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {	/* under a hash table lock here */
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len -
				offsetof(struct sockaddr_un, sun_path);
			if (u->addr->name->sun_path[0]) {
				len--;
			} else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i] ?:
					 '@');
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}

static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};
#endif

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
struct bpf_unix_iter_state {
	struct seq_net_private p;
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	struct sock **batch;
	bool st_bucket_done;
};

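/* The bpf iterator cannot hold a bucket spinlock while the BPF program
 * runs (the program may sleep under lock_sock()), so each bucket is
 * snapshotted into iter->batch with a reference held on every socket,
 * and the bucket lock is dropped before the batch is walked.
 */
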
struct bpf_iter__unix {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct unix_sock *, unix_sk);
	uid_t uid __aligned(8);
};

static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			      struct unix_sock *unix_sk, uid_t uid)
{
	struct bpf_iter__unix ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.unix_sk = unix_sk;
	ctx.uid = uid;
	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected = 1;
	struct sock *sk;

	sock_hold(start_sk);
	iter->batch[iter->end_sk++] = start_sk;

	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
		if (iter->end_sk < iter->max_sk) {
			sock_hold(sk);
			iter->batch[iter->end_sk++] = sk;
		}

		expected++;
	}

	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);

	return expected;
}

static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
{
	while (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);
}

static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
				       unsigned int new_batch_sz)
{
	struct sock **new_batch;

	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
			     GFP_USER | __GFP_NOWARN);
	if (!new_batch)
		return -ENOMEM;

	bpf_iter_unix_put_batch(iter);
	kvfree(iter->batch);
	iter->batch = new_batch;
	iter->max_sk = new_batch_sz;

	return 0;
}

static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
					loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected;
	bool resized = false;
	struct sock *sk;

	if (iter->st_bucket_done)
		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);

again:
	/* Get a new batch */
	iter->cur_sk = 0;
	iter->end_sk = 0;

	sk = unix_get_first(seq, pos);
	if (!sk)
		return NULL; /* Done */

	expected = bpf_iter_unix_hold_batch(seq, sk);

	if (iter->end_sk == expected) {
		iter->st_bucket_done = true;
		return sk;
	}

	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
		resized = true;
		goto again;
	}

	return sk;
}

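/* If a bucket grew between unix_get_first() and hold_batch(), the
 * batch array may be too small; it is resized once to 3/2 of the
 * observed size and the bucket is re-read from the start.  A second
 * shortfall is tolerated and simply yields a partial batch.
 */
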
static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;

	/* bpf iter does not support lseek, so it always
	 * continues from where it was stop()-ped.
	 */
	return bpf_iter_unix_batch(seq, pos);
}

static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct sock *sk;

	/* Whenever seq_next() is called, the iter->cur_sk is
	 * done with seq_show(), so advance to the next sk in
	 * the batch.
	 */
	if (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);

	++*pos;

	if (iter->cur_sk < iter->end_sk)
		sk = iter->batch[iter->cur_sk];
	else
		sk = bpf_iter_unix_batch(seq, pos);

	return sk;
}

static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;
	bool slow;
	int ret;

	if (v == SEQ_START_TOKEN)
		return 0;

	slow = lock_sock_fast(sk);

	if (unlikely(sk_unhashed(sk))) {
		ret = SEQ_SKIP;
		goto unlock;
	}

	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	ret = unix_prog_seq_show(prog, &meta, v, uid);
unlock:
	unlock_sock_fast(sk, slow);
	return ret;
}

static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)unix_prog_seq_show(prog, &meta, v, 0);
	}

	if (iter->cur_sk < iter->end_sk)
		bpf_iter_unix_put_batch(iter);
}

static const struct seq_operations bpf_iter_unix_seq_ops = {
	.start	= bpf_iter_unix_seq_start,
	.next	= bpf_iter_unix_seq_next,
	.stop	= bpf_iter_unix_seq_stop,
	.show	= bpf_iter_unix_seq_show,
};
#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};

static int __net_init unix_net_init(struct net *net)
{
	int i;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
			     sizeof(struct seq_net_private)))
		goto err_sysctl;
#endif

	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
					      sizeof(spinlock_t), GFP_KERNEL);
	if (!net->unx.table.locks)
		goto err_proc;

	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
						sizeof(struct hlist_head),
						GFP_KERNEL);
	if (!net->unx.table.buckets)
		goto free_locks;

	for (i = 0; i < UNIX_HASH_SIZE; i++) {
		spin_lock_init(&net->unx.table.locks[i]);
		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
	}

	return 0;

free_locks:
	kvfree(net->unx.table.locks);
err_proc:
#ifdef CONFIG_PROC_FS
	remove_proc_entry("unix", net->proc_net);
err_sysctl:
#endif
	unix_sysctl_unregister(net);
out:
	return -ENOMEM;
}

static void __net_exit unix_net_exit(struct net *net)
{
	kvfree(net->unx.table.buckets);
	kvfree(net->unx.table.locks);
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
		     struct unix_sock *unix_sk, uid_t uid)

#define INIT_BATCH_SZ 16

static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_unix_iter_state *iter = priv_data;
	int err;

	err = bpf_iter_init_seq_net(priv_data, aux);
	if (err)
		return err;

	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
	if (err) {
		bpf_iter_fini_seq_net(priv_data);
		return err;
	}

	return 0;
}

static void bpf_iter_fini_unix(void *priv_data)
{
	struct bpf_unix_iter_state *iter = priv_data;

	bpf_iter_fini_seq_net(priv_data);
	kvfree(iter->batch);
}

static const struct bpf_iter_seq_info unix_seq_info = {
	.seq_ops		= &bpf_iter_unix_seq_ops,
	.init_seq_private	= bpf_iter_init_unix,
	.fini_seq_private	= bpf_iter_fini_unix,
	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
};

static const struct bpf_func_proto *
bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
			     const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_sk_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_sk_getsockopt_proto;
	default:
		return NULL;
	}
}

static struct bpf_iter_reg unix_reg_info = {
	.target			= "unix",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__unix, unix_sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.get_func_proto		= bpf_iter_unix_get_func_proto,
	.seq_info		= &unix_seq_info,
};

static void __init bpf_iter_register(void)
{
	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
	if (bpf_iter_reg_target(&unix_reg_info))
		pr_warn("Warning: could not register bpf iterator unix\n");
}
#endif

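/* Illustrative userspace flow (not part of this file): a BPF program
 * of type "iter/unix" can be pinned with bpftool and then read like a
 * file, e.g.:
 *
 *	bpftool iter pin ./unix_iter.o /sys/fs/bpf/unix_iter
 *	cat /sys/fs/bpf/unix_iter
 */
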
static int __init af_unix_init(void)
{
	int i, rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));

	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
		spin_lock_init(&bsd_socket_locks[i]);
		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
	}

	rc = proto_register(&unix_dgram_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		goto out;
	}

	rc = proto_register(&unix_stream_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		proto_unregister(&unix_dgram_proto);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
	unix_bpf_build_proto();

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif

out:
	return rc;
}

/* Later than subsys_initcall() because we depend on stuff initialised there */
fs_initcall(af_unix_init);