// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 *	Linus Torvalds	:	Assorted bug cures.
 *	Niibe Yutaka	:	async I/O support.
 *	Carsten Paeth	:	PF_UNIX check, address fixes.
 *	Alan Cox	:	Limit size of allocated blocks.
 *	Alan Cox	:	Fixed the stupid socketpair bug.
 *	Alan Cox	:	BSD compatibility fine tuning.
 *	Alan Cox	:	Fixed a bug in connect when interrupted.
 *	Alan Cox	:	Sorted out a proper draft version of
 *				file descriptor passing hacked up from
 *	Marty Leisner	:	Fixes to fd passing
 *	Nick Nevin	:	recvmsg bugfix.
 *	Alan Cox	:	Started proper garbage collector
 *	Heiko Eißfeldt	:	Missing verify_area check
 *	Alan Cox	:	Started POSIXisms
 *	Andreas Schwab	:	Replace inode by dentry for proper
 *				reference counting
 *	Kirk Petersen	:	Made this a module
 *	Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *	Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by above two patches.
 *	Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid a huge amount
 *					of hashed socks (for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receive queue.
 *	Artur Skawina	:	Hash function optimizations
 *	Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are introduced 8)
 *	Malcolm Beattie	:	Set peercred for socketpair
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *	other the moment one end closes.
 *	fstat() doesn't return st_dev=0; it gives the blksize as the high water
 *	mark and a fake inode identifier (nor the BSD first-socket-fstat-twice
 *	bug).
 *	accept() returns a path name even if the connecting socket has closed
 *	in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *	and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *	(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  started by 0, so that this name space does not intersect
 *		  with BSD names.
 */
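
/* For illustration of the abstract namespace described above, a hedged
 * userspace sketch (hypothetical descriptor "fd", not code from this
 * file). The name is the full addr_len bytes of sun_path, starting with
 * a zero byte, and is not a C string:
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *
 *	memcpy(sun.sun_path, "\0example", 8);
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 8);
 *
 * Such a socket never touches the filesystem and is listed with a
 * leading '@' in /proc/net/unix.
 */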

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/splice.h>
#include <linux/freezer.h>
#include <linux/file.h>
#include <linux/btf_ids.h>
#include <linux/bpf-cgroup.h>

static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];

/* SMP locking strategy:
 * hash table is protected with spinlock.
 * each socket state is protected by separate spinlock.
 */

static unsigned int unix_unbound_hash(struct sock *sk)
{
	unsigned long hash = (unsigned long)sk;

	return hash & UNIX_HASH_MOD;
}

static unsigned int unix_bsd_hash(struct inode *i)
{
	return i->i_ino & UNIX_HASH_MOD;
}

static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
				       int addr_len, int type)
{
	__wsum csum = csum_partial(sunaddr, addr_len, 0);
	unsigned int hash;

	hash = (__force unsigned int)csum_fold(csum);

	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
}
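
/* A worked example of the hash partitioning above, assuming the current
 * UNIX_HASH_MOD of 255: unbound and pathname (BSD) sockets hash into
 * buckets [0, 255], while unix_abstract_hash() adds UNIX_HASH_MOD + 1,
 * so abstract names land in [256, 511]. An abstract name whose folded
 * checksum is 0x1234 therefore hashes to 256 + (0x1234 & 255) = 308.
 */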

static void unix_table_double_lock(struct net *net,
				   unsigned int hash1, unsigned int hash2)
{
	if (hash1 == hash2) {
		spin_lock(&net->unx.table.locks[hash1]);
		return;
	}

	if (hash1 > hash2)
		swap(hash1, hash2);

	spin_lock(&net->unx.table.locks[hash1]);
	spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
}

static void unix_table_double_unlock(struct net *net,
				     unsigned int hash1, unsigned int hash2)
{
	if (hash1 == hash2) {
		spin_unlock(&net->unx.table.locks[hash1]);
		return;
	}

	spin_unlock(&net->unx.table.locks[hash1]);
	spin_unlock(&net->unx.table.locks[hash2]);
}
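
/* The ordering above matters: two threads taking the same pair of
 * bucket locks in opposite order could deadlock (thread A holds
 * locks[1] and waits for locks[7] while thread B holds locks[7] and
 * waits for locks[1]). Sorting hash1 < hash2 before locking makes the
 * order global; spin_lock_nested() only tells lockdep that taking two
 * locks of the same class here is intentional.
 */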

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
}
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
}
static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(const struct sock *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

static inline int unix_recvq_full_lockless(const struct sock *sk)
{
	return skb_queue_len_lockless(&sk->sk_receive_queue) >
		READ_ONCE(sk->sk_max_ack_backlog);
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
					     int addr_len)
{
	struct unix_address *addr;

	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
	if (!addr)
		return NULL;

	refcount_set(&addr->refcnt, 1);
	addr->len = addr_len;
	memcpy(addr->name, sunaddr, addr_len);

	return addr;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (refcount_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 * Check unix socket name:
 * - should not be zero length.
 * - if it does not start with a zero byte, it should be NUL terminated
 *   (an FS object).
 * - if it starts with a zero byte, it is an abstract name.
 */

static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
{
	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
	    addr_len > sizeof(*sunaddr))
		return -EINVAL;

	if (sunaddr->sun_family != AF_UNIX)
		return -EINVAL;

	return 0;
}

static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
{
	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
	short offset = offsetof(struct sockaddr_storage, __data);

	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));

	/* This may look like an off-by-one error but it is a bit more
	 * subtle. 108 is the longest valid AF_UNIX path for a binding.
	 * sun_path[108] doesn't as such exist. However in kernel space
	 * we are guaranteed that it is a valid memory location in our
	 * kernel address buffer because syscall functions always pass
	 * a pointer of struct sockaddr_storage which has a bigger buffer
	 * than 108. Also, we must terminate sun_path for strlen() in
	 * getname_kernel().
	 */
	addr->__data[addr_len - offset] = 0;

	/* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will
	 * cause a panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen()
	 * know the actual buffer.
	 */
	return strlen(addr->__data) + offset + 1;
}
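
/* Why the termination matters, with a hypothetical caller: userspace may
 * pass a sun_path that fills all 108 bytes with no trailing NUL:
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *
 *	memset(sun.sun_path, 'a', sizeof(sun.sun_path));
 *	bind(fd, (struct sockaddr *)&sun, sizeof(sun));
 *
 * That is a valid bind; unix_mkname_bsd() writes the terminating NUL one
 * byte past sun_path, into the sockaddr_storage copy, so the later
 * strlen() and path lookup see a proper C string.
 */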

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct net *net, struct sock *sk)
{
	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
}

static void __unix_set_addr_hash(struct net *net, struct sock *sk,
				 struct unix_address *addr, unsigned int hash)
{
	__unix_remove_socket(sk);
	smp_store_release(&unix_sk(sk)->addr, addr);

	sk->sk_hash = hash;
	__unix_insert_socket(net, sk);
}

static void unix_remove_socket(struct net *net, struct sock *sk)
{
	spin_lock(&net->unx.table.locks[sk->sk_hash]);
	__unix_remove_socket(sk);
	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
{
	spin_lock(&net->unx.table.locks[sk->sk_hash]);
	__unix_insert_socket(net, sk);
	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_bsd_socket(struct sock *sk)
{
	spin_lock(&bsd_socket_locks[sk->sk_hash]);
	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
}

static void unix_remove_bsd_socket(struct sock *sk)
{
	if (!hlist_unhashed(&sk->sk_bind_node)) {
		spin_lock(&bsd_socket_locks[sk->sk_hash]);
		__sk_del_bind_node(sk);
		spin_unlock(&bsd_socket_locks[sk->sk_hash]);

		sk_node_init(&sk->sk_bind_node);
	}
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &net->unx.table.buckets[hash]) {
		struct unix_sock *u = unix_sk(s);

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			return s;
	}
	return NULL;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, unsigned int hash)
{
	struct sock *s;

	spin_lock(&net->unx.table.locks[hash]);
	s = __unix_find_socket_byname(net, sunname, len, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&net->unx.table.locks[hash]);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	unsigned int hash = unix_bsd_hash(i);
	struct sock *s;

	spin_lock(&bsd_socket_locks[hash]);
	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			spin_unlock(&bsd_socket_locks[hash]);
			return s;
		}
	}
	spin_unlock(&bsd_socket_locks[hash]);
	return NULL;
}

/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket not itself connected
 * to the first socket (e.g., /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large". This means there's a second writeability condition
 * poll and sendmsg need to test. The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram which needs to be propagated to sleeping would-be writers
 * since these might not have sent anything so far. This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_entry_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition and broken when the
 * association to the server socket is dissolved or after a wake up
 * was relayed.
 */
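
/* A concrete instance of the scenario described above, sketched from
 * userspace (hypothetical fd "c"): many clients connect() a SOCK_DGRAM
 * socket to a /dev/log-style server that never connects back. When the
 * server's receive queue is full, a client's poll() for POLLOUT must
 * sleep until the server reads, and the wakeup has to be relayed via
 * the server's peer_wait queue because the client may never have
 * queued anything itself:
 *
 *	connect(c, (struct sockaddr *)&log_addr, len);
 *	struct pollfd pfd = { .fd = c, .events = POLLOUT };
 *	poll(&pfd, 1, -1);	// sleeps while the receive queue is full
 */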

static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
				      void *key)
{
	struct unix_sock *u;
	wait_queue_head_t *u_sleep;

	u = container_of(q, struct unix_sock, peer_wake);

	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
			    q);
	u->peer_wake.private = NULL;

	/* relaying can only happen while the wq still exists */
	u_sleep = sk_sleep(&u->sk);
	if (u_sleep)
		wake_up_interruptible_poll(u_sleep, key_to_poll(key));

	return 0;
}

static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
	struct unix_sock *u, *u_other;
	int rc;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	rc = 0;
	spin_lock(&u_other->peer_wait.lock);

	if (!u->peer_wake.private) {
		u->peer_wake.private = other;
		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);

		rc = 1;
	}

	spin_unlock(&u_other->peer_wait.lock);
	return rc;
}

static void unix_dgram_peer_wake_disconnect(struct sock *sk,
					    struct sock *other)
{
	struct unix_sock *u, *u_other;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	spin_lock(&u_other->peer_wait.lock);

	if (u->peer_wake.private == other) {
		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
		u->peer_wake.private = NULL;
	}

	spin_unlock(&u_other->peer_wait.lock);
}

static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
						   struct sock *other)
{
	unix_dgram_peer_wake_disconnect(sk, other);
	wake_up_interruptible_poll(sk_sleep(sk),
				   EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
}

/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
	int connected;

	connected = unix_dgram_peer_wake_connect(sk, other);

	/* If other is SOCK_DEAD, we want to make sure we signal
	 * POLLOUT, such that a subsequent write() can get a
	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
	 * to other and it's full, we will hang waiting for POLLOUT.
	 */
	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
		return 1;

	if (connected)
		unix_dgram_peer_wake_disconnect(sk, other);

	return 0;
}

static int unix_writable(const struct sock *sk)
{
	return sk->sk_state != TCP_LISTEN &&
	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
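
/* Arithmetic note on unix_writable(): (wmem_alloc << 2) <= sndbuf means
 * a socket counts as writable only while queued write memory is at most
 * a quarter of sk_sndbuf. Assuming the common 212992-byte default for
 * sk_sndbuf, writability ends once roughly 53248 bytes of sent skbs
 * remain unconsumed by the receiver.
 */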

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
							EPOLLOUT | EPOLLWRNORM |
							EPOLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from the previous peer. First, it allows us to do
 * flow control based only on wmem_alloc; second, sk connected to peer
 * may receive messages only from that peer.
 */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of bidirectional dgram pipe is disconnected,
		 * we signal error. Messages are lost. Do not do this when
		 * the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			WRITE_ONCE(other->sk_err, ECONNRESET);
			sk_error_report(other);
		}
	}

	other->sk_state = TCP_CLOSE;
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		 atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sock_net(sk), sk);
	unix_remove_bsd_socket(sk);

	unix_state_lock(sk);
	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
	u->path.dentry = NULL;

	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;

	skpair = unix_peer(sk);
	unix_peer(sk) = NULL;

	unix_state_unlock(sk);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (u->oob_skb) {
		kfree_skb(u->oob_skb);
		u->oob_skb = NULL;
	}
#endif

	wake_up_interruptible_all(&u->peer_wait);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				WRITE_ONCE(skpair->sk_err, ECONNRESET);
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}

		unix_dgram_peer_wake_disconnect(sk, skpair);
		sock_put(skpair); /* It may now die */
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		UNIXCB(skb).consumed = skb->len;
		kfree_skb(skb);
	}

	/* ---- Socket is dead now and most probably destroyed ---- */

	/* Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 * What does the above comment talk about? --ANK(980817)
	 */

	if (READ_ONCE(unix_tot_inflight))
		unix_gc(); /* Garbage collect fds */
}

static void init_peercred(struct sock *sk)
{
	const struct cred *old_cred;
	struct pid *old_pid;

	spin_lock(&sk->sk_peer_lock);
	old_pid = sk->sk_peer_pid;
	old_cred = sk->sk_peer_cred;
	sk->sk_peer_pid = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
	spin_unlock(&sk->sk_peer_lock);

	put_pid(old_pid);
	put_cred(old_cred);
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	const struct cred *old_cred;
	struct pid *old_pid;

	if (sk < peersk) {
		spin_lock(&sk->sk_peer_lock);
		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&peersk->sk_peer_lock);
		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
	}

	old_pid = sk->sk_peer_pid;
	old_cred = sk->sk_peer_cred;
	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);

	spin_unlock(&sk->sk_peer_lock);
	spin_unlock(&peersk->sk_peer_lock);

	put_pid(old_pid);
	put_cred(old_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog = backlog;
	sk->sk_state = TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
				poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#endif
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
				       struct pipe_inode_info *, size_t size,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
				  int);

#ifdef CONFIG_PROC_FS
static int unix_count_nr_fds(struct sock *sk)
{
	struct sk_buff *skb;
	struct unix_sock *u;
	int nr_fds = 0;

	spin_lock(&sk->sk_receive_queue.lock);
	skb = skb_peek(&sk->sk_receive_queue);
	while (skb) {
		u = unix_sk(skb->sk);
		nr_fds += atomic_read(&u->scm_stat.nr_fds);
		skb = skb_peek_next(skb, &sk->sk_receive_queue);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return nr_fds;
}

static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
	struct sock *sk = sock->sk;
	unsigned char s_state;
	struct unix_sock *u;
	int nr_fds = 0;

	if (sk) {
		s_state = READ_ONCE(sk->sk_state);
		u = unix_sk(sk);

		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
		 * SOCK_DGRAM is ordinary. So, no lock is needed.
		 */
		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
			nr_fds = atomic_read(&u->scm_stat.nr_fds);
		else if (s_state == TCP_LISTEN)
			nr_fds = unix_count_nr_fds(sk);

		seq_printf(m, "scm_fds: %u\n", nr_fds);
	}
}
#else
#define unix_show_fdinfo NULL
#endif

static const struct proto_ops unix_stream_ops = {
	.family		= PF_UNIX,
	.owner		= THIS_MODULE,
	.release	= unix_release,
	.bind		= unix_bind,
	.connect	= unix_stream_connect,
	.socketpair	= unix_socketpair,
	.accept		= unix_accept,
	.getname	= unix_getname,
	.poll		= unix_poll,
	.ioctl		= unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= unix_compat_ioctl,
#endif
	.listen		= unix_listen,
	.shutdown	= unix_shutdown,
	.sendmsg	= unix_stream_sendmsg,
	.recvmsg	= unix_stream_recvmsg,
	.read_skb	= unix_stream_read_skb,
	.mmap		= sock_no_mmap,
	.splice_read	= unix_stream_splice_read,
	.set_peek_off	= sk_set_peek_off,
	.show_fdinfo	= unix_show_fdinfo,
};

static const struct proto_ops unix_dgram_ops = {
	.family		= PF_UNIX,
	.owner		= THIS_MODULE,
	.release	= unix_release,
	.bind		= unix_bind,
	.connect	= unix_dgram_connect,
	.socketpair	= unix_socketpair,
	.accept		= sock_no_accept,
	.getname	= unix_getname,
	.poll		= unix_dgram_poll,
	.ioctl		= unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= unix_compat_ioctl,
#endif
	.listen		= sock_no_listen,
	.shutdown	= unix_shutdown,
	.sendmsg	= unix_dgram_sendmsg,
	.read_skb	= unix_read_skb,
	.recvmsg	= unix_dgram_recvmsg,
	.mmap		= sock_no_mmap,
	.set_peek_off	= sk_set_peek_off,
	.show_fdinfo	= unix_show_fdinfo,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family		= PF_UNIX,
	.owner		= THIS_MODULE,
	.release	= unix_release,
	.bind		= unix_bind,
	.connect	= unix_stream_connect,
	.socketpair	= unix_socketpair,
	.accept		= unix_accept,
	.getname	= unix_getname,
	.poll		= unix_dgram_poll,
	.ioctl		= unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= unix_compat_ioctl,
#endif
	.listen		= unix_listen,
	.shutdown	= unix_shutdown,
	.sendmsg	= unix_seqpacket_sendmsg,
	.recvmsg	= unix_seqpacket_recvmsg,
	.mmap		= sock_no_mmap,
	.set_peek_off	= sk_set_peek_off,
	.show_fdinfo	= unix_show_fdinfo,
};

static void unix_close(struct sock *sk, long timeout)
{
	/* Nothing to do here, unix socket does not need a ->close().
	 * This is merely for sockmap.
	 */
}

static void unix_unhash(struct sock *sk)
{
	/* Nothing to do here, unix socket does not need a ->unhash().
	 * This is merely for sockmap.
	 */
}

static bool unix_bpf_bypass_getsockopt(int level, int optname)
{
	if (level == SOL_SOCKET) {
		switch (optname) {
		case SO_PEERPIDFD:
			return true;
		default:
			return false;
		}
	}

	return false;
}

struct proto unix_dgram_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
#endif
};

struct proto unix_stream_proto = {
	.name			= "UNIX-STREAM",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
	.unhash			= unix_unhash,
	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
#endif
};

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
{
	struct unix_sock *u;
	struct sock *sk;
	int err;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
		err = -ENFILE;
		goto err;
	}

	if (type == SOCK_STREAM)
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
	else /* dgram and seqpacket */
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);

	if (!sk) {
		err = -ENOMEM;
		goto err;
	}

	sock_init_data(sock, sk);

	sk->sk_hash		= unix_unbound_hash(sk);
	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->iolock); /* single task reading lock */
	mutex_init(&u->bindlock); /* single task binding lock */
	init_waitqueue_head(&u->peer_wait);
	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
	unix_insert_unbound_socket(net, sk);

	sock_prot_inuse_add(net, sk->sk_prot, 1);

	return sk;

err:
	atomic_long_dec(&unix_nr_socks);
	return ERR_PTR(err);
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 * nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		fallthrough;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = unix_create1(net, sock, kern, sock->type);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	return 0;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sk->sk_prot->close(sk, 0);
	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}

static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
				  int type)
{
	struct inode *inode;
	struct path path;
	struct sock *sk;
	int err;

	unix_mkname_bsd(sunaddr, addr_len);
	err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
	if (err)
		goto fail;

	err = path_permission(&path, MAY_WRITE);
	if (err)
		goto path_put;

	err = -ECONNREFUSED;
	inode = d_backing_inode(path.dentry);
	if (!S_ISSOCK(inode->i_mode))
		goto path_put;

	sk = unix_find_socket_byinode(inode);
	if (!sk)
		goto path_put;

	err = -EPROTOTYPE;
	if (sk->sk_type == type)
		touch_atime(&path);
	else
		goto sock_put;

	path_put(&path);

	return sk;

sock_put:
	sock_put(sk);
path_put:
	path_put(&path);
fail:
	return ERR_PTR(err);
}

static struct sock *unix_find_abstract(struct net *net,
				       struct sockaddr_un *sunaddr,
				       int addr_len, int type)
{
	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
	struct dentry *dentry;
	struct sock *sk;

	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
	if (!sk)
		return ERR_PTR(-ECONNREFUSED);

	dentry = unix_sk(sk)->path.dentry;
	if (dentry)
		touch_atime(&unix_sk(sk)->path);

	return sk;
}

static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunaddr,
				    int addr_len, int type)
{
	struct sock *sk;

	if (sunaddr->sun_path[0])
		sk = unix_find_bsd(sunaddr, addr_len, type);
	else
		sk = unix_find_abstract(net, sunaddr, addr_len, type);

	return sk;
}

static int unix_autobind(struct sock *sk)
{
	unsigned int new_hash, old_hash = sk->sk_hash;
	struct unix_sock *u = unix_sk(sk);
	struct net *net = sock_net(sk);
	struct unix_address *addr;
	u32 lastnum, ordernum;
	int err;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		return err;

	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) +
		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
	addr->name->sun_family = AF_UNIX;
	refcount_set(&addr->refcnt, 1);

	ordernum = get_random_u32();
	lastnum = ordernum & 0xFFFFF;
retry:
	ordernum = (ordernum + 1) & 0xFFFFF;
	sprintf(addr->name->sun_path + 1, "%05x", ordernum);

	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(net, old_hash, new_hash);

	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
		unix_table_double_unlock(net, old_hash, new_hash);

		/* __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();

		if (ordernum == lastnum) {
			/* Give up if all names seem to be in use. */
			err = -ENOSPC;
			unix_release_addr(addr);
			goto out;
		}

		goto retry;
	}

	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	err = 0;

out:	mutex_unlock(&u->bindlock);
	return err;
}
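
/* Sketch of what unix_autobind() produces, per the code above: a 6-byte
 * abstract name, a leading zero byte followed by five hex digits of
 * ordernum (hence addr->len = offsetof(struct sockaddr_un, sun_path)
 * + 6). A hypothetical autobound socket with ordernum 0xa3f2c would
 * show up in /proc/net/unix as "@a3f2c".
 */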

static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
			 int addr_len)
{
	umode_t mode = S_IFSOCK |
	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
	unsigned int new_hash, old_hash = sk->sk_hash;
	struct unix_sock *u = unix_sk(sk);
	struct net *net = sock_net(sk);
	struct mnt_idmap *idmap;
	struct unix_address *addr;
	struct dentry *dentry;
	struct path parent;
	int err;

	addr_len = unix_mkname_bsd(sunaddr, addr_len);
	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	/*
	 * Get the parent directory, calculate the hash for last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out;
	}

	/*
	 * All right, let's create it.
	 */
	idmap = mnt_idmap(parent.mnt);
	err = security_path_mknod(&parent, dentry, mode, 0);
	if (!err)
		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
	if (err)
		goto out_path;
	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out_unlink;
	if (u->addr)
		goto out_unlock;

	new_hash = unix_bsd_hash(d_backing_inode(dentry));
	unix_table_double_lock(net, old_hash, new_hash);
	u->path.mnt = mntget(parent.mnt);
	u->path.dentry = dget(dentry);
	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	unix_insert_bsd_socket(sk);
	mutex_unlock(&u->bindlock);
	done_path_create(&parent, dentry);
	return 0;

out_unlock:
	err = -EINVAL;
	mutex_unlock(&u->bindlock);
out_unlink:
	/* failed after successful mknod? unlink what we'd created... */
	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
out_path:
	done_path_create(&parent, dentry);
out:
	unix_release_addr(addr);
	return err == -EEXIST ? -EADDRINUSE : err;
}

static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
			      int addr_len)
{
	unsigned int new_hash, old_hash = sk->sk_hash;
	struct unix_sock *u = unix_sk(sk);
	struct net *net = sock_net(sk);
	struct unix_address *addr;
	int err;

	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out;

	if (u->addr) {
		err = -EINVAL;
		goto out_mutex;
	}

	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(net, old_hash, new_hash);

	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
		goto out_spin;

	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	mutex_unlock(&u->bindlock);
	return 0;

out_spin:
	unix_table_double_unlock(net, old_hash, new_hash);
	err = -EADDRINUSE;
out_mutex:
	mutex_unlock(&u->bindlock);
out:
	unix_release_addr(addr);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	int err;

	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
	    sunaddr->sun_family == AF_UNIX)
		return unix_autobind(sk);

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		return err;

	if (sunaddr->sun_path[0])
		err = unix_bind_bsd(sk, sunaddr, addr_len);
	else
		err = unix_bind_abstract(sk, sunaddr, addr_len);

	return err;
}

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}

	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2, U_LOCK_SECOND);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1, U_LOCK_SECOND);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}

	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *sk = sock->sk;
	struct sock *other;
	int err;

	err = -EINVAL;
	if (alen < offsetofend(struct sockaddr, sa_family))
		goto out;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_validate_addr(sunaddr, alen);
		if (err)
			goto out;

		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
		if (err)
			goto out;

		if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
		     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
		    !unix_sk(sk)->addr) {
			err = unix_autobind(sk);
			if (err)
				goto out;
		}

restart:
		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
		if (IS_ERR(other)) {
			err = PTR_ERR(other);
			goto out;
		}

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
	} else {
		/*
		 * 1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);

		unix_peer(sk) = other;
		if (sk->sk_type == SOCK_SEQPACKET && !other)
			sk->sk_state = TCP_CLOSE;
		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}

	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
	__releases(&unix_sk(other)->lock)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full_lockless(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	long timeo;
	int err;
	int st;

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		goto out;

	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
	if (err)
		goto out;

	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
		err = unix_autobind(sk);
		if (err)
			goto out;
	}

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	 * If we allocate after the state is locked,
	 * we will have to recheck everything again in any case.
	 */

	/* create new sock for complete connection */
	newsk = unix_create1(net, NULL, 0, sock->type);
	if (IS_ERR(newsk)) {
		err = PTR_ERR(newsk);
		newsk = NULL;
		goto out;
	}

	err = -ENOMEM;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
	if (IS_ERR(other)) {
		err = PTR_ERR(other);
		other = NULL;
		goto out;
	}

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.
	 *
	 * It is a tricky place. We need to grab our state lock and cannot
	 * drop the lock on the peer. It is dangerous because a deadlock is
	 * possible. The connect-to-self case and simultaneous connect
	 * attempts are eliminated by checking socket state: other is
	 * TCP_LISTEN, and if sk is TCP_LISTEN we check this before
	 * attempting to grab the lock.
	 *
	 * Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk, U_LOCK_SECOND);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock
	 *
	 * The contents of *(otheru->addr) and otheru->path
	 * are seen fully set up here, since we have found
	 * otheru in hash under its lock. Insertion into the
	 * hash chain we'd found it in had been done in an
	 * earlier critical area protected by the chain's lock,
	 * the same one where we'd set *(otheru->addr) contents,
	 * as well as otheru->path and otheru->addr itself.
	 *
	 * Using smp_store_release() here to set newu->addr
	 * is enough to make those stores, as well as stores
	 * to newu->path visible to anyone who gets newu->addr
	 * by smp_load_acquire(). IOW, the same guarantees
	 * as for unix_sock instances bound in unix_bind() or
	 * in unix_autobind().
	 */
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}
	refcount_inc(&otheru->addr->refcnt);
	smp_store_release(&newu->addr, otheru->addr);

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	ska->sk_state = TCP_ESTABLISHED;
	skb->sk_state = TCP_ESTABLISHED;
	socka->state  = SS_CONNECTED;
	sockb->state  = SS_CONNECTED;
	return 0;
}

static void unix_sock_inherit_flags(const struct socket *old,
				    struct socket *new)
{
	if (test_bit(SOCK_PASSCRED, &old->flags))
		set_bit(SOCK_PASSCRED, &new->flags);
	if (test_bit(SOCK_PASSPIDFD, &old->flags))
		set_bit(SOCK_PASSPIDFD, &new->flags);
	if (test_bit(SOCK_PASSSEC, &old->flags))
		set_bit(SOCK_PASSSEC, &new->flags);
}

static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
		       bool kern)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
				&err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	unix_sock_inherit_flags(sock, newsock);
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}

static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_address *addr;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	addr = smp_load_acquire(&unix_sk(sk)->addr);
	if (!addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		err = offsetof(struct sockaddr_un, sun_path);
	} else {
		err = addr->len;
		memcpy(sunaddr, addr->name, addr->len);

		if (peer)
			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
					       CGROUP_UNIX_GETPEERNAME);
		else
			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
					       CGROUP_UNIX_GETSOCKNAME);
	}

	sock_put(sk);
out:
	return err;
}

/* The "user->unix_inflight" variable is protected by the garbage
 * collection lock, and we just read it locklessly here. If you go
 * over the limit, there might be a tiny race in actually noticing
 * it across threads. Tough.
 */
static inline bool too_many_unix_fds(struct task_struct *p)
{
	struct user_struct *user = current_user();

	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
	return false;
}

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	if (too_many_unix_fds(current))
		return -ETOOMANYREFS;

	/* Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	for (i = scm->fp->count - 1; i >= 0; i--)
		unix_inflight(scm->fp->user, scm->fp->fp[i]);

	return 0;
}

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count - 1; i >= 0; i--)
		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
}

static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->fp = scm_fp_dup(UNIXCB(skb).fp);

	/*
	 * Garbage collection of unix sockets starts by selecting a set of
	 * candidate sockets which have reference only from being in flight
	 * (total_refs == inflight_refs). This condition is checked once during
	 * the candidate collection phase, and candidates are marked as such, so
	 * that non-candidates can later be ignored. While inflight_refs is
	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
	 * is an instantaneous decision.
	 *
	 * Once a candidate, however, the socket must not be reinstalled into a
	 * file descriptor while the garbage collection is in progress.
	 *
	 * If the above conditions are met, then the directed graph of
	 * candidates (*) does not change while unix_gc_lock is held.
	 *
	 * Any operation that changes the file count through file descriptors
	 * (dup, close, sendmsg) does not change the graph since candidates are
	 * not installed in fds.
	 *
	 * Dequeuing a candidate via recvmsg would install it into an fd, but
	 * that takes unix_gc_lock to decrement the inflight count, so it's
	 * serialized with garbage collection.
	 *
	 * MSG_PEEK is special in that it does not change the inflight count,
	 * yet does install the socket into an fd. The following lock/unlock
	 * pair is to ensure serialization with garbage collection. It must be
	 * done between incrementing the file count and installing the file
	 * into an fd.
	 *
	 * If garbage collection starts after the barrier provided by the
	 * lock/unlock, then it will see the elevated refcount and not mark this
	 * as a candidate. If a garbage collection is already in progress
	 * before the file count was incremented, then the lock/unlock pair will
	 * ensure that garbage collection is finished before progressing to
	 * installing the fd.
	 *
	 * (*) A -> B where B is on the queue of A or B is on the queue of C
	 * which is on the queue of listening socket A.
	 */
	spin_lock(&unix_gc_lock);
	spin_unlock(&unix_gc_lock);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;

	memset(&scm, 0, sizeof(scm));
	scm.pid = UNIXCB(skb).pid;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid = get_pid(scm->pid);
	UNIXCB(skb).uid = scm->creds.uid;
	UNIXCB(skb).gid = scm->creds.gid;
	UNIXCB(skb).fp = NULL;
	unix_get_secdata(scm, skb);
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}

static bool unix_passcred_enabled(const struct socket *sock,
				  const struct sock *other)
{
	return test_bit(SOCK_PASSCRED, &sock->flags) ||
	       test_bit(SOCK_PASSPIDFD, &sock->flags) ||
	       !other->sk_socket ||
	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
	       test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
}

/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
			    const struct sock *other)
{
	if (UNIXCB(skb).pid)
		return;
	if (unix_passcred_enabled(sock, other)) {
		UNIXCB(skb).pid = get_pid(task_tgid(current));
		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
	}
}
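
/* For context, a hedged userspace sketch of the SCM_CREDENTIALS flow
 * that maybe_add_creds() serves (hypothetical fd, not kernel code):
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	...
 *	recvmsg(fd, &msg, 0);
 *	// cmsg_level == SOL_SOCKET, cmsg_type == SCM_CREDENTIALS,
 *	// payload is struct ucred { pid_t pid; uid_t uid; gid_t gid; }
 *
 * Either endpoint setting SO_PASSCRED is enough for the sender's
 * credentials to be attached, per unix_passcred_enabled() above.
 */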

static bool unix_skb_scm_eq(struct sk_buff *skb,
			    struct scm_cookie *scm)
{
	return UNIXCB(skb).pid == scm->pid &&
	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
	       unix_secdata_eq(scm, skb);
}

static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
{
	struct scm_fp_list *fp = UNIXCB(skb).fp;
	struct unix_sock *u = unix_sk(sk);

	if (unlikely(fp && fp->count))
		atomic_add(fp->count, &u->scm_stat.nr_fds);
}

static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
{
	struct scm_fp_list *fp = UNIXCB(skb).fp;
	struct unix_sock *u = unix_sk(sk);

	if (unlikely(fp && fp->count))
		atomic_sub(fp->count, &u->scm_stat.nr_fds);
}

/*
 * Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			      size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
	struct sock *sk = sock->sk, *other = NULL;
	struct unix_sock *u = unix_sk(sk);
	struct scm_cookie scm;
	struct sk_buff *skb;
	int data_len = 0;
	int sk_locked;
	long timeo;
	int err;

	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	wait_for_unix_gc(scm.fp);

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_validate_addr(sunaddr, msg->msg_namelen);
		if (err)
			goto out;

		err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
							    msg->msg_name,
							    &msg->msg_namelen,
							    NULL);
		if (err)
			goto out;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
		err = unix_autobind(sk);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	if (len > SKB_MAX_ALLOC) {
		data_len = min_t(size_t,
				 len - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);
		data_len = PAGE_ALIGN(data_len);

		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
	}
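
	/* Worked example of the split above, assuming 4 KiB pages and the
	 * typical MAX_SKB_FRAGS of 17: for a hypothetical 100000-byte
	 * datagram, len - SKB_MAX_ALLOC exceeds 17 * 4096 bytes, so
	 * data_len becomes PAGE_ALIGN(17 * 4096) = 69632 bytes of page
	 * fragments, and the remaining 30368 bytes go in the linear head.
	 */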

	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
				   msg->msg_flags & MSG_DONTWAIT, &err,
				   PAGE_ALLOC_COSTLY_ORDER);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(&scm, skb, true);
	if (err < 0)
		goto out_free;

	skb_put(skb, len - data_len);
	skb->data_len = data_len;
	skb->len = len;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
					sk->sk_type);
		if (IS_ERR(other)) {
			err = PTR_ERR(other);
			other = NULL;
			goto out_free;
		}
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	sk_locked = 0;
	unix_state_lock(other);
restart_locked:
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (unlikely(sock_flag(other, SOCK_DEAD))) {
		/*
		 * Check with 1003.1g - what should
		 * datagram error
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);

		if (sk->sk_type == SOCK_SEQPACKET) {
			/* We are here only when racing with unix_release_sock()
			 * as it clears @other. Never change state to TCP_CLOSE,
			 * unlike what SOCK_DGRAM wants.
			 */
			unix_state_unlock(sk);
			err = -EPIPE;
		} else if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_dgram_peer_wake_disconnect_wakeup(sk, other);

			sk->sk_state = TCP_CLOSE;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	/* other == sk && unix_peer(other) != sk if
	 * - unix_peer(sk) == NULL, destination address bound to sk
	 * - unix_peer(sk) == sk by time of get but disconnected before lock
	 */
	if (other != sk &&
	    unlikely(unix_peer(other) != sk &&
		     unix_recvq_full_lockless(other))) {
		if (timeo) {
			timeo = unix_wait_for_peer(other, timeo);

			err = sock_intr_errno(timeo);
			if (signal_pending(current))
				goto out_free;

			goto restart;
		}

		if (!sk_locked) {
			unix_state_unlock(other);
			unix_state_double_lock(sk, other);
		}

		if (unix_peer(sk) != other ||
		    unix_dgram_peer_wake_me(sk, other)) {
			err = -EAGAIN;
			sk_locked = 1;
			goto out_unlock;
		}

		if (!sk_locked) {
			sk_locked = 1;
			goto restart_locked;
		}
	}

	if (unlikely(sk_locked))
		unix_state_unlock(sk);

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	maybe_add_creds(skb, sock, other);
	scm_stat_add(other, skb);
	skb_queue_tail(&other->sk_receive_queue, skb);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	scm_destroy(&scm);
	return len;

out_unlock:
	if (sk_locked)
		unix_state_unlock(sk);
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(&scm);
	return err;
}

/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
 */
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
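
/* Arithmetic note: get_order(32768) is 3 with 4 KiB pages, so
 * UNIX_SKB_FRAGS_SZ is PAGE_SIZE << 3 = 32768 there; on a 64 KiB-page
 * system get_order(32768) is 0 and the limit becomes a single
 * 65536-byte page.
 */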

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
		     struct scm_cookie *scm, bool fds_sent)
{
	struct unix_sock *ousk = unix_sk(other);
	struct sk_buff *skb;
	int err = 0;

	skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	err = unix_scm_to_skb(scm, skb, !fds_sent);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}
	skb_put(skb, 1);
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	unix_state_lock(other);

	if (sock_flag(other, SOCK_DEAD) ||
	    (other->sk_shutdown & RCV_SHUTDOWN)) {
		unix_state_unlock(other);
		kfree_skb(skb);
		return -EPIPE;
	}

	maybe_add_creds(skb, sock, other);
	skb_get(skb);

	if (ousk->oob_skb)
		consume_skb(ousk->oob_skb);

	WRITE_ONCE(ousk->oob_skb, skb);

	scm_stat_add(other, skb);
	skb_queue_tail(&other->sk_receive_queue, skb);
	sk_send_sigurg(other);
	unix_state_unlock(other);
	other->sk_data_ready(other);

	return 0;
}
#endif
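
/* A hedged userspace sketch of the MSG_OOB semantics implemented above
 * (hypothetical fds c and s, not code from this file). AF_UNIX keeps at
 * most one out-of-band byte; queueing a new one consumes the previous:
 *
 *	send(c, "hello", 5, 0);
 *	send(c, "x", 1, MSG_OOB);	// peer gets SIGURG/EPOLLPRI
 *	recv(s, buf, 5, 0);		// in-band data up to the mark
 *	recv(s, buf, 1, MSG_OOB);	// reads "x"
 *
 * This applies to SOCK_STREAM sockets with CONFIG_AF_UNIX_OOB enabled.
 */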

static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie scm;
	bool fds_sent = false;
	int data_len;

	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	wait_for_unix_gc(scm.fp);

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB) {
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (len)
			len--;
		else
#endif
			goto out_err;
	}

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		size = len - sent;

		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
			skb = sock_alloc_send_pskb(sk, 0, 0,
						   msg->msg_flags & MSG_DONTWAIT,
						   &err, 0);
		} else {
			/* Keep two messages in the pipe so it schedules better */
			size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);

			/* allow fallback to order-0 allocations */
			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);

			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));

			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));

			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
						   msg->msg_flags & MSG_DONTWAIT, &err,
						   get_order(UNIX_SKB_FRAGS_SZ));
		}
		if (!skb)
			goto out_err;

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(&scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		fds_sent = true;

		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
			err = skb_splice_from_iter(skb, &msg->msg_iter, size,
						   sk->sk_allocation);
			if (err < 0) {
				kfree_skb(skb);
				goto out_err;
			}
			size = err;
			refcount_add(size, &sk->sk_wmem_alloc);
		} else {
			skb_put(skb, size - data_len);
			skb->data_len = data_len;
			skb->len = size;
			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
			if (err) {
				kfree_skb(skb);
				goto out_err;
			}
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		maybe_add_creds(skb, sock, other);
		scm_stat_add(other, skb);
		skb_queue_tail(&other->sk_receive_queue, skb);
		unix_state_unlock(other);
		other->sk_data_ready(other);
		sent += size;
	}

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (msg->msg_flags & MSG_OOB) {
		err = queue_oob(sock, msg, other, &scm, fds_sent);
		if (err)
			goto out_err;
		sent++;
	}
#endif

	scm_destroy(&scm);

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(&scm);
	return sent ? : err;
}

static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
				  size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(sock, msg, len);
}

static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
				  size_t size, int flags)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	return unix_dgram_recvmsg(sock, msg, size, flags);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

	if (addr) {
		msg->msg_namelen = addr->len;
		memcpy(msg->msg_name, addr->name, addr->len);
	}
}

int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
			 int flags)
{
	struct scm_cookie scm;
	struct socket *sock = sk->sk_socket;
	struct unix_sock *u = unix_sk(sk);
	struct sk_buff *skb, *last;
	long timeo;
	int skip;
	int err;

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		mutex_lock(&u->iolock);

		skip = sk_peek_offset(sk, flags);
		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
					      &skip, &err, &last);
		if (skb) {
			if (!(flags & MSG_PEEK))
				scm_stat_del(sk, skb);
			break;
		}

		mutex_unlock(&u->iolock);

		if (err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
					      &err, &timeo, last));

	if (!skb) { /* implies iolock unlocked */
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out;
	}

	if (wq_has_sleeper(&u->peer_wait))
		wake_up_interruptible_sync_poll(&u->peer_wait,
						EPOLLOUT | EPOLLWRNORM |
						EPOLLWRBAND);

	if (msg->msg_name) {
		unix_copy_addr(msg, skb->sk);

		BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
						      msg->msg_name,
						      &msg->msg_namelen);
	}

	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_msg(skb, skip, msg, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	memset(&scm, 0, sizeof(scm));

	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
	unix_set_secdata(&scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(&scm, skb);

		sk_peek_offset_bwd(sk, skb->len);
	} else {
		/* It is questionable: on PEEK we could:
		 * - do not return fds - good, but too simple 8)
		 * - return fds, and do not return them on read (old strategy,
		 *   apparently wrong)
		 * - clone fds (I chose it for now, it is the most universal
		 *   solution)
		 *
		 * POSIX 1003.1g does not actually define this clearly
		 * at all. POSIX 1003.1g doesn't define a lot of things
		 * clearly however!
		 */

		sk_peek_offset_fwd(sk, size);

		if (UNIXCB(skb).fp)
			unix_peek_fds(&scm, skb);
	}
	err = (flags & MSG_TRUNC) ? skb->len - skip : size;

	scm_recv_unix(sock, msg, &scm, flags);

out_free:
	skb_free_datagram(sk, skb);
	mutex_unlock(&u->iolock);
out:
	return err;
}

static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock *sk = sock->sk;

#ifdef CONFIG_BPF_SYSCALL
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot != &unix_dgram_proto)
		return prot->recvmsg(sk, msg, size, flags, NULL);
#endif
	return __unix_dgram_recvmsg(sk, msg, size, flags);
}

static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	struct unix_sock *u = unix_sk(sk);
	struct sk_buff *skb;
	int err;

	mutex_lock(&u->iolock);
	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
	mutex_unlock(&u->iolock);
	if (!skb)
		return err;

	return recv_actor(sk, skb);
}

/*
 * Sleep until more data has arrived. But check for races...
 */
static long unix_stream_data_wait(struct sock *sk, long timeo,
				  struct sk_buff *last, unsigned int last_len,
				  bool freezable)
{
	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
	struct sk_buff *tail;
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, state);

		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail != last ||
		    (tail && tail->len != last_len) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);

		if (sock_flag(sk, SOCK_DEAD))
			break;

		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}
static unsigned int unix_skb_len(const struct sk_buff *skb)
{
	return skb->len - UNIXCB(skb).consumed;
}
struct unix_stream_read_state {
	int (*recv_actor)(struct sk_buff *, int, int,
			  struct unix_stream_read_state *);
	struct socket *socket;
	struct msghdr *msg;
	struct pipe_inode_info *pipe;
	size_t size;
	int flags;
	unsigned int splice_flags;
};
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
static int unix_stream_recv_urg(struct unix_stream_read_state *state)
{
	struct socket *sock = state->socket;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int chunk = 1;
	struct sk_buff *oob_skb;

	mutex_lock(&u->iolock);
	unix_state_lock(sk);

	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
		unix_state_unlock(sk);
		mutex_unlock(&u->iolock);
		return -EINVAL;
	}

	oob_skb = u->oob_skb;

	if (!(state->flags & MSG_PEEK))
		WRITE_ONCE(u->oob_skb, NULL);
	else
		skb_get(oob_skb);
	unix_state_unlock(sk);

	chunk = state->recv_actor(oob_skb, 0, chunk, state);

	if (!(state->flags & MSG_PEEK))
		UNIXCB(oob_skb).consumed += 1;

	consume_skb(oob_skb);

	mutex_unlock(&u->iolock);

	if (chunk < 0)
		return -EFAULT;

	state->msg->msg_flags |= MSG_OOB;
	return 1;
}
static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
				  int flags, int copied)
{
	struct unix_sock *u = unix_sk(sk);

	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
		skb_unlink(skb, &sk->sk_receive_queue);
		consume_skb(skb);
		skb = NULL;
	} else {
		if (skb == u->oob_skb) {
			if (copied) {
				skb = NULL;
			} else if (sock_flag(sk, SOCK_URGINLINE)) {
				if (!(flags & MSG_PEEK)) {
					WRITE_ONCE(u->oob_skb, NULL);
					consume_skb(skb);
				}
			} else if (flags & MSG_PEEK) {
				skb = NULL;
			} else {
				skb_unlink(skb, &sk->sk_receive_queue);
				WRITE_ONCE(u->oob_skb, NULL);
				if (!WARN_ON_ONCE(skb_unref(skb)))
					kfree_skb(skb);
				skb = skb_peek(&sk->sk_receive_queue);
			}
		}
	}
	return skb;
}
#endif
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
		return -ENOTCONN;

	return unix_read_skb(sk, recv_actor);
}
static int unix_stream_read_generic(struct unix_stream_read_state *state,
				    bool freezable)
{
	struct scm_cookie scm;
	struct socket *sock = state->socket;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int copied = 0;
	int flags = state->flags;
	int noblock = flags & MSG_DONTWAIT;
	bool check_creds = false;
	int target;
	int err = 0;
	long timeo;
	int skip;
	size_t size = state->size;
	unsigned int last_len;

	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
		err = -EINVAL;
		goto out;
	}

	if (unlikely(flags & MSG_OOB)) {
		err = -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		err = unix_stream_recv_urg(state);
#endif
		goto out;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, noblock);

	memset(&scm, 0, sizeof(scm));

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_to_msg().
	 */
	mutex_lock(&u->iolock);

	skip = max(sk_peek_offset(sk, flags), 0);

	do {
		int chunk;
		bool drop_skb;
		struct sk_buff *skb, *last;

redo:
		unix_state_lock(sk);
		if (sock_flag(sk, SOCK_DEAD)) {
			err = -ECONNRESET;
			goto unlock;
		}
		last = skb = skb_peek(&sk->sk_receive_queue);
		last_len = last ? last->len : 0;

again:
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (skb) {
			skb = manage_oob(skb, sk, flags, copied);
			if (!skb && copied) {
				unix_state_unlock(sk);
				break;
			}
		}
#endif
		if (skb == NULL) {
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */
			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			if (!timeo) {
				err = -EAGAIN;
				break;
			}

			mutex_unlock(&u->iolock);

			timeo = unix_stream_data_wait(sk, timeo, last,
						      last_len, freezable);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				scm_destroy(&scm);
				goto out;
			}

			mutex_lock(&u->iolock);
			goto redo;
unlock:
			unix_state_unlock(sk);
			break;
		}

		while (skip >= unix_skb_len(skb)) {
			skip -= unix_skb_len(skb);
			last = skb;
			last_len = skb->len;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (!skb)
				goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if (!unix_skb_scm_eq(skb, &scm))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
			   test_bit(SOCK_PASSPIDFD, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			unix_set_secdata(&scm, skb);
			check_creds = true;
		}

		/* Copy address just once */
		if (state->msg && state->msg->msg_name) {
			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
					 state->msg->msg_name);

			unix_copy_addr(state->msg, skb->sk);

			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
							      state->msg->msg_name,
							      &state->msg->msg_namelen);

			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
		skb_get(skb);
		chunk = state->recv_actor(skb, skip, chunk, state);
		drop_skb = !unix_skb_len(skb);
		/* skb is only safe to use if !drop_skb */
		consume_skb(skb);
		if (chunk < 0) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		if (drop_skb) {
			/* the skb was touched by a concurrent reader;
			 * we should not expect anything from this skb
			 * anymore and assume it invalid - we can be
			 * sure it was dropped from the socket queue
			 *
			 * let's report a short read
			 */
			err = 0;
			break;
		}

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			UNIXCB(skb).consumed += chunk;

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp) {
				scm_stat_del(sk, skb);
				unix_detach_fds(&scm, skb);
			}

			if (unix_skb_len(skb))
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			if (scm.fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				unix_peek_fds(&scm, skb);

			sk_peek_offset_fwd(sk, chunk);

			if (UNIXCB(skb).fp)
				break;

			skip = 0;
			last = skb;
			last_len = skb->len;
			unix_state_lock(sk);
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (skb)
				goto again;
			unix_state_unlock(sk);
			break;
		}
	} while (size);

	mutex_unlock(&u->iolock);
	if (state->msg)
		scm_recv_unix(sock, state->msg, &scm, flags);
	else
		scm_destroy(&scm);
out:
	return copied ? : err;
}
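/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * sk_peek_offset() handling above is what makes SO_PEEK_OFF work on AF_UNIX
 * stream sockets - successive MSG_PEEK reads advance through the queue
 * instead of re-reading the head, and a consuming read moves the offset
 * back by the amount consumed.  Error handling elided.
 *
 *	int off = 0;
 *	char buf[64];
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	// peeks bytes 0..63
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	// peeks bytes 64..127
 *	recv(fd, buf, sizeof(buf), 0);		// consumes 0..63, offset drops by 64
 */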
static int unix_stream_read_actor(struct sk_buff *skb,
				  int skip, int chunk,
				  struct unix_stream_read_state *state)
{
	int ret;

	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
				    state->msg, chunk);
	return ret ?: chunk;
}
int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
			  size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sk->sk_socket,
		.msg = msg,
		.size = size,
		.flags = flags
	};

	return unix_stream_read_generic(&state, true);
}
static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sock,
		.msg = msg,
		.size = size,
		.flags = flags
	};

#ifdef CONFIG_BPF_SYSCALL
	struct sock *sk = sock->sk;
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot != &unix_stream_proto)
		return prot->recvmsg(sk, msg, size, flags, NULL);
#endif

	return unix_stream_read_generic(&state, true);
}
static int unix_stream_splice_actor(struct sk_buff *skb,
				    int skip, int chunk,
				    struct unix_stream_read_state *state)
{
	return skb_splice_bits(skb, state->socket->sk,
			       UNIXCB(skb).consumed + skip,
			       state->pipe, chunk, state->splice_flags);
}
static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
				       struct pipe_inode_info *pipe,
				       size_t size, unsigned int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_splice_actor,
		.socket = sock,
		.pipe = pipe,
		.size = size,
		.splice_flags = flags,
	};

	if (unlikely(*ppos))
		return -ESPIPE;

	if (sock->file->f_flags & O_NONBLOCK ||
	    flags & SPLICE_F_NONBLOCK)
		state.flags = MSG_DONTWAIT;

	return unix_stream_read_generic(&state, false);
}
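/*
 * Illustrative userspace sketch (not part of the kernel build): the splice
 * path above lets queued stream data move into a pipe without a round trip
 * through a userspace buffer.  Error handling elided.
 *
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	splice(sockfd, NULL, pfd[1], NULL, 4096, SPLICE_F_NONBLOCK);
 */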
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
		int peer_mode = 0;
		const struct proto *prot = READ_ONCE(other->sk_prot);

		if (prot->unhash)
			prot->unhash(other);
		if (mode & RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode & SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
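/*
 * Illustrative userspace sketch (not part of the kernel build): because
 * unix_shutdown() mirrors the shutdown onto the peer, SHUT_WR on one end of
 * a connected stream pair is observed as EOF on the other end.
 *
 *	int sv[2];
 *	char c;
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	shutdown(sv[0], SHUT_WR);
 *	read(sv[1], &c, 1);	// returns 0: the peer got RCV_SHUTDOWN
 */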
long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += unix_skb_len(skb);
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
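/*
 * Illustrative userspace sketch (not part of the kernel build): these two
 * helpers back the SIOCINQ/SIOCOUTQ (FIONREAD/TIOCOUTQ) ioctls handled
 * below, so the number of unread bytes on a socket can be queried with:
 *
 *	int unread;
 *
 *	ioctl(fd, FIONREAD, &unread);
 */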
static int unix_open_file(struct sock *sk)
{
	struct path path;
	struct file *f;
	int fd;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (!smp_load_acquire(&unix_sk(sk)->addr))
		return -ENOENT;

	path = unix_sk(sk)->path;
	if (!path.dentry)
		return -ENOENT;

	path_get(&path);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out;

	f = dentry_open(&path, O_PATH, current_cred());
	if (IS_ERR(f)) {
		put_unused_fd(fd);
		fd = PTR_ERR(f);
		goto out;
	}

	fd_install(fd, f);
out:
	path_put(&path);
	return fd;
}
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	case SIOCUNIXFILE:
		err = unix_open_file(sk);
		break;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	case SIOCATMARK:
		{
			struct sk_buff *skb;
			int answ = 0;

			skb = skb_peek(&sk->sk_receive_queue);
			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
				answ = 1;
			err = put_user(answ, (int __user *)arg);
		}
		break;
#endif
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;
	u8 shutdown;

	sock_poll_wait(file, sock, wait);
	mask = 0;
	shutdown = READ_ONCE(sk->sk_shutdown);

	/* exceptional events? */
	if (READ_ONCE(sk->sk_err))
		mask |= EPOLLERR;
	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (READ_ONCE(unix_sk(sk)->oob_skb))
		mask |= EPOLLPRI;
#endif

	/* Connection-based sockets need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= EPOLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}
static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int writable;
	__poll_t mask;
	u8 shutdown;

	sock_poll_wait(file, sock, wait);
	mask = 0;
	shutdown = READ_ONCE(sk->sk_shutdown);

	/* exceptional events? */
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT)))
		return mask;

	writable = unix_writable(sk);
	if (writable) {
		unix_state_lock(sk);

		other = unix_peer(sk);
		if (other && unix_peer(other) != sk &&
		    unix_recvq_full_lockless(other) &&
		    unix_dgram_peer_wake_me(sk, other))
			writable = 0;

		unix_state_unlock(sk);
	}

	if (writable)
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
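/*
 * A worked example of the encoding above (assuming BITS_PER_LONG == 64 and
 * UNIX_HASH_BITS == 8, so BUCKET_SPACE == 54): the seq_file position packs
 * the bucket index into the high bits and the in-bucket offset into the low
 * BUCKET_SPACE bits, e.g.
 *
 *	loff_t pos = set_bucket_offset(3, 7);	// bucket 3, offset 7
 *
 *	get_bucket(pos);	// == 3
 *	get_offset(pos);	// == 7
 */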
static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
	unsigned long offset = get_offset(*pos);
	unsigned long bucket = get_bucket(*pos);
	unsigned long count = 0;
	struct sock *sk;

	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
	     sk; sk = sk_next(sk)) {
		if (++count == offset)
			break;
	}

	return sk;
}
static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
{
	unsigned long bucket = get_bucket(*pos);
	struct net *net = seq_file_net(seq);
	struct sock *sk;

	while (bucket < UNIX_HASH_SIZE) {
		spin_lock(&net->unx.table.locks[bucket]);

		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

		spin_unlock(&net->unx.table.locks[bucket]);

		*pos = set_bucket_offset(++bucket, 1);
	}

	return NULL;
}
static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
				  loff_t *pos)
{
	unsigned long bucket = get_bucket(*pos);

	sk = sk_next(sk);
	if (sk)
		return sk;

	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);

	*pos = set_bucket_offset(++bucket, 1);

	return unix_get_first(seq, pos);
}
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;

	return unix_get_first(seq, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	if (v == SEQ_START_TOKEN)
		return unix_get_first(seq, pos);

	return unix_get_next(seq, v, pos);
}
static void unix_seq_stop(struct seq_file *seq, void *v)
{
	struct sock *sk = v;

	if (sk)
		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
}
static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);

		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			refcount_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {	/* under a hash table lock here */
			int i, len;

			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len -
				offsetof(struct sockaddr_un, sun_path);
			if (u->addr->name->sun_path[0]) {
				len--;
			} else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i] ?:
					 '@');
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};
#ifdef CONFIG_BPF_SYSCALL
struct bpf_unix_iter_state {
	struct seq_net_private p;
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	struct sock **batch;
	bool st_bucket_done;
};

struct bpf_iter__unix {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct unix_sock *, unix_sk);
	uid_t uid __aligned(8);
};
static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			      struct unix_sock *unix_sk, uid_t uid)
{
	struct bpf_iter__unix ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.unix_sk = unix_sk;
	ctx.uid = uid;
	return bpf_iter_run_prog(prog, &ctx);
}
static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected = 1;
	struct sock *sk;

	sock_hold(start_sk);
	iter->batch[iter->end_sk++] = start_sk;

	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
		if (iter->end_sk < iter->max_sk) {
			sock_hold(sk);
			iter->batch[iter->end_sk++] = sk;
		}

		expected++;
	}

	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);

	return expected;
}
static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
{
	while (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);
}
static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
				       unsigned int new_batch_sz)
{
	struct sock **new_batch;

	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
			     GFP_USER | __GFP_NOWARN);
	if (!new_batch)
		return -ENOMEM;

	bpf_iter_unix_put_batch(iter);
	kvfree(iter->batch);
	iter->batch = new_batch;
	iter->max_sk = new_batch_sz;

	return 0;
}
static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
					loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected;
	bool resized = false;
	struct sock *sk;

	if (iter->st_bucket_done)
		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);

again:
	/* Get a new batch */
	iter->cur_sk = 0;
	iter->end_sk = 0;

	sk = unix_get_first(seq, pos);
	if (!sk)
		return NULL; /* Done */

	expected = bpf_iter_unix_hold_batch(seq, sk);

	if (iter->end_sk == expected) {
		iter->st_bucket_done = true;
		return sk;
	}

	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
		resized = true;
		goto again;
	}

	return sk;
}
static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;

	/* bpf iter does not support lseek, so it always
	 * continues from where it was stop()-ped.
	 */
	return bpf_iter_unix_batch(seq, pos);
}
static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct sock *sk;

	/* Whenever seq_next() is called, the iter->cur_sk is
	 * done with seq_show(), so advance to the next sk in
	 * the batch.
	 */
	if (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);

	++*pos;

	if (iter->cur_sk < iter->end_sk)
		sk = iter->batch[iter->cur_sk];
	else
		sk = bpf_iter_unix_batch(seq, pos);

	return sk;
}
static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;
	bool slow;
	int ret;

	if (v == SEQ_START_TOKEN)
		return 0;

	slow = lock_sock_fast(sk);

	if (unlikely(sk_unhashed(sk))) {
		ret = SEQ_SKIP;
		goto unlock;
	}

	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	ret = unix_prog_seq_show(prog, &meta, v, uid);
unlock:
	unlock_sock_fast(sk, slow);
	return ret;
}
static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)unix_prog_seq_show(prog, &meta, v, 0);
	}

	if (iter->cur_sk < iter->end_sk)
		bpf_iter_unix_put_batch(iter);
}
static const struct seq_operations bpf_iter_unix_seq_ops = {
	.start	= bpf_iter_unix_seq_start,
	.next	= bpf_iter_unix_seq_next,
	.stop	= bpf_iter_unix_seq_stop,
	.show	= bpf_iter_unix_seq_show,
};
#endif
static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};
static int __net_init unix_net_init(struct net *net)
{
	int i;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
			     sizeof(struct seq_net_private)))
		goto err_sysctl;
#endif

	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
					      sizeof(spinlock_t), GFP_KERNEL);
	if (!net->unx.table.locks)
		goto err_proc;

	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
						sizeof(struct hlist_head),
						GFP_KERNEL);
	if (!net->unx.table.buckets)
		goto free_locks;

	for (i = 0; i < UNIX_HASH_SIZE; i++) {
		spin_lock_init(&net->unx.table.locks[i]);
		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
	}

	return 0;

free_locks:
	kvfree(net->unx.table.locks);
err_proc:
#ifdef CONFIG_PROC_FS
	remove_proc_entry("unix", net->proc_net);
err_sysctl:
#endif
	unix_sysctl_unregister(net);
out:
	return -ENOMEM;
}
static void __net_exit unix_net_exit(struct net *net)
{
	kvfree(net->unx.table.buckets);
	kvfree(net->unx.table.locks);
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}
static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
		     struct unix_sock *unix_sk, uid_t uid)

#define INIT_BATCH_SZ 16

static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_unix_iter_state *iter = priv_data;
	int err;

	err = bpf_iter_init_seq_net(priv_data, aux);
	if (err)
		return err;

	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
	if (err) {
		bpf_iter_fini_seq_net(priv_data);
		return err;
	}

	return 0;
}
static void bpf_iter_fini_unix(void *priv_data)
{
	struct bpf_unix_iter_state *iter = priv_data;

	bpf_iter_fini_seq_net(priv_data);
	kvfree(iter->batch);
}
static const struct bpf_iter_seq_info unix_seq_info = {
	.seq_ops		= &bpf_iter_unix_seq_ops,
	.init_seq_private	= bpf_iter_init_unix,
	.fini_seq_private	= bpf_iter_fini_unix,
	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
};
static const struct bpf_func_proto *
bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
			     const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_sk_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_sk_getsockopt_proto;
	default:
		return NULL;
	}
}
static struct bpf_iter_reg unix_reg_info = {
	.target			= "unix",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__unix, unix_sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.get_func_proto         = bpf_iter_unix_get_func_proto,
	.seq_info		= &unix_seq_info,
};
static void __init bpf_iter_register(void)
{
	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
	if (bpf_iter_reg_target(&unix_reg_info))
		pr_warn("Warning: could not register bpf iterator unix\n");
}
#endif
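/*
 * Illustrative BPF-side sketch (not part of this file; modelled on the
 * kernel's selftests): a minimal iterator program that attaches to the
 * "unix" target registered above and emits one line per socket through the
 * seq_file plumbing.
 *
 *	SEC("iter/unix")
 *	int dump_unix(struct bpf_iter__unix *ctx)
 *	{
 *		struct unix_sock *unix_sk = ctx->unix_sk;
 *
 *		if (!unix_sk)
 *			return 0;
 *
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "uid=%u\n", ctx->uid);
 *		return 0;
 *	}
 */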
static int __init af_unix_init(void)
{
	int i, rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));

	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
		spin_lock_init(&bsd_socket_locks[i]);
		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
	}

	rc = proto_register(&unix_dgram_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		goto out;
	}

	rc = proto_register(&unix_stream_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		proto_unregister(&unix_dgram_proto);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
	unix_bpf_build_proto();

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif

out:
	return rc;
}
/* Later than subsys_initcall() because we depend on stuff initialised there */
fs_initcall(af_unix_init);