1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * NET4: Implementation of BSD Unix domain sockets.
8 * Linus Torvalds : Assorted bug cures.
9 * Niibe Yutaka : async I/O support.
10 * Carsten Paeth : PF_UNIX check, address fixes.
11 * Alan Cox : Limit size of allocated blocks.
12 * Alan Cox : Fixed the stupid socketpair bug.
13 * Alan Cox : BSD compatibility fine tuning.
14 * Alan Cox : Fixed a bug in connect when interrupted.
15 * Alan Cox : Sorted out a proper draft version of
16 * file descriptor passing hacked up from
18 * Marty Leisner : Fixes to fd passing
19 * Nick Nevin : recvmsg bugfix.
20 * Alan Cox : Started proper garbage collector
21 * Heiko Eißfeldt : Missing verify_area check
22 * Alan Cox : Started POSIXisms
23 * Andreas Schwab : Replace inode by dentry for proper
25 * Kirk Petersen : Made this a module
26 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
28 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
29 * by the above two patches.
30 * Andrea Arcangeli : If possible we block in connect(2)
31 * if the max backlog of the listen socket
32 * has been reached. This won't break
33 * old apps and it will avoid a huge amount
34 * of socks hashed (this is for unix_gc()
35 * performance reasons).
36 * Security fix that limits the max
37 * number of socks to 2*max_files and
38 * the number of skb queueable in the
40 * Artur Skawina : Hash function optimizations
41 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
42 * Malcolm Beattie : Set peercred for socketpair
43 * Michal Ostrowski : Module initialization cleanup.
44 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
45 * the core infrastructure is doing that
46 * for all net proto families now (2.5.69+)
48 * Known differences from reference BSD that was tested:
51 * ECONNREFUSED is not returned from one end of a connected() socket to the
52 * other the moment one end closes.
53 * fstat() doesn't return st_dev=0, and gives the blksize as high water mark
54 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
56 * accept() returns a path name even if the connecting socket has closed
57 * in the meantime (BSD loses the path and gives up).
58 * accept() returns 0 length path for an unbound connector. BSD returns 16
59 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
60 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
61 * BSD af_unix apparently has connect forgetting to block properly.
62 * (need to check this with the POSIX spec in detail)
64 * Differences from 2.0.0-11-... (ANK)
65 * Bug fixes and improvements.
66 * - client shutdown killed server socket.
67 * - removed all useless cli/sti pairs.
69 * Semantic changes/extensions.
70 * - generic control message passing.
71 * - SCM_CREDENTIALS control message.
72 * - "Abstract" (not FS based) socket bindings.
73 * Abstract names are sequences of bytes (not zero terminated)
74 * started by 0, so that this name space does not intersect with BSD names.
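 *
 * A minimal userspace sketch of the two binding styles (illustrative only,
 * not part of the original notes; the names used are made up):
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	// filesystem binding: NUL-terminated path in sun_path
 *	strcpy(a.sun_path, "/tmp/example.sock");
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *	// abstract binding: sun_path[0] == 0, the name is the bytes that
 *	// follow, and the passed address length delimits it (no NUL needed)
 *	a.sun_path[0] = 0;
 *	memcpy(a.sun_path + 1, "example", 7);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);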
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
80 #include <linux/module.h>
81 #include <linux/kernel.h>
82 #include <linux/signal.h>
83 #include <linux/sched/signal.h>
84 #include <linux/errno.h>
85 #include <linux/string.h>
86 #include <linux/stat.h>
87 #include <linux/dcache.h>
88 #include <linux/namei.h>
89 #include <linux/socket.h>
91 #include <linux/fcntl.h>
92 #include <linux/filter.h>
93 #include <linux/termios.h>
94 #include <linux/sockios.h>
95 #include <linux/net.h>
98 #include <linux/slab.h>
99 #include <linux/uaccess.h>
100 #include <linux/skbuff.h>
101 #include <linux/netdevice.h>
102 #include <net/net_namespace.h>
103 #include <net/sock.h>
104 #include <net/tcp_states.h>
105 #include <net/af_unix.h>
106 #include <linux/proc_fs.h>
107 #include <linux/seq_file.h>
109 #include <linux/init.h>
110 #include <linux/poll.h>
111 #include <linux/rtnetlink.h>
112 #include <linux/mount.h>
113 #include <net/checksum.h>
114 #include <linux/security.h>
115 #include <linux/freezer.h>
116 #include <linux/file.h>
117 #include <linux/btf_ids.h>
121 spinlock_t unix_table_locks[2 * UNIX_HASH_SIZE];
122 EXPORT_SYMBOL_GPL(unix_table_locks);
123 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
124 EXPORT_SYMBOL_GPL(unix_socket_table);
125 static atomic_long_t unix_nr_socks;
127 /* SMP locking strategy:
128 * the hash table is protected by the unix_table_locks spinlocks.
129 * each socket's state is protected by a separate spinlock.
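 *
 * A sketch of the typical lookup under these locks (this mirrors the
 * helpers below; it is not an additional interface):
 *
 *	spin_lock(&unix_table_locks[hash]);
 *	sk_for_each(s, &unix_socket_table[hash]) {
 *		// compare the address or inode, sock_hold() on a match
 *	}
 *	spin_unlock(&unix_table_locks[hash]);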
132 static unsigned int unix_unbound_hash(struct sock *sk)
134 unsigned long hash = (unsigned long)sk;
140 return UNIX_HASH_SIZE + (hash & (UNIX_HASH_SIZE - 1));
143 static unsigned int unix_bsd_hash(struct inode *i)
145 return i->i_ino & (UNIX_HASH_SIZE - 1);
148 static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
149 int addr_len, int type)
151 __wsum csum = csum_partial(sunaddr, addr_len, 0);
154 hash = (__force unsigned int)csum_fold(csum);
158 return hash & (UNIX_HASH_SIZE - 1);
161 static void unix_table_double_lock(unsigned int hash1, unsigned int hash2)
163 /* hash1 and hash2 are never the same because
164 * one is between 0 and UNIX_HASH_SIZE - 1, and
165 * the other is between UNIX_HASH_SIZE and UNIX_HASH_SIZE * 2 - 1.
170 spin_lock(&unix_table_locks[hash1]);
171 spin_lock_nested(&unix_table_locks[hash2], SINGLE_DEPTH_NESTING);
174 static void unix_table_double_unlock(unsigned int hash1, unsigned int hash2)
176 spin_unlock(&unix_table_locks[hash1]);
177 spin_unlock(&unix_table_locks[hash2]);
180 #ifdef CONFIG_SECURITY_NETWORK
181 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
183 UNIXCB(skb).secid = scm->secid;
186 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
188 scm->secid = UNIXCB(skb).secid;
191 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
193 return (scm->secid == UNIXCB(skb).secid);
196 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
199 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
202 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
206 #endif /* CONFIG_SECURITY_NETWORK */
208 #define unix_peer(sk) (unix_sk(sk)->peer)
210 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
212 return unix_peer(osk) == sk;
215 static inline int unix_may_send(struct sock *sk, struct sock *osk)
217 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
220 static inline int unix_recvq_full(const struct sock *sk)
222 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
225 static inline int unix_recvq_full_lockless(const struct sock *sk)
227 return skb_queue_len_lockless(&sk->sk_receive_queue) >
228 READ_ONCE(sk->sk_max_ack_backlog);
231 struct sock *unix_peer_get(struct sock *s)
239 unix_state_unlock(s);
242 EXPORT_SYMBOL_GPL(unix_peer_get);
244 static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
247 struct unix_address *addr;
249 addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
253 refcount_set(&addr->refcnt, 1);
254 addr->len = addr_len;
255 memcpy(addr->name, sunaddr, addr_len);
260 static inline void unix_release_addr(struct unix_address *addr)
262 if (refcount_dec_and_test(&addr->refcnt))
267 * Check unix socket name:
268 * - should not be zero length.
269 * - if it does not start with a zero byte, it should be NUL terminated (FS object)
270 * - if it starts with a zero byte, it is an abstract name.
273 static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
275 if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
276 addr_len > sizeof(*sunaddr))
279 if (sunaddr->sun_family != AF_UNIX)
285 static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
287 /* This may look like an off by one error but it is a bit more
288 * subtle. 108 is the longest valid AF_UNIX path for a binding.
289 * sun_path[108] doesn't as such exist. However in kernel space
290 * we are guaranteed that it is a valid memory location in our
291 * kernel address buffer because syscall functions always pass
292 * a pointer to a struct sockaddr_storage, which has a bigger buffer than 108.
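 * (Illustrative arithmetic, assuming the usual Linux layouts: addr_len is
 * at most sizeof(struct sockaddr_un) == 110 bytes, while the syscall layer
 * copies the user address into a struct sockaddr_storage of 128 bytes, so
 * the single byte written at sunaddr[addr_len] below stays inside the
 * kernel buffer.)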
295 ((char *)sunaddr)[addr_len] = 0;
298 static void __unix_remove_socket(struct sock *sk)
300 sk_del_node_init(sk);
303 static void __unix_insert_socket(struct sock *sk)
305 WARN_ON(!sk_unhashed(sk));
306 sk_add_node(sk, &unix_socket_table[sk->sk_hash]);
309 static void __unix_set_addr_hash(struct sock *sk, struct unix_address *addr,
312 __unix_remove_socket(sk);
313 smp_store_release(&unix_sk(sk)->addr, addr);
316 __unix_insert_socket(sk);
319 static void unix_remove_socket(struct sock *sk)
321 spin_lock(&unix_table_locks[sk->sk_hash]);
322 __unix_remove_socket(sk);
323 spin_unlock(&unix_table_locks[sk->sk_hash]);
326 static void unix_insert_unbound_socket(struct sock *sk)
328 spin_lock(&unix_table_locks[sk->sk_hash]);
329 __unix_insert_socket(sk);
330 spin_unlock(&unix_table_locks[sk->sk_hash]);
333 static struct sock *__unix_find_socket_byname(struct net *net,
334 struct sockaddr_un *sunname,
335 int len, unsigned int hash)
339 sk_for_each(s, &unix_socket_table[hash]) {
340 struct unix_sock *u = unix_sk(s);
342 if (!net_eq(sock_net(s), net))
345 if (u->addr->len == len &&
346 !memcmp(u->addr->name, sunname, len))
352 static inline struct sock *unix_find_socket_byname(struct net *net,
353 struct sockaddr_un *sunname,
354 int len, unsigned int hash)
358 spin_lock(&unix_table_locks[hash]);
359 s = __unix_find_socket_byname(net, sunname, len, hash);
362 spin_unlock(&unix_table_locks[hash]);
366 static struct sock *unix_find_socket_byinode(struct inode *i)
368 unsigned int hash = unix_bsd_hash(i);
371 spin_lock(&unix_table_locks[hash]);
372 sk_for_each(s, &unix_socket_table[hash]) {
373 struct dentry *dentry = unix_sk(s)->path.dentry;
375 if (dentry && d_backing_inode(dentry) == i) {
377 spin_unlock(&unix_table_locks[hash]);
381 spin_unlock(&unix_table_locks[hash]);
385 /* Support code for asymmetrically connected dgram sockets
387 * If a datagram socket is connected to a socket not itself connected
388 * to the first socket (eg, /dev/log), clients may only enqueue more
389 * messages if the present receive queue of the server socket is not
390 * "too large". This means there's a second writeability condition
391 * poll and sendmsg need to test. The dgram recv code will do a wake
392 * up on the peer_wait wait queue of a socket upon reception of a
393 * datagram which needs to be propagated to sleeping would-be writers
394 * since these might not have sent anything so far. This can't be
395 * accomplished via poll_wait because the lifetime of the server
396 * socket might be less than that of its clients if these break their
397 * association with it or if the server socket is closed while clients
398 * are still connected to it and there's no way to inform "a polling
399 * implementation" that it should let go of a certain wait queue
401 * In order to propagate a wake up, a wait_queue_entry_t of the client
402 * socket is enqueued on the peer_wait queue of the server socket
403 * whose wake function does a wake_up on the ordinary client socket
404 * wait queue. This connection is established whenever a write (or
405 * poll for write) hits the flow control condition and is broken when the
406 * association to the server socket is dissolved or after a wake up was relayed.
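 *
 * Illustrative userspace view of the scenario (a sketch, not kernel code;
 * log_addr stands for the address of an already-bound receiver such as
 * /dev/log):
 *
 *	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
 *	connect(fd, (struct sockaddr *)&log_addr, log_addr_len);
 *	struct pollfd p = { .fd = fd, .events = POLLOUT };
 *	// POLLOUT is withheld while the receiver's queue is "too large";
 *	// when the receiver drains its queue, the relay below wakes us up
 *	poll(&p, 1, -1);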
410 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
414 wait_queue_head_t *u_sleep;
416 u = container_of(q, struct unix_sock, peer_wake);
418 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
420 u->peer_wake.private = NULL;
422 /* relaying can only happen while the wq still exists */
423 u_sleep = sk_sleep(&u->sk);
425 wake_up_interruptible_poll(u_sleep, key_to_poll(key));
430 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
432 struct unix_sock *u, *u_other;
436 u_other = unix_sk(other);
438 spin_lock(&u_other->peer_wait.lock);
440 if (!u->peer_wake.private) {
441 u->peer_wake.private = other;
442 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
447 spin_unlock(&u_other->peer_wait.lock);
451 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
454 struct unix_sock *u, *u_other;
457 u_other = unix_sk(other);
458 spin_lock(&u_other->peer_wait.lock);
460 if (u->peer_wake.private == other) {
461 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
462 u->peer_wake.private = NULL;
465 spin_unlock(&u_other->peer_wait.lock);
468 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
471 unix_dgram_peer_wake_disconnect(sk, other);
472 wake_up_interruptible_poll(sk_sleep(sk),
479 * - unix_peer(sk) == other
480 * - association is stable
482 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
486 connected = unix_dgram_peer_wake_connect(sk, other);
488 /* If other is SOCK_DEAD, we want to make sure we signal
489 * POLLOUT, such that a subsequent write() can get a
490 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
491 * to other and it's full, we will hang waiting for POLLOUT.
493 if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD))
497 unix_dgram_peer_wake_disconnect(sk, other);
502 static int unix_writable(const struct sock *sk)
504 return sk->sk_state != TCP_LISTEN &&
505 (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
508 static void unix_write_space(struct sock *sk)
510 struct socket_wq *wq;
513 if (unix_writable(sk)) {
514 wq = rcu_dereference(sk->sk_wq);
515 if (skwq_has_sleeper(wq))
516 wake_up_interruptible_sync_poll(&wq->wait,
517 EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
518 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
523 /* When a dgram socket disconnects (or changes its peer), we clear its receive
524 * queue of packets that arrived from the previous peer. First, this allows us to do
525 * flow control based only on wmem_alloc; second, an sk connected to a peer
526 * may receive messages only from that peer. */
527 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
529 if (!skb_queue_empty(&sk->sk_receive_queue)) {
530 skb_queue_purge(&sk->sk_receive_queue);
531 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
533 /* If one link of a bidirectional dgram pipe is disconnected,
534 * we signal an error. Messages are lost. Do not do this
535 * when the peer was not connected to us.
537 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
538 other->sk_err = ECONNRESET;
539 sk_error_report(other);
542 other->sk_state = TCP_CLOSE;
545 static void unix_sock_destructor(struct sock *sk)
547 struct unix_sock *u = unix_sk(sk);
549 skb_queue_purge(&sk->sk_receive_queue);
551 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
553 kfree_skb(u->oob_skb);
557 WARN_ON(refcount_read(&sk->sk_wmem_alloc));
558 WARN_ON(!sk_unhashed(sk));
559 WARN_ON(sk->sk_socket);
560 if (!sock_flag(sk, SOCK_DEAD)) {
561 pr_info("Attempt to release alive unix socket: %p\n", sk);
566 unix_release_addr(u->addr);
568 atomic_long_dec(&unix_nr_socks);
569 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
570 #ifdef UNIX_REFCNT_DEBUG
571 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
572 atomic_long_read(&unix_nr_socks));
576 static void unix_release_sock(struct sock *sk, int embrion)
578 struct unix_sock *u = unix_sk(sk);
584 unix_remove_socket(sk);
589 sk->sk_shutdown = SHUTDOWN_MASK;
591 u->path.dentry = NULL;
593 state = sk->sk_state;
594 sk->sk_state = TCP_CLOSE;
596 skpair = unix_peer(sk);
597 unix_peer(sk) = NULL;
599 unix_state_unlock(sk);
601 wake_up_interruptible_all(&u->peer_wait);
603 if (skpair != NULL) {
604 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
605 unix_state_lock(skpair);
607 skpair->sk_shutdown = SHUTDOWN_MASK;
608 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
609 skpair->sk_err = ECONNRESET;
610 unix_state_unlock(skpair);
611 skpair->sk_state_change(skpair);
612 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
615 unix_dgram_peer_wake_disconnect(sk, skpair);
616 sock_put(skpair); /* It may now die */
619 /* Try to flush out this socket. Throw out buffers at least */
621 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
622 if (state == TCP_LISTEN)
623 unix_release_sock(skb->sk, 1);
624 /* passed fds are erased in the kfree_skb hook */
625 UNIXCB(skb).consumed = skb->len;
634 /* ---- Socket is dead now and most probably destroyed ---- */
637 * Fixme: BSD difference: In BSD all sockets connected to us get
638 * ECONNRESET and we die on the spot. In Linux we behave
639 * like files and pipes do and wait for the last dereference.
642 * Can't we simply set sock->err?
644 * What does the above comment talk about? --ANK(980817)
647 if (unix_tot_inflight)
648 unix_gc(); /* Garbage collect fds */
651 static void init_peercred(struct sock *sk)
653 const struct cred *old_cred;
656 spin_lock(&sk->sk_peer_lock);
657 old_pid = sk->sk_peer_pid;
658 old_cred = sk->sk_peer_cred;
659 sk->sk_peer_pid = get_pid(task_tgid(current));
660 sk->sk_peer_cred = get_current_cred();
661 spin_unlock(&sk->sk_peer_lock);
667 static void copy_peercred(struct sock *sk, struct sock *peersk)
669 const struct cred *old_cred;
673 spin_lock(&sk->sk_peer_lock);
674 spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
676 spin_lock(&peersk->sk_peer_lock);
677 spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
679 old_pid = sk->sk_peer_pid;
680 old_cred = sk->sk_peer_cred;
681 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
682 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
684 spin_unlock(&sk->sk_peer_lock);
685 spin_unlock(&peersk->sk_peer_lock);
691 static int unix_listen(struct socket *sock, int backlog)
694 struct sock *sk = sock->sk;
695 struct unix_sock *u = unix_sk(sk);
698 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
699 goto out; /* Only stream/seqpacket sockets accept */
702 goto out; /* No listens on an unbound socket */
704 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
706 if (backlog > sk->sk_max_ack_backlog)
707 wake_up_interruptible_all(&u->peer_wait);
708 sk->sk_max_ack_backlog = backlog;
709 sk->sk_state = TCP_LISTEN;
710 /* set credentials so connect can copy them */
715 unix_state_unlock(sk);
720 static int unix_release(struct socket *);
721 static int unix_bind(struct socket *, struct sockaddr *, int);
722 static int unix_stream_connect(struct socket *, struct sockaddr *,
723 int addr_len, int flags);
724 static int unix_socketpair(struct socket *, struct socket *);
725 static int unix_accept(struct socket *, struct socket *, int, bool);
726 static int unix_getname(struct socket *, struct sockaddr *, int);
727 static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
728 static __poll_t unix_dgram_poll(struct file *, struct socket *,
730 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
732 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
734 static int unix_shutdown(struct socket *, int);
735 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
736 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
737 static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
738 size_t size, int flags);
739 static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
740 struct pipe_inode_info *, size_t size,
742 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
743 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
744 static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
745 sk_read_actor_t recv_actor);
746 static int unix_stream_read_sock(struct sock *sk, read_descriptor_t *desc,
747 sk_read_actor_t recv_actor);
748 static int unix_dgram_connect(struct socket *, struct sockaddr *,
750 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
751 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
754 static int unix_set_peek_off(struct sock *sk, int val)
756 struct unix_sock *u = unix_sk(sk);
758 if (mutex_lock_interruptible(&u->iolock))
761 sk->sk_peek_off = val;
762 mutex_unlock(&u->iolock);
767 #ifdef CONFIG_PROC_FS
768 static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
770 struct sock *sk = sock->sk;
774 u = unix_sk(sock->sk);
775 seq_printf(m, "scm_fds: %u\n",
776 atomic_read(&u->scm_stat.nr_fds));
780 #define unix_show_fdinfo NULL
783 static const struct proto_ops unix_stream_ops = {
785 .owner = THIS_MODULE,
786 .release = unix_release,
788 .connect = unix_stream_connect,
789 .socketpair = unix_socketpair,
790 .accept = unix_accept,
791 .getname = unix_getname,
795 .compat_ioctl = unix_compat_ioctl,
797 .listen = unix_listen,
798 .shutdown = unix_shutdown,
799 .sendmsg = unix_stream_sendmsg,
800 .recvmsg = unix_stream_recvmsg,
801 .read_sock = unix_stream_read_sock,
802 .mmap = sock_no_mmap,
803 .sendpage = unix_stream_sendpage,
804 .splice_read = unix_stream_splice_read,
805 .set_peek_off = unix_set_peek_off,
806 .show_fdinfo = unix_show_fdinfo,
809 static const struct proto_ops unix_dgram_ops = {
811 .owner = THIS_MODULE,
812 .release = unix_release,
814 .connect = unix_dgram_connect,
815 .socketpair = unix_socketpair,
816 .accept = sock_no_accept,
817 .getname = unix_getname,
818 .poll = unix_dgram_poll,
821 .compat_ioctl = unix_compat_ioctl,
823 .listen = sock_no_listen,
824 .shutdown = unix_shutdown,
825 .sendmsg = unix_dgram_sendmsg,
826 .read_sock = unix_read_sock,
827 .recvmsg = unix_dgram_recvmsg,
828 .mmap = sock_no_mmap,
829 .sendpage = sock_no_sendpage,
830 .set_peek_off = unix_set_peek_off,
831 .show_fdinfo = unix_show_fdinfo,
834 static const struct proto_ops unix_seqpacket_ops = {
836 .owner = THIS_MODULE,
837 .release = unix_release,
839 .connect = unix_stream_connect,
840 .socketpair = unix_socketpair,
841 .accept = unix_accept,
842 .getname = unix_getname,
843 .poll = unix_dgram_poll,
846 .compat_ioctl = unix_compat_ioctl,
848 .listen = unix_listen,
849 .shutdown = unix_shutdown,
850 .sendmsg = unix_seqpacket_sendmsg,
851 .recvmsg = unix_seqpacket_recvmsg,
852 .mmap = sock_no_mmap,
853 .sendpage = sock_no_sendpage,
854 .set_peek_off = unix_set_peek_off,
855 .show_fdinfo = unix_show_fdinfo,
858 static void unix_close(struct sock *sk, long timeout)
860 /* Nothing to do here, unix socket does not need a ->close().
861 * This is merely for sockmap.
865 static void unix_unhash(struct sock *sk)
867 /* Nothing to do here, unix socket does not need a ->unhash().
868 * This is merely for sockmap.
872 struct proto unix_dgram_proto = {
874 .owner = THIS_MODULE,
875 .obj_size = sizeof(struct unix_sock),
877 #ifdef CONFIG_BPF_SYSCALL
878 .psock_update_sk_prot = unix_dgram_bpf_update_proto,
882 struct proto unix_stream_proto = {
883 .name = "UNIX-STREAM",
884 .owner = THIS_MODULE,
885 .obj_size = sizeof(struct unix_sock),
887 .unhash = unix_unhash,
888 #ifdef CONFIG_BPF_SYSCALL
889 .psock_update_sk_prot = unix_stream_bpf_update_proto,
893 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
899 atomic_long_inc(&unix_nr_socks);
900 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
905 if (type == SOCK_STREAM)
906 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
907 else /*dgram and seqpacket */
908 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
915 sock_init_data(sock, sk);
917 sk->sk_hash = unix_unbound_hash(sk);
918 sk->sk_allocation = GFP_KERNEL_ACCOUNT;
919 sk->sk_write_space = unix_write_space;
920 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
921 sk->sk_destruct = unix_sock_destructor;
923 u->path.dentry = NULL;
925 spin_lock_init(&u->lock);
926 atomic_long_set(&u->inflight, 0);
927 INIT_LIST_HEAD(&u->link);
928 mutex_init(&u->iolock); /* single task reading lock */
929 mutex_init(&u->bindlock); /* single task binding lock */
930 init_waitqueue_head(&u->peer_wait);
931 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
932 memset(&u->scm_stat, 0, sizeof(struct scm_stat));
933 unix_insert_unbound_socket(sk);
935 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
940 atomic_long_dec(&unix_nr_socks);
944 static int unix_create(struct net *net, struct socket *sock, int protocol,
949 if (protocol && protocol != PF_UNIX)
950 return -EPROTONOSUPPORT;
952 sock->state = SS_UNCONNECTED;
954 switch (sock->type) {
956 sock->ops = &unix_stream_ops;
959 * Believe it or not BSD has AF_UNIX, SOCK_RAW though nothing uses it.
963 sock->type = SOCK_DGRAM;
966 sock->ops = &unix_dgram_ops;
969 sock->ops = &unix_seqpacket_ops;
972 return -ESOCKTNOSUPPORT;
975 sk = unix_create1(net, sock, kern, sock->type);
982 static int unix_release(struct socket *sock)
984 struct sock *sk = sock->sk;
989 sk->sk_prot->close(sk, 0);
990 unix_release_sock(sk, 0);
996 static struct sock *unix_find_bsd(struct net *net, struct sockaddr_un *sunaddr,
997 int addr_len, int type)
1004 unix_mkname_bsd(sunaddr, addr_len);
1005 err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1009 err = path_permission(&path, MAY_WRITE);
1013 err = -ECONNREFUSED;
1014 inode = d_backing_inode(path.dentry);
1015 if (!S_ISSOCK(inode->i_mode))
1018 sk = unix_find_socket_byinode(inode);
1023 if (sk->sk_type == type)
1037 return ERR_PTR(err);
1040 static struct sock *unix_find_abstract(struct net *net,
1041 struct sockaddr_un *sunaddr,
1042 int addr_len, int type)
1044 unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1045 struct dentry *dentry;
1048 sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1050 return ERR_PTR(-ECONNREFUSED);
1052 dentry = unix_sk(sk)->path.dentry;
1054 touch_atime(&unix_sk(sk)->path);
1059 static struct sock *unix_find_other(struct net *net,
1060 struct sockaddr_un *sunaddr,
1061 int addr_len, int type)
1065 if (sunaddr->sun_path[0])
1066 sk = unix_find_bsd(net, sunaddr, addr_len, type);
1068 sk = unix_find_abstract(net, sunaddr, addr_len, type);
1073 static int unix_autobind(struct sock *sk)
1075 unsigned int new_hash, old_hash = sk->sk_hash;
1076 struct unix_sock *u = unix_sk(sk);
1077 struct unix_address *addr;
1078 u32 lastnum, ordernum;
1081 err = mutex_lock_interruptible(&u->bindlock);
1089 addr = kzalloc(sizeof(*addr) +
1090 offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1094 addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1095 addr->name->sun_family = AF_UNIX;
1096 refcount_set(&addr->refcnt, 1);
1098 ordernum = prandom_u32();
1099 lastnum = ordernum & 0xFFFFF;
1101 ordernum = (ordernum + 1) & 0xFFFFF;
1102 sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1104 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1105 unix_table_double_lock(old_hash, new_hash);
1107 if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
1109 unix_table_double_unlock(old_hash, new_hash);
1111 /* __unix_find_socket_byname() may take long time if many names
1112 * are already in use.
1116 if (ordernum == lastnum) {
1117 /* Give up if all names seems to be in use. */
1119 unix_release_addr(addr);
1126 __unix_set_addr_hash(sk, addr, new_hash);
1127 unix_table_double_unlock(old_hash, new_hash);
1130 out: mutex_unlock(&u->bindlock);
1134 static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1137 umode_t mode = S_IFSOCK |
1138 (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1139 unsigned int new_hash, old_hash = sk->sk_hash;
1140 struct unix_sock *u = unix_sk(sk);
1141 struct user_namespace *ns; // barf...
1142 struct unix_address *addr;
1143 struct dentry *dentry;
1147 unix_mkname_bsd(sunaddr, addr_len);
1148 addr_len = strlen(sunaddr->sun_path) +
1149 offsetof(struct sockaddr_un, sun_path) + 1;
1151 addr = unix_create_addr(sunaddr, addr_len);
1156 * Get the parent directory, calculate the hash for last
1159 dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1160 if (IS_ERR(dentry)) {
1161 err = PTR_ERR(dentry);
1166 * All right, let's create it.
1168 ns = mnt_user_ns(parent.mnt);
1169 err = security_path_mknod(&parent, dentry, mode, 0);
1171 err = vfs_mknod(ns, d_inode(parent.dentry), dentry, mode, 0);
1174 err = mutex_lock_interruptible(&u->bindlock);
1180 new_hash = unix_bsd_hash(d_backing_inode(dentry));
1181 unix_table_double_lock(old_hash, new_hash);
1182 u->path.mnt = mntget(parent.mnt);
1183 u->path.dentry = dget(dentry);
1184 __unix_set_addr_hash(sk, addr, new_hash);
1185 unix_table_double_unlock(old_hash, new_hash);
1186 mutex_unlock(&u->bindlock);
1187 done_path_create(&parent, dentry);
1191 mutex_unlock(&u->bindlock);
1194 /* failed after successful mknod? unlink what we'd created... */
1195 vfs_unlink(ns, d_inode(parent.dentry), dentry, NULL);
1197 done_path_create(&parent, dentry);
1199 unix_release_addr(addr);
1200 return err == -EEXIST ? -EADDRINUSE : err;
1203 static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1206 unsigned int new_hash, old_hash = sk->sk_hash;
1207 struct unix_sock *u = unix_sk(sk);
1208 struct unix_address *addr;
1211 addr = unix_create_addr(sunaddr, addr_len);
1215 err = mutex_lock_interruptible(&u->bindlock);
1224 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1225 unix_table_double_lock(old_hash, new_hash);
1227 if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
1231 __unix_set_addr_hash(sk, addr, new_hash);
1232 unix_table_double_unlock(old_hash, new_hash);
1233 mutex_unlock(&u->bindlock);
1237 unix_table_double_unlock(old_hash, new_hash);
1240 mutex_unlock(&u->bindlock);
1242 unix_release_addr(addr);
1246 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1248 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1249 struct sock *sk = sock->sk;
1252 if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1253 sunaddr->sun_family == AF_UNIX)
1254 return unix_autobind(sk);
1256 err = unix_validate_addr(sunaddr, addr_len);
1260 if (sunaddr->sun_path[0])
1261 err = unix_bind_bsd(sk, sunaddr, addr_len);
1263 err = unix_bind_abstract(sk, sunaddr, addr_len);
1268 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1270 if (unlikely(sk1 == sk2) || !sk2) {
1271 unix_state_lock(sk1);
1275 unix_state_lock(sk1);
1276 unix_state_lock_nested(sk2);
1278 unix_state_lock(sk2);
1279 unix_state_lock_nested(sk1);
1283 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1285 if (unlikely(sk1 == sk2) || !sk2) {
1286 unix_state_unlock(sk1);
1289 unix_state_unlock(sk1);
1290 unix_state_unlock(sk2);
1293 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1294 int alen, int flags)
1296 struct sock *sk = sock->sk;
1297 struct net *net = sock_net(sk);
1298 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1303 if (alen < offsetofend(struct sockaddr, sa_family))
1306 if (addr->sa_family != AF_UNSPEC) {
1307 err = unix_validate_addr(sunaddr, alen);
1311 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1312 !unix_sk(sk)->addr) {
1313 err = unix_autobind(sk);
1319 other = unix_find_other(net, sunaddr, alen, sock->type);
1320 if (IS_ERR(other)) {
1321 err = PTR_ERR(other);
1325 unix_state_double_lock(sk, other);
1327 /* Apparently VFS overslept socket death. Retry. */
1328 if (sock_flag(other, SOCK_DEAD)) {
1329 unix_state_double_unlock(sk, other);
1335 if (!unix_may_send(sk, other))
1338 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1342 sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1345 * 1003.1g breaking connected state with AF_UNSPEC
1348 unix_state_double_lock(sk, other);
1352 * If it was connected, reconnect.
1354 if (unix_peer(sk)) {
1355 struct sock *old_peer = unix_peer(sk);
1357 unix_peer(sk) = other;
1359 sk->sk_state = TCP_CLOSE;
1360 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1362 unix_state_double_unlock(sk, other);
1364 if (other != old_peer)
1365 unix_dgram_disconnected(sk, old_peer);
1368 unix_peer(sk) = other;
1369 unix_state_double_unlock(sk, other);
1375 unix_state_double_unlock(sk, other);
1381 static long unix_wait_for_peer(struct sock *other, long timeo)
1382 __releases(&unix_sk(other)->lock)
1384 struct unix_sock *u = unix_sk(other);
1388 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1390 sched = !sock_flag(other, SOCK_DEAD) &&
1391 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1392 unix_recvq_full(other);
1394 unix_state_unlock(other);
1397 timeo = schedule_timeout(timeo);
1399 finish_wait(&u->peer_wait, &wait);
1403 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1404 int addr_len, int flags)
1406 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1407 struct sock *sk = sock->sk;
1408 struct net *net = sock_net(sk);
1409 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1410 struct sock *newsk = NULL;
1411 struct sock *other = NULL;
1412 struct sk_buff *skb = NULL;
1417 err = unix_validate_addr(sunaddr, addr_len);
1421 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
1422 err = unix_autobind(sk);
1427 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1429 /* First of all allocate resources.
1430 If we allocate them after the state is locked,
1431 we will have to recheck everything again in any case.
1434 /* create new sock for complete connection */
1435 newsk = unix_create1(sock_net(sk), NULL, 0, sock->type);
1436 if (IS_ERR(newsk)) {
1437 err = PTR_ERR(newsk);
1444 /* Allocate skb for sending to listening sock */
1445 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1450 /* Find listening sock. */
1451 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1452 if (IS_ERR(other)) {
1453 err = PTR_ERR(other);
1458 /* Latch state of peer */
1459 unix_state_lock(other);
1461 /* Apparently VFS overslept socket death. Retry. */
1462 if (sock_flag(other, SOCK_DEAD)) {
1463 unix_state_unlock(other);
1468 err = -ECONNREFUSED;
1469 if (other->sk_state != TCP_LISTEN)
1471 if (other->sk_shutdown & RCV_SHUTDOWN)
1474 if (unix_recvq_full(other)) {
1479 timeo = unix_wait_for_peer(other, timeo);
1481 err = sock_intr_errno(timeo);
1482 if (signal_pending(current))
1490 It is a tricky place. We need to grab our state lock and cannot
1491 drop the lock on the peer. It is dangerous because a deadlock is
1492 possible. The connect-to-self case and simultaneous
1493 attempts to connect are eliminated by checking the socket
1494 state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
1495 check this before attempting to grab the lock.
1497 Well, and we have to recheck the state after the socket is locked.
1503 /* This is ok... continue with connect */
1505 case TCP_ESTABLISHED:
1506 /* Socket is already connected */
1514 unix_state_lock_nested(sk);
1516 if (sk->sk_state != st) {
1517 unix_state_unlock(sk);
1518 unix_state_unlock(other);
1523 err = security_unix_stream_connect(sk, other, newsk);
1525 unix_state_unlock(sk);
1529 /* The way is open! Quickly set all the necessary fields... */
1532 unix_peer(newsk) = sk;
1533 newsk->sk_state = TCP_ESTABLISHED;
1534 newsk->sk_type = sk->sk_type;
1535 init_peercred(newsk);
1536 newu = unix_sk(newsk);
1537 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1538 otheru = unix_sk(other);
1540 /* copy address information from listening to new sock
1542 * The contents of *(otheru->addr) and otheru->path
1543 * are seen fully set up here, since we have found
1544 * otheru in hash under unix_table_locks. Insertion
1545 * into the hash chain we'd found it in had been done
1546 * in an earlier critical area protected by unix_table_locks,
1547 * the same one where we'd set *(otheru->addr) contents,
1548 * as well as otheru->path and otheru->addr itself.
1550 * Using smp_store_release() here to set newu->addr
1551 * is enough to make those stores, as well as stores
1552 * to newu->path visible to anyone who gets newu->addr
1553 * by smp_load_acquire(). IOW, the same guarantees
1554 * as for unix_sock instances bound in unix_bind() or
1555 * in unix_autobind().
1557 if (otheru->path.dentry) {
1558 path_get(&otheru->path);
1559 newu->path = otheru->path;
1561 refcount_inc(&otheru->addr->refcnt);
1562 smp_store_release(&newu->addr, otheru->addr);
1564 /* Set credentials */
1565 copy_peercred(sk, other);
1567 sock->state = SS_CONNECTED;
1568 sk->sk_state = TCP_ESTABLISHED;
1571 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1572 unix_peer(sk) = newsk;
1574 unix_state_unlock(sk);
1576 /* take ten and send info to listening sock */
1577 spin_lock(&other->sk_receive_queue.lock);
1578 __skb_queue_tail(&other->sk_receive_queue, skb);
1579 spin_unlock(&other->sk_receive_queue.lock);
1580 unix_state_unlock(other);
1581 other->sk_data_ready(other);
1587 unix_state_unlock(other);
1592 unix_release_sock(newsk, 0);
1598 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1600 struct sock *ska = socka->sk, *skb = sockb->sk;
1602 /* Join our sockets back to back */
1605 unix_peer(ska) = skb;
1606 unix_peer(skb) = ska;
1610 ska->sk_state = TCP_ESTABLISHED;
1611 skb->sk_state = TCP_ESTABLISHED;
1612 socka->state = SS_CONNECTED;
1613 sockb->state = SS_CONNECTED;
1617 static void unix_sock_inherit_flags(const struct socket *old,
1620 if (test_bit(SOCK_PASSCRED, &old->flags))
1621 set_bit(SOCK_PASSCRED, &new->flags);
1622 if (test_bit(SOCK_PASSSEC, &old->flags))
1623 set_bit(SOCK_PASSSEC, &new->flags);
1626 static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1629 struct sock *sk = sock->sk;
1631 struct sk_buff *skb;
1635 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1639 if (sk->sk_state != TCP_LISTEN)
1642 /* If socket state is TCP_LISTEN it cannot change (for now...),
1643 * so that no locks are necessary.
1646 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1648 /* This means receive shutdown. */
1655 skb_free_datagram(sk, skb);
1656 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1658 /* attach accepted sock to socket */
1659 unix_state_lock(tsk);
1660 newsock->state = SS_CONNECTED;
1661 unix_sock_inherit_flags(sock, newsock);
1662 sock_graft(tsk, newsock);
1663 unix_state_unlock(tsk);
1671 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1673 struct sock *sk = sock->sk;
1674 struct unix_address *addr;
1675 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1679 sk = unix_peer_get(sk);
1689 addr = smp_load_acquire(&unix_sk(sk)->addr);
1691 sunaddr->sun_family = AF_UNIX;
1692 sunaddr->sun_path[0] = 0;
1693 err = offsetof(struct sockaddr_un, sun_path);
1696 memcpy(sunaddr, addr->name, addr->len);
1703 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1705 scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1708 * Garbage collection of unix sockets starts by selecting a set of
1709 * candidate sockets which have reference only from being in flight
1710 * (total_refs == inflight_refs). This condition is checked once during
1711 * the candidate collection phase, and candidates are marked as such, so
1712 * that non-candidates can later be ignored. While inflight_refs is
1713 * protected by unix_gc_lock, total_refs (file count) is not, hence this
1714 * is an instantaneous decision.
1716 * Once a candidate, however, the socket must not be reinstalled into a
1717 * file descriptor while the garbage collection is in progress.
1719 * If the above conditions are met, then the directed graph of
1720 * candidates (*) does not change while unix_gc_lock is held.
1722 * Any operation that changes the file count through file descriptors
1723 * (dup, close, sendmsg) does not change the graph since candidates are
1724 * not installed in fds.
1726 * Dequeuing a candidate via recvmsg would install it into an fd, but
1727 * that takes unix_gc_lock to decrement the inflight count, so it's
1728 * serialized with garbage collection.
1730 * MSG_PEEK is special in that it does not change the inflight count,
1731 * yet does install the socket into an fd. The following lock/unlock
1732 * pair is to ensure serialization with garbage collection. It must be
1733 * done between incrementing the file count and installing the file into an fd.
1736 * If garbage collection starts after the barrier provided by the
1737 * lock/unlock, then it will see the elevated refcount and not mark this
1738 * as a candidate. If a garbage collection is already in progress
1739 * before the file count was incremented, then the lock/unlock pair will
1740 * ensure that garbage collection is finished before progressing to
1741 * installing the fd.
1743 * (*) A -> B where B is on the queue of A or B is on the queue of C
1744 * which is on the queue of listening socket A.
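 *
 * A sketch of the orderings involved (illustrative only):
 *
 *	peek side: file count++  ->  spin_lock(&unix_gc_lock)
 *	           spin_unlock(&unix_gc_lock)  ->  fd_install()
 *	gc side:   spin_lock(&unix_gc_lock)  ->  candidate scan
 *	           ->  spin_unlock(&unix_gc_lock)
 *
 * If gc grabbed the lock first, the peek side waits for the scan to finish
 * before installing the fd; otherwise gc sees the elevated file count and
 * does not treat the socket as a candidate.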
1746 spin_lock(&unix_gc_lock);
1747 spin_unlock(&unix_gc_lock);
1750 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1754 UNIXCB(skb).pid = get_pid(scm->pid);
1755 UNIXCB(skb).uid = scm->creds.uid;
1756 UNIXCB(skb).gid = scm->creds.gid;
1757 UNIXCB(skb).fp = NULL;
1758 unix_get_secdata(scm, skb);
1759 if (scm->fp && send_fds)
1760 err = unix_attach_fds(scm, skb);
1762 skb->destructor = unix_destruct_scm;
1766 static bool unix_passcred_enabled(const struct socket *sock,
1767 const struct sock *other)
1769 return test_bit(SOCK_PASSCRED, &sock->flags) ||
1770 !other->sk_socket ||
1771 test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
1775 * Some apps rely on write() giving SCM_CREDENTIALS
1776 * We include credentials if source or destination socket
1777 * asserted SOCK_PASSCRED.
1779 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1780 const struct sock *other)
1782 if (UNIXCB(skb).pid)
1784 if (unix_passcred_enabled(sock, other)) {
1785 UNIXCB(skb).pid = get_pid(task_tgid(current));
1786 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1790 static int maybe_init_creds(struct scm_cookie *scm,
1791 struct socket *socket,
1792 const struct sock *other)
1795 struct msghdr msg = { .msg_controllen = 0 };
1797 err = scm_send(socket, &msg, scm, false);
1801 if (unix_passcred_enabled(socket, other)) {
1802 scm->pid = get_pid(task_tgid(current));
1803 current_uid_gid(&scm->creds.uid, &scm->creds.gid);
1808 static bool unix_skb_scm_eq(struct sk_buff *skb,
1809 struct scm_cookie *scm)
1811 const struct unix_skb_parms *u = &UNIXCB(skb);
1813 return u->pid == scm->pid &&
1814 uid_eq(u->uid, scm->creds.uid) &&
1815 gid_eq(u->gid, scm->creds.gid) &&
1816 unix_secdata_eq(scm, skb);
1819 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1821 struct scm_fp_list *fp = UNIXCB(skb).fp;
1822 struct unix_sock *u = unix_sk(sk);
1824 if (unlikely(fp && fp->count))
1825 atomic_add(fp->count, &u->scm_stat.nr_fds);
1828 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1830 struct scm_fp_list *fp = UNIXCB(skb).fp;
1831 struct unix_sock *u = unix_sk(sk);
1833 if (unlikely(fp && fp->count))
1834 atomic_sub(fp->count, &u->scm_stat.nr_fds);
1838 * Send AF_UNIX data.
1841 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1844 struct sock *sk = sock->sk;
1845 struct net *net = sock_net(sk);
1846 struct unix_sock *u = unix_sk(sk);
1847 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1848 struct sock *other = NULL;
1850 struct sk_buff *skb;
1852 struct scm_cookie scm;
1857 err = scm_send(sock, msg, &scm, false);
1862 if (msg->msg_flags&MSG_OOB)
1865 if (msg->msg_namelen) {
1866 err = unix_validate_addr(sunaddr, msg->msg_namelen);
1872 other = unix_peer_get(sk);
1877 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
1878 err = unix_autobind(sk);
1884 if (len > sk->sk_sndbuf - 32)
1887 if (len > SKB_MAX_ALLOC) {
1888 data_len = min_t(size_t,
1889 len - SKB_MAX_ALLOC,
1890 MAX_SKB_FRAGS * PAGE_SIZE);
1891 data_len = PAGE_ALIGN(data_len);
1893 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1896 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1897 msg->msg_flags & MSG_DONTWAIT, &err,
1898 PAGE_ALLOC_COSTLY_ORDER);
1902 err = unix_scm_to_skb(&scm, skb, true);
1906 skb_put(skb, len - data_len);
1907 skb->data_len = data_len;
1909 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1913 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1918 if (sunaddr == NULL)
1921 other = unix_find_other(net, sunaddr, msg->msg_namelen,
1923 if (IS_ERR(other)) {
1924 err = PTR_ERR(other);
1930 if (sk_filter(other, skb) < 0) {
1931 /* Toss the packet but do not return any error to the sender */
1937 unix_state_lock(other);
1940 if (!unix_may_send(sk, other))
1943 if (unlikely(sock_flag(other, SOCK_DEAD))) {
1945 * Check with 1003.1g - what should
1948 unix_state_unlock(other);
1952 unix_state_lock(sk);
1955 if (unix_peer(sk) == other) {
1956 unix_peer(sk) = NULL;
1957 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1959 unix_state_unlock(sk);
1961 sk->sk_state = TCP_CLOSE;
1962 unix_dgram_disconnected(sk, other);
1964 err = -ECONNREFUSED;
1966 unix_state_unlock(sk);
1976 if (other->sk_shutdown & RCV_SHUTDOWN)
1979 if (sk->sk_type != SOCK_SEQPACKET) {
1980 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1985 /* other == sk && unix_peer(other) != sk if
1986 * - unix_peer(sk) == NULL, destination address bound to sk
1987 * - unix_peer(sk) == sk by time of get but disconnected before lock
1990 unlikely(unix_peer(other) != sk &&
1991 unix_recvq_full_lockless(other))) {
1993 timeo = unix_wait_for_peer(other, timeo);
1995 err = sock_intr_errno(timeo);
1996 if (signal_pending(current))
2003 unix_state_unlock(other);
2004 unix_state_double_lock(sk, other);
2007 if (unix_peer(sk) != other ||
2008 unix_dgram_peer_wake_me(sk, other)) {
2016 goto restart_locked;
2020 if (unlikely(sk_locked))
2021 unix_state_unlock(sk);
2023 if (sock_flag(other, SOCK_RCVTSTAMP))
2024 __net_timestamp(skb);
2025 maybe_add_creds(skb, sock, other);
2026 scm_stat_add(other, skb);
2027 skb_queue_tail(&other->sk_receive_queue, skb);
2028 unix_state_unlock(other);
2029 other->sk_data_ready(other);
2036 unix_state_unlock(sk);
2037 unix_state_unlock(other);
2047 /* We use paged skbs for stream sockets, and limit occupancy to 32768
2048 * bytes, and a minimum of a full page.
2050 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
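/* For example (illustrative arithmetic): with 4 KiB pages get_order(32768)
 * is 3, so UNIX_SKB_FRAGS_SZ is 4096 << 3 == 32768 bytes; with 64 KiB pages
 * get_order(32768) is 0 and the limit is a single full page.
 */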
2052 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2053 static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other)
2055 struct unix_sock *ousk = unix_sk(other);
2056 struct sk_buff *skb;
2059 skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2065 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2072 unix_state_lock(other);
2074 if (sock_flag(other, SOCK_DEAD) ||
2075 (other->sk_shutdown & RCV_SHUTDOWN)) {
2076 unix_state_unlock(other);
2081 maybe_add_creds(skb, sock, other);
2085 consume_skb(ousk->oob_skb);
2087 WRITE_ONCE(ousk->oob_skb, skb);
2089 scm_stat_add(other, skb);
2090 skb_queue_tail(&other->sk_receive_queue, skb);
2091 sk_send_sigurg(other);
2092 unix_state_unlock(other);
2093 other->sk_data_ready(other);
2099 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2102 struct sock *sk = sock->sk;
2103 struct sock *other = NULL;
2105 struct sk_buff *skb;
2107 struct scm_cookie scm;
2108 bool fds_sent = false;
2112 err = scm_send(sock, msg, &scm, false);
2117 if (msg->msg_flags & MSG_OOB) {
2118 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2126 if (msg->msg_namelen) {
2127 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2131 other = unix_peer(sk);
2136 if (sk->sk_shutdown & SEND_SHUTDOWN)
2139 while (sent < len) {
2142 /* Keep two messages in the pipe so it schedules better */
2143 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2145 /* allow fallback to order-0 allocations */
2146 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2148 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2150 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2152 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2153 msg->msg_flags & MSG_DONTWAIT, &err,
2154 get_order(UNIX_SKB_FRAGS_SZ));
2158 /* Only send the fds in the first buffer */
2159 err = unix_scm_to_skb(&scm, skb, !fds_sent);
2166 skb_put(skb, size - data_len);
2167 skb->data_len = data_len;
2169 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2175 unix_state_lock(other);
2177 if (sock_flag(other, SOCK_DEAD) ||
2178 (other->sk_shutdown & RCV_SHUTDOWN))
2181 maybe_add_creds(skb, sock, other);
2182 scm_stat_add(other, skb);
2183 skb_queue_tail(&other->sk_receive_queue, skb);
2184 unix_state_unlock(other);
2185 other->sk_data_ready(other);
2189 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2190 if (msg->msg_flags & MSG_OOB) {
2191 err = queue_oob(sock, msg, other);
2203 unix_state_unlock(other);
2206 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2207 send_sig(SIGPIPE, current, 0);
2211 return sent ? : err;
2214 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
2215 int offset, size_t size, int flags)
2218 bool send_sigpipe = false;
2219 bool init_scm = true;
2220 struct scm_cookie scm;
2221 struct sock *other, *sk = socket->sk;
2222 struct sk_buff *skb, *newskb = NULL, *tail = NULL;
2224 if (flags & MSG_OOB)
2227 other = unix_peer(sk);
2228 if (!other || sk->sk_state != TCP_ESTABLISHED)
2233 unix_state_unlock(other);
2234 mutex_unlock(&unix_sk(other)->iolock);
2235 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
2241 /* we must acquire iolock as we modify already present
2242 * skbs in the sk_receive_queue and mess with skb->len
2244 err = mutex_lock_interruptible(&unix_sk(other)->iolock);
2246 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
2250 if (sk->sk_shutdown & SEND_SHUTDOWN) {
2252 send_sigpipe = true;
2256 unix_state_lock(other);
2258 if (sock_flag(other, SOCK_DEAD) ||
2259 other->sk_shutdown & RCV_SHUTDOWN) {
2261 send_sigpipe = true;
2262 goto err_state_unlock;
2266 err = maybe_init_creds(&scm, socket, other);
2268 goto err_state_unlock;
2272 skb = skb_peek_tail(&other->sk_receive_queue);
2273 if (tail && tail == skb) {
2275 } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
2282 } else if (newskb) {
2283 /* this is the fast path; we don't necessarily need to
2284 * make this call, but even with newskb == NULL
2285 * it does no harm
2287 consume_skb(newskb);
2291 if (skb_append_pagefrags(skb, page, offset, size)) {
2297 skb->data_len += size;
2298 skb->truesize += size;
2299 refcount_add(size, &sk->sk_wmem_alloc);
2302 err = unix_scm_to_skb(&scm, skb, false);
2304 goto err_state_unlock;
2305 spin_lock(&other->sk_receive_queue.lock);
2306 __skb_queue_tail(&other->sk_receive_queue, newskb);
2307 spin_unlock(&other->sk_receive_queue.lock);
2310 unix_state_unlock(other);
2311 mutex_unlock(&unix_sk(other)->iolock);
2313 other->sk_data_ready(other);
2318 unix_state_unlock(other);
2320 mutex_unlock(&unix_sk(other)->iolock);
2323 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
2324 send_sig(SIGPIPE, current, 0);
2330 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2334 struct sock *sk = sock->sk;
2336 err = sock_error(sk);
2340 if (sk->sk_state != TCP_ESTABLISHED)
2343 if (msg->msg_namelen)
2344 msg->msg_namelen = 0;
2346 return unix_dgram_sendmsg(sock, msg, len);
2349 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2350 size_t size, int flags)
2352 struct sock *sk = sock->sk;
2354 if (sk->sk_state != TCP_ESTABLISHED)
2357 return unix_dgram_recvmsg(sock, msg, size, flags);
2360 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2362 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2365 msg->msg_namelen = addr->len;
2366 memcpy(msg->msg_name, addr->name, addr->len);
2370 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2373 struct scm_cookie scm;
2374 struct socket *sock = sk->sk_socket;
2375 struct unix_sock *u = unix_sk(sk);
2376 struct sk_buff *skb, *last;
2385 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2388 mutex_lock(&u->iolock);
2390 skip = sk_peek_offset(sk, flags);
2391 skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2392 &skip, &err, &last);
2394 if (!(flags & MSG_PEEK))
2395 scm_stat_del(sk, skb);
2399 mutex_unlock(&u->iolock);
2404 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2405 &err, &timeo, last));
2407 if (!skb) { /* implies iolock unlocked */
2408 unix_state_lock(sk);
2409 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2410 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2411 (sk->sk_shutdown & RCV_SHUTDOWN))
2413 unix_state_unlock(sk);
2417 if (wq_has_sleeper(&u->peer_wait))
2418 wake_up_interruptible_sync_poll(&u->peer_wait,
2419 EPOLLOUT | EPOLLWRNORM |
2423 unix_copy_addr(msg, skb->sk);
2425 if (size > skb->len - skip)
2426 size = skb->len - skip;
2427 else if (size < skb->len - skip)
2428 msg->msg_flags |= MSG_TRUNC;
2430 err = skb_copy_datagram_msg(skb, skip, msg, size);
2434 if (sock_flag(sk, SOCK_RCVTSTAMP))
2435 __sock_recv_timestamp(msg, sk, skb);
2437 memset(&scm, 0, sizeof(scm));
2439 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2440 unix_set_secdata(&scm, skb);
2442 if (!(flags & MSG_PEEK)) {
2444 unix_detach_fds(&scm, skb);
2446 sk_peek_offset_bwd(sk, skb->len);
2448 /* It is questionable: on PEEK we could:
2449 - do not return fds - good, but too simple 8)
2450 - return fds, and do not return them on read (old strategy, apparently wrong)
2452 - clone fds (I chose it for now, it is the most universal solution)
2455 POSIX 1003.1g does not actually define this clearly
2456 at all. POSIX 1003.1g doesn't define a lot of things clearly, however!
2461 sk_peek_offset_fwd(sk, size);
2464 unix_peek_fds(&scm, skb);
2466 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2468 scm_recv(sock, msg, &scm, flags);
2471 skb_free_datagram(sk, skb);
2472 mutex_unlock(&u->iolock);
2477 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2480 struct sock *sk = sock->sk;
2482 #ifdef CONFIG_BPF_SYSCALL
2483 const struct proto *prot = READ_ONCE(sk->sk_prot);
2485 if (prot != &unix_dgram_proto)
2486 return prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
2487 flags & ~MSG_DONTWAIT, NULL);
2489 return __unix_dgram_recvmsg(sk, msg, size, flags);
2492 static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
2493 sk_read_actor_t recv_actor)
2498 struct unix_sock *u = unix_sk(sk);
2499 struct sk_buff *skb;
2502 mutex_lock(&u->iolock);
2503 skb = skb_recv_datagram(sk, 0, 1, &err);
2504 mutex_unlock(&u->iolock);
2508 used = recv_actor(desc, skb, 0, skb->len);
2514 } else if (used <= skb->len) {
2527 * Sleep until more data has arrived. But check for races..
2529 static long unix_stream_data_wait(struct sock *sk, long timeo,
2530 struct sk_buff *last, unsigned int last_len,
2533 struct sk_buff *tail;
2536 unix_state_lock(sk);
2539 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2541 tail = skb_peek_tail(&sk->sk_receive_queue);
2543 (tail && tail->len != last_len) ||
2545 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2546 signal_pending(current) ||
2550 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2551 unix_state_unlock(sk);
2553 timeo = freezable_schedule_timeout(timeo);
2555 timeo = schedule_timeout(timeo);
2556 unix_state_lock(sk);
2558 if (sock_flag(sk, SOCK_DEAD))
2561 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2564 finish_wait(sk_sleep(sk), &wait);
2565 unix_state_unlock(sk);
2569 static unsigned int unix_skb_len(const struct sk_buff *skb)
2571 return skb->len - UNIXCB(skb).consumed;
2574 struct unix_stream_read_state {
2575 int (*recv_actor)(struct sk_buff *, int, int,
2576 struct unix_stream_read_state *);
2577 struct socket *socket;
2579 struct pipe_inode_info *pipe;
2582 unsigned int splice_flags;
2585 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2586 static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2588 struct socket *sock = state->socket;
2589 struct sock *sk = sock->sk;
2590 struct unix_sock *u = unix_sk(sk);
2592 struct sk_buff *oob_skb;
2594 mutex_lock(&u->iolock);
2595 unix_state_lock(sk);
2597 if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2598 unix_state_unlock(sk);
2599 mutex_unlock(&u->iolock);
2603 oob_skb = u->oob_skb;
2605 if (!(state->flags & MSG_PEEK))
2606 WRITE_ONCE(u->oob_skb, NULL);
2608 unix_state_unlock(sk);
2610 chunk = state->recv_actor(oob_skb, 0, chunk, state);
2612 if (!(state->flags & MSG_PEEK)) {
2613 UNIXCB(oob_skb).consumed += 1;
2617 mutex_unlock(&u->iolock);
2622 state->msg->msg_flags |= MSG_OOB;
2626 static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2627 int flags, int copied)
2629 struct unix_sock *u = unix_sk(sk);
2631 if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
2632 skb_unlink(skb, &sk->sk_receive_queue);
2636 if (skb == u->oob_skb) {
2639 } else if (sock_flag(sk, SOCK_URGINLINE)) {
2640 if (!(flags & MSG_PEEK)) {
2641 WRITE_ONCE(u->oob_skb, NULL);
2644 } else if (!(flags & MSG_PEEK)) {
2645 skb_unlink(skb, &sk->sk_receive_queue);
2647 skb = skb_peek(&sk->sk_receive_queue);
2655 static int unix_stream_read_sock(struct sock *sk, read_descriptor_t *desc,
2656 sk_read_actor_t recv_actor)
2658 if (unlikely(sk->sk_state != TCP_ESTABLISHED))
2661 return unix_read_sock(sk, desc, recv_actor);
2664 static int unix_stream_read_generic(struct unix_stream_read_state *state,
2667 struct scm_cookie scm;
2668 struct socket *sock = state->socket;
2669 struct sock *sk = sock->sk;
2670 struct unix_sock *u = unix_sk(sk);
2672 int flags = state->flags;
2673 int noblock = flags & MSG_DONTWAIT;
2674 bool check_creds = false;
2679 size_t size = state->size;
2680 unsigned int last_len;
2682 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2687 if (unlikely(flags & MSG_OOB)) {
2689 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2690 err = unix_stream_recv_urg(state);
2695 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2696 timeo = sock_rcvtimeo(sk, noblock);
2698 memset(&scm, 0, sizeof(scm));
2700 /* Lock the socket to prevent queue disordering
2701 * while we sleep copying data out to the message
2703 mutex_lock(&u->iolock);
2705 skip = max(sk_peek_offset(sk, flags), 0);
2710 struct sk_buff *skb, *last;
2713 unix_state_lock(sk);
2714 if (sock_flag(sk, SOCK_DEAD)) {
2718 last = skb = skb_peek(&sk->sk_receive_queue);
2719 last_len = last ? last->len : 0;
2721 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2723 skb = manage_oob(skb, sk, flags, copied);
2725 unix_state_unlock(sk);
2734 if (copied >= target)
2738 * POSIX 1003.1g mandates this order.
2741 err = sock_error(sk);
2744 if (sk->sk_shutdown & RCV_SHUTDOWN)
2747 unix_state_unlock(sk);
2753 mutex_unlock(&u->iolock);
2755 timeo = unix_stream_data_wait(sk, timeo, last,
2756 last_len, freezable);
2758 if (signal_pending(current)) {
2759 err = sock_intr_errno(timeo);
2764 mutex_lock(&u->iolock);
2767 unix_state_unlock(sk);
2771 while (skip >= unix_skb_len(skb)) {
2772 skip -= unix_skb_len(skb);
2774 last_len = skb->len;
2775 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2780 unix_state_unlock(sk);
2783 /* Never glue messages from different writers */
2784 if (!unix_skb_scm_eq(skb, &scm))
2786 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2787 /* Copy credentials */
2788 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2789 unix_set_secdata(&scm, skb);
2793 /* Copy address just once */
2794 if (state->msg && state->msg->msg_name) {
2795 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2796 state->msg->msg_name);
2797 unix_copy_addr(state->msg, skb->sk);
2801 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2803 chunk = state->recv_actor(skb, skip, chunk, state);
2804 drop_skb = !unix_skb_len(skb);
2805 /* skb is only safe to use if !drop_skb */
2816 /* the skb was touched by a concurrent reader;
2817 * we should not expect anything from this skb
2818 * anymore and assume it is invalid - we can be
2819 * sure it was dropped from the socket queue
2821 * let's report a short read
2827 /* Mark read part of skb as used */
2828 if (!(flags & MSG_PEEK)) {
2829 UNIXCB(skb).consumed += chunk;
2831 sk_peek_offset_bwd(sk, chunk);
2833 if (UNIXCB(skb).fp) {
2834 scm_stat_del(sk, skb);
2835 unix_detach_fds(&scm, skb);
2838 if (unix_skb_len(skb))
2841 skb_unlink(skb, &sk->sk_receive_queue);
2847 /* It is questionable; see the note in unix_dgram_recvmsg().
2850 unix_peek_fds(&scm, skb);
2852 sk_peek_offset_fwd(sk, chunk);
2859 last_len = skb->len;
2860 unix_state_lock(sk);
2861 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2864 unix_state_unlock(sk);
2869 mutex_unlock(&u->iolock);
2871 scm_recv(sock, state->msg, &scm, flags);
2875 return copied ? : err;
2878 static int unix_stream_read_actor(struct sk_buff *skb,
2879 int skip, int chunk,
2880 struct unix_stream_read_state *state)
2884 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2886 return ret ?: chunk;
2889 int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2890 size_t size, int flags)
2892 struct unix_stream_read_state state = {
2893 .recv_actor = unix_stream_read_actor,
2894 .socket = sk->sk_socket,
2900 return unix_stream_read_generic(&state, true);
2903 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2904 size_t size, int flags)
2906 struct unix_stream_read_state state = {
2907 .recv_actor = unix_stream_read_actor,
2914 #ifdef CONFIG_BPF_SYSCALL
2915 struct sock *sk = sock->sk;
2916 const struct proto *prot = READ_ONCE(sk->sk_prot);
2918 if (prot != &unix_stream_proto)
2919 return prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
2920 flags & ~MSG_DONTWAIT, NULL);
2922 return unix_stream_read_generic(&state, true);
2925 static int unix_stream_splice_actor(struct sk_buff *skb,
2926 int skip, int chunk,
2927 struct unix_stream_read_state *state)
2929 return skb_splice_bits(skb, state->socket->sk,
2930 UNIXCB(skb).consumed + skip,
2931 state->pipe, chunk, state->splice_flags);
2934 static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2935 struct pipe_inode_info *pipe,
2936 size_t size, unsigned int flags)
2938 struct unix_stream_read_state state = {
2939 .recv_actor = unix_stream_splice_actor,
2943 .splice_flags = flags,
2946 if (unlikely(*ppos))
2949 if (sock->file->f_flags & O_NONBLOCK ||
2950 flags & SPLICE_F_NONBLOCK)
2951 state.flags = MSG_DONTWAIT;
2953 return unix_stream_read_generic(&state, false);
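/* unix_stream_splice_read() above is what backs splice() from a
 * connected stream socket. A hedged userspace sketch (helper name made
 * up, error handling minimal) that moves queued bytes into a pipe
 * without copying them through userspace:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static ssize_t splice_from_unix(int unix_fd)
 *	{
 *		int pipefd[2];
 *		ssize_t moved;
 *
 *		if (pipe(pipefd) < 0)
 *			return -1;
 *
 *		// Move up to 4096 queued bytes; returns 0 on end of
 *		// stream, -1/EAGAIN if nothing is queued.
 *		moved = splice(unix_fd, NULL, pipefd[1], NULL, 4096,
 *			       SPLICE_F_NONBLOCK);
 *
 *		close(pipefd[0]);
 *		close(pipefd[1]);
 *		return moved;
 *	}
 */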
2956 static int unix_shutdown(struct socket *sock, int mode)
2958 struct sock *sk = sock->sk;
2961 if (mode < SHUT_RD || mode > SHUT_RDWR)
2964 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2965 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2966 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2970 unix_state_lock(sk);
2971 sk->sk_shutdown |= mode;
2972 other = unix_peer(sk);
2975 unix_state_unlock(sk);
2976 sk->sk_state_change(sk);
2979 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2982 const struct proto *prot = READ_ONCE(other->sk_prot);
2985 prot->unhash(other);
2986 if (mode&RCV_SHUTDOWN)
2987 peer_mode |= SEND_SHUTDOWN;
2988 if (mode&SEND_SHUTDOWN)
2989 peer_mode |= RCV_SHUTDOWN;
2990 unix_state_lock(other);
2991 other->sk_shutdown |= peer_mode;
2992 unix_state_unlock(other);
2993 other->sk_state_change(other);
2994 if (peer_mode == SHUTDOWN_MASK)
2995 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2996 else if (peer_mode & RCV_SHUTDOWN)
2997 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
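/* The peer_mode mirroring above is why a write-side shutdown on one end
 * is observed as an end-of-stream condition on the other. A minimal
 * userspace sketch (helper name made up, error handling omitted):
 *
 *	#include <sys/socket.h>
 *	#include <stdio.h>
 *
 *	static void shutdown_demo(void)
 *	{
 *		int sv[2];
 *		char buf[8];
 *		ssize_t n;
 *
 *		socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *		shutdown(sv[0], SHUT_WR);	// peer gets RCV_SHUTDOWN
 *
 *		n = recv(sv[1], buf, sizeof(buf), 0);
 *		printf("recv after peer SHUT_WR: %zd\n", n);	// prints 0
 *	}
 */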
3005 long unix_inq_len(struct sock *sk)
3007 struct sk_buff *skb;
3010 if (sk->sk_state == TCP_LISTEN)
3013 spin_lock(&sk->sk_receive_queue.lock);
3014 if (sk->sk_type == SOCK_STREAM ||
3015 sk->sk_type == SOCK_SEQPACKET) {
3016 skb_queue_walk(&sk->sk_receive_queue, skb)
3017 amount += unix_skb_len(skb);
3019 skb = skb_peek(&sk->sk_receive_queue);
3023 spin_unlock(&sk->sk_receive_queue.lock);
3027 EXPORT_SYMBOL_GPL(unix_inq_len);
3029 long unix_outq_len(struct sock *sk)
3031 return sk_wmem_alloc_get(sk);
3033 EXPORT_SYMBOL_GPL(unix_outq_len);
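/* unix_inq_len() and unix_outq_len() back the SIOCINQ and SIOCOUTQ
 * ioctls dispatched by unix_ioctl() below. A hedged userspace sketch
 * (helper name made up, error handling omitted):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *	#include <stdio.h>
 *
 *	static void queue_lengths(int fd)
 *	{
 *		int inq = 0, outq = 0;
 *
 *		// Unread bytes (for SOCK_DGRAM: size of the next datagram).
 *		ioctl(fd, SIOCINQ, &inq);
 *		// Bytes this socket has sent that the peer has not yet consumed.
 *		ioctl(fd, SIOCOUTQ, &outq);
 *		printf("inq=%d outq=%d\n", inq, outq);
 *	}
 */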
3035 static int unix_open_file(struct sock *sk)
3041 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3044 if (!smp_load_acquire(&unix_sk(sk)->addr))
3047 path = unix_sk(sk)->path;
3053 fd = get_unused_fd_flags(O_CLOEXEC);
3057 f = dentry_open(&path, O_PATH, current_cred());
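/* unix_open_file() implements the SIOCUNIXFILE ioctl: for a socket bound
 * in the filesystem it returns a new O_PATH descriptor naming the bound
 * socket file (CAP_NET_ADMIN required, see above). A hedged userspace
 * sketch, assuming SIOCUNIXFILE is picked up from the uapi headers
 * <linux/sockios.h> and <linux/un.h>:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *	#include <linux/un.h>
 *
 *	// Returns an O_PATH fd for the file the socket is bound to, or -1.
 *	static int unix_bound_file(int sock_fd)
 *	{
 *		return ioctl(sock_fd, SIOCUNIXFILE);
 *	}
 */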
3071 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3073 struct sock *sk = sock->sk;
3079 amount = unix_outq_len(sk);
3080 err = put_user(amount, (int __user *)arg);
3083 amount = unix_inq_len(sk);
3087 err = put_user(amount, (int __user *)arg);
3090 err = unix_open_file(sk);
3092 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3095 struct sk_buff *skb;
3098 skb = skb_peek(&sk->sk_receive_queue);
3099 if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
3101 err = put_user(answ, (int __user *)arg);
3112 #ifdef CONFIG_COMPAT
3113 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3115 return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3119 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3121 struct sock *sk = sock->sk;
3124 sock_poll_wait(file, sock, wait);
3127 /* exceptional events? */
3130 if (sk->sk_shutdown == SHUTDOWN_MASK)
3132 if (sk->sk_shutdown & RCV_SHUTDOWN)
3133 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3136 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3137 mask |= EPOLLIN | EPOLLRDNORM;
3138 if (sk_is_readable(sk))
3139 mask |= EPOLLIN | EPOLLRDNORM;
3140 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3141 if (READ_ONCE(unix_sk(sk)->oob_skb))
3145 /* Connection-based sockets need to check for termination and startup */
3146 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3147 sk->sk_state == TCP_CLOSE)
3151 * we also report the socket as writable when the other side has
3152 * shut down the connection. This prevents stuck sockets.
3154 if (unix_writable(sk))
3155 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
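/* The mask assembled above is what poll()/epoll report to userspace; in
 * particular EPOLLRDHUP is raised once the peer has shut down its write
 * side. A minimal userspace sketch (helper name made up, error handling
 * omitted):
 *
 *	#define _GNU_SOURCE		// for POLLRDHUP
 *	#include <poll.h>
 *	#include <sys/socket.h>
 *	#include <stdio.h>
 *
 *	static void poll_demo(void)
 *	{
 *		int sv[2];
 *		struct pollfd pfd;
 *
 *		socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *		shutdown(sv[0], SHUT_WR);
 *
 *		pfd.fd = sv[1];
 *		pfd.events = POLLIN | POLLRDHUP;
 *		poll(&pfd, 1, 0);
 *		// Expect POLLIN | POLLRDHUP to be reported here.
 *		printf("revents=0x%x\n", (unsigned int)pfd.revents);
 *	}
 */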
3160 static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3163 struct sock *sk = sock->sk, *other;
3164 unsigned int writable;
3167 sock_poll_wait(file, sock, wait);
3170 /* exceptional events? */
3171 if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
3173 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3175 if (sk->sk_shutdown & RCV_SHUTDOWN)
3176 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3177 if (sk->sk_shutdown == SHUTDOWN_MASK)
3181 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3182 mask |= EPOLLIN | EPOLLRDNORM;
3183 if (sk_is_readable(sk))
3184 mask |= EPOLLIN | EPOLLRDNORM;
3186 /* Connection-based sockets need to check for termination and startup */
3187 if (sk->sk_type == SOCK_SEQPACKET) {
3188 if (sk->sk_state == TCP_CLOSE)
3190 /* connection hasn't started yet? */
3191 if (sk->sk_state == TCP_SYN_SENT)
3195 /* No write status requested, avoid expensive OUT tests. */
3196 if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3199 writable = unix_writable(sk);
3201 unix_state_lock(sk);
3203 other = unix_peer(sk);
3204 if (other && unix_peer(other) != sk &&
3205 unix_recvq_full_lockless(other) &&
3206 unix_dgram_peer_wake_me(sk, other))
3209 unix_state_unlock(sk);
3213 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3215 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3220 #ifdef CONFIG_PROC_FS
3222 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3224 #define get_bucket(x) ((x) >> BUCKET_SPACE)
3225 #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3226 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
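/* The iterator position packs (bucket, offset) into a single loff_t: the
 * high bits select the hash bucket and the low BUCKET_SPACE bits index
 * the socket within it, so the round trip holds whenever the offset fits
 * in BUCKET_SPACE bits. A quick illustrative check (values made up):
 *
 *	loff_t pos = set_bucket_offset(3, 7);
 *
 *	// get_bucket(pos) == 3 and get_offset(pos) == 7
 */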
3228 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3230 unsigned long offset = get_offset(*pos);
3231 unsigned long bucket = get_bucket(*pos);
3233 unsigned long count = 0;
3235 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
3236 if (sock_net(sk) != seq_file_net(seq))
3238 if (++count == offset)
3245 static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3247 unsigned long bucket = get_bucket(*pos);
3250 while (bucket < ARRAY_SIZE(unix_socket_table)) {
3251 spin_lock(&unix_table_locks[bucket]);
3253 sk = unix_from_bucket(seq, pos);
3257 spin_unlock(&unix_table_locks[bucket]);
3259 *pos = set_bucket_offset(++bucket, 1);
3265 static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3268 unsigned long bucket = get_bucket(*pos);
3270 for (sk = sk_next(sk); sk; sk = sk_next(sk))
3271 if (sock_net(sk) == seq_file_net(seq))
3274 spin_unlock(&unix_table_locks[bucket]);
3276 *pos = set_bucket_offset(++bucket, 1);
3278 return unix_get_first(seq, pos);
3281 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3284 return SEQ_START_TOKEN;
3286 return unix_get_first(seq, pos);
3289 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3293 if (v == SEQ_START_TOKEN)
3294 return unix_get_first(seq, pos);
3296 return unix_get_next(seq, v, pos);
3299 static void unix_seq_stop(struct seq_file *seq, void *v)
3301 struct sock *sk = v;
3304 spin_unlock(&unix_table_locks[sk->sk_hash]);
3307 static int unix_seq_show(struct seq_file *seq, void *v)
3310 if (v == SEQ_START_TOKEN)
3311 seq_puts(seq, "Num RefCount Protocol Flags Type St "
3315 struct unix_sock *u = unix_sk(s);
3318 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3320 refcount_read(&s->sk_refcnt),
3322 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3325 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3326 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3329 if (u->addr) {	/* under unix_table_locks here */
3334 len = u->addr->len -
3335 offsetof(struct sockaddr_un, sun_path);
3336 if (u->addr->name->sun_path[0]) {
3342 for ( ; i < len; i++)
3343 seq_putc(seq, u->addr->name->sun_path[i] ?: '@');
3346 unix_state_unlock(s);
3347 seq_putc(seq, '\n');
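/* The loop above emits one line per socket in /proc/net/unix. An
 * illustrative (made-up) line for a listening stream socket, matching
 * the header printed in unix_seq_show():
 *
 *	Num               RefCount Protocol Flags    Type St Inode Path
 *	0000000012345678: 00000002 00000000 00010000 0001 01 24589 /run/foo.sock
 *
 * Flags 00010000 is __SO_ACCEPTCON (listening), Type 0001 is
 * SOCK_STREAM, and NUL bytes in abstract addresses are rendered as '@'
 * by the seq_putc() call above.
 */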
3353 static const struct seq_operations unix_seq_ops = {
3354 .start = unix_seq_start,
3355 .next = unix_seq_next,
3356 .stop = unix_seq_stop,
3357 .show = unix_seq_show,
3360 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL)
3361 struct bpf_unix_iter_state {
3362 struct seq_net_private p;
3363 unsigned int cur_sk;
3364 unsigned int end_sk;
3365 unsigned int max_sk;
3366 struct sock **batch;
3367 bool st_bucket_done;
3370 struct bpf_iter__unix {
3371 __bpf_md_ptr(struct bpf_iter_meta *, meta);
3372 __bpf_md_ptr(struct unix_sock *, unix_sk);
3373 uid_t uid __aligned(8);
3376 static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3377 struct unix_sock *unix_sk, uid_t uid)
3379 struct bpf_iter__unix ctx;
3381 meta->seq_num--; /* skip SEQ_START_TOKEN */
3383 ctx.unix_sk = unix_sk;
3385 return bpf_iter_run_prog(prog, &ctx);
3388 static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3391 struct bpf_unix_iter_state *iter = seq->private;
3392 unsigned int expected = 1;
3395 sock_hold(start_sk);
3396 iter->batch[iter->end_sk++] = start_sk;
3398 for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3399 if (sock_net(sk) != seq_file_net(seq))
3402 if (iter->end_sk < iter->max_sk) {
3404 iter->batch[iter->end_sk++] = sk;
3410 spin_unlock(&unix_table_locks[start_sk->sk_hash]);
3415 static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3417 while (iter->cur_sk < iter->end_sk)
3418 sock_put(iter->batch[iter->cur_sk++]);
3421 static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3422 unsigned int new_batch_sz)
3424 struct sock **new_batch;
3426 new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3427 GFP_USER | __GFP_NOWARN);
3431 bpf_iter_unix_put_batch(iter);
3432 kvfree(iter->batch);
3433 iter->batch = new_batch;
3434 iter->max_sk = new_batch_sz;
3439 static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3442 struct bpf_unix_iter_state *iter = seq->private;
3443 unsigned int expected;
3444 bool resized = false;
3447 if (iter->st_bucket_done)
3448 *pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3451 /* Get a new batch */
3455 sk = unix_get_first(seq, pos);
3457 return NULL; /* Done */
3459 expected = bpf_iter_unix_hold_batch(seq, sk);
3461 if (iter->end_sk == expected) {
3462 iter->st_bucket_done = true;
3466 if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3474 static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3477 return SEQ_START_TOKEN;
3479 /* bpf iter does not support lseek, so it always
3480 * continues from where it was stop()-ped.
3482 return bpf_iter_unix_batch(seq, pos);
3485 static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3487 struct bpf_unix_iter_state *iter = seq->private;
3490 /* Whenever seq_next() is called, the iter->cur_sk is
3491 * done with seq_show(), so advance to the next sk in the batch.
3494 if (iter->cur_sk < iter->end_sk)
3495 sock_put(iter->batch[iter->cur_sk++]);
3499 if (iter->cur_sk < iter->end_sk)
3500 sk = iter->batch[iter->cur_sk];
3502 sk = bpf_iter_unix_batch(seq, pos);
3507 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3509 struct bpf_iter_meta meta;
3510 struct bpf_prog *prog;
3511 struct sock *sk = v;
3516 if (v == SEQ_START_TOKEN)
3519 slow = lock_sock_fast(sk);
3521 if (unlikely(sk_unhashed(sk))) {
3526 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3528 prog = bpf_iter_get_info(&meta, false);
3529 ret = unix_prog_seq_show(prog, &meta, v, uid);
3531 unlock_sock_fast(sk, slow);
3535 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3537 struct bpf_unix_iter_state *iter = seq->private;
3538 struct bpf_iter_meta meta;
3539 struct bpf_prog *prog;
3543 prog = bpf_iter_get_info(&meta, true);
3545 (void)unix_prog_seq_show(prog, &meta, v, 0);
3548 if (iter->cur_sk < iter->end_sk)
3549 bpf_iter_unix_put_batch(iter);
3552 static const struct seq_operations bpf_iter_unix_seq_ops = {
3553 .start = bpf_iter_unix_seq_start,
3554 .next = bpf_iter_unix_seq_next,
3555 .stop = bpf_iter_unix_seq_stop,
3556 .show = bpf_iter_unix_seq_show,
3561 static const struct net_proto_family unix_family_ops = {
3563 .create = unix_create,
3564 .owner = THIS_MODULE,
3568 static int __net_init unix_net_init(struct net *net)
3570 int error = -ENOMEM;
3572 net->unx.sysctl_max_dgram_qlen = 10;
3573 if (unix_sysctl_register(net))
3576 #ifdef CONFIG_PROC_FS
3577 if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3578 sizeof(struct seq_net_private))) {
3579 unix_sysctl_unregister(net);
3588 static void __net_exit unix_net_exit(struct net *net)
3590 unix_sysctl_unregister(net);
3591 remove_proc_entry("unix", net->proc_net);
3594 static struct pernet_operations unix_net_ops = {
3595 .init = unix_net_init,
3596 .exit = unix_net_exit,
3599 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3600 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3601 struct unix_sock *unix_sk, uid_t uid)
3603 #define INIT_BATCH_SZ 16
3605 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3607 struct bpf_unix_iter_state *iter = priv_data;
3610 err = bpf_iter_init_seq_net(priv_data, aux);
3614 err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3616 bpf_iter_fini_seq_net(priv_data);
3623 static void bpf_iter_fini_unix(void *priv_data)
3625 struct bpf_unix_iter_state *iter = priv_data;
3627 bpf_iter_fini_seq_net(priv_data);
3628 kvfree(iter->batch);
3631 static const struct bpf_iter_seq_info unix_seq_info = {
3632 .seq_ops = &bpf_iter_unix_seq_ops,
3633 .init_seq_private = bpf_iter_init_unix,
3634 .fini_seq_private = bpf_iter_fini_unix,
3635 .seq_priv_size = sizeof(struct bpf_unix_iter_state),
3638 static const struct bpf_func_proto *
3639 bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3640 const struct bpf_prog *prog)
3643 case BPF_FUNC_setsockopt:
3644 return &bpf_sk_setsockopt_proto;
3645 case BPF_FUNC_getsockopt:
3646 return &bpf_sk_getsockopt_proto;
3652 static struct bpf_iter_reg unix_reg_info = {
3654 .ctx_arg_info_size = 1,
3656 { offsetof(struct bpf_iter__unix, unix_sk),
3657 PTR_TO_BTF_ID_OR_NULL },
3659 .get_func_proto = bpf_iter_unix_get_func_proto,
3660 .seq_info = &unix_seq_info,
3663 static void __init bpf_iter_register(void)
3665 unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3666 if (bpf_iter_reg_target(&unix_reg_info))
3667 pr_warn("Warning: could not register bpf iterator unix\n");
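/* The registration above exposes an "iter/unix" target that a BPF
 * program can attach to in order to walk every unix socket. A hedged
 * sketch modeled on the in-tree selftests (assumes a bpftool-generated
 * vmlinux.h and a libbpf that provides BPF_SEQ_PRINTF; file and function
 * names are illustrative):
 *
 *	// unix_iter.bpf.c
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	char _license[] SEC("license") = "GPL";
 *
 *	SEC("iter/unix")
 *	int dump_unix(struct bpf_iter__unix *ctx)
 *	{
 *		struct unix_sock *unix_sk = ctx->unix_sk;
 *
 *		if (!unix_sk)
 *			return 0;
 *
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "uid=%u\n", ctx->uid);
 *		return 0;
 *	}
 *
 * Once loaded, the iterator can be pinned and read like a seq_file,
 * e.g. "bpftool iter pin unix_iter.bpf.o /sys/fs/bpf/unix_dump"
 * followed by "cat /sys/fs/bpf/unix_dump".
 */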
3671 static int __init af_unix_init(void)
3675 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3677 for (i = 0; i < 2 * UNIX_HASH_SIZE; i++)
3678 spin_lock_init(&unix_table_locks[i]);
3680 rc = proto_register(&unix_dgram_proto, 1);
3682 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3686 rc = proto_register(&unix_stream_proto, 1);
3688 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3692 sock_register(&unix_family_ops);
3693 register_pernet_subsys(&unix_net_ops);
3694 unix_bpf_build_proto();
3696 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3697 bpf_iter_register();
3704 static void __exit af_unix_exit(void)
3706 sock_unregister(PF_UNIX);
3707 proto_unregister(&unix_dgram_proto);
3708 proto_unregister(&unix_stream_proto);
3709 unregister_pernet_subsys(&unix_net_ops);
3712 /* Earlier than device_initcall() so that other drivers invoking
3713 request_module() don't end up in a loop when modprobe tries
3714 to use a UNIX socket. But later than subsys_initcall() because
3715 we depend on infrastructure initialised there. */
3716 fs_initcall(af_unix_init);
3717 module_exit(af_unix_exit);
3719 MODULE_LICENSE("GPL");
3720 MODULE_ALIAS_NETPROTO(PF_UNIX);