1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * NET4: Implementation of BSD Unix domain sockets.
8 * Linus Torvalds : Assorted bug cures.
9 * Niibe Yutaka : async I/O support.
10 * Carsten Paeth : PF_UNIX check, address fixes.
11 * Alan Cox : Limit size of allocated blocks.
12 * Alan Cox : Fixed the stupid socketpair bug.
13 * Alan Cox : BSD compatibility fine tuning.
14 * Alan Cox : Fixed a bug in connect when interrupted.
15 * Alan Cox : Sorted out a proper draft version of
16 * file descriptor passing hacked up from
17 * Mike Shaver's work.
18 * Marty Leisner : Fixes to fd passing
19 * Nick Nevin : recvmsg bugfix.
20 * Alan Cox : Started proper garbage collector
21 * Heiko Eißfeldt : Missing verify_area check
22 * Alan Cox : Started POSIXisms
23 * Andreas Schwab : Replace inode by dentry for proper
24 * reference counting.
25 * Kirk Petersen : Made this a module
26 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
28 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
29 * by the above two patches.
30 * Andrea Arcangeli : If possible we block in connect(2)
31 * if the max backlog of the listen socket
32 * has been reached. This won't break
33 * old apps and it will avoid a huge amount
34 * of socks being hashed (this is for unix_gc()
35 * performance reasons).
36 * Security fix that limits the max
37 * number of socks to 2*max_files and
38 * the number of skbs queueable in the
39 * recv queue.
40 * Artur Skawina : Hash function optimizations
41 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
42 * Malcolm Beattie : Set peercred for socketpair
43 * Michal Ostrowski : Module initialization cleanup.
44 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
45 * the core infrastructure is doing that
46 * for all net proto families now (2.5.69+)
48 * Known differences from reference BSD that was tested:
51 * ECONNREFUSED is not returned from one end of a connected socket to the
52 * other the moment one end closes.
53 * fstat() doesn't return st_dev=0, and gives the blksize as the high water mark
54 * and a fake inode identifier (nor the BSD first-socket fstat-twice bug).
56 * accept() returns a path name even if the connecting socket has closed
57 * in the meantime (BSD loses the path and gives up).
58 * accept() returns a 0 length path for an unbound connector. BSD returns 16
59 * and a null first byte in the path (but not for getsockname/getpeername - BSD bug??)
60 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
61 * BSD af_unix apparently has connect forgetting to block properly.
62 * (need to check this with the POSIX spec in detail)
64 * Differences from 2.0.0-11-... (ANK)
65 * Bug fixes and improvements.
66 * - client shutdown killed server socket.
67 * - removed all useless cli/sti pairs.
69 * Semantic changes/extensions.
70 * - generic control message passing.
71 * - SCM_CREDENTIALS control message.
72 * - "Abstract" (not FS based) socket bindings.
73 * Abstract names are sequences of bytes (not zero terminated)
74 * starting with 0, so that this name space does not intersect
75 * with BSD names.
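/* Editor's note: an illustrative userspace sketch (not part of the
 * original file) of binding in the abstract namespace described above.
 * The name begins with a NUL byte and is not NUL terminated; its
 * length is conveyed via addrlen rather than by string termination:
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memcpy(sun.sun_path, "\0example", 8);
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 8);
 *
 * Such a name never touches the filesystem and vanishes when the last
 * socket bound to it is closed.
 */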
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
80 #include <linux/module.h>
81 #include <linux/kernel.h>
82 #include <linux/signal.h>
83 #include <linux/sched/signal.h>
84 #include <linux/errno.h>
85 #include <linux/string.h>
86 #include <linux/stat.h>
87 #include <linux/dcache.h>
88 #include <linux/namei.h>
89 #include <linux/socket.h>
91 #include <linux/fcntl.h>
92 #include <linux/termios.h>
93 #include <linux/sockios.h>
94 #include <linux/net.h>
97 #include <linux/slab.h>
98 #include <linux/uaccess.h>
99 #include <linux/skbuff.h>
100 #include <linux/netdevice.h>
101 #include <net/net_namespace.h>
102 #include <net/sock.h>
103 #include <net/tcp_states.h>
104 #include <net/af_unix.h>
105 #include <linux/proc_fs.h>
106 #include <linux/seq_file.h>
108 #include <linux/init.h>
109 #include <linux/poll.h>
110 #include <linux/rtnetlink.h>
111 #include <linux/mount.h>
112 #include <net/checksum.h>
113 #include <linux/security.h>
114 #include <linux/freezer.h>
115 #include <linux/file.h>
119 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
120 EXPORT_SYMBOL_GPL(unix_socket_table);
121 DEFINE_SPINLOCK(unix_table_lock);
122 EXPORT_SYMBOL_GPL(unix_table_lock);
123 static atomic_long_t unix_nr_socks;
126 static struct hlist_head *unix_sockets_unbound(void *addr)
128 unsigned long hash = (unsigned long)addr;
132 hash %= UNIX_HASH_SIZE;
133 return &unix_socket_table[UNIX_HASH_SIZE + hash];
136 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
138 #ifdef CONFIG_SECURITY_NETWORK
139 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
141 UNIXCB(skb).secid = scm->secid;
144 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
146 scm->secid = UNIXCB(skb).secid;
149 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
151 return (scm->secid == UNIXCB(skb).secid);
154 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
157 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
160 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
164 #endif /* CONFIG_SECURITY_NETWORK */
167 * SMP locking strategy:
168 * the hash table is protected by the spinlock unix_table_lock;
169 * each socket's state is protected by its own separate spinlock.
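/* Editor's note: a minimal sketch of the two-level locking above,
 * modelled on the lookup helpers later in this file (the names are the
 * real ones; the flow is illustrative). The table lock covers only
 * hash-chain membership; the found socket's own state lock is taken
 * separately:
 *
 *	spin_lock(&unix_table_lock);
 *	s = __unix_find_socket_byname(net, sunname, len, type, hash);
 *	if (s)
 *		sock_hold(s);
 *	spin_unlock(&unix_table_lock);
 *	if (s) {
 *		unix_state_lock(s);
 *		...
 *		unix_state_unlock(s);
 *	}
 */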
172 static inline unsigned int unix_hash_fold(__wsum n)
174 unsigned int hash = (__force unsigned int)csum_fold(n);
177 return hash&(UNIX_HASH_SIZE-1);
180 #define unix_peer(sk) (unix_sk(sk)->peer)
182 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
184 return unix_peer(osk) == sk;
187 static inline int unix_may_send(struct sock *sk, struct sock *osk)
189 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
192 static inline int unix_recvq_full(const struct sock *sk)
194 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
197 static inline int unix_recvq_full_lockless(const struct sock *sk)
199 return skb_queue_len_lockless(&sk->sk_receive_queue) >
200 READ_ONCE(sk->sk_max_ack_backlog);
203 struct sock *unix_peer_get(struct sock *s)
211 unix_state_unlock(s);
214 EXPORT_SYMBOL_GPL(unix_peer_get);
216 static inline void unix_release_addr(struct unix_address *addr)
218 if (refcount_dec_and_test(&addr->refcnt))
223 * Check unix socket name:
224 * - it should not be zero length.
225 * - if it does not start with a zero byte, it should be NUL terminated (FS object)
226 * - if it starts with a zero byte, it is an abstract name.
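/* Editor's note: the three address forms this implies, with
 * hypothetical example values as userspace would supply them:
 *
 *	sun_path = "/tmp/sock\0"	filesystem name, NUL terminated
 *	sun_path = "\0name"		abstract name, length from addrlen
 *	addr_len == sizeof(short)	unnamed; see unix_autobind() below
 */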
229 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
233 if (len <= sizeof(short) || len > sizeof(*sunaddr))
235 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
237 if (sunaddr->sun_path[0]) {
239 * This may look like an off-by-one error but it is a bit more
240 * subtle. 108 is the longest valid AF_UNIX path for a binding.
241 * sun_path[108] doesn't as such exist. However in kernel space
242 * we are guaranteed that it is a valid memory location in our
243 * kernel address buffer.
245 ((char *)sunaddr)[len] = 0;
246 len = strlen(sunaddr->sun_path)+1+sizeof(short);
250 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
254 static void __unix_remove_socket(struct sock *sk)
256 sk_del_node_init(sk);
259 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
261 WARN_ON(!sk_unhashed(sk));
262 sk_add_node(sk, list);
265 static inline void unix_remove_socket(struct sock *sk)
267 spin_lock(&unix_table_lock);
268 __unix_remove_socket(sk);
269 spin_unlock(&unix_table_lock);
272 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
274 spin_lock(&unix_table_lock);
275 __unix_insert_socket(list, sk);
276 spin_unlock(&unix_table_lock);
279 static struct sock *__unix_find_socket_byname(struct net *net,
280 struct sockaddr_un *sunname,
281 int len, int type, unsigned int hash)
285 sk_for_each(s, &unix_socket_table[hash ^ type]) {
286 struct unix_sock *u = unix_sk(s);
288 if (!net_eq(sock_net(s), net))
291 if (u->addr->len == len &&
292 !memcmp(u->addr->name, sunname, len))
298 static inline struct sock *unix_find_socket_byname(struct net *net,
299 struct sockaddr_un *sunname,
305 spin_lock(&unix_table_lock);
306 s = __unix_find_socket_byname(net, sunname, len, type, hash);
309 spin_unlock(&unix_table_lock);
313 static struct sock *unix_find_socket_byinode(struct inode *i)
317 spin_lock(&unix_table_lock);
319 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
320 struct dentry *dentry = unix_sk(s)->path.dentry;
322 if (dentry && d_backing_inode(dentry) == i) {
329 spin_unlock(&unix_table_lock);
333 /* Support code for asymmetrically connected dgram sockets
335 * If a datagram socket is connected to a socket not itself connected
336 * to the first socket (e.g., /dev/log), clients may only enqueue more
337 * messages if the present receive queue of the server socket is not
338 * "too large". This means there's a second writeability condition
339 * poll and sendmsg need to test. The dgram recv code will do a wake
340 * up on the peer_wait wait queue of a socket upon reception of a
341 * datagram which needs to be propagated to sleeping would-be writers
342 * since these might not have sent anything so far. This can't be
343 * accomplished via poll_wait because the lifetime of the server
344 * socket might be less than that of its clients if these break their
345 * association with it or if the server socket is closed while clients
346 * are still connected to it and there's no way to inform "a polling
347 * implementation" that it should let go of a certain wait queue.
349 * In order to propagate a wake up, a wait_queue_entry_t of the client
350 * socket is enqueued on the peer_wait queue of the server socket
351 * whose wake function does a wake_up on the ordinary client socket
352 * wait queue. This connection is established whenever a write (or
353 * poll for write) hits the flow control condition, and is broken when
354 * the association to the server socket is dissolved or after a wake up
355 * was relayed.
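/* Editor's note: a hedged userspace sketch of the scenario above - a
 * client connected to a busy datagram server (a /dev/log style
 * receiver; srv_addr/srv_len are hypothetical) blocking in poll()
 * until the relay below turns the server's read into the client's
 * POLLOUT wakeup:
 *
 *	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *	connect(fd, (struct sockaddr *)&srv_addr, srv_len);
 *	poll(&pfd, 1, -1);	// sleeps while the server queue is full
 */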
358 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
362 wait_queue_head_t *u_sleep;
364 u = container_of(q, struct unix_sock, peer_wake);
366 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
368 u->peer_wake.private = NULL;
370 /* relaying can only happen while the wq still exists */
371 u_sleep = sk_sleep(&u->sk);
373 wake_up_interruptible_poll(u_sleep, key_to_poll(key));
378 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
380 struct unix_sock *u, *u_other;
384 u_other = unix_sk(other);
386 spin_lock(&u_other->peer_wait.lock);
388 if (!u->peer_wake.private) {
389 u->peer_wake.private = other;
390 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
395 spin_unlock(&u_other->peer_wait.lock);
399 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
402 struct unix_sock *u, *u_other;
405 u_other = unix_sk(other);
406 spin_lock(&u_other->peer_wait.lock);
408 if (u->peer_wake.private == other) {
409 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
410 u->peer_wake.private = NULL;
413 spin_unlock(&u_other->peer_wait.lock);
416 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
419 unix_dgram_peer_wake_disconnect(sk, other);
420 wake_up_interruptible_poll(sk_sleep(sk),
427 * - unix_peer(sk) == other
428 * - association is stable
430 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
434 connected = unix_dgram_peer_wake_connect(sk, other);
436 /* If other is SOCK_DEAD, we want to make sure we signal
437 * POLLOUT, such that a subsequent write() can get a
438 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
439 * to other and it's full, we will hang waiting for POLLOUT.
441 if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD))
445 unix_dgram_peer_wake_disconnect(sk, other);
450 static int unix_writable(const struct sock *sk)
452 return sk->sk_state != TCP_LISTEN &&
453 (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
456 static void unix_write_space(struct sock *sk)
458 struct socket_wq *wq;
461 if (unix_writable(sk)) {
462 wq = rcu_dereference(sk->sk_wq);
463 if (skwq_has_sleeper(wq))
464 wake_up_interruptible_sync_poll(&wq->wait,
465 EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
466 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
471 /* When a dgram socket disconnects (or changes its peer), we clear its receive
472 * queue of packets that arrived from the previous peer. First, this allows us to
473 * do flow control based only on wmem_alloc; second, a sk connected to a peer
474 * may receive messages only from that peer. */
475 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
477 if (!skb_queue_empty(&sk->sk_receive_queue)) {
478 skb_queue_purge(&sk->sk_receive_queue);
479 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
481 /* If one link of a bidirectional dgram pipe is disconnected,
482 * we signal an error. Messages are lost. Do not do this
483 * when the peer was not connected to us.
485 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
486 other->sk_err = ECONNRESET;
487 other->sk_error_report(other);
492 static void unix_sock_destructor(struct sock *sk)
494 struct unix_sock *u = unix_sk(sk);
496 skb_queue_purge(&sk->sk_receive_queue);
498 WARN_ON(refcount_read(&sk->sk_wmem_alloc));
499 WARN_ON(!sk_unhashed(sk));
500 WARN_ON(sk->sk_socket);
501 if (!sock_flag(sk, SOCK_DEAD)) {
502 pr_info("Attempt to release alive unix socket: %p\n", sk);
507 unix_release_addr(u->addr);
509 atomic_long_dec(&unix_nr_socks);
511 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
513 #ifdef UNIX_REFCNT_DEBUG
514 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
515 atomic_long_read(&unix_nr_socks));
519 static void unix_release_sock(struct sock *sk, int embrion)
521 struct unix_sock *u = unix_sk(sk);
527 unix_remove_socket(sk);
532 sk->sk_shutdown = SHUTDOWN_MASK;
534 u->path.dentry = NULL;
536 state = sk->sk_state;
537 sk->sk_state = TCP_CLOSE;
538 unix_state_unlock(sk);
540 wake_up_interruptible_all(&u->peer_wait);
542 skpair = unix_peer(sk);
544 if (skpair != NULL) {
545 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
546 unix_state_lock(skpair);
548 skpair->sk_shutdown = SHUTDOWN_MASK;
549 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
550 skpair->sk_err = ECONNRESET;
551 unix_state_unlock(skpair);
552 skpair->sk_state_change(skpair);
553 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
556 unix_dgram_peer_wake_disconnect(sk, skpair);
557 sock_put(skpair); /* It may now die */
558 unix_peer(sk) = NULL;
561 /* Try to flush out this socket. Throw out buffers at least */
563 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
564 if (state == TCP_LISTEN)
565 unix_release_sock(skb->sk, 1);
566 /* passed fds are erased in the kfree_skb hook */
567 UNIXCB(skb).consumed = skb->len;
576 /* ---- Socket is dead now and most probably destroyed ---- */
579 * Fixme: BSD difference: In BSD all sockets connected to us get
580 * ECONNRESET and we die on the spot. In Linux we behave
581 * like files and pipes do and wait for the last
582 * dereference.
584 * Can't we simply set sock->err?
586 * What does the above comment talk about? --ANK(980817)
589 if (unix_tot_inflight)
590 unix_gc(); /* Garbage collect fds */
593 static void init_peercred(struct sock *sk)
595 put_pid(sk->sk_peer_pid);
596 if (sk->sk_peer_cred)
597 put_cred(sk->sk_peer_cred);
598 sk->sk_peer_pid = get_pid(task_tgid(current));
599 sk->sk_peer_cred = get_current_cred();
602 static void copy_peercred(struct sock *sk, struct sock *peersk)
604 put_pid(sk->sk_peer_pid);
605 if (sk->sk_peer_cred)
606 put_cred(sk->sk_peer_cred);
607 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
608 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
611 static int unix_listen(struct socket *sock, int backlog)
614 struct sock *sk = sock->sk;
615 struct unix_sock *u = unix_sk(sk);
616 struct pid *old_pid = NULL;
619 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
620 goto out; /* Only stream/seqpacket sockets accept */
623 goto out; /* No listens on an unbound socket */
625 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
627 if (backlog > sk->sk_max_ack_backlog)
628 wake_up_interruptible_all(&u->peer_wait);
629 sk->sk_max_ack_backlog = backlog;
630 sk->sk_state = TCP_LISTEN;
631 /* set credentials so connect can copy them */
636 unix_state_unlock(sk);
642 static int unix_release(struct socket *);
643 static int unix_bind(struct socket *, struct sockaddr *, int);
644 static int unix_stream_connect(struct socket *, struct sockaddr *,
645 int addr_len, int flags);
646 static int unix_socketpair(struct socket *, struct socket *);
647 static int unix_accept(struct socket *, struct socket *, int, bool);
648 static int unix_getname(struct socket *, struct sockaddr *, int);
649 static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
650 static __poll_t unix_dgram_poll(struct file *, struct socket *,
652 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
654 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
656 static int unix_shutdown(struct socket *, int);
657 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
658 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
659 static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
660 size_t size, int flags);
661 static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
662 struct pipe_inode_info *, size_t size,
664 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
665 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
666 static int unix_dgram_connect(struct socket *, struct sockaddr *,
668 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
669 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
672 static int unix_set_peek_off(struct sock *sk, int val)
674 struct unix_sock *u = unix_sk(sk);
676 if (mutex_lock_interruptible(&u->iolock))
679 sk->sk_peek_off = val;
680 mutex_unlock(&u->iolock);
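/* Editor's note: this implements the SO_PEEK_OFF socket option; an
 * illustrative userspace use:
 *
 *	int off = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	// peek from offset 0;
 *						// the kernel advances it
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	// peek the next chunk
 */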
685 #ifdef CONFIG_PROC_FS
686 static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
688 struct sock *sk = sock->sk;
692 u = unix_sk(sock->sk);
693 seq_printf(m, "scm_fds: %u\n",
694 atomic_read(&u->scm_stat.nr_fds));
698 #define unix_show_fdinfo NULL
701 static const struct proto_ops unix_stream_ops = {
703 .owner = THIS_MODULE,
704 .release = unix_release,
706 .connect = unix_stream_connect,
707 .socketpair = unix_socketpair,
708 .accept = unix_accept,
709 .getname = unix_getname,
713 .compat_ioctl = unix_compat_ioctl,
715 .listen = unix_listen,
716 .shutdown = unix_shutdown,
717 .setsockopt = sock_no_setsockopt,
718 .getsockopt = sock_no_getsockopt,
719 .sendmsg = unix_stream_sendmsg,
720 .recvmsg = unix_stream_recvmsg,
721 .mmap = sock_no_mmap,
722 .sendpage = unix_stream_sendpage,
723 .splice_read = unix_stream_splice_read,
724 .set_peek_off = unix_set_peek_off,
725 .show_fdinfo = unix_show_fdinfo,
728 static const struct proto_ops unix_dgram_ops = {
730 .owner = THIS_MODULE,
731 .release = unix_release,
733 .connect = unix_dgram_connect,
734 .socketpair = unix_socketpair,
735 .accept = sock_no_accept,
736 .getname = unix_getname,
737 .poll = unix_dgram_poll,
740 .compat_ioctl = unix_compat_ioctl,
742 .listen = sock_no_listen,
743 .shutdown = unix_shutdown,
744 .setsockopt = sock_no_setsockopt,
745 .getsockopt = sock_no_getsockopt,
746 .sendmsg = unix_dgram_sendmsg,
747 .recvmsg = unix_dgram_recvmsg,
748 .mmap = sock_no_mmap,
749 .sendpage = sock_no_sendpage,
750 .set_peek_off = unix_set_peek_off,
751 .show_fdinfo = unix_show_fdinfo,
754 static const struct proto_ops unix_seqpacket_ops = {
756 .owner = THIS_MODULE,
757 .release = unix_release,
759 .connect = unix_stream_connect,
760 .socketpair = unix_socketpair,
761 .accept = unix_accept,
762 .getname = unix_getname,
763 .poll = unix_dgram_poll,
766 .compat_ioctl = unix_compat_ioctl,
768 .listen = unix_listen,
769 .shutdown = unix_shutdown,
770 .setsockopt = sock_no_setsockopt,
771 .getsockopt = sock_no_getsockopt,
772 .sendmsg = unix_seqpacket_sendmsg,
773 .recvmsg = unix_seqpacket_recvmsg,
774 .mmap = sock_no_mmap,
775 .sendpage = sock_no_sendpage,
776 .set_peek_off = unix_set_peek_off,
777 .show_fdinfo = unix_show_fdinfo,
780 static struct proto unix_proto = {
782 .owner = THIS_MODULE,
783 .obj_size = sizeof(struct unix_sock),
786 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
788 struct sock *sk = NULL;
791 atomic_long_inc(&unix_nr_socks);
792 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
795 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
799 sock_init_data(sock, sk);
801 sk->sk_allocation = GFP_KERNEL_ACCOUNT;
802 sk->sk_write_space = unix_write_space;
803 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
804 sk->sk_destruct = unix_sock_destructor;
806 u->path.dentry = NULL;
808 spin_lock_init(&u->lock);
809 atomic_long_set(&u->inflight, 0);
810 INIT_LIST_HEAD(&u->link);
811 mutex_init(&u->iolock); /* single task reading lock */
812 mutex_init(&u->bindlock); /* single task binding lock */
813 init_waitqueue_head(&u->peer_wait);
814 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
815 memset(&u->scm_stat, 0, sizeof(struct scm_stat));
816 unix_insert_socket(unix_sockets_unbound(sk), sk);
819 atomic_long_dec(&unix_nr_socks);
822 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
828 static int unix_create(struct net *net, struct socket *sock, int protocol,
831 if (protocol && protocol != PF_UNIX)
832 return -EPROTONOSUPPORT;
834 sock->state = SS_UNCONNECTED;
836 switch (sock->type) {
838 sock->ops = &unix_stream_ops;
841 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
845 sock->type = SOCK_DGRAM;
848 sock->ops = &unix_dgram_ops;
851 sock->ops = &unix_seqpacket_ops;
854 return -ESOCKTNOSUPPORT;
857 return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
860 static int unix_release(struct socket *sock)
862 struct sock *sk = sock->sk;
867 unix_release_sock(sk, 0);
873 static int unix_autobind(struct socket *sock)
875 struct sock *sk = sock->sk;
876 struct net *net = sock_net(sk);
877 struct unix_sock *u = unix_sk(sk);
878 static u32 ordernum = 1;
879 struct unix_address *addr;
881 unsigned int retries = 0;
883 err = mutex_lock_interruptible(&u->bindlock);
892 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
896 addr->name->sun_family = AF_UNIX;
897 refcount_set(&addr->refcnt, 1);
900 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
901 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
903 spin_lock(&unix_table_lock);
904 ordernum = (ordernum+1)&0xFFFFF;
906 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
908 spin_unlock(&unix_table_lock);
910 * __unix_find_socket_byname() may take a long time if many names
911 * are already in use.
914 /* Give up if all names seem to be in use. */
915 if (retries++ == 0xFFFFF) {
922 addr->hash ^= sk->sk_type;
924 __unix_remove_socket(sk);
925 smp_store_release(&u->addr, addr);
926 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
927 spin_unlock(&unix_table_lock);
930 out: mutex_unlock(&u->bindlock);
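/* Editor's note: autobind is what a bind() that supplies only the
 * address family reaches, e.g. (illustrative):
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *
 *	bind(fd, (struct sockaddr *)&sun, sizeof(sa_family_t));
 *
 * The kernel then picks an unused abstract name - a NUL byte followed
 * by five hex digits, per the sprintf() above.
 */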
934 static struct sock *unix_find_other(struct net *net,
935 struct sockaddr_un *sunname, int len,
936 int type, unsigned int hash, int *error)
942 if (sunname->sun_path[0]) {
944 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
947 inode = d_backing_inode(path.dentry);
948 err = inode_permission(inode, MAY_WRITE);
953 if (!S_ISSOCK(inode->i_mode))
955 u = unix_find_socket_byinode(inode);
959 if (u->sk_type == type)
965 if (u->sk_type != type) {
971 u = unix_find_socket_byname(net, sunname, len, type, hash);
973 struct dentry *dentry;
974 dentry = unix_sk(u)->path.dentry;
976 touch_atime(&unix_sk(u)->path);
989 static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
991 struct dentry *dentry;
995 * Get the parent directory, calculate the hash for the last
996 * component.
998 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
999 err = PTR_ERR(dentry);
1004 * All right, let's create it.
1006 err = security_path_mknod(&path, dentry, mode, 0);
1008 err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
1010 res->mnt = mntget(path.mnt);
1011 res->dentry = dget(dentry);
1014 done_path_create(&path, dentry);
1018 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1020 struct sock *sk = sock->sk;
1021 struct net *net = sock_net(sk);
1022 struct unix_sock *u = unix_sk(sk);
1023 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1024 char *sun_path = sunaddr->sun_path;
1027 struct unix_address *addr;
1028 struct hlist_head *list;
1029 struct path path = { };
1032 if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
1033 sunaddr->sun_family != AF_UNIX)
1036 if (addr_len == sizeof(short)) {
1037 err = unix_autobind(sock);
1041 err = unix_mkname(sunaddr, addr_len, &hash);
1047 umode_t mode = S_IFSOCK |
1048 (SOCK_INODE(sock)->i_mode & ~current_umask());
1049 err = unix_mknod(sun_path, mode, &path);
1057 err = mutex_lock_interruptible(&u->bindlock);
1066 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
1070 memcpy(addr->name, sunaddr, addr_len);
1071 addr->len = addr_len;
1072 addr->hash = hash ^ sk->sk_type;
1073 refcount_set(&addr->refcnt, 1);
1076 addr->hash = UNIX_HASH_SIZE;
1077 hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
1078 spin_lock(&unix_table_lock);
1080 list = &unix_socket_table[hash];
1082 spin_lock(&unix_table_lock);
1084 if (__unix_find_socket_byname(net, sunaddr, addr_len,
1085 sk->sk_type, hash)) {
1086 unix_release_addr(addr);
1090 list = &unix_socket_table[addr->hash];
1094 __unix_remove_socket(sk);
1095 smp_store_release(&u->addr, addr);
1096 __unix_insert_socket(list, sk);
1099 spin_unlock(&unix_table_lock);
1101 mutex_unlock(&u->bindlock);
1109 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1111 if (unlikely(sk1 == sk2) || !sk2) {
1112 unix_state_lock(sk1);
1116 unix_state_lock(sk1);
1117 unix_state_lock_nested(sk2);
1119 unix_state_lock(sk2);
1120 unix_state_lock_nested(sk1);
1124 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1126 if (unlikely(sk1 == sk2) || !sk2) {
1127 unix_state_unlock(sk1);
1130 unix_state_unlock(sk1);
1131 unix_state_unlock(sk2);
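/* Editor's note: in the full source, the branch elided between the two
 * lock pairs above orders the pair by pointer value (roughly:
 * if (sk1 < sk2) lock sk1 then sk2, else lock sk2 then sk1), so that
 * two tasks double-locking the same pair from opposite ends cannot
 * deadlock.
 */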
1134 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1135 int alen, int flags)
1137 struct sock *sk = sock->sk;
1138 struct net *net = sock_net(sk);
1139 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1145 if (alen < offsetofend(struct sockaddr, sa_family))
1148 if (addr->sa_family != AF_UNSPEC) {
1149 err = unix_mkname(sunaddr, alen, &hash);
1154 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1155 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
1159 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1163 unix_state_double_lock(sk, other);
1165 /* Apparently VFS overslept socket death. Retry. */
1166 if (sock_flag(other, SOCK_DEAD)) {
1167 unix_state_double_unlock(sk, other);
1173 if (!unix_may_send(sk, other))
1176 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1182 * 1003.1g breaking connected state with AF_UNSPEC
1185 unix_state_double_lock(sk, other);
1189 * If it was connected, reconnect.
1191 if (unix_peer(sk)) {
1192 struct sock *old_peer = unix_peer(sk);
1193 unix_peer(sk) = other;
1194 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1196 unix_state_double_unlock(sk, other);
1198 if (other != old_peer)
1199 unix_dgram_disconnected(sk, old_peer);
1202 unix_peer(sk) = other;
1203 unix_state_double_unlock(sk, other);
1208 unix_state_double_unlock(sk, other);
1214 static long unix_wait_for_peer(struct sock *other, long timeo)
1215 __releases(&unix_sk(other)->lock)
1217 struct unix_sock *u = unix_sk(other);
1221 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1223 sched = !sock_flag(other, SOCK_DEAD) &&
1224 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1225 unix_recvq_full(other);
1227 unix_state_unlock(other);
1230 timeo = schedule_timeout(timeo);
1232 finish_wait(&u->peer_wait, &wait);
1236 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1237 int addr_len, int flags)
1239 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1240 struct sock *sk = sock->sk;
1241 struct net *net = sock_net(sk);
1242 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1243 struct sock *newsk = NULL;
1244 struct sock *other = NULL;
1245 struct sk_buff *skb = NULL;
1251 err = unix_mkname(sunaddr, addr_len, &hash);
1256 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1257 (err = unix_autobind(sock)) != 0)
1260 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1262 /* First of all allocate resources.
1263 If we make it after the state is locked,
1264 we will have to recheck everything again in any case.
1269 /* create new sock for complete connection */
1270 newsk = unix_create1(sock_net(sk), NULL, 0);
1274 /* Allocate skb for sending to listening sock */
1275 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1280 /* Find listening sock. */
1281 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1285 /* Latch state of peer */
1286 unix_state_lock(other);
1288 /* Apparently VFS overslept socket death. Retry. */
1289 if (sock_flag(other, SOCK_DEAD)) {
1290 unix_state_unlock(other);
1295 err = -ECONNREFUSED;
1296 if (other->sk_state != TCP_LISTEN)
1298 if (other->sk_shutdown & RCV_SHUTDOWN)
1301 if (unix_recvq_full(other)) {
1306 timeo = unix_wait_for_peer(other, timeo);
1308 err = sock_intr_errno(timeo);
1309 if (signal_pending(current))
1317 It is a tricky place. We need to grab our state lock and cannot
1318 drop the lock on the peer. It is dangerous because a deadlock is
1319 possible. The connect-to-self case and simultaneous
1320 attempts to connect are eliminated by checking the socket
1321 state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
1322 check this before attempting to grab the lock.
1324 Well, and we have to recheck the state after the socket is locked.
1330 /* This is ok... continue with connect */
1332 case TCP_ESTABLISHED:
1333 /* Socket is already connected */
1341 unix_state_lock_nested(sk);
1343 if (sk->sk_state != st) {
1344 unix_state_unlock(sk);
1345 unix_state_unlock(other);
1350 err = security_unix_stream_connect(sk, other, newsk);
1352 unix_state_unlock(sk);
1356 /* The way is open! Quickly set all the necessary fields... */
1359 unix_peer(newsk) = sk;
1360 newsk->sk_state = TCP_ESTABLISHED;
1361 newsk->sk_type = sk->sk_type;
1362 init_peercred(newsk);
1363 newu = unix_sk(newsk);
1364 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1365 otheru = unix_sk(other);
1367 /* copy address information from listening to new sock
1369 * The contents of *(otheru->addr) and otheru->path
1370 * are seen fully set up here, since we have found
1371 * otheru in hash under unix_table_lock. Insertion
1372 * into the hash chain we'd found it in had been done
1373 * in an earlier critical area protected by unix_table_lock,
1374 * the same one where we'd set *(otheru->addr) contents,
1375 * as well as otheru->path and otheru->addr itself.
1377 * Using smp_store_release() here to set newu->addr
1378 * is enough to make those stores, as well as stores
1379 * to newu->path visible to anyone who gets newu->addr
1380 * by smp_load_acquire(). IOW, the same guarantees
1381 * as for unix_sock instances bound in unix_bind() or
1382 * in unix_autobind().
1384 if (otheru->path.dentry) {
1385 path_get(&otheru->path);
1386 newu->path = otheru->path;
1388 refcount_inc(&otheru->addr->refcnt);
1389 smp_store_release(&newu->addr, otheru->addr);
1391 /* Set credentials */
1392 copy_peercred(sk, other);
1394 sock->state = SS_CONNECTED;
1395 sk->sk_state = TCP_ESTABLISHED;
1398 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1399 unix_peer(sk) = newsk;
1401 unix_state_unlock(sk);
1403 /* take it and send info to the listening sock */
1404 spin_lock(&other->sk_receive_queue.lock);
1405 __skb_queue_tail(&other->sk_receive_queue, skb);
1406 spin_unlock(&other->sk_receive_queue.lock);
1407 unix_state_unlock(other);
1408 other->sk_data_ready(other);
1414 unix_state_unlock(other);
1419 unix_release_sock(newsk, 0);
1425 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1427 struct sock *ska = socka->sk, *skb = sockb->sk;
1429 /* Join our sockets back to back */
1432 unix_peer(ska) = skb;
1433 unix_peer(skb) = ska;
1437 if (ska->sk_type != SOCK_DGRAM) {
1438 ska->sk_state = TCP_ESTABLISHED;
1439 skb->sk_state = TCP_ESTABLISHED;
1440 socka->state = SS_CONNECTED;
1441 sockb->state = SS_CONNECTED;
1446 static void unix_sock_inherit_flags(const struct socket *old,
1449 if (test_bit(SOCK_PASSCRED, &old->flags))
1450 set_bit(SOCK_PASSCRED, &new->flags);
1451 if (test_bit(SOCK_PASSSEC, &old->flags))
1452 set_bit(SOCK_PASSSEC, &new->flags);
1455 static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1458 struct sock *sk = sock->sk;
1460 struct sk_buff *skb;
1464 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1468 if (sk->sk_state != TCP_LISTEN)
1471 /* If socket state is TCP_LISTEN it cannot change (for now...),
1472 * so that no locks are necessary.
1475 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1477 /* This means receive shutdown. */
1484 skb_free_datagram(sk, skb);
1485 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1487 /* attach accepted sock to socket */
1488 unix_state_lock(tsk);
1489 newsock->state = SS_CONNECTED;
1490 unix_sock_inherit_flags(sock, newsock);
1491 sock_graft(tsk, newsock);
1492 unix_state_unlock(tsk);
1500 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1502 struct sock *sk = sock->sk;
1503 struct unix_address *addr;
1504 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1508 sk = unix_peer_get(sk);
1518 addr = smp_load_acquire(&unix_sk(sk)->addr);
1520 sunaddr->sun_family = AF_UNIX;
1521 sunaddr->sun_path[0] = 0;
1522 err = sizeof(short);
1525 memcpy(sunaddr, addr->name, addr->len);
1532 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1536 UNIXCB(skb).pid = get_pid(scm->pid);
1537 UNIXCB(skb).uid = scm->creds.uid;
1538 UNIXCB(skb).gid = scm->creds.gid;
1539 UNIXCB(skb).fp = NULL;
1540 unix_get_secdata(scm, skb);
1541 if (scm->fp && send_fds)
1542 err = unix_attach_fds(scm, skb);
1544 skb->destructor = unix_destruct_scm;
1548 static bool unix_passcred_enabled(const struct socket *sock,
1549 const struct sock *other)
1551 return test_bit(SOCK_PASSCRED, &sock->flags) ||
1552 !other->sk_socket ||
1553 test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
1557 * Some apps rely on write() giving SCM_CREDENTIALS.
1558 * We include credentials if the source or destination socket
1559 * asserted SOCK_PASSCRED.
1561 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1562 const struct sock *other)
1564 if (UNIXCB(skb).pid)
1566 if (unix_passcred_enabled(sock, other)) {
1567 UNIXCB(skb).pid = get_pid(task_tgid(current));
1568 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1572 static int maybe_init_creds(struct scm_cookie *scm,
1573 struct socket *socket,
1574 const struct sock *other)
1577 struct msghdr msg = { .msg_controllen = 0 };
1579 err = scm_send(socket, &msg, scm, false);
1583 if (unix_passcred_enabled(socket, other)) {
1584 scm->pid = get_pid(task_tgid(current));
1585 current_uid_gid(&scm->creds.uid, &scm->creds.gid);
1590 static bool unix_skb_scm_eq(struct sk_buff *skb,
1591 struct scm_cookie *scm)
1593 const struct unix_skb_parms *u = &UNIXCB(skb);
1595 return u->pid == scm->pid &&
1596 uid_eq(u->uid, scm->creds.uid) &&
1597 gid_eq(u->gid, scm->creds.gid) &&
1598 unix_secdata_eq(scm, skb);
1601 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1603 struct scm_fp_list *fp = UNIXCB(skb).fp;
1604 struct unix_sock *u = unix_sk(sk);
1606 if (unlikely(fp && fp->count))
1607 atomic_add(fp->count, &u->scm_stat.nr_fds);
1610 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1612 struct scm_fp_list *fp = UNIXCB(skb).fp;
1613 struct unix_sock *u = unix_sk(sk);
1615 if (unlikely(fp && fp->count))
1616 atomic_sub(fp->count, &u->scm_stat.nr_fds);
1620 * Send AF_UNIX data.
1623 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1626 struct sock *sk = sock->sk;
1627 struct net *net = sock_net(sk);
1628 struct unix_sock *u = unix_sk(sk);
1629 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1630 struct sock *other = NULL;
1631 int namelen = 0; /* fake GCC */
1634 struct sk_buff *skb;
1636 struct scm_cookie scm;
1641 err = scm_send(sock, msg, &scm, false);
1646 if (msg->msg_flags&MSG_OOB)
1649 if (msg->msg_namelen) {
1650 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1657 other = unix_peer_get(sk);
1662 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1663 && (err = unix_autobind(sock)) != 0)
1667 if (len > sk->sk_sndbuf - 32)
1670 if (len > SKB_MAX_ALLOC) {
1671 data_len = min_t(size_t,
1672 len - SKB_MAX_ALLOC,
1673 MAX_SKB_FRAGS * PAGE_SIZE);
1674 data_len = PAGE_ALIGN(data_len);
1676 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1679 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1680 msg->msg_flags & MSG_DONTWAIT, &err,
1681 PAGE_ALLOC_COSTLY_ORDER);
1685 err = unix_scm_to_skb(&scm, skb, true);
1689 skb_put(skb, len - data_len);
1690 skb->data_len = data_len;
1692 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1696 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1701 if (sunaddr == NULL)
1704 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1710 if (sk_filter(other, skb) < 0) {
1711 /* Toss the packet but do not return any error to the sender */
1717 unix_state_lock(other);
1720 if (!unix_may_send(sk, other))
1723 if (unlikely(sock_flag(other, SOCK_DEAD))) {
1725 * Check with 1003.1g - what should
1728 unix_state_unlock(other);
1732 unix_state_lock(sk);
1735 if (unix_peer(sk) == other) {
1736 unix_peer(sk) = NULL;
1737 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1739 unix_state_unlock(sk);
1741 unix_dgram_disconnected(sk, other);
1743 err = -ECONNREFUSED;
1745 unix_state_unlock(sk);
1755 if (other->sk_shutdown & RCV_SHUTDOWN)
1758 if (sk->sk_type != SOCK_SEQPACKET) {
1759 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1764 /* other == sk && unix_peer(other) != sk if
1765 * - unix_peer(sk) == NULL, destination address bound to sk
1766 * - unix_peer(sk) == sk by time of get but disconnected before lock
1769 unlikely(unix_peer(other) != sk &&
1770 unix_recvq_full_lockless(other))) {
1772 timeo = unix_wait_for_peer(other, timeo);
1774 err = sock_intr_errno(timeo);
1775 if (signal_pending(current))
1782 unix_state_unlock(other);
1783 unix_state_double_lock(sk, other);
1786 if (unix_peer(sk) != other ||
1787 unix_dgram_peer_wake_me(sk, other)) {
1795 goto restart_locked;
1799 if (unlikely(sk_locked))
1800 unix_state_unlock(sk);
1802 if (sock_flag(other, SOCK_RCVTSTAMP))
1803 __net_timestamp(skb);
1804 maybe_add_creds(skb, sock, other);
1805 scm_stat_add(other, skb);
1806 skb_queue_tail(&other->sk_receive_queue, skb);
1807 unix_state_unlock(other);
1808 other->sk_data_ready(other);
1815 unix_state_unlock(sk);
1816 unix_state_unlock(other);
1826 /* We use paged skbs for stream sockets, and limit occupancy to 32768
1827 * bytes, and a minimum of a full page.
1829 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
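/* Editor's note: with 4 KiB pages get_order(32768) is 3, so
 * UNIX_SKB_FRAGS_SZ is 8 pages = 32768 bytes; with page sizes of
 * 32 KiB or more the expression degenerates to a single page,
 * preserving the "minimum of a full page" rule above.
 */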
1831 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1834 struct sock *sk = sock->sk;
1835 struct sock *other = NULL;
1837 struct sk_buff *skb;
1839 struct scm_cookie scm;
1840 bool fds_sent = false;
1844 err = scm_send(sock, msg, &scm, false);
1849 if (msg->msg_flags&MSG_OOB)
1852 if (msg->msg_namelen) {
1853 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1857 other = unix_peer(sk);
1862 if (sk->sk_shutdown & SEND_SHUTDOWN)
1865 while (sent < len) {
1868 /* Keep two messages in the pipe so it schedules better */
1869 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
1871 /* allow fallback to order-0 allocations */
1872 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
1874 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
1876 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1878 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
1879 msg->msg_flags & MSG_DONTWAIT, &err,
1880 get_order(UNIX_SKB_FRAGS_SZ));
1884 /* Only send the fds in the first buffer */
1885 err = unix_scm_to_skb(&scm, skb, !fds_sent);
1892 skb_put(skb, size - data_len);
1893 skb->data_len = data_len;
1895 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
1901 unix_state_lock(other);
1903 if (sock_flag(other, SOCK_DEAD) ||
1904 (other->sk_shutdown & RCV_SHUTDOWN))
1907 maybe_add_creds(skb, sock, other);
1908 scm_stat_add(other, skb);
1909 skb_queue_tail(&other->sk_receive_queue, skb);
1910 unix_state_unlock(other);
1911 other->sk_data_ready(other);
1920 unix_state_unlock(other);
1923 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1924 send_sig(SIGPIPE, current, 0);
1928 return sent ? : err;
1931 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1932 int offset, size_t size, int flags)
1935 bool send_sigpipe = false;
1936 bool init_scm = true;
1937 struct scm_cookie scm;
1938 struct sock *other, *sk = socket->sk;
1939 struct sk_buff *skb, *newskb = NULL, *tail = NULL;
1941 if (flags & MSG_OOB)
1944 other = unix_peer(sk);
1945 if (!other || sk->sk_state != TCP_ESTABLISHED)
1950 unix_state_unlock(other);
1951 mutex_unlock(&unix_sk(other)->iolock);
1952 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
1958 /* we must acquire iolock as we modify already present
1959 * skbs in the sk_receive_queue and mess with skb->len
1961 err = mutex_lock_interruptible(&unix_sk(other)->iolock);
1963 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
1967 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1969 send_sigpipe = true;
1973 unix_state_lock(other);
1975 if (sock_flag(other, SOCK_DEAD) ||
1976 other->sk_shutdown & RCV_SHUTDOWN) {
1978 send_sigpipe = true;
1979 goto err_state_unlock;
1983 err = maybe_init_creds(&scm, socket, other);
1985 goto err_state_unlock;
1989 skb = skb_peek_tail(&other->sk_receive_queue);
1990 if (tail && tail == skb) {
1992 } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
1999 } else if (newskb) {
2000 /* this is the fast path; we don't necessarily need to
2001 * call kfree_skb here, and even with newskb == NULL
2002 * this does no harm
2004 consume_skb(newskb);
2008 if (skb_append_pagefrags(skb, page, offset, size)) {
2014 skb->data_len += size;
2015 skb->truesize += size;
2016 refcount_add(size, &sk->sk_wmem_alloc);
2019 err = unix_scm_to_skb(&scm, skb, false);
2021 goto err_state_unlock;
2022 spin_lock(&other->sk_receive_queue.lock);
2023 __skb_queue_tail(&other->sk_receive_queue, newskb);
2024 spin_unlock(&other->sk_receive_queue.lock);
2027 unix_state_unlock(other);
2028 mutex_unlock(&unix_sk(other)->iolock);
2030 other->sk_data_ready(other);
2035 unix_state_unlock(other);
2037 mutex_unlock(&unix_sk(other)->iolock);
2040 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
2041 send_sig(SIGPIPE, current, 0);
2047 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2051 struct sock *sk = sock->sk;
2053 err = sock_error(sk);
2057 if (sk->sk_state != TCP_ESTABLISHED)
2060 if (msg->msg_namelen)
2061 msg->msg_namelen = 0;
2063 return unix_dgram_sendmsg(sock, msg, len);
2066 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2067 size_t size, int flags)
2069 struct sock *sk = sock->sk;
2071 if (sk->sk_state != TCP_ESTABLISHED)
2074 return unix_dgram_recvmsg(sock, msg, size, flags);
2077 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2079 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2082 msg->msg_namelen = addr->len;
2083 memcpy(msg->msg_name, addr->name, addr->len);
2087 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2088 size_t size, int flags)
2090 struct scm_cookie scm;
2091 struct sock *sk = sock->sk;
2092 struct unix_sock *u = unix_sk(sk);
2093 struct sk_buff *skb, *last;
2102 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2105 mutex_lock(&u->iolock);
2107 skip = sk_peek_offset(sk, flags);
2108 skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2109 &skip, &err, &last);
2111 if (!(flags & MSG_PEEK))
2112 scm_stat_del(sk, skb);
2116 mutex_unlock(&u->iolock);
2121 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2122 &err, &timeo, last));
2124 if (!skb) { /* implies iolock unlocked */
2125 unix_state_lock(sk);
2126 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2127 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2128 (sk->sk_shutdown & RCV_SHUTDOWN))
2130 unix_state_unlock(sk);
2134 if (wq_has_sleeper(&u->peer_wait))
2135 wake_up_interruptible_sync_poll(&u->peer_wait,
2136 EPOLLOUT | EPOLLWRNORM |
2140 unix_copy_addr(msg, skb->sk);
2142 if (size > skb->len - skip)
2143 size = skb->len - skip;
2144 else if (size < skb->len - skip)
2145 msg->msg_flags |= MSG_TRUNC;
2147 err = skb_copy_datagram_msg(skb, skip, msg, size);
2151 if (sock_flag(sk, SOCK_RCVTSTAMP))
2152 __sock_recv_timestamp(msg, sk, skb);
2154 memset(&scm, 0, sizeof(scm));
2156 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2157 unix_set_secdata(&scm, skb);
2159 if (!(flags & MSG_PEEK)) {
2161 unix_detach_fds(&scm, skb);
2163 sk_peek_offset_bwd(sk, skb->len);
2165 /* It is questionable: on PEEK we could:
2166 - do not return fds - good, but too simple 8)
2167 - return fds, and do not return them on read (old strategy,
2168 apparently wrong)
2169 - clone fds (I chose it for now, it is the most universal
2170 solution)
2172 POSIX 1003.1g does not actually define this clearly
2173 at all. POSIX 1003.1g doesn't define a lot of things
2174 clearly, however!
2178 sk_peek_offset_fwd(sk, size);
2181 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2183 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2185 scm_recv(sock, msg, &scm, flags);
2188 skb_free_datagram(sk, skb);
2189 mutex_unlock(&u->iolock);
2195 * Sleep until more data has arrived. But check for races.
2197 static long unix_stream_data_wait(struct sock *sk, long timeo,
2198 struct sk_buff *last, unsigned int last_len,
2201 struct sk_buff *tail;
2204 unix_state_lock(sk);
2207 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2209 tail = skb_peek_tail(&sk->sk_receive_queue);
2211 (tail && tail->len != last_len) ||
2213 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2214 signal_pending(current) ||
2218 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2219 unix_state_unlock(sk);
2221 timeo = freezable_schedule_timeout(timeo);
2223 timeo = schedule_timeout(timeo);
2224 unix_state_lock(sk);
2226 if (sock_flag(sk, SOCK_DEAD))
2229 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2232 finish_wait(sk_sleep(sk), &wait);
2233 unix_state_unlock(sk);
2237 static unsigned int unix_skb_len(const struct sk_buff *skb)
2239 return skb->len - UNIXCB(skb).consumed;
2242 struct unix_stream_read_state {
2243 int (*recv_actor)(struct sk_buff *, int, int,
2244 struct unix_stream_read_state *);
2245 struct socket *socket;
2247 struct pipe_inode_info *pipe;
2250 unsigned int splice_flags;
2253 static int unix_stream_read_generic(struct unix_stream_read_state *state,
2256 struct scm_cookie scm;
2257 struct socket *sock = state->socket;
2258 struct sock *sk = sock->sk;
2259 struct unix_sock *u = unix_sk(sk);
2261 int flags = state->flags;
2262 int noblock = flags & MSG_DONTWAIT;
2263 bool check_creds = false;
2268 size_t size = state->size;
2269 unsigned int last_len;
2271 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2276 if (unlikely(flags & MSG_OOB)) {
2281 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2282 timeo = sock_rcvtimeo(sk, noblock);
2284 memset(&scm, 0, sizeof(scm));
2286 /* Lock the socket to prevent queue disordering
2287 * while we sleep in memcpy_to_msg()
2289 mutex_lock(&u->iolock);
2291 skip = max(sk_peek_offset(sk, flags), 0);
2296 struct sk_buff *skb, *last;
2299 unix_state_lock(sk);
2300 if (sock_flag(sk, SOCK_DEAD)) {
2304 last = skb = skb_peek(&sk->sk_receive_queue);
2305 last_len = last ? last->len : 0;
2308 if (copied >= target)
2312 * POSIX 1003.1g mandates this order.
2315 err = sock_error(sk);
2318 if (sk->sk_shutdown & RCV_SHUTDOWN)
2321 unix_state_unlock(sk);
2327 mutex_unlock(&u->iolock);
2329 timeo = unix_stream_data_wait(sk, timeo, last,
2330 last_len, freezable);
2332 if (signal_pending(current)) {
2333 err = sock_intr_errno(timeo);
2338 mutex_lock(&u->iolock);
2341 unix_state_unlock(sk);
2345 while (skip >= unix_skb_len(skb)) {
2346 skip -= unix_skb_len(skb);
2348 last_len = skb->len;
2349 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2354 unix_state_unlock(sk);
2357 /* Never glue messages from different writers */
2358 if (!unix_skb_scm_eq(skb, &scm))
2360 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2361 /* Copy credentials */
2362 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2363 unix_set_secdata(&scm, skb);
2367 /* Copy address just once */
2368 if (state->msg && state->msg->msg_name) {
2369 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2370 state->msg->msg_name);
2371 unix_copy_addr(state->msg, skb->sk);
2375 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2377 chunk = state->recv_actor(skb, skip, chunk, state);
2378 drop_skb = !unix_skb_len(skb);
2379 /* skb is only safe to use if !drop_skb */
2390 /* the skb was touched by a concurrent reader;
2391 * we should not expect anything from this skb
2392 * anymore and assume it is invalid - we can be
2393 * sure it was dropped from the socket queue
2395 * let's report a short read
2401 /* Mark read part of skb as used */
2402 if (!(flags & MSG_PEEK)) {
2403 UNIXCB(skb).consumed += chunk;
2405 sk_peek_offset_bwd(sk, chunk);
2407 if (UNIXCB(skb).fp) {
2408 scm_stat_del(sk, skb);
2409 unix_detach_fds(&scm, skb);
2412 if (unix_skb_len(skb))
2415 skb_unlink(skb, &sk->sk_receive_queue);
2421 /* It is questionable, see note in unix_dgram_recvmsg.
2424 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2426 sk_peek_offset_fwd(sk, chunk);
2433 last_len = skb->len;
2434 unix_state_lock(sk);
2435 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2438 unix_state_unlock(sk);
2443 mutex_unlock(&u->iolock);
2445 scm_recv(sock, state->msg, &scm, flags);
2449 return copied ? : err;
2452 static int unix_stream_read_actor(struct sk_buff *skb,
2453 int skip, int chunk,
2454 struct unix_stream_read_state *state)
2458 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2460 return ret ?: chunk;
2463 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2464 size_t size, int flags)
2466 struct unix_stream_read_state state = {
2467 .recv_actor = unix_stream_read_actor,
2474 return unix_stream_read_generic(&state, true);
2477 static int unix_stream_splice_actor(struct sk_buff *skb,
2478 int skip, int chunk,
2479 struct unix_stream_read_state *state)
2481 return skb_splice_bits(skb, state->socket->sk,
2482 UNIXCB(skb).consumed + skip,
2483 state->pipe, chunk, state->splice_flags);
2486 static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2487 struct pipe_inode_info *pipe,
2488 size_t size, unsigned int flags)
2490 struct unix_stream_read_state state = {
2491 .recv_actor = unix_stream_splice_actor,
2495 .splice_flags = flags,
2498 if (unlikely(*ppos))
2501 if (sock->file->f_flags & O_NONBLOCK ||
2502 flags & SPLICE_F_NONBLOCK)
2503 state.flags = MSG_DONTWAIT;
2505 return unix_stream_read_generic(&state, false);
2508 static int unix_shutdown(struct socket *sock, int mode)
2510 struct sock *sk = sock->sk;
2513 if (mode < SHUT_RD || mode > SHUT_RDWR)
2516 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2517 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2518 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
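/* Editor's note: in the full source, this table is implemented by the
 * (elided) masked increment
 *
 *	mode = (mode + 1) & (RCV_SHUTDOWN | SEND_SHUTDOWN);
 *
 * i.e. 0 -> 1, 1 -> 2, 2 -> 3, exactly the mapping listed above.
 */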
2522 unix_state_lock(sk);
2523 sk->sk_shutdown |= mode;
2524 other = unix_peer(sk);
2527 unix_state_unlock(sk);
2528 sk->sk_state_change(sk);
2531 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2535 if (mode&RCV_SHUTDOWN)
2536 peer_mode |= SEND_SHUTDOWN;
2537 if (mode&SEND_SHUTDOWN)
2538 peer_mode |= RCV_SHUTDOWN;
2539 unix_state_lock(other);
2540 other->sk_shutdown |= peer_mode;
2541 unix_state_unlock(other);
2542 other->sk_state_change(other);
2543 if (peer_mode == SHUTDOWN_MASK)
2544 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2545 else if (peer_mode & RCV_SHUTDOWN)
2546 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2554 long unix_inq_len(struct sock *sk)
2556 struct sk_buff *skb;
2559 if (sk->sk_state == TCP_LISTEN)
2562 spin_lock(&sk->sk_receive_queue.lock);
2563 if (sk->sk_type == SOCK_STREAM ||
2564 sk->sk_type == SOCK_SEQPACKET) {
2565 skb_queue_walk(&sk->sk_receive_queue, skb)
2566 amount += unix_skb_len(skb);
2568 skb = skb_peek(&sk->sk_receive_queue);
2572 spin_unlock(&sk->sk_receive_queue.lock);
2576 EXPORT_SYMBOL_GPL(unix_inq_len);
2578 long unix_outq_len(struct sock *sk)
2580 return sk_wmem_alloc_get(sk);
2582 EXPORT_SYMBOL_GPL(unix_outq_len);
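/* Editor's note: unix_inq_len()/unix_outq_len() back the SIOCINQ and
 * SIOCOUTQ ioctls dispatched in unix_ioctl() below; an illustrative
 * caller:
 *
 *	int inq, outq;
 *
 *	ioctl(fd, SIOCINQ, &inq);	// unread bytes queued for us
 *	ioctl(fd, SIOCOUTQ, &outq);	// bytes not yet read by the peer
 */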
2584 static int unix_open_file(struct sock *sk)
2590 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2593 if (!smp_load_acquire(&unix_sk(sk)->addr))
2596 path = unix_sk(sk)->path;
2602 fd = get_unused_fd_flags(O_CLOEXEC);
2606 f = dentry_open(&path, O_PATH, current_cred());
2620 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2622 struct sock *sk = sock->sk;
2628 amount = unix_outq_len(sk);
2629 err = put_user(amount, (int __user *)arg);
2632 amount = unix_inq_len(sk);
2636 err = put_user(amount, (int __user *)arg);
2639 err = unix_open_file(sk);
2648 #ifdef CONFIG_COMPAT
2649 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2651 return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
2655 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2657 struct sock *sk = sock->sk;
2660 sock_poll_wait(file, sock, wait);
2663 /* exceptional events? */
2666 if (sk->sk_shutdown == SHUTDOWN_MASK)
2668 if (sk->sk_shutdown & RCV_SHUTDOWN)
2669 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
2672 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
2673 mask |= EPOLLIN | EPOLLRDNORM;
2675 /* Connection-based sockets need to check for termination and startup */
2676 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2677 sk->sk_state == TCP_CLOSE)
2681 * we set writable also when the other side has shut down the
2682 * connection. This prevents stuck sockets.
2684 if (unix_writable(sk))
2685 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
2690 static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
2693 struct sock *sk = sock->sk, *other;
2694 unsigned int writable;
2697 sock_poll_wait(file, sock, wait);
2700 /* exceptional events? */
2701 if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
2703 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
2705 if (sk->sk_shutdown & RCV_SHUTDOWN)
2706 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
2707 if (sk->sk_shutdown == SHUTDOWN_MASK)
2711 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
2712 mask |= EPOLLIN | EPOLLRDNORM;
2714 /* Connection-based sockets need to check for termination and startup */
2715 if (sk->sk_type == SOCK_SEQPACKET) {
2716 if (sk->sk_state == TCP_CLOSE)
2718 /* connection hasn't started yet? */
2719 if (sk->sk_state == TCP_SYN_SENT)
2723 /* No write status requested, avoid expensive OUT tests. */
2724 if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
2727 writable = unix_writable(sk);
2729 unix_state_lock(sk);
2731 other = unix_peer(sk);
2732 if (other && unix_peer(other) != sk &&
2733 unix_recvq_full(other) &&
2734 unix_dgram_peer_wake_me(sk, other))
2737 unix_state_unlock(sk);
2741 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
2743 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2748 #ifdef CONFIG_PROC_FS
2750 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2752 #define get_bucket(x) ((x) >> BUCKET_SPACE)
2753 #define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2754 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
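/* Editor's note: the seq_file position packs (bucket, offset) into one
 * loff_t. Assuming BITS_PER_LONG == 64 and UNIX_HASH_BITS == 8,
 * BUCKET_SPACE is 54, and:
 *
 *	loff_t pos = set_bucket_offset(3, 7);
 *
 *	get_bucket(pos);	// 3
 *	get_offset(pos);	// 7
 */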
2756 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2758 unsigned long offset = get_offset(*pos);
2759 unsigned long bucket = get_bucket(*pos);
2761 unsigned long count = 0;
2763 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2764 if (sock_net(sk) != seq_file_net(seq))
2766 if (++count == offset)
2773 static struct sock *unix_next_socket(struct seq_file *seq,
2777 unsigned long bucket;
2779 while (sk > (struct sock *)SEQ_START_TOKEN) {
2783 if (sock_net(sk) == seq_file_net(seq))
2788 sk = unix_from_bucket(seq, pos);
2793 bucket = get_bucket(*pos) + 1;
2794 *pos = set_bucket_offset(bucket, 1);
2795 } while (bucket < ARRAY_SIZE(unix_socket_table));
2800 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2801 __acquires(unix_table_lock)
2803 spin_lock(&unix_table_lock);
2806 return SEQ_START_TOKEN;
2808 if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2811 return unix_next_socket(seq, NULL, pos);
2814 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2817 return unix_next_socket(seq, v, pos);
2820 static void unix_seq_stop(struct seq_file *seq, void *v)
2821 __releases(unix_table_lock)
2823 spin_unlock(&unix_table_lock);
2826 static int unix_seq_show(struct seq_file *seq, void *v)
2829 if (v == SEQ_START_TOKEN)
2830 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2834 struct unix_sock *u = unix_sk(s);
2837 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2839 refcount_read(&s->sk_refcnt),
2841 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2844 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2845 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2848 if (u->addr) { // under unix_table_lock here
2853 len = u->addr->len - sizeof(short);
2854 if (!UNIX_ABSTRACT(s))
2860 for ( ; i < len; i++)
2861 seq_putc(seq, u->addr->name->sun_path[i] ?:
2864 unix_state_unlock(s);
2865 seq_putc(seq, '\n');
2871 static const struct seq_operations unix_seq_ops = {
2872 .start = unix_seq_start,
2873 .next = unix_seq_next,
2874 .stop = unix_seq_stop,
2875 .show = unix_seq_show,
2879 static const struct net_proto_family unix_family_ops = {
2881 .create = unix_create,
2882 .owner = THIS_MODULE,
2886 static int __net_init unix_net_init(struct net *net)
2888 int error = -ENOMEM;
2890 net->unx.sysctl_max_dgram_qlen = 10;
2891 if (unix_sysctl_register(net))
2894 #ifdef CONFIG_PROC_FS
2895 if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
2896 sizeof(struct seq_net_private))) {
2897 unix_sysctl_unregister(net);
2906 static void __net_exit unix_net_exit(struct net *net)
2908 unix_sysctl_unregister(net);
2909 remove_proc_entry("unix", net->proc_net);
2912 static struct pernet_operations unix_net_ops = {
2913 .init = unix_net_init,
2914 .exit = unix_net_exit,
2917 static int __init af_unix_init(void)
2921 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
2923 rc = proto_register(&unix_proto, 1);
2925 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
2929 sock_register(&unix_family_ops);
2930 register_pernet_subsys(&unix_net_ops);
2935 static void __exit af_unix_exit(void)
2937 sock_unregister(PF_UNIX);
2938 proto_unregister(&unix_proto);
2939 unregister_pernet_subsys(&unix_net_ops);
2942 /* Earlier than device_initcall() so that other drivers invoking
2943 request_module() don't end up in a loop when modprobe tries
2944 to use a UNIX socket. But later than subsys_initcall() because
2945 we depend on stuff initialised there */
2946 fs_initcall(af_unix_init);
2947 module_exit(af_unix_exit);
2949 MODULE_LICENSE("GPL");
2950 MODULE_ALIAS_NETPROTO(PF_UNIX);