// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"
#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
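/* True once the Rx ring, the umem and its fill queue are all set up, i.e.
 * once the socket can be inserted into an XSKMAP and receive redirected
 * packets.
 */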
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return xskq_cons_has_entries(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_cons_peek_addr(umem->fq, addr, umem);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_release_addr(struct xdp_umem *umem)
{
	xskq_cons_release(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_release_addr);
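/* The need_wakeup flags below are reflected in the rings shared with user
 * space. When a flag is set, the application has to explicitly kick the
 * kernel (e.g. via poll() or sendmsg()) before the driver processes that
 * ring again; when it is clear, the driver polls the ring on its own.
 */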
void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
	if (umem->need_wakeup & XDP_WAKEUP_RX)
		return;

	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
		return;

	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);
/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
 * each page. This is only required in copy mode.
 */
static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
			     u32 len, u32 metalen)
{
	void *to_buf = xdp_umem_get_data(umem, addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (xskq_cons_crosses_non_contig_pg(umem, addr, len + metalen)) {
		void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
		u64 page_start = addr & ~(PAGE_SIZE - 1);
		u64 first_len = PAGE_SIZE - (addr - page_start);

		memcpy(to_buf, from_buf, first_len + metalen);
		memcpy(next_pg_addr, from_buf + first_len, len - first_len);

		return;
	}

	memcpy(to_buf, from_buf, len + metalen);
}
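/* Copy-mode Rx: copy the received frame into a umem chunk taken from the
 * fill ring and post a descriptor for it on the Rx ring.
 */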
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u64 offset = xs->umem->headroom;
	u64 addr, memcpy_addr;
	void *from_buf;
	u32 metalen;
	int err;

	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	__xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);

	offset += metalen;
	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (!err) {
		xskq_cons_release(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_prod_reserve_desc(xs->rx, xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}
static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->umem->fq);
	sock_def_readable(&xs->sk);
}
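/* Rx entry point for generic (skb-mode) XDP: the frame is copied into a umem
 * chunk under xs->rx_lock and a descriptor is posted on the Rx ring.
 */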
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	u64 offset = xs->umem->headroom;
	void *buffer;
	u64 addr;
	int err;

	spin_lock_bh(&xs->rx_lock);

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		err = -ENOSPC;
		goto out_drop;
	}

	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);

	addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err)
		goto out_drop;

	xskq_cons_release(xs->umem->fq);
	xskq_prod_submit(xs->rx);
	spin_unlock_bh(&xs->rx_lock);

	xs->sk.sk_data_ready(&xs->sk);
	return 0;

out_drop:
	xs->rx_dropped++;
out_unlock:
	spin_unlock_bh(&xs->rx_lock);
	return err;
}
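/* Redirect path used by the XSKMAP: packets are received into the socket and
 * the socket is queued on a per-cpu flush list, which __xsk_map_flush()
 * drains at the end of the NAPI poll to publish the Rx descriptors and wake
 * readers.
 */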
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_prod_submit_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);
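/* Zero-copy Tx helper for drivers: fetch the next descriptor to send from any
 * socket bound to this umem, first reserving a completion queue slot for it.
 */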
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, umem))
			continue;

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(umem->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->umem->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}
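/* Copy-mode Tx: build an skb per descriptor, copy the frame data out of the
 * umem and send it with dev_direct_xmit(). Completion is signalled from the
 * skb destructor above once the skb has been consumed.
 */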
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->umem)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}
static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_umem *umem;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	umem = xs->umem;

	if (umem->need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, umem->need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}
static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}
static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}
static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}
static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}
static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}
/* Check if umem pages are contiguous.
 * If zero-copy mode, use the DMA address to do the page contiguity check
 * For all other modes we use addr (kernel virtual address)
 * Store the result in the low bits of addr.
 */
static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
{
	struct xdp_umem_page *pgs = umem->pages;
	int i, is_contig;

	for (i = 0; i < umem->npgs - 1; i++) {
		is_contig = (flags & XDP_ZEROCOPY) ?
			(pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
			(pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
		pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
	}
}
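/* Binding ties the socket to a <netdev, queue_id> pair. With XDP_SHARED_UMEM
 * the umem (and its fill/completion rings) is borrowed from an already bound
 * socket; otherwise the socket's own umem is assigned to the device here.
 */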
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;

		xsk_check_page_contiguity(xs->umem, flags);
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}
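/* Legacy layout of struct xdp_umem_reg, kept so that XDP_UMEM_REG still
 * accepts the shorter structure from older applications (see the optlen
 * handling in xsk_setsockopt()).
 */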
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_user(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
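/* Each ring is mapped into user space via mmap() on the socket fd, with the
 * page offset selecting which ring (Rx, Tx, fill or completion) is exposed.
 */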
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}
static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};
static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}
static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};
static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);