// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
        if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
                return;

        pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
        pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
        struct xdp_sock *xs;

        if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
                xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
        }
        rcu_read_unlock();

        pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
        if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
                return;

        pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
        pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
        struct xdp_sock *xs;

        if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
                xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
        }
        rcu_read_unlock();

        pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
        return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

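/* A minimal userspace-side sketch of the need_wakeup protocol driven by the
 * helpers above, in hedged form: "fd" is an AF_XDP socket and "tx_flags"
 * points at the flags word of the mmap()ed Tx ring (both hypothetical
 * names). When the driver has set XDP_RING_NEED_WAKEUP, the application must
 * kick the kernel with a syscall; otherwise it can keep producing
 * descriptors without one.
 */
#if 0	/* example only, not part of the kernel build */
static void kick_tx_if_needed(int fd, const volatile __u32 *tx_flags)
{
        if (*tx_flags & XDP_RING_NEED_WAKEUP)
                sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}
#endif
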
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
                                            u16 queue_id)
{
        if (queue_id < dev->real_num_rx_queues)
                return dev->_rx[queue_id].pool;
        if (queue_id < dev->real_num_tx_queues)
                return dev->_tx[queue_id].pool;

        return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
        if (queue_id < dev->num_rx_queues)
                dev->_rx[queue_id].pool = NULL;
        if (queue_id < dev->num_tx_queues)
                dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
                        u16 queue_id)
{
        if (queue_id >= max_t(unsigned int,
                              dev->real_num_rx_queues,
                              dev->real_num_tx_queues))
                return -EINVAL;

        if (queue_id < dev->real_num_rx_queues)
                dev->_rx[queue_id].pool = pool;
        if (queue_id < dev->real_num_tx_queues)
                dev->_tx[queue_id].pool = pool;

        return 0;
}

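/* A hedged sketch of the driver side of the registration above: on an
 * XDP_SETUP_XSK_POOL request, a zero-copy capable driver can look the pool
 * up by queue id and reconfigure its ring. "my_configure_ring" stands in for
 * hypothetical driver-specific code.
 */
#if 0	/* example only */
static int my_xsk_pool_setup(struct net_device *dev, u16 qid)
{
        struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid);

        if (!pool)
                return -EINVAL;	/* nothing registered on this queue */
        return my_configure_ring(dev, qid, pool);	/* hypothetical */
}
#endif
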
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
                        u32 flags)
{
        u64 addr;
        int err;

        addr = xp_get_handle(xskb);
        err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
        if (err) {
                xs->rx_queue_full++;
                return err;
        }

        xp_release(xskb);
        return 0;
}

static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
        u32 frags = xdp_buff_has_frags(xdp);
        struct xdp_buff_xsk *pos, *tmp;
        struct list_head *xskb_list;
        u32 contd = 0;
        int err;

        if (frags)
                contd = XDP_PKT_CONTD;

        err = __xsk_rcv_zc(xs, xskb, len, contd);
        if (err || likely(!frags))
                goto out;

        xskb_list = &xskb->pool->xskb_list;
        list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
                if (list_is_singular(xskb_list))
                        contd = 0;
                len = pos->xdp.data_end - pos->xdp.data;
                err = __xsk_rcv_zc(xs, pos, len, contd);
                if (err)
                        goto out;
                list_del(&pos->xskb_list_node);
        }

out:
        return err;
}

static void *xsk_copy_xdp_start(struct xdp_buff *from)
{
        if (unlikely(xdp_data_meta_unsupported(from)))
                return from->data;
        else
                return from->data_meta;
}

static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
                        u32 *from_len, skb_frag_t **frag, u32 rem)
{
        u32 copied = 0;

        while (1) {
                u32 copy_len = min_t(u32, *from_len, to_len);

                memcpy(to, *from, copy_len);
                copied += copy_len;
                if (rem == copied)
                        return copied;

                if (*from_len == copy_len) {
                        *from = skb_frag_address(*frag);
                        *from_len = skb_frag_size((*frag)++);
                } else {
                        *from += copy_len;
                        *from_len -= copy_len;
                }
                if (to_len == copy_len)
                        return copied;

                to += copy_len;
                to_len -= copy_len;
        }
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
        void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
        u32 from_len, meta_len, rem, num_desc;
        struct xdp_buff_xsk *xskb;
        struct xdp_buff *xsk_xdp;
        skb_frag_t *frag;

        from_len = xdp->data_end - copy_from;
        meta_len = xdp->data - copy_from;
        rem = len + meta_len;

        if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
                int err;

                xsk_xdp = xsk_buff_alloc(xs->pool);
                if (!xsk_xdp) {
                        xs->rx_dropped++;
                        return -ENOMEM;
                }

                memcpy(xsk_xdp->data - meta_len, copy_from, rem);
                xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
                err = __xsk_rcv_zc(xs, xskb, len, 0);
                if (err) {
                        xsk_buff_free(xsk_xdp);
                        return err;
                }

                return 0;
        }

        num_desc = (len - 1) / frame_size + 1;

        if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
                xs->rx_dropped++;
                return -ENOMEM;
        }
        if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
                xs->rx_queue_full++;
                return -ENOBUFS;
        }

        if (xdp_buff_has_frags(xdp)) {
                struct skb_shared_info *sinfo;

                sinfo = xdp_get_shared_info_from_buff(xdp);
                frag = &sinfo->frags[0];
        }

        do {
                u32 to_len = frame_size + meta_len;
                u32 copied;

                xsk_xdp = xsk_buff_alloc(xs->pool);
                copy_to = xsk_xdp->data - meta_len;

                copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
                rem -= copied;

                xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
                __xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
                meta_len = 0;
        } while (rem);

        return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
        if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
                return false;

        return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
        if (READ_ONCE(xs->state) == XSK_BOUND) {
                /* Matches smp_wmb() in bind(). */
                smp_rmb();
                return true;
        }
        return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        if (!xsk_is_bound(xs))
                return -ENXIO;

        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;

        if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
                xs->rx_dropped++;
                return -ENOSPC;
        }

        sk_mark_napi_id_once_xdp(&xs->sk, xdp);
        return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
        xskq_prod_submit(xs->rx);
        __xskq_cons_release(xs->pool->fq);
        sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        u32 len = xdp_get_buff_len(xdp);
        int err;

        spin_lock_bh(&xs->rx_lock);
        err = xsk_rcv_check(xs, xdp, len);
        if (!err) {
                err = __xsk_rcv(xs, xdp, len);
                xsk_flush(xs);
        }
        spin_unlock_bh(&xs->rx_lock);
        return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        u32 len = xdp_get_buff_len(xdp);
        int err;

        err = xsk_rcv_check(xs, xdp, len);
        if (err)
                return err;

        if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
                len = xdp->data_end - xdp->data;
                return xsk_rcv_zc(xs, xdp, len);
        }

        err = __xsk_rcv(xs, xdp, len);
        if (!err)
                xdp_return_buff(xdp);
        return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
        int err;

        err = xsk_rcv(xs, xdp);
        if (err)
                return err;

        if (!xs->flush_node.prev)
                list_add(&xs->flush_node, flush_list);

        return 0;
}

void __xsk_map_flush(void)
{
        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
        struct xdp_sock *xs, *tmp;

        list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
                xsk_flush(xs);
                __list_del_clearprev(&xs->flush_node);
        }
}

#ifdef CONFIG_DEBUG_NET
bool xsk_map_check_flush(void)
{
        if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
                return false;
        __xsk_map_flush();
        return true;
}
#endif

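/* Packets reach xsk_rcv()/__xsk_map_redirect() via an XDP program that
 * redirects into an XSKMAP; the per-cpu flush above then runs at the end of
 * the NAPI poll. A hedged sketch of such a program ("xsks_map" is a
 * hypothetical map name), built separately with libbpf headers:
 */
#if 0	/* example only; compiled as a BPF program, not with this file */
struct {
        __uint(type, BPF_MAP_TYPE_XSKMAP);
        __uint(max_entries, 64);
        __type(key, __u32);
        __type(value, __u32);
} xsks_map SEC(".maps");

SEC("xdp")
int xsk_redirect(struct xdp_md *ctx)
{
        /* Queue onto the socket bound to the receiving queue, if any. */
        return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
}
#endif
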
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
        xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
        struct xdp_sock *xs;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
                __xskq_cons_release(xs->tx);
                if (xsk_tx_writeable(xs))
                        xs->sk.sk_write_space(&xs->sk);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
        bool budget_exhausted = false;
        struct xdp_sock *xs;

        rcu_read_lock();
again:
        list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
                if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
                        budget_exhausted = true;
                        continue;
                }

                if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
                        if (xskq_has_descs(xs->tx))
                                xskq_cons_release(xs->tx);
                        continue;
                }

                xs->tx_budget_spent++;

                /* This is the backpressure mechanism for the Tx path.
                 * Reserve space in the completion queue and only proceed
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
                if (xskq_prod_reserve_addr(pool->cq, desc->addr))
                        goto out;

                xskq_cons_release(xs->tx);
                rcu_read_unlock();
                return true;
        }

        if (budget_exhausted) {
                list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
                        xs->tx_budget_spent = 0;

                budget_exhausted = false;
                goto again;
        }

out:
        rcu_read_unlock();
        return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

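/* A hedged sketch of the driver-side contract around xsk_tx_peek_desc():
 * peek descriptors, post them to hardware, then report completions with
 * xsk_tx_completed() so the completion-queue slots reserved above become
 * visible to userspace. Real drivers call xsk_tx_completed() from their
 * Tx-completion path; "my_hw_xmit" and the immediate completion here are
 * hypothetical simplifications.
 */
#if 0	/* example only */
static void my_xsk_tx_poll(struct xsk_buff_pool *pool)
{
        struct xdp_desc desc;
        u32 sent = 0;

        while (xsk_tx_peek_desc(pool, &desc)) {
                dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

                my_hw_xmit(dma, desc.len);	/* hypothetical */
                sent++;
        }
        if (sent) {
                xsk_tx_release(pool);		/* lets writers make progress */
                xsk_tx_completed(pool, sent);	/* publish completions */
        }
}
#endif
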
static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
        struct xdp_desc *descs = pool->tx_descs;
        u32 nb_pkts = 0;

        while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
                nb_pkts++;

        xsk_tx_release(pool);
        return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
        struct xdp_sock *xs;

        rcu_read_lock();
        if (!list_is_singular(&pool->xsk_tx_list)) {
                /* Fallback to the non-batched version */
                rcu_read_unlock();
                return xsk_tx_peek_release_fallback(pool, nb_pkts);
        }

        xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
        if (!xs) {
                nb_pkts = 0;
                goto out;
        }

        nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

        /* This is the backpressure mechanism for the Tx path. Try to
         * reserve space in the completion queue for all packets, but
         * if there are fewer slots available, just process that many
         * packets. This avoids having to implement any buffering in
         * the Tx path.
         */
        nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
        if (!nb_pkts)
                goto out;

        nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
        if (!nb_pkts) {
                xs->tx->queue_empty_descs++;
                goto out;
        }

        __xskq_cons_release(xs->tx);
        xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
        xs->sk.sk_write_space(&xs->sk);

out:
        rcu_read_unlock();
        return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

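/* The batched variant amortizes ring accesses when the pool serves a single
 * socket. A hedged sketch of a NAPI-budgeted poll using it; the hardware
 * post loop is illustrative only.
 */
#if 0	/* example only */
static u32 my_xsk_tx_poll_batch(struct xsk_buff_pool *pool, u32 budget)
{
        u32 i, nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);

        for (i = 0; i < nb_pkts; i++)
                my_hw_xmit(xsk_buff_raw_get_dma(pool, pool->tx_descs[i].addr),
                           pool->tx_descs[i].len);	/* hypothetical */
        return nb_pkts;
}
#endif
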
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
        struct net_device *dev = xs->dev;

        return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&xs->pool->cq_lock, flags);
        ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
        spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

        return ret;
}

static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
{
        unsigned long flags;

        spin_lock_irqsave(&xs->pool->cq_lock, flags);
        xskq_prod_submit_n(xs->pool->cq, n);
        spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}

static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
{
        unsigned long flags;

        spin_lock_irqsave(&xs->pool->cq_lock, flags);
        xskq_prod_cancel_n(xs->pool->cq, n);
        spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}

static u32 xsk_get_num_desc(struct sk_buff *skb)
{
        return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
        struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;

        if (compl->tx_timestamp) {
                /* sw completion timestamp, not a real one */
                *compl->tx_timestamp = ktime_get_tai_fast_ns();
        }

        xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
        sock_wfree(skb);
}

static void xsk_set_destructor_arg(struct sk_buff *skb)
{
        long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;

        skb_shinfo(skb)->destructor_arg = (void *)num;
}

static void xsk_consume_skb(struct sk_buff *skb)
{
        struct xdp_sock *xs = xdp_sk(skb->sk);

        skb->destructor = sock_wfree;
        xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
        /* Free skb without triggering the perf drop trace */
        consume_skb(skb);
        xs->skb = NULL;
}

static void xsk_drop_skb(struct sk_buff *skb)
{
        xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
        xsk_consume_skb(skb);
}

static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
                                              struct xdp_desc *desc)
{
        struct xsk_buff_pool *pool = xs->pool;
        u32 hr, len, ts, offset, copy, copied;
        struct sk_buff *skb = xs->skb;
        struct page *page;
        void *buffer;
        int err, i;
        u64 addr;

        if (!skb) {
                hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

                skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
                if (unlikely(!skb))
                        return ERR_PTR(err);

                skb_reserve(skb, hr);
        }

        addr = desc->addr;
        len = desc->len;
        ts = pool->unaligned ? len : pool->chunk_size;

        buffer = xsk_buff_raw_get_data(pool, addr);
        offset = offset_in_page(buffer);
        addr = buffer - pool->addrs;

        for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
                if (unlikely(i >= MAX_SKB_FRAGS))
                        return ERR_PTR(-EOVERFLOW);

                page = pool->umem->pgs[addr >> PAGE_SHIFT];
                get_page(page);

                copy = min_t(u32, PAGE_SIZE - offset, len - copied);
                skb_fill_page_desc(skb, i, page, offset, copy);

                copied += copy;
                addr += copy;
                offset = 0;
        }

        skb->len += len;
        skb->data_len += len;
        skb->truesize += ts;

        refcount_add(ts, &xs->sk.sk_wmem_alloc);

        return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
                                     struct xdp_desc *desc)
{
        struct xsk_tx_metadata *meta = NULL;
        struct net_device *dev = xs->dev;
        struct sk_buff *skb = xs->skb;
        bool first_frag = false;
        int err;

        if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
                skb = xsk_build_skb_zerocopy(xs, desc);
                if (IS_ERR(skb)) {
                        err = PTR_ERR(skb);
                        goto free_err;
                }
        } else {
                u32 hr, tr, len;
                void *buffer;

                buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
                len = desc->len;

                if (!skb) {
                        first_frag = true;
                        hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
                        tr = dev->needed_tailroom;
                        skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
                        if (unlikely(!skb))
                                goto free_err;

                        skb_reserve(skb, hr);
                        skb_put(skb, len);

                        err = skb_store_bits(skb, 0, buffer, len);
                        if (unlikely(err)) {
                                kfree_skb(skb);
                                goto free_err;
                        }
                } else {
                        int nr_frags = skb_shinfo(skb)->nr_frags;
                        struct page *page;
                        u8 *vaddr;

                        if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
                                err = -EOVERFLOW;
                                goto free_err;
                        }

                        page = alloc_page(xs->sk.sk_allocation);
                        if (unlikely(!page)) {
                                err = -EAGAIN;
                                goto free_err;
                        }

                        vaddr = kmap_local_page(page);
                        memcpy(vaddr, buffer, len);
                        kunmap_local(vaddr);

                        skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
                }

                if (first_frag && desc->options & XDP_TX_METADATA) {
                        if (unlikely(xs->pool->tx_metadata_len == 0)) {
                                err = -EINVAL;
                                goto free_err;
                        }

                        meta = buffer - xs->pool->tx_metadata_len;
                        if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
                                err = -EINVAL;
                                goto free_err;
                        }

                        if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
                                if (unlikely(meta->request.csum_start +
                                             meta->request.csum_offset +
                                             sizeof(__sum16) > len)) {
                                        err = -EINVAL;
                                        goto free_err;
                                }

                                skb->csum_start = hr + meta->request.csum_start;
                                skb->csum_offset = meta->request.csum_offset;
                                skb->ip_summed = CHECKSUM_PARTIAL;

                                if (unlikely(xs->pool->tx_sw_csum)) {
                                        err = skb_checksum_help(skb);
                                        if (err)
                                                goto free_err;
                                }
                        }
                }
        }

        skb->dev = dev;
        skb->priority = READ_ONCE(xs->sk.sk_priority);
        skb->mark = READ_ONCE(xs->sk.sk_mark);
        skb->destructor = xsk_destruct_skb;
        xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
        xsk_set_destructor_arg(skb);

        return skb;

free_err:
        if (err == -EOVERFLOW) {
                /* Drop the packet */
                xsk_set_destructor_arg(xs->skb);
                xsk_drop_skb(xs->skb);
                xskq_cons_release(xs->tx);
        } else {
                /* Let application retry */
                xsk_cq_cancel_locked(xs, 1);
        }

        return ERR_PTR(err);
}

static int __xsk_generic_xmit(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);
        u32 max_batch = TX_BATCH_SIZE;
        bool sent_frame = false;
        struct xdp_desc desc;
        struct sk_buff *skb;
        int err = 0;

        mutex_lock(&xs->mutex);

        /* Since we dropped the RCU read lock, the socket state might have changed. */
        if (unlikely(!xsk_is_bound(xs))) {
                err = -ENXIO;
                goto out;
        }

        if (xs->queue_id >= xs->dev->real_num_tx_queues)
                goto out;

        while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
                if (max_batch-- == 0) {
                        err = -EAGAIN;
                        goto out;
                }

                /* This is the backpressure mechanism for the Tx path.
                 * Reserve space in the completion queue and only proceed
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
                if (xsk_cq_reserve_addr_locked(xs, desc.addr))
                        goto out;

                skb = xsk_build_skb(xs, &desc);
                if (IS_ERR(skb)) {
                        err = PTR_ERR(skb);
                        if (err != -EOVERFLOW)
                                goto out;
                        err = 0;
                        continue;
                }

                xskq_cons_release(xs->tx);

                if (xp_mb_desc(&desc)) {
                        xs->skb = skb;
                        continue;
                }

                err = __dev_direct_xmit(skb, xs->queue_id);
                if (err == NETDEV_TX_BUSY) {
                        /* Tell user-space to retry the send */
                        xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
                        xsk_consume_skb(skb);
                        err = -EAGAIN;
                        goto out;
                }

                /* Ignore NET_XMIT_CN as packet might have been sent */
                if (err == NET_XMIT_DROP) {
                        /* SKB completed but not sent */
                        err = -EBUSY;
                        xs->skb = NULL;
                        goto out;
                }

                xs->skb = NULL;
                sent_frame = true;
        }

        if (xskq_has_descs(xs->tx)) {
                if (xs->skb)
                        xsk_drop_skb(xs->skb);
                xskq_cons_release(xs->tx);
        }

out:
        if (sent_frame)
                if (xsk_tx_writeable(xs))
                        sk->sk_write_space(sk);

        mutex_unlock(&xs->mutex);
        return err;
}

static int xsk_generic_xmit(struct sock *sk)
{
        int ret;

        /* Drop the RCU lock since the SKB path might sleep. */
        rcu_read_unlock();
        ret = __xsk_generic_xmit(sk);
        /* Reacquire RCU lock before going into common code. */
        rcu_read_lock();

        return ret;
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        /* Prefer busy-polling, skip the wakeup. */
        return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
               READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
        return false;
#endif
}

static int xsk_check_common(struct xdp_sock *xs)
{
        if (unlikely(!xsk_is_bound(xs)))
                return -ENXIO;
        if (unlikely(!(xs->dev->flags & IFF_UP)))
                return -ENETDOWN;

        return 0;
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
        bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct xsk_buff_pool *pool;
        int err;

        err = xsk_check_common(xs);
        if (err)
                return err;
        if (unlikely(need_wait))
                return -EOPNOTSUPP;
        if (unlikely(!xs->tx))
                return -ENOBUFS;

        if (sk_can_busy_loop(sk)) {
                if (xs->zc)
                        __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
                sk_busy_loop(sk, 1); /* only support non-blocking sockets */
        }

        if (xs->zc && xsk_no_wakeup(sk))
                return 0;

        pool = xs->pool;
        if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
                if (xs->zc)
                        return xsk_wakeup(xs, XDP_WAKEUP_TX);
                return xsk_generic_xmit(sk);
        }
        return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
        int ret;

        rcu_read_lock();
        ret = __xsk_sendmsg(sock, m, total_len);
        rcu_read_unlock();

        return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
        bool need_wait = !(flags & MSG_DONTWAIT);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int err;

        err = xsk_check_common(xs);
        if (err)
                return err;
        if (unlikely(!xs->rx))
                return -ENOBUFS;
        if (unlikely(need_wait))
                return -EOPNOTSUPP;

        if (sk_can_busy_loop(sk))
                sk_busy_loop(sk, 1); /* only support non-blocking sockets */

        if (xsk_no_wakeup(sk))
                return 0;

        if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
                return xsk_wakeup(xs, XDP_WAKEUP_RX);
        return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
        int ret;

        rcu_read_lock();
        ret = __xsk_recvmsg(sock, m, len, flags);
        rcu_read_unlock();

        return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
                         struct poll_table_struct *wait)
{
        __poll_t mask = 0;
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct xsk_buff_pool *pool;

        sock_poll_wait(file, sock, wait);

        rcu_read_lock();
        if (xsk_check_common(xs))
                goto out;

        pool = xs->pool;

        if (pool->cached_need_wakeup) {
                if (xs->zc)
                        xsk_wakeup(xs, pool->cached_need_wakeup);
                else if (xs->tx)
                        /* Poll needs to drive Tx also in copy mode */
                        xsk_generic_xmit(sk);
        }

        if (xs->rx && !xskq_prod_is_empty(xs->rx))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (xs->tx && xsk_tx_writeable(xs))
                mask |= EPOLLOUT | EPOLLWRNORM;
out:
        rcu_read_unlock();
        return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
                          bool umem_queue)
{
        struct xsk_queue *q;

        if (entries == 0 || *queue || !is_power_of_2(entries))
                return -EINVAL;

        q = xskq_create(entries, umem_queue);
        if (!q)
                return -ENOMEM;

        /* Make sure queue is ready before it can be seen by others */
        smp_wmb();
        WRITE_ONCE(*queue, q);
        return 0;
}

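/* Userspace sizes the rings before bind(); as validated above, each size
 * must be a non-zero power of two and can only be set while the socket is
 * still in XSK_READY. A hedged sketch ("fd" is an AF_XDP socket):
 */
#if 0	/* example only */
        int fd = socket(AF_XDP, SOCK_RAW, 0);
        int entries = 2048;	/* must be a power of 2 */

        setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
        setsockopt(fd, SOL_XDP, XDP_TX_RING, &entries, sizeof(entries));
        setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
        setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries,
                   sizeof(entries));
#endif
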
static void xsk_unbind_dev(struct xdp_sock *xs)
{
        struct net_device *dev = xs->dev;

        if (xs->state != XSK_BOUND)
                return;
        WRITE_ONCE(xs->state, XSK_UNBOUND);

        /* Wait for driver to stop using the xdp socket. */
        xp_del_xsk(xs->pool, xs);
        synchronize_net();
        dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
                                              struct xdp_sock __rcu ***map_entry)
{
        struct xsk_map *map = NULL;
        struct xsk_map_node *node;

        *map_entry = NULL;

        spin_lock_bh(&xs->map_list_lock);
        node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
                                        node);
        if (node) {
                bpf_map_inc(&node->map->map);
                map = node->map;
                *map_entry = node->map_entry;
        }
        spin_unlock_bh(&xs->map_list_lock);
        return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
        /* This function removes the current XDP socket from all the
         * maps it resides in. We need to take extra care here, due to
         * the two locks involved. Each map has a lock synchronizing
         * updates to the entries, and each socket has a lock that
         * synchronizes access to the list of maps (map_list). For
         * deadlock avoidance the locks need to be taken in the order
         * "map lock"->"socket map list lock". We start off by
         * accessing the socket map list, and take a reference to the
         * map to guarantee existence between the
         * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
         * calls. Then we ask the map to remove the socket, which
         * tries to remove the socket from the map. Note that there
         * might be updates to the map between
         * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
         */
        struct xdp_sock __rcu **map_entry = NULL;
        struct xsk_map *map;

        while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
                xsk_map_try_sock_delete(map, xs, map_entry);
                bpf_map_put(&map->map);
        }
}

static int xsk_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net *net;

        if (!sk)
                return 0;

        net = sock_net(sk);

        if (xs->skb)
                xsk_drop_skb(xs->skb);

        mutex_lock(&net->xdp.lock);
        sk_del_node_init_rcu(sk);
        mutex_unlock(&net->xdp.lock);

        sock_prot_inuse_add(net, sk->sk_prot, -1);

        xsk_delete_from_maps(xs);
        mutex_lock(&xs->mutex);
        xsk_unbind_dev(xs);
        mutex_unlock(&xs->mutex);

        xskq_destroy(xs->rx);
        xskq_destroy(xs->tx);
        xskq_destroy(xs->fq_tmp);
        xskq_destroy(xs->cq_tmp);

        sock_orphan(sk);
        sock->sk = NULL;

        sock_put(sk);

        return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
        struct socket *sock;
        int err;

        sock = sockfd_lookup(fd, &err);
        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        if (sock->sk->sk_family != PF_XDP) {
                sockfd_put(sock);
                return ERR_PTR(-ENOPROTOOPT);
        }

        return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
        return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev;
        int bound_dev_if;
        u32 flags, qid;
        int err = 0;

        if (addr_len < sizeof(struct sockaddr_xdp))
                return -EINVAL;
        if (sxdp->sxdp_family != AF_XDP)
                return -EINVAL;

        flags = sxdp->sxdp_flags;
        if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
                      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
                return -EINVAL;

        bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
        if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
                return -EINVAL;

        rtnl_lock();
        mutex_lock(&xs->mutex);
        if (xs->state != XSK_READY) {
                err = -EBUSY;
                goto out_release;
        }

        dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
        if (!dev) {
                err = -ENODEV;
                goto out_release;
        }

        if (!xs->rx && !xs->tx) {
                err = -EINVAL;
                goto out_unlock;
        }

        qid = sxdp->sxdp_queue_id;

        if (flags & XDP_SHARED_UMEM) {
                struct xdp_sock *umem_xs;
                struct socket *sock;

                if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
                    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
                        /* Cannot specify flags for shared sockets. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                if (xs->umem) {
                        /* We have already our own. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
                if (IS_ERR(sock)) {
                        err = PTR_ERR(sock);
                        goto out_unlock;
                }

                umem_xs = xdp_sk(sock->sk);
                if (!xsk_is_bound(umem_xs)) {
                        err = -EBADF;
                        sockfd_put(sock);
                        goto out_unlock;
                }

                if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
                        /* Share the umem with another socket on another qid
                         * and/or device.
                         */
                        xs->pool = xp_create_and_assign_umem(xs,
                                                             umem_xs->umem);
                        if (!xs->pool) {
                                err = -ENOMEM;
                                sockfd_put(sock);
                                goto out_unlock;
                        }

                        err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
                                                   qid);
                        if (err) {
                                xp_destroy(xs->pool);
                                xs->pool = NULL;
                                sockfd_put(sock);
                                goto out_unlock;
                        }
                } else {
                        /* Share the buffer pool with the other socket. */
                        if (xs->fq_tmp || xs->cq_tmp) {
                                /* Do not allow setting your own fq or cq. */
                                err = -EINVAL;
                                sockfd_put(sock);
                                goto out_unlock;
                        }

                        xp_get_pool(umem_xs->pool);
                        xs->pool = umem_xs->pool;

                        /* If underlying shared umem was created without Tx
                         * ring, allocate Tx descs array that Tx batching API
                         * utilizes.
                         */
                        if (xs->tx && !xs->pool->tx_descs) {
                                err = xp_alloc_tx_descs(xs->pool, xs);
                                if (err) {
                                        xp_put_pool(xs->pool);
                                        xs->pool = NULL;
                                        sockfd_put(sock);
                                        goto out_unlock;
                                }
                        }
                }

                xdp_get_umem(umem_xs->umem);
                WRITE_ONCE(xs->umem, umem_xs->umem);
                sockfd_put(sock);
        } else if (!xs->umem || !xsk_validate_queues(xs)) {
                err = -EINVAL;
                goto out_unlock;
        } else {
                /* This xsk has its own umem. */
                xs->pool = xp_create_and_assign_umem(xs, xs->umem);
                if (!xs->pool) {
                        err = -ENOMEM;
                        goto out_unlock;
                }

                err = xp_assign_dev(xs->pool, dev, qid, flags);
                if (err) {
                        xp_destroy(xs->pool);
                        xs->pool = NULL;
                        goto out_unlock;
                }
        }

        /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
        xs->fq_tmp = NULL;
        xs->cq_tmp = NULL;

        xs->dev = dev;
        xs->zc = xs->umem->zc;
        xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
        xs->queue_id = qid;
        xp_add_xsk(xs->pool, xs);

out_unlock:
        if (err) {
                dev_put(dev);
        } else {
                /* Matches smp_rmb() in bind() for shared umem
                 * sockets, and xsk_is_bound().
                 */
                smp_wmb();
                WRITE_ONCE(xs->state, XSK_BOUND);
        }
out_release:
        mutex_unlock(&xs->mutex);
        rtnl_unlock();
        return err;
}

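/* A hedged userspace sketch of the contract implemented by xsk_bind() above:
 * fill struct sockaddr_xdp with the target ifindex and queue id, then
 * bind(). "ifindex" is assumed to come from if_nametoindex(); rings and the
 * UMEM must have been configured first.
 */
#if 0	/* example only */
        struct sockaddr_xdp sxdp = {
                .sxdp_family = AF_XDP,
                .sxdp_ifindex = ifindex,		/* hypothetical */
                .sxdp_queue_id = 0,
                .sxdp_flags = XDP_USE_NEED_WAKEUP,	/* or XDP_COPY, XDP_ZEROCOPY, ... */
        };

        if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0)
                /* rings/UMEM missing, device gone, or queue id out of range */
                perror("bind");
#endif
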
struct xdp_umem_reg_v1 {
        __u64 addr; /* Start of packet data area */
        __u64 len; /* Length of packet data area */
        __u32 chunk_size;
        __u32 headroom;
};

struct xdp_umem_reg_v2 {
        __u64 addr; /* Start of packet data area */
        __u64 len; /* Length of packet data area */
        __u32 chunk_size;
        __u32 headroom;
        __u32 flags;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
                          sockptr_t optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int err;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        switch (optname) {
        case XDP_RX_RING:
        case XDP_TX_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (optlen < sizeof(entries))
                        return -EINVAL;
                if (copy_from_sockptr(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }
                q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
                err = xsk_init_queue(entries, q, false);
                if (!err && optname == XDP_TX_RING)
                        /* Tx needs to be explicitly woken up the first time */
                        xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
                mutex_unlock(&xs->mutex);
                return err;
        }
        case XDP_UMEM_REG:
        {
                size_t mr_size = sizeof(struct xdp_umem_reg);
                struct xdp_umem_reg mr = {};
                struct xdp_umem *umem;

                if (optlen < sizeof(struct xdp_umem_reg_v1))
                        return -EINVAL;
                else if (optlen < sizeof(struct xdp_umem_reg_v2))
                        mr_size = sizeof(struct xdp_umem_reg_v1);
                else if (optlen < sizeof(mr))
                        mr_size = sizeof(struct xdp_umem_reg_v2);

                if (copy_from_sockptr(&mr, optval, mr_size))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY || xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }

                umem = xdp_umem_create(&mr);
                if (IS_ERR(umem)) {
                        mutex_unlock(&xs->mutex);
                        return PTR_ERR(umem);
                }

                /* Make sure umem is ready before it can be seen by others */
                smp_wmb();
                WRITE_ONCE(xs->umem, umem);
                mutex_unlock(&xs->mutex);
                return 0;
        }
        case XDP_UMEM_FILL_RING:
        case XDP_UMEM_COMPLETION_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (copy_from_sockptr(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }

                q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
                        &xs->cq_tmp;
                err = xsk_init_queue(entries, q, true);
                mutex_unlock(&xs->mutex);
                return err;
        }
        default:
                break;
        }

        return -ENOPROTOOPT;
}

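/* A hedged userspace sketch of XDP_UMEM_REG as handled above. The
 * optlen-based versioning means binaries built against the older, smaller
 * xdp_umem_reg layouts keep working. "umem_area" is a hypothetical
 * page-aligned allocation.
 */
#if 0	/* example only */
        struct xdp_umem_reg mr = {
                .addr = (__u64)(uintptr_t)umem_area,	/* hypothetical */
                .len = 2048 * 4096,			/* 2048 chunks */
                .chunk_size = 4096,
                .headroom = 0,
        };

        setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
#endif
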
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
        ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
        ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
        ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
        ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
        ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
        ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
        __u64 rx_dropped;
        __u64 rx_invalid_descs;
        __u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int len;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;
        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case XDP_STATISTICS:
        {
                struct xdp_statistics stats = {};
                bool extra_stats = true;
                size_t stats_size;

                if (len < sizeof(struct xdp_statistics_v1)) {
                        return -EINVAL;
                } else if (len < sizeof(stats)) {
                        extra_stats = false;
                        stats_size = sizeof(struct xdp_statistics_v1);
                } else {
                        stats_size = sizeof(stats);
                }

                mutex_lock(&xs->mutex);
                stats.rx_dropped = xs->rx_dropped;
                if (extra_stats) {
                        stats.rx_ring_full = xs->rx_queue_full;
                        stats.rx_fill_ring_empty_descs =
                                xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
                        stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
                } else {
                        stats.rx_dropped += xs->rx_queue_full;
                }
                stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
                stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
                mutex_unlock(&xs->mutex);

                if (copy_to_user(optval, &stats, stats_size))
                        return -EFAULT;
                if (put_user(stats_size, optlen))
                        return -EFAULT;

                return 0;
        }
        case XDP_MMAP_OFFSETS:
        {
                struct xdp_mmap_offsets off;
                struct xdp_mmap_offsets_v1 off_v1;
                bool flags_supported = true;
                void *to_copy;

                if (len < sizeof(off_v1))
                        return -EINVAL;
                else if (len < sizeof(off))
                        flags_supported = false;

                if (flags_supported) {
                        /* xdp_ring_offset is identical to xdp_ring_offset_v1
                         * except for the flags field added to the end.
                         */
                        xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
                                               &off.rx);
                        xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
                                               &off.tx);
                        xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
                                               &off.fr);
                        xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
                                               &off.cr);
                        off.rx.flags = offsetof(struct xdp_rxtx_ring,
                                                ptrs.flags);
                        off.tx.flags = offsetof(struct xdp_rxtx_ring,
                                                ptrs.flags);
                        off.fr.flags = offsetof(struct xdp_umem_ring,
                                                ptrs.flags);
                        off.cr.flags = offsetof(struct xdp_umem_ring,
                                                ptrs.flags);

                        len = sizeof(off);
                        to_copy = &off;
                } else {
                        xsk_enter_rxtx_offsets(&off_v1.rx);
                        xsk_enter_rxtx_offsets(&off_v1.tx);
                        xsk_enter_umem_offsets(&off_v1.fr);
                        xsk_enter_umem_offsets(&off_v1.cr);

                        len = sizeof(off_v1);
                        to_copy = &off_v1;
                }

                if (copy_to_user(optval, to_copy, len))
                        return -EFAULT;
                if (put_user(len, optlen))
                        return -EFAULT;

                return 0;
        }
        case XDP_OPTIONS:
        {
                struct xdp_options opts = {};

                if (len < sizeof(opts))
                        return -EINVAL;

                mutex_lock(&xs->mutex);
                if (xs->zc)
                        opts.flags |= XDP_OPTIONS_ZEROCOPY;
                mutex_unlock(&xs->mutex);

                len = sizeof(opts);
                if (copy_to_user(optval, &opts, len))
                        return -EFAULT;
                if (put_user(len, optlen))
                        return -EFAULT;

                return 0;
        }
        default:
                break;
        }

        return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
                    struct vm_area_struct *vma)
{
        loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;
        struct xdp_sock *xs = xdp_sk(sock->sk);
        int state = READ_ONCE(xs->state);
        struct xsk_queue *q = NULL;

        if (state != XSK_READY && state != XSK_BOUND)
                return -EBUSY;

        if (offset == XDP_PGOFF_RX_RING) {
                q = READ_ONCE(xs->rx);
        } else if (offset == XDP_PGOFF_TX_RING) {
                q = READ_ONCE(xs->tx);
        } else {
                /* Matches the smp_wmb() in XDP_UMEM_REG */
                smp_rmb();
                if (offset == XDP_UMEM_PGOFF_FILL_RING)
                        q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
                                                 READ_ONCE(xs->pool->fq);
                else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
                        q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
                                                 READ_ONCE(xs->pool->cq);
        }

        if (!q)
                return -EINVAL;

        /* Matches the smp_wmb() in xsk_init_queue */
        smp_rmb();
        if (size > q->ring_vmalloc_size)
                return -EINVAL;

        return remap_vmalloc_range(vma, q->ring, 0);
}

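/* A hedged userspace counterpart to xsk_mmap(): query the ring layout with
 * getsockopt(XDP_MMAP_OFFSETS), then map each ring at its well-known page
 * offset. Error handling is elided; "fd" and "entries" are from the earlier
 * sketches.
 */
#if 0	/* example only */
        struct xdp_mmap_offsets off;
        socklen_t optlen = sizeof(off);
        void *rx_map;

        getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
        rx_map = mmap(NULL, off.rx.desc + entries * sizeof(struct xdp_desc),
                      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                      fd, XDP_PGOFF_RX_RING);
        /* The producer/consumer indices then live at rx_map + off.rx.producer
         * and rx_map + off.rx.consumer, the descriptor array at
         * rx_map + off.rx.desc.
         */
#endif
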
static int xsk_notifier(struct notifier_block *this,
                        unsigned long msg, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct sock *sk;

        switch (msg) {
        case NETDEV_UNREGISTER:
                mutex_lock(&net->xdp.lock);
                sk_for_each(sk, &net->xdp.list) {
                        struct xdp_sock *xs = xdp_sk(sk);

                        mutex_lock(&xs->mutex);
                        if (xs->dev == dev) {
                                sk->sk_err = ENETDOWN;
                                if (!sock_flag(sk, SOCK_DEAD))
                                        sk_error_report(sk);

                                xsk_unbind_dev(xs);

                                /* Clear device references. */
                                xp_clear_dev(xs->pool);
                        }
                        mutex_unlock(&xs->mutex);
                }
                mutex_unlock(&net->xdp.lock);
                break;
        }
        return NOTIFY_DONE;
}

static struct proto xsk_proto = {
        .name =		"XDP",
        .owner =	THIS_MODULE,
        .obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
        .family		= PF_XDP,
        .owner		= THIS_MODULE,
        .release	= xsk_release,
        .bind		= xsk_bind,
        .connect	= sock_no_connect,
        .socketpair	= sock_no_socketpair,
        .accept		= sock_no_accept,
        .getname	= sock_no_getname,
        .poll		= xsk_poll,
        .ioctl		= sock_no_ioctl,
        .listen		= sock_no_listen,
        .shutdown	= sock_no_shutdown,
        .setsockopt	= xsk_setsockopt,
        .getsockopt	= xsk_getsockopt,
        .sendmsg	= xsk_sendmsg,
        .recvmsg	= xsk_recvmsg,
        .mmap		= xsk_mmap,
};

static void xsk_destruct(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);

        if (!sock_flag(sk, SOCK_DEAD))
                return;

        if (!xp_put_pool(xs->pool))
                xdp_put_umem(xs->umem, !xs->pool);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
                      int kern)
{
        struct xdp_sock *xs;
        struct sock *sk;

        if (!ns_capable(net->user_ns, CAP_NET_RAW))
                return -EPERM;
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        if (protocol)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
        if (!sk)
                return -ENOBUFS;

        sock->ops = &xsk_proto_ops;

        sock_init_data(sock, sk);

        sk->sk_family = PF_XDP;

        sk->sk_destruct = xsk_destruct;

        sock_set_flag(sk, SOCK_RCU_FREE);

        xs = xdp_sk(sk);
        xs->state = XSK_READY;
        mutex_init(&xs->mutex);
        spin_lock_init(&xs->rx_lock);

        INIT_LIST_HEAD(&xs->map_list);
        spin_lock_init(&xs->map_list_lock);

        mutex_lock(&net->xdp.lock);
        sk_add_node_rcu(sk, &net->xdp.list);
        mutex_unlock(&net->xdp.lock);

        sock_prot_inuse_add(net, &xsk_proto, 1);

        return 0;
}

static const struct net_proto_family xsk_family_ops = {
        .family = PF_XDP,
        .create = xsk_create,
        .owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
        .notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
        mutex_init(&net->xdp.lock);
        INIT_HLIST_HEAD(&net->xdp.list);
        return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
        WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
        .init = xsk_net_init,
        .exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
        int err, cpu;

        err = proto_register(&xsk_proto, 0 /* no slab */);
        if (err)
                goto out;

        err = sock_register(&xsk_family_ops);
        if (err)
                goto out_proto;

        err = register_pernet_subsys(&xsk_net_ops);
        if (err)
                goto out_sk;

        err = register_netdevice_notifier(&xsk_netdev_notifier);
        if (err)
                goto out_pernet;

        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
        return 0;

out_pernet:
        unregister_pernet_subsys(&xsk_net_ops);
out_sk:
        sock_unregister(PF_XDP);
out_proto:
        proto_unregister(&xsk_proto);
out:
        return err;
}

fs_initcall(xsk_init);