net/xdp/xsk.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* XDP sockets
3  *
 4  * AF_XDP sockets allow a channel between XDP programs and userspace
5  * applications.
6  * Copyright(c) 2018 Intel Corporation.
7  *
8  * Author(s): Björn Töpel <[email protected]>
9  *            Magnus Karlsson <[email protected]>
10  */
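/* A minimal user-space sketch of the channel described above, assuming AF_XDP
 * is exposed by the system headers; xsk_open_example() is an illustrative
 * name, not an API. UMEM registration, ring creation and bind() then follow
 * through the setsockopt(), mmap() and bind() paths implemented in this file.
 */
#include <sys/socket.h>

int xsk_open_example(void)
{
	/* SOCK_RAW with protocol 0 is required; see xsk_create() below. */
	return socket(AF_XDP, SOCK_RAW, 0);
}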
11
12 #define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
13
14 #include <linux/if_xdp.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/signal.h>
18 #include <linux/sched/task.h>
19 #include <linux/socket.h>
20 #include <linux/file.h>
21 #include <linux/uaccess.h>
22 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/rculist.h>
25 #include <linux/vmalloc.h>
26 #include <net/xdp_sock_drv.h>
27 #include <net/busy_poll.h>
28 #include <net/netdev_rx_queue.h>
29 #include <net/xdp.h>
30
31 #include "xsk_queue.h"
32 #include "xdp_umem.h"
33 #include "xsk.h"
34
35 #define TX_BATCH_SIZE 32
36 #define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
37
38 static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
39
40 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
41 {
42         if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
43                 return;
44
45         pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
46         pool->cached_need_wakeup |= XDP_WAKEUP_RX;
47 }
48 EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
49
50 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
51 {
52         struct xdp_sock *xs;
53
54         if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
55                 return;
56
57         rcu_read_lock();
58         list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
59                 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
60         }
61         rcu_read_unlock();
62
63         pool->cached_need_wakeup |= XDP_WAKEUP_TX;
64 }
65 EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
66
67 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
68 {
69         if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
70                 return;
71
72         pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
73         pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
74 }
75 EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
76
77 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
78 {
79         struct xdp_sock *xs;
80
81         if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
82                 return;
83
84         rcu_read_lock();
85         list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
86                 xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
87         }
88         rcu_read_unlock();
89
90         pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
91 }
92 EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
93
94 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
95 {
96         return pool->uses_need_wakeup;
97 }
98 EXPORT_SYMBOL(xsk_uses_need_wakeup);
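/* User-space counterpart of the wakeup flags above, assuming the Tx ring's
 * flags word has already been mmap()ed at the offset reported by
 * XDP_MMAP_OFFSETS; kick_tx_if_needed() is an illustrative name only.
 */
#include <sys/socket.h>
#include <linux/if_xdp.h>

static void kick_tx_if_needed(int xsk_fd, const __u32 *tx_ring_flags)
{
	/* Matches the XDP_RING_NEED_WAKEUP bit set in xsk_set_tx_need_wakeup(). */
	if (*tx_ring_flags & XDP_RING_NEED_WAKEUP)
		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}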
99
100 struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
101                                             u16 queue_id)
102 {
103         if (queue_id < dev->real_num_rx_queues)
104                 return dev->_rx[queue_id].pool;
105         if (queue_id < dev->real_num_tx_queues)
106                 return dev->_tx[queue_id].pool;
107
108         return NULL;
109 }
110 EXPORT_SYMBOL(xsk_get_pool_from_qid);
111
112 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
113 {
114         if (queue_id < dev->num_rx_queues)
115                 dev->_rx[queue_id].pool = NULL;
116         if (queue_id < dev->num_tx_queues)
117                 dev->_tx[queue_id].pool = NULL;
118 }
119
120 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
121  * not know if the device has more tx queues than rx, or the opposite.
122  * This might also change during run time.
123  */
124 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
125                         u16 queue_id)
126 {
127         if (queue_id >= max_t(unsigned int,
128                               dev->real_num_rx_queues,
129                               dev->real_num_tx_queues))
130                 return -EINVAL;
131
132         if (queue_id < dev->real_num_rx_queues)
133                 dev->_rx[queue_id].pool = pool;
134         if (queue_id < dev->real_num_tx_queues)
135                 dev->_tx[queue_id].pool = pool;
136
137         return 0;
138 }
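/* Driver-side sketch, assuming a fictitious driver's queue (re)configuration
 * path: once a pool has been registered for a queue id, the driver can look
 * it up and decide whether to run that queue in zero-copy mode.
 */
static bool example_queue_uses_xsk(struct net_device *netdev, u16 qid)
{
	return xsk_get_pool_from_qid(netdev, qid) != NULL;
}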
139
140 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
141                         u32 flags)
142 {
143         u64 addr;
144         int err;
145
146         addr = xp_get_handle(xskb);
147         err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
148         if (err) {
149                 xs->rx_queue_full++;
150                 return err;
151         }
152
153         xp_release(xskb);
154         return 0;
155 }
156
157 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
158 {
159         struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
160         u32 frags = xdp_buff_has_frags(xdp);
161         struct xdp_buff_xsk *pos, *tmp;
162         struct list_head *xskb_list;
163         u32 contd = 0;
164         int err;
165
166         if (frags)
167                 contd = XDP_PKT_CONTD;
168
169         err = __xsk_rcv_zc(xs, xskb, len, contd);
170         if (err || likely(!frags))
171                 goto out;
172
173         xskb_list = &xskb->pool->xskb_list;
174         list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
175                 if (list_is_singular(xskb_list))
176                         contd = 0;
177                 len = pos->xdp.data_end - pos->xdp.data;
178                 err = __xsk_rcv_zc(xs, pos, len, contd);
179                 if (err)
180                         return err;
181                 list_del(&pos->xskb_list_node);
182         }
183
184 out:
185         return err;
186 }
187
188 static void *xsk_copy_xdp_start(struct xdp_buff *from)
189 {
190         if (unlikely(xdp_data_meta_unsupported(from)))
191                 return from->data;
192         else
193                 return from->data_meta;
194 }
195
196 static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
197                         u32 *from_len, skb_frag_t **frag, u32 rem)
198 {
199         u32 copied = 0;
200
201         while (1) {
202                 u32 copy_len = min_t(u32, *from_len, to_len);
203
204                 memcpy(to, *from, copy_len);
205                 copied += copy_len;
206                 if (rem == copied)
207                         return copied;
208
209                 if (*from_len == copy_len) {
210                         *from = skb_frag_address(*frag);
211                         *from_len = skb_frag_size((*frag)++);
212                 } else {
213                         *from += copy_len;
214                         *from_len -= copy_len;
215                 }
216                 if (to_len == copy_len)
217                         return copied;
218
219                 to_len -= copy_len;
220                 to += copy_len;
221         }
222 }
223
224 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
225 {
226         u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
227         void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
228         u32 from_len, meta_len, rem, num_desc;
229         struct xdp_buff_xsk *xskb;
230         struct xdp_buff *xsk_xdp;
231         skb_frag_t *frag;
232
233         from_len = xdp->data_end - copy_from;
234         meta_len = xdp->data - copy_from;
235         rem = len + meta_len;
236
237         if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
238                 int err;
239
240                 xsk_xdp = xsk_buff_alloc(xs->pool);
241                 if (!xsk_xdp) {
242                         xs->rx_dropped++;
243                         return -ENOMEM;
244                 }
245                 memcpy(xsk_xdp->data - meta_len, copy_from, rem);
246                 xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
247                 err = __xsk_rcv_zc(xs, xskb, len, 0);
248                 if (err) {
249                         xsk_buff_free(xsk_xdp);
250                         return err;
251                 }
252
253                 return 0;
254         }
255
256         num_desc = (len - 1) / frame_size + 1;
257
258         if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
259                 xs->rx_dropped++;
260                 return -ENOMEM;
261         }
262         if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
263                 xs->rx_queue_full++;
264                 return -ENOBUFS;
265         }
266
267         if (xdp_buff_has_frags(xdp)) {
268                 struct skb_shared_info *sinfo;
269
270                 sinfo = xdp_get_shared_info_from_buff(xdp);
271                 frag = &sinfo->frags[0];
272         }
273
274         do {
275                 u32 to_len = frame_size + meta_len;
276                 u32 copied;
277
278                 xsk_xdp = xsk_buff_alloc(xs->pool);
279                 copy_to = xsk_xdp->data - meta_len;
280
281                 copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
282                 rem -= copied;
283
284                 xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
285                 __xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
286                 meta_len = 0;
287         } while (rem);
288
289         return 0;
290 }
291
292 static bool xsk_tx_writeable(struct xdp_sock *xs)
293 {
294         if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
295                 return false;
296
297         return true;
298 }
299
300 static bool xsk_is_bound(struct xdp_sock *xs)
301 {
302         if (READ_ONCE(xs->state) == XSK_BOUND) {
303                 /* Matches smp_wmb() in bind(). */
304                 smp_rmb();
305                 return true;
306         }
307         return false;
308 }
309
310 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
311 {
312         if (!xsk_is_bound(xs))
313                 return -ENXIO;
314
315         if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
316                 return -EINVAL;
317
318         if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
319                 xs->rx_dropped++;
320                 return -ENOSPC;
321         }
322
323         sk_mark_napi_id_once_xdp(&xs->sk, xdp);
324         return 0;
325 }
326
327 static void xsk_flush(struct xdp_sock *xs)
328 {
329         xskq_prod_submit(xs->rx);
330         __xskq_cons_release(xs->pool->fq);
331         sock_def_readable(&xs->sk);
332 }
333
334 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
335 {
336         u32 len = xdp_get_buff_len(xdp);
337         int err;
338
339         spin_lock_bh(&xs->rx_lock);
340         err = xsk_rcv_check(xs, xdp, len);
341         if (!err) {
342                 err = __xsk_rcv(xs, xdp, len);
343                 xsk_flush(xs);
344         }
345         spin_unlock_bh(&xs->rx_lock);
346         return err;
347 }
348
349 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
350 {
351         u32 len = xdp_get_buff_len(xdp);
352         int err;
353
354         err = xsk_rcv_check(xs, xdp, len);
355         if (err)
356                 return err;
357
358         if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
359                 len = xdp->data_end - xdp->data;
360                 return xsk_rcv_zc(xs, xdp, len);
361         }
362
363         err = __xsk_rcv(xs, xdp, len);
364         if (!err)
365                 xdp_return_buff(xdp);
366         return err;
367 }
368
369 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
370 {
371         struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
372         int err;
373
374         err = xsk_rcv(xs, xdp);
375         if (err)
376                 return err;
377
378         if (!xs->flush_node.prev)
379                 list_add(&xs->flush_node, flush_list);
380
381         return 0;
382 }
383
384 void __xsk_map_flush(void)
385 {
386         struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
387         struct xdp_sock *xs, *tmp;
388
389         list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
390                 xsk_flush(xs);
391                 __list_del_clearprev(&xs->flush_node);
392         }
393 }
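/* XDP program sketch, assuming an xsks_map created and populated from user
 * space: bpf_redirect_map() into an XSKMAP is what funnels packets through
 * __xsk_map_redirect() above, and __xsk_map_flush() runs at the end of the
 * driver's NAPI poll.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} xsks_map SEC(".maps");

SEC("xdp")
int xdp_sock_prog(struct xdp_md *ctx)
{
	/* Redirect to the socket bound to this Rx queue, or pass the packet on. */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
}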
394
395 #ifdef CONFIG_DEBUG_NET
396 bool xsk_map_check_flush(void)
397 {
398         if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
399                 return false;
400         __xsk_map_flush();
401         return true;
402 }
403 #endif
404
405 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
406 {
407         xskq_prod_submit_n(pool->cq, nb_entries);
408 }
409 EXPORT_SYMBOL(xsk_tx_completed);
410
411 void xsk_tx_release(struct xsk_buff_pool *pool)
412 {
413         struct xdp_sock *xs;
414
415         rcu_read_lock();
416         list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
417                 __xskq_cons_release(xs->tx);
418                 if (xsk_tx_writeable(xs))
419                         xs->sk.sk_write_space(&xs->sk);
420         }
421         rcu_read_unlock();
422 }
423 EXPORT_SYMBOL(xsk_tx_release);
424
425 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
426 {
427         bool budget_exhausted = false;
428         struct xdp_sock *xs;
429
430         rcu_read_lock();
431 again:
432         list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
433                 if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
434                         budget_exhausted = true;
435                         continue;
436                 }
437
438                 if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
439                         if (xskq_has_descs(xs->tx))
440                                 xskq_cons_release(xs->tx);
441                         continue;
442                 }
443
444                 xs->tx_budget_spent++;
445
446                 /* This is the backpressure mechanism for the Tx path.
447                  * Reserve space in the completion queue and only proceed
448                  * if there is space in it. This avoids having to implement
449                  * any buffering in the Tx path.
450                  */
451                 if (xskq_prod_reserve_addr(pool->cq, desc->addr))
452                         goto out;
453
454                 xskq_cons_release(xs->tx);
455                 rcu_read_unlock();
456                 return true;
457         }
458
459         if (budget_exhausted) {
460                 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
461                         xs->tx_budget_spent = 0;
462
463                 budget_exhausted = false;
464                 goto again;
465         }
466
467 out:
468         rcu_read_unlock();
469         return false;
470 }
471 EXPORT_SYMBOL(xsk_tx_peek_desc);
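/* Driver Tx sketch, assuming fictitious example_ring_has_room() and
 * example_post_tx_desc() helpers: descriptors are peeked one at a time, and
 * the completion-queue reservation made inside xsk_tx_peek_desc() provides
 * the backpressure described in the comment above.
 */
extern bool example_ring_has_room(void);
extern void example_post_tx_desc(dma_addr_t dma, u32 len);

static void example_xmit_zc(struct xsk_buff_pool *pool)
{
	struct xdp_desc desc;

	while (example_ring_has_room() && xsk_tx_peek_desc(pool, &desc)) {
		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
		example_post_tx_desc(dma, desc.len);
	}
	/* Hand the consumed Tx ring entries back to user space. */
	xsk_tx_release(pool);
}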
472
473 static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
474 {
475         struct xdp_desc *descs = pool->tx_descs;
476         u32 nb_pkts = 0;
477
478         while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
479                 nb_pkts++;
480
481         xsk_tx_release(pool);
482         return nb_pkts;
483 }
484
485 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
486 {
487         struct xdp_sock *xs;
488
489         rcu_read_lock();
490         if (!list_is_singular(&pool->xsk_tx_list)) {
491                 /* Fallback to the non-batched version */
492                 rcu_read_unlock();
493                 return xsk_tx_peek_release_fallback(pool, nb_pkts);
494         }
495
496         xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
497         if (!xs) {
498                 nb_pkts = 0;
499                 goto out;
500         }
501
502         nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
503
504         /* This is the backpressure mechanism for the Tx path. Try to
505          * reserve space in the completion queue for all packets, but
506          * if there are fewer slots available, just process that many
507          * packets. This avoids having to implement any buffering in
508          * the Tx path.
509          */
510         nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
511         if (!nb_pkts)
512                 goto out;
513
514         nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
515         if (!nb_pkts) {
516                 xs->tx->queue_empty_descs++;
517                 goto out;
518         }
519
520         __xskq_cons_release(xs->tx);
521         xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
522         xs->sk.sk_write_space(&xs->sk);
523
524 out:
525         rcu_read_unlock();
526         return nb_pkts;
527 }
528 EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
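/* Batched variant of the sketch above, reusing the fictitious
 * example_post_tx_desc() helper: one call peeks up to `budget` descriptors
 * into pool->tx_descs and reserves the matching completion-queue entries,
 * falling back to the per-descriptor path when the pool is shared by more
 * than one socket.
 */
static u32 example_xmit_zc_batch(struct xsk_buff_pool *pool, u32 budget)
{
	u32 i, nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);

	for (i = 0; i < nb_pkts; i++) {
		struct xdp_desc *desc = &pool->tx_descs[i];
		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc->addr);

		xsk_buff_raw_dma_sync_for_device(pool, dma, desc->len);
		example_post_tx_desc(dma, desc->len);
	}
	return nb_pkts;
}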
529
530 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
531 {
532         struct net_device *dev = xs->dev;
533
534         return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
535 }
536
537 static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
538 {
539         unsigned long flags;
540         int ret;
541
542         spin_lock_irqsave(&xs->pool->cq_lock, flags);
543         ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
544         spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
545
546         return ret;
547 }
548
549 static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
550 {
551         unsigned long flags;
552
553         spin_lock_irqsave(&xs->pool->cq_lock, flags);
554         xskq_prod_submit_n(xs->pool->cq, n);
555         spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
556 }
557
558 static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
559 {
560         unsigned long flags;
561
562         spin_lock_irqsave(&xs->pool->cq_lock, flags);
563         xskq_prod_cancel_n(xs->pool->cq, n);
564         spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
565 }
566
567 static u32 xsk_get_num_desc(struct sk_buff *skb)
568 {
569         return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
570 }
571
572 static void xsk_destruct_skb(struct sk_buff *skb)
573 {
574         struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
575
576         if (compl->tx_timestamp) {
577                 /* sw completion timestamp, not a real one */
578                 *compl->tx_timestamp = ktime_get_tai_fast_ns();
579         }
580
581         xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
582         sock_wfree(skb);
583 }
584
585 static void xsk_set_destructor_arg(struct sk_buff *skb)
586 {
587         long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;
588
589         skb_shinfo(skb)->destructor_arg = (void *)num;
590 }
591
592 static void xsk_consume_skb(struct sk_buff *skb)
593 {
594         struct xdp_sock *xs = xdp_sk(skb->sk);
595
596         skb->destructor = sock_wfree;
597         xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
598         /* Free skb without triggering the perf drop trace */
599         consume_skb(skb);
600         xs->skb = NULL;
601 }
602
603 static void xsk_drop_skb(struct sk_buff *skb)
604 {
605         xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
606         xsk_consume_skb(skb);
607 }
608
609 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
610                                               struct xdp_desc *desc)
611 {
612         struct xsk_buff_pool *pool = xs->pool;
613         u32 hr, len, ts, offset, copy, copied;
614         struct sk_buff *skb = xs->skb;
615         struct page *page;
616         void *buffer;
617         int err, i;
618         u64 addr;
619
620         if (!skb) {
621                 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
622
623                 skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
624                 if (unlikely(!skb))
625                         return ERR_PTR(err);
626
627                 skb_reserve(skb, hr);
628         }
629
630         addr = desc->addr;
631         len = desc->len;
632         ts = pool->unaligned ? len : pool->chunk_size;
633
634         buffer = xsk_buff_raw_get_data(pool, addr);
635         offset = offset_in_page(buffer);
636         addr = buffer - pool->addrs;
637
638         for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
639                 if (unlikely(i >= MAX_SKB_FRAGS))
640                         return ERR_PTR(-EOVERFLOW);
641
642                 page = pool->umem->pgs[addr >> PAGE_SHIFT];
643                 get_page(page);
644
645                 copy = min_t(u32, PAGE_SIZE - offset, len - copied);
646                 skb_fill_page_desc(skb, i, page, offset, copy);
647
648                 copied += copy;
649                 addr += copy;
650                 offset = 0;
651         }
652
653         skb->len += len;
654         skb->data_len += len;
655         skb->truesize += ts;
656
657         refcount_add(ts, &xs->sk.sk_wmem_alloc);
658
659         return skb;
660 }
661
662 static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
663                                      struct xdp_desc *desc)
664 {
665         struct xsk_tx_metadata *meta = NULL;
666         struct net_device *dev = xs->dev;
667         struct sk_buff *skb = xs->skb;
668         bool first_frag = false;
669         int err;
670
671         if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
672                 skb = xsk_build_skb_zerocopy(xs, desc);
673                 if (IS_ERR(skb)) {
674                         err = PTR_ERR(skb);
675                         goto free_err;
676                 }
677         } else {
678                 u32 hr, tr, len;
679                 void *buffer;
680
681                 buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
682                 len = desc->len;
683
684                 if (!skb) {
685                         hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
686                         tr = dev->needed_tailroom;
687                         skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
688                         if (unlikely(!skb))
689                                 goto free_err;
690
691                         skb_reserve(skb, hr);
692                         skb_put(skb, len);
693
694                         err = skb_store_bits(skb, 0, buffer, len);
695                         if (unlikely(err)) {
696                                 kfree_skb(skb);
697                                 goto free_err;
698                         }
699
700                         first_frag = true;
701                 } else {
702                         int nr_frags = skb_shinfo(skb)->nr_frags;
703                         struct page *page;
704                         u8 *vaddr;
705
706                         if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
707                                 err = -EOVERFLOW;
708                                 goto free_err;
709                         }
710
711                         page = alloc_page(xs->sk.sk_allocation);
712                         if (unlikely(!page)) {
713                                 err = -EAGAIN;
714                                 goto free_err;
715                         }
716
717                         vaddr = kmap_local_page(page);
718                         memcpy(vaddr, buffer, len);
719                         kunmap_local(vaddr);
720
721                         skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
722                 }
723
724                 if (first_frag && desc->options & XDP_TX_METADATA) {
725                         if (unlikely(xs->pool->tx_metadata_len == 0)) {
726                                 err = -EINVAL;
727                                 goto free_err;
728                         }
729
730                         meta = buffer - xs->pool->tx_metadata_len;
731                         if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
732                                 err = -EINVAL;
733                                 goto free_err;
734                         }
735
736                         if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
737                                 if (unlikely(meta->request.csum_start +
738                                              meta->request.csum_offset +
739                                              sizeof(__sum16) > len)) {
740                                         err = -EINVAL;
741                                         goto free_err;
742                                 }
743
744                                 skb->csum_start = hr + meta->request.csum_start;
745                                 skb->csum_offset = meta->request.csum_offset;
746                                 skb->ip_summed = CHECKSUM_PARTIAL;
747
748                                 if (unlikely(xs->pool->tx_sw_csum)) {
749                                         err = skb_checksum_help(skb);
750                                         if (err)
751                                                 goto free_err;
752                                 }
753                         }
754                 }
755         }
756
757         skb->dev = dev;
758         skb->priority = READ_ONCE(xs->sk.sk_priority);
759         skb->mark = READ_ONCE(xs->sk.sk_mark);
760         skb->destructor = xsk_destruct_skb;
761         xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
762         xsk_set_destructor_arg(skb);
763
764         return skb;
765
766 free_err:
767         if (err == -EOVERFLOW) {
768                 /* Drop the packet */
769                 xsk_set_destructor_arg(xs->skb);
770                 xsk_drop_skb(xs->skb);
771                 xskq_cons_release(xs->tx);
772         } else {
773                 /* Let application retry */
774                 xsk_cq_cancel_locked(xs, 1);
775         }
776
777         return ERR_PTR(err);
778 }
779
780 static int __xsk_generic_xmit(struct sock *sk)
781 {
782         struct xdp_sock *xs = xdp_sk(sk);
783         u32 max_batch = TX_BATCH_SIZE;
784         bool sent_frame = false;
785         struct xdp_desc desc;
786         struct sk_buff *skb;
787         int err = 0;
788
789         mutex_lock(&xs->mutex);
790
791         /* Since we dropped the RCU read lock, the socket state might have changed. */
792         if (unlikely(!xsk_is_bound(xs))) {
793                 err = -ENXIO;
794                 goto out;
795         }
796
797         if (xs->queue_id >= xs->dev->real_num_tx_queues)
798                 goto out;
799
800         while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
801                 if (max_batch-- == 0) {
802                         err = -EAGAIN;
803                         goto out;
804                 }
805
806                 /* This is the backpressure mechanism for the Tx path.
807                  * Reserve space in the completion queue and only proceed
808                  * if there is space in it. This avoids having to implement
809                  * any buffering in the Tx path.
810                  */
811                 if (xsk_cq_reserve_addr_locked(xs, desc.addr))
812                         goto out;
813
814                 skb = xsk_build_skb(xs, &desc);
815                 if (IS_ERR(skb)) {
816                         err = PTR_ERR(skb);
817                         if (err != -EOVERFLOW)
818                                 goto out;
819                         err = 0;
820                         continue;
821                 }
822
823                 xskq_cons_release(xs->tx);
824
825                 if (xp_mb_desc(&desc)) {
826                         xs->skb = skb;
827                         continue;
828                 }
829
830                 err = __dev_direct_xmit(skb, xs->queue_id);
831                 if (err == NETDEV_TX_BUSY) {
832                         /* Tell user-space to retry the send */
833                         xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
834                         xsk_consume_skb(skb);
835                         err = -EAGAIN;
836                         goto out;
837                 }
838
839                 /* Ignore NET_XMIT_CN as packet might have been sent */
840                 if (err == NET_XMIT_DROP) {
841                         /* SKB completed but not sent */
842                         err = -EBUSY;
843                         xs->skb = NULL;
844                         goto out;
845                 }
846
847                 sent_frame = true;
848                 xs->skb = NULL;
849         }
850
851         if (xskq_has_descs(xs->tx)) {
852                 if (xs->skb)
853                         xsk_drop_skb(xs->skb);
854                 xskq_cons_release(xs->tx);
855         }
856
857 out:
858         if (sent_frame)
859                 if (xsk_tx_writeable(xs))
860                         sk->sk_write_space(sk);
861
862         mutex_unlock(&xs->mutex);
863         return err;
864 }
865
866 static int xsk_generic_xmit(struct sock *sk)
867 {
868         int ret;
869
870         /* Drop the RCU lock since the SKB path might sleep. */
871         rcu_read_unlock();
872         ret = __xsk_generic_xmit(sk);
873                 /* Reacquire RCU lock before going into common code. */
874         rcu_read_lock();
875
876         return ret;
877 }
878
879 static bool xsk_no_wakeup(struct sock *sk)
880 {
881 #ifdef CONFIG_NET_RX_BUSY_POLL
882         /* Prefer busy-polling, skip the wakeup. */
883         return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
884                 READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
885 #else
886         return false;
887 #endif
888 }
889
890 static int xsk_check_common(struct xdp_sock *xs)
891 {
892         if (unlikely(!xsk_is_bound(xs)))
893                 return -ENXIO;
894         if (unlikely(!(xs->dev->flags & IFF_UP)))
895                 return -ENETDOWN;
896
897         return 0;
898 }
899
900 static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
901 {
902         bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
903         struct sock *sk = sock->sk;
904         struct xdp_sock *xs = xdp_sk(sk);
905         struct xsk_buff_pool *pool;
906         int err;
907
908         err = xsk_check_common(xs);
909         if (err)
910                 return err;
911         if (unlikely(need_wait))
912                 return -EOPNOTSUPP;
913         if (unlikely(!xs->tx))
914                 return -ENOBUFS;
915
916         if (sk_can_busy_loop(sk)) {
917                 if (xs->zc)
918                         __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
919                 sk_busy_loop(sk, 1); /* only support non-blocking sockets */
920         }
921
922         if (xs->zc && xsk_no_wakeup(sk))
923                 return 0;
924
925         pool = xs->pool;
926         if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
927                 if (xs->zc)
928                         return xsk_wakeup(xs, XDP_WAKEUP_TX);
929                 return xsk_generic_xmit(sk);
930         }
931         return 0;
932 }
933
934 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
935 {
936         int ret;
937
938         rcu_read_lock();
939         ret = __xsk_sendmsg(sock, m, total_len);
940         rcu_read_unlock();
941
942         return ret;
943 }
944
945 static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
946 {
947         bool need_wait = !(flags & MSG_DONTWAIT);
948         struct sock *sk = sock->sk;
949         struct xdp_sock *xs = xdp_sk(sk);
950         int err;
951
952         err = xsk_check_common(xs);
953         if (err)
954                 return err;
955         if (unlikely(!xs->rx))
956                 return -ENOBUFS;
957         if (unlikely(need_wait))
958                 return -EOPNOTSUPP;
959
960         if (sk_can_busy_loop(sk))
961                 sk_busy_loop(sk, 1); /* only support non-blocking sockets */
962
963         if (xsk_no_wakeup(sk))
964                 return 0;
965
966         if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
967                 return xsk_wakeup(xs, XDP_WAKEUP_RX);
968         return 0;
969 }
970
971 static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
972 {
973         int ret;
974
975         rcu_read_lock();
976         ret = __xsk_recvmsg(sock, m, len, flags);
977         rcu_read_unlock();
978
979         return ret;
980 }
981
982 static __poll_t xsk_poll(struct file *file, struct socket *sock,
983                              struct poll_table_struct *wait)
984 {
985         __poll_t mask = 0;
986         struct sock *sk = sock->sk;
987         struct xdp_sock *xs = xdp_sk(sk);
988         struct xsk_buff_pool *pool;
989
990         sock_poll_wait(file, sock, wait);
991
992         rcu_read_lock();
993         if (xsk_check_common(xs))
994                 goto skip_tx;
995
996         pool = xs->pool;
997
998         if (pool->cached_need_wakeup) {
999                 if (xs->zc)
1000                         xsk_wakeup(xs, pool->cached_need_wakeup);
1001                 else if (xs->tx)
1002                         /* Poll needs to drive Tx also in copy mode */
1003                         xsk_generic_xmit(sk);
1004         }
1005
1006 skip_tx:
1007         if (xs->rx && !xskq_prod_is_empty(xs->rx))
1008                 mask |= EPOLLIN | EPOLLRDNORM;
1009         if (xs->tx && xsk_tx_writeable(xs))
1010                 mask |= EPOLLOUT | EPOLLWRNORM;
1011
1012         rcu_read_unlock();
1013         return mask;
1014 }
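/* User-space wait sketch, assuming the socket was bound with
 * XDP_USE_NEED_WAKEUP: poll() drives Tx in copy mode and sleeps until the
 * Rx ring checked above has descriptors; xsk_wait_for_rx_example() is an
 * illustrative name only.
 */
#include <poll.h>

static int xsk_wait_for_rx_example(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	return poll(&pfd, 1, -1);
}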
1015
1016 static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
1017                           bool umem_queue)
1018 {
1019         struct xsk_queue *q;
1020
1021         if (entries == 0 || *queue || !is_power_of_2(entries))
1022                 return -EINVAL;
1023
1024         q = xskq_create(entries, umem_queue);
1025         if (!q)
1026                 return -ENOMEM;
1027
1028         /* Make sure queue is ready before it can be seen by others */
1029         smp_wmb();
1030         WRITE_ONCE(*queue, q);
1031         return 0;
1032 }
1033
1034 static void xsk_unbind_dev(struct xdp_sock *xs)
1035 {
1036         struct net_device *dev = xs->dev;
1037
1038         if (xs->state != XSK_BOUND)
1039                 return;
1040         WRITE_ONCE(xs->state, XSK_UNBOUND);
1041
1042         /* Wait for driver to stop using the xdp socket. */
1043         xp_del_xsk(xs->pool, xs);
1044         synchronize_net();
1045         dev_put(dev);
1046 }
1047
1048 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
1049                                               struct xdp_sock __rcu ***map_entry)
1050 {
1051         struct xsk_map *map = NULL;
1052         struct xsk_map_node *node;
1053
1054         *map_entry = NULL;
1055
1056         spin_lock_bh(&xs->map_list_lock);
1057         node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
1058                                         node);
1059         if (node) {
1060                 bpf_map_inc(&node->map->map);
1061                 map = node->map;
1062                 *map_entry = node->map_entry;
1063         }
1064         spin_unlock_bh(&xs->map_list_lock);
1065         return map;
1066 }
1067
1068 static void xsk_delete_from_maps(struct xdp_sock *xs)
1069 {
1070         /* This function removes the current XDP socket from all the
1071          * maps it resides in. We need to take extra care here, due to
1072          * the two locks involved. Each map has a lock synchronizing
1073          * updates to the entries, and each socket has a lock that
1074          * synchronizes access to the list of maps (map_list). For
1075          * deadlock avoidance the locks need to be taken in the order
1076          * "map lock"->"socket map list lock". We start off by
1077          * accessing the socket map list, and take a reference to the
1078          * map to guarantee existence between the
1079          * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
1080          * calls. Then we ask the map to remove the socket, which
1081          * tries to remove the socket from the map. Note that there
1082          * might be updates to the map between
1083          * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
1084          */
1085         struct xdp_sock __rcu **map_entry = NULL;
1086         struct xsk_map *map;
1087
1088         while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
1089                 xsk_map_try_sock_delete(map, xs, map_entry);
1090                 bpf_map_put(&map->map);
1091         }
1092 }
1093
1094 static int xsk_release(struct socket *sock)
1095 {
1096         struct sock *sk = sock->sk;
1097         struct xdp_sock *xs = xdp_sk(sk);
1098         struct net *net;
1099
1100         if (!sk)
1101                 return 0;
1102
1103         net = sock_net(sk);
1104
1105         if (xs->skb)
1106                 xsk_drop_skb(xs->skb);
1107
1108         mutex_lock(&net->xdp.lock);
1109         sk_del_node_init_rcu(sk);
1110         mutex_unlock(&net->xdp.lock);
1111
1112         sock_prot_inuse_add(net, sk->sk_prot, -1);
1113
1114         xsk_delete_from_maps(xs);
1115         mutex_lock(&xs->mutex);
1116         xsk_unbind_dev(xs);
1117         mutex_unlock(&xs->mutex);
1118
1119         xskq_destroy(xs->rx);
1120         xskq_destroy(xs->tx);
1121         xskq_destroy(xs->fq_tmp);
1122         xskq_destroy(xs->cq_tmp);
1123
1124         sock_orphan(sk);
1125         sock->sk = NULL;
1126
1127         sock_put(sk);
1128
1129         return 0;
1130 }
1131
1132 static struct socket *xsk_lookup_xsk_from_fd(int fd)
1133 {
1134         struct socket *sock;
1135         int err;
1136
1137         sock = sockfd_lookup(fd, &err);
1138         if (!sock)
1139                 return ERR_PTR(-ENOTSOCK);
1140
1141         if (sock->sk->sk_family != PF_XDP) {
1142                 sockfd_put(sock);
1143                 return ERR_PTR(-ENOPROTOOPT);
1144         }
1145
1146         return sock;
1147 }
1148
1149 static bool xsk_validate_queues(struct xdp_sock *xs)
1150 {
1151         return xs->fq_tmp && xs->cq_tmp;
1152 }
1153
1154 static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
1155 {
1156         struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
1157         struct sock *sk = sock->sk;
1158         struct xdp_sock *xs = xdp_sk(sk);
1159         struct net_device *dev;
1160         int bound_dev_if;
1161         u32 flags, qid;
1162         int err = 0;
1163
1164         if (addr_len < sizeof(struct sockaddr_xdp))
1165                 return -EINVAL;
1166         if (sxdp->sxdp_family != AF_XDP)
1167                 return -EINVAL;
1168
1169         flags = sxdp->sxdp_flags;
1170         if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
1171                       XDP_USE_NEED_WAKEUP | XDP_USE_SG))
1172                 return -EINVAL;
1173
1174         bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
1175         if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
1176                 return -EINVAL;
1177
1178         rtnl_lock();
1179         mutex_lock(&xs->mutex);
1180         if (xs->state != XSK_READY) {
1181                 err = -EBUSY;
1182                 goto out_release;
1183         }
1184
1185         dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
1186         if (!dev) {
1187                 err = -ENODEV;
1188                 goto out_release;
1189         }
1190
1191         if (!xs->rx && !xs->tx) {
1192                 err = -EINVAL;
1193                 goto out_unlock;
1194         }
1195
1196         qid = sxdp->sxdp_queue_id;
1197
1198         if (flags & XDP_SHARED_UMEM) {
1199                 struct xdp_sock *umem_xs;
1200                 struct socket *sock;
1201
1202                 if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
1203                     (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
1204                         /* Cannot specify flags for shared sockets. */
1205                         err = -EINVAL;
1206                         goto out_unlock;
1207                 }
1208
1209                 if (xs->umem) {
1210                         /* We already have our own. */
1211                         err = -EINVAL;
1212                         goto out_unlock;
1213                 }
1214
1215                 sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
1216                 if (IS_ERR(sock)) {
1217                         err = PTR_ERR(sock);
1218                         goto out_unlock;
1219                 }
1220
1221                 umem_xs = xdp_sk(sock->sk);
1222                 if (!xsk_is_bound(umem_xs)) {
1223                         err = -EBADF;
1224                         sockfd_put(sock);
1225                         goto out_unlock;
1226                 }
1227
1228                 if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
1229                         /* Share the umem with another socket on another qid
1230                          * and/or device.
1231                          */
1232                         xs->pool = xp_create_and_assign_umem(xs,
1233                                                              umem_xs->umem);
1234                         if (!xs->pool) {
1235                                 err = -ENOMEM;
1236                                 sockfd_put(sock);
1237                                 goto out_unlock;
1238                         }
1239
1240                         err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1241                                                    qid);
1242                         if (err) {
1243                                 xp_destroy(xs->pool);
1244                                 xs->pool = NULL;
1245                                 sockfd_put(sock);
1246                                 goto out_unlock;
1247                         }
1248                 } else {
1249                         /* Share the buffer pool with the other socket. */
1250                         if (xs->fq_tmp || xs->cq_tmp) {
1251                                 /* Do not allow setting your own fq or cq. */
1252                                 err = -EINVAL;
1253                                 sockfd_put(sock);
1254                                 goto out_unlock;
1255                         }
1256
1257                         xp_get_pool(umem_xs->pool);
1258                         xs->pool = umem_xs->pool;
1259
1260                         /* If the underlying shared umem was created without a Tx
1261                          * ring, allocate the Tx descs array that the Tx batching
1262                          * API utilizes.
1263                          */
1264                         if (xs->tx && !xs->pool->tx_descs) {
1265                                 err = xp_alloc_tx_descs(xs->pool, xs);
1266                                 if (err) {
1267                                         xp_put_pool(xs->pool);
1268                                         xs->pool = NULL;
1269                                         sockfd_put(sock);
1270                                         goto out_unlock;
1271                                 }
1272                         }
1273                 }
1274
1275                 xdp_get_umem(umem_xs->umem);
1276                 WRITE_ONCE(xs->umem, umem_xs->umem);
1277                 sockfd_put(sock);
1278         } else if (!xs->umem || !xsk_validate_queues(xs)) {
1279                 err = -EINVAL;
1280                 goto out_unlock;
1281         } else {
1282                 /* This xsk has its own umem. */
1283                 xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1284                 if (!xs->pool) {
1285                         err = -ENOMEM;
1286                         goto out_unlock;
1287                 }
1288
1289                 err = xp_assign_dev(xs->pool, dev, qid, flags);
1290                 if (err) {
1291                         xp_destroy(xs->pool);
1292                         xs->pool = NULL;
1293                         goto out_unlock;
1294                 }
1295         }
1296
1297         /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
1298         xs->fq_tmp = NULL;
1299         xs->cq_tmp = NULL;
1300
1301         xs->dev = dev;
1302         xs->zc = xs->umem->zc;
1303         xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
1304         xs->queue_id = qid;
1305         xp_add_xsk(xs->pool, xs);
1306
1307 out_unlock:
1308         if (err) {
1309                 dev_put(dev);
1310         } else {
1311                 /* Matches smp_rmb() in bind() for shared umem
1312                  * sockets, and xsk_is_bound().
1313                  */
1314                 smp_wmb();
1315                 WRITE_ONCE(xs->state, XSK_BOUND);
1316         }
1317 out_release:
1318         mutex_unlock(&xs->mutex);
1319         rtnl_unlock();
1320         return err;
1321 }
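/* User-space bind sketch, assuming fd is an AF_XDP socket whose UMEM and
 * rings have already been set up via the setsockopt() path below: bind()
 * attaches the socket to one <ifindex, queue_id> pair; XDP_SHARED_UMEM with
 * sxdp_shared_umem_fd would share another socket's UMEM instead.
 */
#include <net/if.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int xsk_bind_example(int fd, const char *ifname, __u32 queue_id)
{
	struct sockaddr_xdp sxdp = {};

	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = if_nametoindex(ifname);
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_flags = XDP_USE_NEED_WAKEUP;

	return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}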
1322
1323 struct xdp_umem_reg_v1 {
1324         __u64 addr; /* Start of packet data area */
1325         __u64 len; /* Length of packet data area */
1326         __u32 chunk_size;
1327         __u32 headroom;
1328 };
1329
1330 struct xdp_umem_reg_v2 {
1331         __u64 addr; /* Start of packet data area */
1332         __u64 len; /* Length of packet data area */
1333         __u32 chunk_size;
1334         __u32 headroom;
1335         __u32 flags;
1336 };
1337
1338 static int xsk_setsockopt(struct socket *sock, int level, int optname,
1339                           sockptr_t optval, unsigned int optlen)
1340 {
1341         struct sock *sk = sock->sk;
1342         struct xdp_sock *xs = xdp_sk(sk);
1343         int err;
1344
1345         if (level != SOL_XDP)
1346                 return -ENOPROTOOPT;
1347
1348         switch (optname) {
1349         case XDP_RX_RING:
1350         case XDP_TX_RING:
1351         {
1352                 struct xsk_queue **q;
1353                 int entries;
1354
1355                 if (optlen < sizeof(entries))
1356                         return -EINVAL;
1357                 if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1358                         return -EFAULT;
1359
1360                 mutex_lock(&xs->mutex);
1361                 if (xs->state != XSK_READY) {
1362                         mutex_unlock(&xs->mutex);
1363                         return -EBUSY;
1364                 }
1365                 q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
1366                 err = xsk_init_queue(entries, q, false);
1367                 if (!err && optname == XDP_TX_RING)
1368                         /* Tx needs to be explicitly woken up the first time */
1369                         xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1370                 mutex_unlock(&xs->mutex);
1371                 return err;
1372         }
1373         case XDP_UMEM_REG:
1374         {
1375                 size_t mr_size = sizeof(struct xdp_umem_reg);
1376                 struct xdp_umem_reg mr = {};
1377                 struct xdp_umem *umem;
1378
1379                 if (optlen < sizeof(struct xdp_umem_reg_v1))
1380                         return -EINVAL;
1381                 else if (optlen < sizeof(struct xdp_umem_reg_v2))
1382                         mr_size = sizeof(struct xdp_umem_reg_v1);
1383                 else if (optlen < sizeof(mr))
1384                         mr_size = sizeof(struct xdp_umem_reg_v2);
1385
1386                 if (copy_from_sockptr(&mr, optval, mr_size))
1387                         return -EFAULT;
1388
1389                 mutex_lock(&xs->mutex);
1390                 if (xs->state != XSK_READY || xs->umem) {
1391                         mutex_unlock(&xs->mutex);
1392                         return -EBUSY;
1393                 }
1394
1395                 umem = xdp_umem_create(&mr);
1396                 if (IS_ERR(umem)) {
1397                         mutex_unlock(&xs->mutex);
1398                         return PTR_ERR(umem);
1399                 }
1400
1401                 /* Make sure umem is ready before it can be seen by others */
1402                 smp_wmb();
1403                 WRITE_ONCE(xs->umem, umem);
1404                 mutex_unlock(&xs->mutex);
1405                 return 0;
1406         }
1407         case XDP_UMEM_FILL_RING:
1408         case XDP_UMEM_COMPLETION_RING:
1409         {
1410                 struct xsk_queue **q;
1411                 int entries;
1412
1413                 if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1414                         return -EFAULT;
1415
1416                 mutex_lock(&xs->mutex);
1417                 if (xs->state != XSK_READY) {
1418                         mutex_unlock(&xs->mutex);
1419                         return -EBUSY;
1420                 }
1421
1422                 q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1423                         &xs->cq_tmp;
1424                 err = xsk_init_queue(entries, q, true);
1425                 mutex_unlock(&xs->mutex);
1426                 return err;
1427         }
1428         default:
1429                 break;
1430         }
1431
1432         return -ENOPROTOOPT;
1433 }
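/* User-space configuration sketch, assuming umem_area is a page-aligned
 * buffer (e.g. from an anonymous mmap()) and that 2048-entry rings are
 * acceptable: the UMEM and all four rings are created through the
 * setsockopt() cases handled above, before bind().
 */
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int xsk_configure_example(int fd, void *umem_area, __u64 size)
{
	struct xdp_umem_reg mr = {
		.addr = (__u64)(unsigned long)umem_area,
		.len = size,
		.chunk_size = 4096,
		.headroom = 0,
	};
	int ring_sz = 2048;	/* must be a power of two, see xsk_init_queue() */

	if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)))
		return -1;
	if (setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz)) ||
	    setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_sz, sizeof(ring_sz)) ||
	    setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz)) ||
	    setsockopt(fd, SOL_XDP, XDP_TX_RING, &ring_sz, sizeof(ring_sz)))
		return -1;
	return 0;
}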
1434
1435 static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
1436 {
1437         ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
1438         ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
1439         ring->desc = offsetof(struct xdp_rxtx_ring, desc);
1440 }
1441
1442 static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
1443 {
1444         ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
1445         ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
1446         ring->desc = offsetof(struct xdp_umem_ring, desc);
1447 }
1448
1449 struct xdp_statistics_v1 {
1450         __u64 rx_dropped;
1451         __u64 rx_invalid_descs;
1452         __u64 tx_invalid_descs;
1453 };
1454
1455 static int xsk_getsockopt(struct socket *sock, int level, int optname,
1456                           char __user *optval, int __user *optlen)
1457 {
1458         struct sock *sk = sock->sk;
1459         struct xdp_sock *xs = xdp_sk(sk);
1460         int len;
1461
1462         if (level != SOL_XDP)
1463                 return -ENOPROTOOPT;
1464
1465         if (get_user(len, optlen))
1466                 return -EFAULT;
1467         if (len < 0)
1468                 return -EINVAL;
1469
1470         switch (optname) {
1471         case XDP_STATISTICS:
1472         {
1473                 struct xdp_statistics stats = {};
1474                 bool extra_stats = true;
1475                 size_t stats_size;
1476
1477                 if (len < sizeof(struct xdp_statistics_v1)) {
1478                         return -EINVAL;
1479                 } else if (len < sizeof(stats)) {
1480                         extra_stats = false;
1481                         stats_size = sizeof(struct xdp_statistics_v1);
1482                 } else {
1483                         stats_size = sizeof(stats);
1484                 }
1485
1486                 mutex_lock(&xs->mutex);
1487                 stats.rx_dropped = xs->rx_dropped;
1488                 if (extra_stats) {
1489                         stats.rx_ring_full = xs->rx_queue_full;
1490                         stats.rx_fill_ring_empty_descs =
1491                                 xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1492                         stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1493                 } else {
1494                         stats.rx_dropped += xs->rx_queue_full;
1495                 }
1496                 stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1497                 stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1498                 mutex_unlock(&xs->mutex);
1499
1500                 if (copy_to_user(optval, &stats, stats_size))
1501                         return -EFAULT;
1502                 if (put_user(stats_size, optlen))
1503                         return -EFAULT;
1504
1505                 return 0;
1506         }
1507         case XDP_MMAP_OFFSETS:
1508         {
1509                 struct xdp_mmap_offsets off;
1510                 struct xdp_mmap_offsets_v1 off_v1;
1511                 bool flags_supported = true;
1512                 void *to_copy;
1513
1514                 if (len < sizeof(off_v1))
1515                         return -EINVAL;
1516                 else if (len < sizeof(off))
1517                         flags_supported = false;
1518
1519                 if (flags_supported) {
1520                         /* xdp_ring_offset is identical to xdp_ring_offset_v1
1521                          * except for the flags field added to the end.
1522                          */
1523                         xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1524                                                &off.rx);
1525                         xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1526                                                &off.tx);
1527                         xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1528                                                &off.fr);
1529                         xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1530                                                &off.cr);
1531                         off.rx.flags = offsetof(struct xdp_rxtx_ring,
1532                                                 ptrs.flags);
1533                         off.tx.flags = offsetof(struct xdp_rxtx_ring,
1534                                                 ptrs.flags);
1535                         off.fr.flags = offsetof(struct xdp_umem_ring,
1536                                                 ptrs.flags);
1537                         off.cr.flags = offsetof(struct xdp_umem_ring,
1538                                                 ptrs.flags);
1539
1540                         len = sizeof(off);
1541                         to_copy = &off;
1542                 } else {
1543                         xsk_enter_rxtx_offsets(&off_v1.rx);
1544                         xsk_enter_rxtx_offsets(&off_v1.tx);
1545                         xsk_enter_umem_offsets(&off_v1.fr);
1546                         xsk_enter_umem_offsets(&off_v1.cr);
1547
1548                         len = sizeof(off_v1);
1549                         to_copy = &off_v1;
1550                 }
1551
1552                 if (copy_to_user(optval, to_copy, len))
1553                         return -EFAULT;
1554                 if (put_user(len, optlen))
1555                         return -EFAULT;
1556
1557                 return 0;
1558         }
1559         case XDP_OPTIONS:
1560         {
1561                 struct xdp_options opts = {};
1562
1563                 if (len < sizeof(opts))
1564                         return -EINVAL;
1565
1566                 mutex_lock(&xs->mutex);
1567                 if (xs->zc)
1568                         opts.flags |= XDP_OPTIONS_ZEROCOPY;
1569                 mutex_unlock(&xs->mutex);
1570
1571                 len = sizeof(opts);
1572                 if (copy_to_user(optval, &opts, len))
1573                         return -EFAULT;
1574                 if (put_user(len, optlen))
1575                         return -EFAULT;
1576
1577                 return 0;
1578         }
1579         default:
1580                 break;
1581         }
1582
1583         return -EOPNOTSUPP;
1584 }
1585
1586 static int xsk_mmap(struct file *file, struct socket *sock,
1587                     struct vm_area_struct *vma)
1588 {
1589         loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1590         unsigned long size = vma->vm_end - vma->vm_start;
1591         struct xdp_sock *xs = xdp_sk(sock->sk);
1592         int state = READ_ONCE(xs->state);
1593         struct xsk_queue *q = NULL;
1594
1595         if (state != XSK_READY && state != XSK_BOUND)
1596                 return -EBUSY;
1597
1598         if (offset == XDP_PGOFF_RX_RING) {
1599                 q = READ_ONCE(xs->rx);
1600         } else if (offset == XDP_PGOFF_TX_RING) {
1601                 q = READ_ONCE(xs->tx);
1602         } else {
1603                 /* Matches the smp_wmb() in XDP_UMEM_REG */
1604                 smp_rmb();
1605                 if (offset == XDP_UMEM_PGOFF_FILL_RING)
1606                         q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
1607                                                  READ_ONCE(xs->pool->fq);
1608                 else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
1609                         q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
1610                                                  READ_ONCE(xs->pool->cq);
1611         }
1612
1613         if (!q)
1614                 return -EINVAL;
1615
1616         /* Matches the smp_wmb() in xsk_init_queue */
1617         smp_rmb();
1618         if (size > q->ring_vmalloc_size)
1619                 return -EINVAL;
1620
1621         return remap_vmalloc_range(vma, q->ring, 0);
1622 }
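/* User-space mapping sketch, assuming the rings were sized as in the
 * configuration sketch above: XDP_MMAP_OFFSETS reports the producer,
 * consumer, descriptor and flags offsets, and the mmap() page offset picks
 * the ring that xsk_mmap() exposes.
 */
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static void *xsk_map_rx_ring_example(int fd, __u32 nentries)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen = sizeof(off);

	if (getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen))
		return NULL;

	return mmap(NULL, off.rx.desc + nentries * sizeof(struct xdp_desc),
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		    XDP_PGOFF_RX_RING);
}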
1623
1624 static int xsk_notifier(struct notifier_block *this,
1625                         unsigned long msg, void *ptr)
1626 {
1627         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1628         struct net *net = dev_net(dev);
1629         struct sock *sk;
1630
1631         switch (msg) {
1632         case NETDEV_UNREGISTER:
1633                 mutex_lock(&net->xdp.lock);
1634                 sk_for_each(sk, &net->xdp.list) {
1635                         struct xdp_sock *xs = xdp_sk(sk);
1636
1637                         mutex_lock(&xs->mutex);
1638                         if (xs->dev == dev) {
1639                                 sk->sk_err = ENETDOWN;
1640                                 if (!sock_flag(sk, SOCK_DEAD))
1641                                         sk_error_report(sk);
1642
1643                                 xsk_unbind_dev(xs);
1644
1645                                 /* Clear device references. */
1646                                 xp_clear_dev(xs->pool);
1647                         }
1648                         mutex_unlock(&xs->mutex);
1649                 }
1650                 mutex_unlock(&net->xdp.lock);
1651                 break;
1652         }
1653         return NOTIFY_DONE;
1654 }
1655
1656 static struct proto xsk_proto = {
1657         .name =         "XDP",
1658         .owner =        THIS_MODULE,
1659         .obj_size =     sizeof(struct xdp_sock),
1660 };
1661
1662 static const struct proto_ops xsk_proto_ops = {
1663         .family         = PF_XDP,
1664         .owner          = THIS_MODULE,
1665         .release        = xsk_release,
1666         .bind           = xsk_bind,
1667         .connect        = sock_no_connect,
1668         .socketpair     = sock_no_socketpair,
1669         .accept         = sock_no_accept,
1670         .getname        = sock_no_getname,
1671         .poll           = xsk_poll,
1672         .ioctl          = sock_no_ioctl,
1673         .listen         = sock_no_listen,
1674         .shutdown       = sock_no_shutdown,
1675         .setsockopt     = xsk_setsockopt,
1676         .getsockopt     = xsk_getsockopt,
1677         .sendmsg        = xsk_sendmsg,
1678         .recvmsg        = xsk_recvmsg,
1679         .mmap           = xsk_mmap,
1680 };
1681
1682 static void xsk_destruct(struct sock *sk)
1683 {
1684         struct xdp_sock *xs = xdp_sk(sk);
1685
1686         if (!sock_flag(sk, SOCK_DEAD))
1687                 return;
1688
1689         if (!xp_put_pool(xs->pool))
1690                 xdp_put_umem(xs->umem, !xs->pool);
1691 }
1692
1693 static int xsk_create(struct net *net, struct socket *sock, int protocol,
1694                       int kern)
1695 {
1696         struct xdp_sock *xs;
1697         struct sock *sk;
1698
1699         if (!ns_capable(net->user_ns, CAP_NET_RAW))
1700                 return -EPERM;
1701         if (sock->type != SOCK_RAW)
1702                 return -ESOCKTNOSUPPORT;
1703
1704         if (protocol)
1705                 return -EPROTONOSUPPORT;
1706
1707         sock->state = SS_UNCONNECTED;
1708
1709         sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
1710         if (!sk)
1711                 return -ENOBUFS;
1712
1713         sock->ops = &xsk_proto_ops;
1714
1715         sock_init_data(sock, sk);
1716
1717         sk->sk_family = PF_XDP;
1718
1719         sk->sk_destruct = xsk_destruct;
1720
1721         sock_set_flag(sk, SOCK_RCU_FREE);
1722
1723         xs = xdp_sk(sk);
1724         xs->state = XSK_READY;
1725         mutex_init(&xs->mutex);
1726         spin_lock_init(&xs->rx_lock);
1727
1728         INIT_LIST_HEAD(&xs->map_list);
1729         spin_lock_init(&xs->map_list_lock);
1730
1731         mutex_lock(&net->xdp.lock);
1732         sk_add_node_rcu(sk, &net->xdp.list);
1733         mutex_unlock(&net->xdp.lock);
1734
1735         sock_prot_inuse_add(net, &xsk_proto, 1);
1736
1737         return 0;
1738 }
1739
1740 static const struct net_proto_family xsk_family_ops = {
1741         .family = PF_XDP,
1742         .create = xsk_create,
1743         .owner  = THIS_MODULE,
1744 };
1745
1746 static struct notifier_block xsk_netdev_notifier = {
1747         .notifier_call  = xsk_notifier,
1748 };
1749
1750 static int __net_init xsk_net_init(struct net *net)
1751 {
1752         mutex_init(&net->xdp.lock);
1753         INIT_HLIST_HEAD(&net->xdp.list);
1754         return 0;
1755 }
1756
1757 static void __net_exit xsk_net_exit(struct net *net)
1758 {
1759         WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
1760 }
1761
1762 static struct pernet_operations xsk_net_ops = {
1763         .init = xsk_net_init,
1764         .exit = xsk_net_exit,
1765 };
1766
1767 static int __init xsk_init(void)
1768 {
1769         int err, cpu;
1770
1771         err = proto_register(&xsk_proto, 0 /* no slab */);
1772         if (err)
1773                 goto out;
1774
1775         err = sock_register(&xsk_family_ops);
1776         if (err)
1777                 goto out_proto;
1778
1779         err = register_pernet_subsys(&xsk_net_ops);
1780         if (err)
1781                 goto out_sk;
1782
1783         err = register_netdevice_notifier(&xsk_netdev_notifier);
1784         if (err)
1785                 goto out_pernet;
1786
1787         for_each_possible_cpu(cpu)
1788                 INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
1789         return 0;
1790
1791 out_pernet:
1792         unregister_pernet_subsys(&xsk_net_ops);
1793 out_sk:
1794         sock_unregister(PF_XDP);
1795 out_proto:
1796         proto_unregister(&xsk_proto);
1797 out:
1798         return err;
1799 }
1800
1801 fs_initcall(xsk_init);