// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 */
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>
#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE	16
#define VETH_XDP_BATCH		16
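
/* VETH_RING_SIZE is the fixed capacity of each per-queue xdp_ring,
 * VETH_XDP_TX_BULK_SIZE bounds the XDP_TX bulk queue flushed by
 * veth_xdp_flush_bq(), and VETH_XDP_BATCH caps how many xdp_frames are
 * converted to skbs in one bulk by veth_xdp_rcv_bulk_skb().
 */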
struct veth_stats {
	u64	rx_drops;
	/* xdp */
	u64	xdp_packets;
	u64	xdp_bytes;
	u64	xdp_redirect;
	u64	xdp_drops;
	u64	xdp_tx;
	u64	xdp_tx_err;
	u64	peer_tq_xdp_xmit;
	u64	peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
	struct veth_stats	vs;
	struct u64_stats_sync	syncp;
};
struct veth_rq {
	struct napi_struct	xdp_napi;
	struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};
struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};
struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};
struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)
static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
	{ "drops",		VETH_RQ_STAT(rx_drops) },
	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)
static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)
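
/* Transmit-side xdp stats live on the *peer's* receive queues (hence the
 * peer_tq_ prefix): the xmit side has no NAPI of its own, so
 * veth_get_ethtool_stats() below folds the peer's per-rq counters back
 * into this device's tx_queue_* strings.
 */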
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};
static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	char *p = (char *)buf;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%.18s",
					 i, veth_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < dev->real_num_tx_queues; i++) {
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%.18s",
					 i, veth_tq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}
static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
	default:
		return -EOPNOTSUPP;
	}
}
static void veth_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)&rq_stats->vs;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}

	if (!peer)
		return;

	rcv_priv = netdev_priv(peer);
	for (i = 0; i < peer->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
		const void *base = (void *)&rq_stats->vs;
		unsigned int start, tx_idx = idx;
		size_t offset;

		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				offset = veth_tq_stats_desc[j].offset;
				data[tx_idx + j] += *(u64 *)(base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
	}
}
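
/* The u64_stats_fetch_begin_irq()/retry loops above are the usual
 * seqcount pattern for 64-bit counters: re-read the snapshot until the
 * writer side (u64_stats_update_begin/end) has not raced with us. On
 * 64-bit SMP this compiles down to plain loads; the retry only matters
 * where 64-bit counters cannot be read atomically.
 */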
static void veth_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	channels->tx_count = dev->real_num_tx_queues;
	channels->rx_count = dev->real_num_rx_queues;
	channels->max_tx = dev->real_num_tx_queues;
	channels->max_rx = dev->real_num_rx_queues;
	channels->combined_count = min(dev->real_num_rx_queues, dev->real_num_tx_queues);
	channels->max_combined = min(dev->real_num_rx_queues, dev->real_num_tx_queues);
}
static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_channels		= veth_get_channels,
};
/* general routines */

static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}
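
/* Entries queued on xdp_ring are either sk_buff or xdp_frame pointers.
 * The helpers above multiplex them by tagging bit 0 of the pointer
 * (VETH_XDP_FLAG): both object types are at least word-aligned, so the
 * low bit of a valid pointer is always clear and can safely carry the
 * type information.
 */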
static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}
static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}
static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}
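
/* Note on the expression above: __dev_forward_skb() returns 0 on success
 * and NET_RX_DROP on failure, so the GCC "?:" extension propagates a
 * drop immediately; only successfully forwarded skbs are handed to
 * either the peer's NAPI ring (XDP/GRO path) or plain netif_rx().
 */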
/* Return true if the specified skb has a chance of GRO aggregation.
 * Don't strive for accuracy, but try to avoid GRO overhead in the most
 * common scenarios.
 * When XDP is enabled, all traffic is considered eligible, as the xmit
 * device has TSO off.
 * When TSO is enabled on the xmit device, we are likely interested only
 * in UDP aggregation, so explicitly check for that if the skb is suspected
 * to belong to locally generated UDP traffic (the sock_wfree destructor
 * is used by UDP, ICMP and XDP sockets).
 */
static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
					 const struct net_device *rcv,
					 const struct sk_buff *skb)
{
	return !(dev->features & NETIF_F_ALL_TSO) ||
		(skb->destructor == sock_wfree &&
		 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
}
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool use_napi = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];

		/* The napi pointer is available when an XDP program is
		 * attached or when GRO is enabled.
		 * Don't bother with napi/GRO if the skb can't be aggregated.
		 */
		use_napi = rcu_access_pointer(rq->napi) &&
			   veth_skb_is_eligible_for_gro(dev, rcv, skb);
		skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
		if (!use_napi)
			dev_lstats_add(dev, length);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (use_napi)
		__veth_xdp_flush(rq);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}
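
/* A subtlety in veth_xmit(): when the skb is delivered through the
 * peer's NAPI ring (use_napi), byte/packet accounting is deferred to the
 * receive-side veth_stats instead of dev_lstats_add(), so the packet is
 * never counted twice; veth_get_stats64() folds both sources together.
 */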
static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
	struct veth_priv *priv = netdev_priv(dev);

	dev_lstats_read(dev, packets, bytes);
	return atomic64_read(&priv->dropped);
}
static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->peer_tq_xdp_xmit_err = 0;
	result->xdp_packets = 0;
	result->xdp_tx_err = 0;
	result->xdp_bytes = 0;
	result->rx_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
			xdp_tx_err = stats->vs.xdp_tx_err;
			packets = stats->vs.xdp_packets;
			bytes = stats->vs.xdp_bytes;
			drops = stats->vs.rx_drops;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
		result->xdp_tx_err += xdp_tx_err;
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->rx_drops += drops;
	}
}
static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_stats rx;
	u64 packets, bytes;

	tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
	tot->tx_bytes = bytes;
	tot->tx_packets = packets;

	veth_stats_rx(&rx, dev);
	tot->tx_dropped += rx.xdp_tx_err;
	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
	tot->rx_bytes = rx.xdp_bytes;
	tot->rx_packets = rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		veth_stats_tx(peer, &packets, &bytes);
		tot->rx_bytes += bytes;
		tot->rx_packets += packets;

		veth_stats_rx(&rx, peer);
		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
		tot->rx_dropped += rx.xdp_tx_err;
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}
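
/* Stats symmetry, spelled out: this device's RX totals are its own NAPI
 * (xdp) counters plus the peer's plain lstats TX, while its TX totals
 * fold in the peer's NAPI counters. Each side only ever writes its local
 * counters; the pair-wide view is reconstructed here at read time.
 */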
/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
				      int buflen)
{
	struct sk_buff *skb;

	skb = build_skb(head, buflen);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}
static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}
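
/* Queue selection for the XDP xmit path is by current CPU: callers run
 * in NAPI/softirq context where smp_processor_id() is stable, and
 * keeping each CPU mostly on its own ring reduces contention on the
 * ptr_ring producer lock.
 */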
static struct net_device *veth_peer_dev(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	/* Callers must be under RCU read side. */
	return rcu_dereference(priv->peer);
}
static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames,
			 u32 flags, bool ndo_xmit)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	int i, ret = -ENXIO, nxmit = 0;
	struct net_device *rcv;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* The napi pointer is set if NAPI is enabled, which ensures that
	 * xdp_ring is initialized on receive side and the peer device is up.
	 */
	if (!rcu_access_pointer(rq->napi))
		goto out;

	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr)))
			break;
		nxmit++;
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	ret = nxmit;
	if (ndo_xmit) {
		u64_stats_update_begin(&rq->stats.syncp);
		rq->stats.vs.peer_tq_xdp_xmit += nxmit;
		rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
		u64_stats_update_end(&rq->stats.syncp);
	}

out:
	rcu_read_unlock();

	return ret;
}
static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	int err;

	err = veth_xdp_xmit(dev, n, frames, flags, true);
	if (err < 0) {
		struct veth_priv *priv = netdev_priv(dev);

		atomic64_add(n, &priv->dropped);
	}

	return err;
}
static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0, drops;

	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
	if (sent < 0) {
		err = sent;
		sent = 0;
	}

	for (i = sent; unlikely(i < bq->count); i++)
		xdp_return_frame(bq->q[i]);

	drops = bq->count - sent;
	trace_xdp_bulk_tx(rq->dev, sent, drops, err);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_tx += sent;
	rq->stats.vs.xdp_tx_err += drops;
	u64_stats_update_end(&rq->stats.syncp);

	bq->count = 0;
}
static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
	struct net_device *rcv;
	struct veth_rq *rcv_rq;

	rcu_read_lock();
	veth_xdp_flush_bq(rq, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* Is xdp_ring initialized on the receive side? */
	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rcv_rq);
out:
	rcu_read_unlock();
}
static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(rq, bq);

	bq->q[bq->count++] = frame;

	return 0;
}
static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
					  struct xdp_frame *frame,
					  struct veth_xdp_tx_bq *bq,
					  struct veth_stats *stats)
{
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp_convert_frame_to_buff(frame, &xdp);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			if (xdp_update_frame_from_buff(&xdp, frame))
				goto err_xdp;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_tx++;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_redirect++;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			stats->xdp_drops++;
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	return frame;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}
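
/* The orig_frame copy above matters: veth_xdp_tx() and xdp_do_redirect()
 * may invalidate the frame's metadata on failure, so the saved copy is
 * what gets handed back to the allocator on the error path.
 */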
/* the frames array contains at most VETH_XDP_BATCH entries */
static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
				  int n_xdpf, struct veth_xdp_tx_bq *bq,
				  struct veth_stats *stats)
{
	void *skbs[VETH_XDP_BATCH];
	int i;

	if (xdp_alloc_skb_bulk(skbs, n_xdpf,
			       GFP_ATOMIC | __GFP_ZERO) < 0) {
		for (i = 0; i < n_xdpf; i++)
			xdp_return_frame(frames[i]);
		stats->rx_drops += n_xdpf;

		return;
	}

	for (i = 0; i < n_xdpf; i++) {
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(frames[i], skb,
						 rq->dev);
		if (!skb) {
			xdp_return_frame(frames[i]);
			stats->rx_drops++;
			continue;
		}
		napi_gro_receive(&rq->xdp_napi, skb);
	}
}
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
					struct sk_buff *skb,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	u32 pktlen, headroom, act, metalen, frame_sz;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	skb_orphan_partial(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
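		/* Rough sizing example (assuming 4K pages): a 1514-byte
		 * frame needs SKB_DATA_ALIGN(256 + NET_IP_ALIGN + 1514)
		 * bytes plus the aligned skb_shared_info (a few hundred
		 * bytes), comfortably below PAGE_SIZE; frames that would
		 * not fit in a single page are dropped just below.
		 */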
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
				      skb->len, PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	/* The SKB "head" area always has tailroom for skb_shared_info */
	frame_sz = skb_end_pointer(skb) - skb->head;
	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);

	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_tx++;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_redirect++;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		stats->xdp_drops++;
		goto xdp_drop;
	}
	rcu_read_unlock();

	/* check if bpf_xdp_adjust_head was used */
	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off); /* positive on grow, negative on shrink */
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	stats->rx_drops++;
xdp_drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}
static int veth_xdp_rcv(struct veth_rq *rq, int budget,
			struct veth_xdp_tx_bq *bq,
			struct veth_stats *stats)
{
	int i, done = 0, n_xdpf = 0;
	void *xdpf[VETH_XDP_BATCH];

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			/* ndo_xdp_xmit */
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			stats->xdp_bytes += frame->len;
			frame = veth_xdp_rcv_one(rq, frame, bq, stats);
			if (frame) {
				/* XDP_PASS */
				xdpf[n_xdpf++] = frame;
				if (n_xdpf == VETH_XDP_BATCH) {
					veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
							      bq, stats);
					n_xdpf = 0;
				}
			}
		} else {
			/* ndo_start_xmit */
			struct sk_buff *skb = ptr;

			stats->xdp_bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
			if (skb)
				napi_gro_receive(&rq->xdp_napi, skb);
		}
		done++;
	}

	if (n_xdpf)
		veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_redirect += stats->xdp_redirect;
	rq->stats.vs.xdp_bytes += stats->xdp_bytes;
	rq->stats.vs.xdp_drops += stats->xdp_drops;
	rq->stats.vs.rx_drops += stats->rx_drops;
	rq->stats.vs.xdp_packets += done;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}
static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	struct veth_stats stats = {};
	struct veth_xdp_tx_bq bq;
	int done;

	bq.count = 0;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &bq, &stats);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (stats.xdp_tx > 0)
		veth_xdp_flush(rq, &bq);
	if (stats.xdp_redirect > 0)
		xdp_do_flush();
	xdp_clear_return_frame_no_direct();

	return done;
}
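
/* The smp_store_mb() above pairs with the smp_mb() in __veth_xdp_flush():
 * clearing rx_notify_masked must be visible before re-checking the ring,
 * otherwise a concurrent producer could see the mask still set, skip
 * napi_schedule(), and leave queued packets stranded.
 */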
static int __veth_napi_enable(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_enable(&rq->xdp_napi);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;

err_xdp_ring:
	for (i--; i >= 0; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}
static void veth_napi_del(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rcu_assign_pointer(priv->rq[i].napi, NULL);
		napi_disable(&rq->xdp_napi);
		__netif_napi_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}
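
/* The ordering in veth_napi_del() is deliberate: the napi pointers are
 * cleared and synchronize_net() waits out in-flight RCU readers before
 * the rings are torn down, so ptr_ring_cleanup() cannot race with
 * veth_xmit() or veth_xdp_xmit() producing on another CPU.
 */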
static bool veth_gro_requested(const struct net_device *dev)
{
	return !!(dev->wanted_features & NETIF_F_GRO);
}
static int veth_enable_xdp(struct net_device *dev)
{
	bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			struct veth_rq *rq = &priv->rq[i];

			if (!napi_already_on)
				netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
			err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
			if (err < 0)
				goto err_rxq_reg;

			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err < 0)
				goto err_reg_mem;

			/* Save original mem info as it can be overwritten */
			rq->xdp_mem = rq->xdp_rxq.mem;
		}

		if (!napi_already_on) {
			err = __veth_napi_enable(dev);
			if (err)
				goto err_rxq_reg;

			if (!veth_gro_requested(dev)) {
				/* user-space did not require GRO, but adding
				 * XDP is supposed to get GRO working
				 */
				dev->features |= NETIF_F_GRO;
				netdev_features_change(dev);
			}
		}
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;
err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= 0; i--) {
		struct veth_rq *rq = &priv->rq[i];

		xdp_rxq_info_unreg(&rq->xdp_rxq);
		if (!napi_already_on)
			netif_napi_del(&rq->xdp_napi);
	}

	return err;
}
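
/* veth_enable_xdp() is idempotent per device: rxq registration happens
 * only once (guarded by xdp_rxq_info_is_reg() on queue 0), while the
 * per-queue xdp_prog pointers are refreshed on every program swap.
 */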
static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);

	if (!netif_running(dev) || !veth_gro_requested(dev)) {
		veth_napi_del(dev);

		/* if user-space did not require GRO, it was only turned on
		 * when XDP was attached; clear it now
		 */
		if (!veth_gro_requested(dev) && netif_running(dev)) {
			dev->features &= ~NETIF_F_GRO;
			netdev_features_change(dev);
		}
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);
	}
}
static int veth_napi_enable(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
	}

	err = __veth_napi_enable(dev);
	if (err) {
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			struct veth_rq *rq = &priv->rq[i];

			netif_napi_del(&rq->xdp_napi);
		}
	}
	return err;
}
static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	} else if (veth_gro_requested(dev)) {
		err = veth_napi_enable(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	return 0;
}
static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);
	else if (veth_gro_requested(dev))
		veth_napi_del(dev);

	return 0;
}
static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}
static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++) {
		priv->rq[i].dev = dev;
		u64_stats_init(&priv->rq[i].stats.syncp);
	}

	return 0;
}
static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}
static int veth_dev_init(struct net_device *dev)
{
	int err;

	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	err = veth_alloc_queues(dev);
	if (err) {
		free_percpu(dev->lstats);
		return err;
	}

	return 0;
}
static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
	free_percpu(dev->lstats);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one.
	 * Since it has nothing to do with disabling irqs, we are guaranteed
	 * never to have pending data when we poll for it, so there is
	 * nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}
static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}
	if (priv->_xdp_prog)
		features |= NETIF_F_GRO;

	return features;
}
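
/* Feature coupling between the pair: a peer with XDP attached cannot
 * accept oversized GSO skbs (each frame must fit a single page for the
 * XDP buffer), so software GSO is masked here; conversely, a local XDP
 * program keeps GRO on so the NAPI receive path stays active.
 */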
static int veth_set_features(struct net_device *dev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	struct veth_priv *priv = netdev_priv(dev);
	int err;

	if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
		return 0;

	if (features & NETIF_F_GRO) {
		err = veth_napi_enable(dev);
		if (err)
			return err;
	} else {
		veth_napi_del(dev);
	}
	return 0;
}
static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}
static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct bpf_prog *old_prog;
	struct net_device *peer;
	unsigned int max_mtu;
	int err;

	old_prog = priv->_xdp_prog;
	priv->_xdp_prog = prog;
	peer = rtnl_dereference(priv->peer);

	if (prog) {
		if (!peer) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
			err = -ENOTCONN;
			goto err;
		}

		max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
			  peer->hard_header_len -
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (peer->mtu > max_mtu) {
			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
			err = -ERANGE;
			goto err;
		}

		if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
			NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
			err = -ENOSPC;
			goto err;
		}

		if (dev->flags & IFF_UP) {
			err = veth_enable_xdp(dev);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
				goto err;
			}
		}

		if (!old_prog) {
			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
			peer->max_mtu = max_mtu;
		}
	}

	if (old_prog) {
		if (!prog) {
			if (dev->flags & IFF_UP)
				veth_disable_xdp(dev);

			if (peer) {
				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
				peer->max_mtu = ETH_MAX_MTU;
			}
		}
		bpf_prog_put(old_prog);
	}

	if ((!!old_prog ^ !!prog) && peer)
		netdev_update_features(peer);

	return 0;
err:
	priv->_xdp_prog = old_prog;

	return err;
}
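
/* The max_mtu bound above mirrors the receive-path check in
 * veth_xdp_rcv_skb(): headroom + hard header + payload + skb_shared_info
 * must fit in one page, or the skb reallocation there would fail for
 * full-sized frames.
 */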
static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return veth_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops veth_netdev_ops = {
	.ndo_init            = veth_dev_init,
	.ndo_open            = veth_open,
	.ndo_stop            = veth_close,
	.ndo_start_xmit      = veth_xmit,
	.ndo_get_stats64     = veth_get_stats64,
	.ndo_set_rx_mode     = veth_set_multicast_list,
	.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= veth_poll_controller,
#endif
	.ndo_get_iflink		= veth_get_iflink,
	.ndo_fix_features	= veth_fix_features,
	.ndo_set_features	= veth_set_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= veth_set_rx_headroom,
	.ndo_bpf		= veth_xdp,
	.ndo_xdp_xmit		= veth_ndo_xdp_xmit,
	.ndo_get_peer_dev	= veth_peer_dev,
};
#define VETH_FEATURES	(NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
			 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
			 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
			 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)
static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->needs_free_netdev = true;
	dev->priv_destructor = veth_dev_free;
	dev->max_mtu = ETH_MAX_MTU;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}
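
/* NETIF_F_LLTX together with IFF_NO_QUEUE means veth_xmit() runs without
 * a qdisc or xmit lock; per-cpu lstats and the peer's ptr_ring absorb
 * concurrent senders instead.
 */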
static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}
static struct rtnl_link_ops veth_link_ops;

static void veth_disable_gro(struct net_device *dev)
{
	dev->features &= ~NETIF_F_GRO;
	dev->wanted_features &= ~NETIF_F_GRO;
	netdev_update_features(dev);
}
static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	peer->gso_max_size = dev->gso_max_size;
	peer->gso_max_segs = dev->gso_max_segs;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	/* keep GRO disabled by default to be consistent with the established
	 * veth behavior
	 */
	veth_disable_gro(peer);
	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note that, since we've already registered the peer device, dev's
	 * name may need to be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	veth_disable_gro(dev);
	return 0;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}
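
/* Creation order note: the peer is registered and configured first, so
 * every error path after that point must unwind it; only once both
 * netdevs exist are the RCU peer pointers tied together, which is what
 * finally lets the pair forward traffic.
 */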
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not to be freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};
static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}
static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
	.get_link_net	= veth_get_link_net,
};
static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);