// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 */
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>
#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

/* Separating two types of XDP xmit */
#define VETH_XDP_TX		BIT(0)
#define VETH_XDP_REDIR		BIT(1)

#define VETH_XDP_TX_BULK_SIZE	16
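
/* The per-queue ptr_ring carries two kinds of pointers: plain sk_buffs
 * produced by veth_xmit(), and xdp_frames produced by veth_xdp_xmit().
 * An xdp_frame pointer is tagged by setting VETH_XDP_FLAG (bit 0, which
 * is always clear in a real pointer due to alignment), so the consumer
 * can tell the two apart. Illustrative round-trip (not driver code):
 *
 *	void *ptr = veth_xdp_to_ptr(frame);	// tag set
 *	veth_is_xdp_frame(ptr);			// true
 *	frame = veth_ptr_to_xdp(ptr);		// tag cleared again
 */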

struct veth_rq_stats {
	u64			xdp_packets;
	u64			xdp_bytes;
	u64			xdp_drops;
	struct u64_stats_sync	syncp;
};

struct veth_rq {
	struct napi_struct	xdp_napi;
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};

struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_rq_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};
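
/* With these keys plus the per-queue descriptors above, the counters
 * appear in `ethtool -S <dev>` output roughly as follows (sketch only;
 * the exact names come from veth_get_strings() below):
 *
 *	peer_ifindex: ...
 *	rx_queue_0_xdp_packets: ...
 *	rx_queue_0_xdp_bytes: ...
 *	rx_queue_0_xdp_drops: ...
 */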

static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	char *p = (char *)buf;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "rx_queue_%u_%.11s",
					 i, veth_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues;
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)rq_stats;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}
}

static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
};

/* general routines */

static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static void *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}

static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}

static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool rcv_xdp = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];
		rcv_xdp = rcu_access_pointer(rq->xdp_prog);
		if (rcv_xdp)
			skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
		if (!rcv_xdp) {
			struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);

			u64_stats_update_begin(&stats->syncp);
			stats->bytes += length;
			stats->packets++;
			u64_stats_update_end(&stats->syncp);
		}
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (rcv_xdp)
		__veth_xdp_flush(rq);

	rcu_read_unlock();
	return NETDEV_TX_OK;
}
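
/* A veth pair is normally created and wired up from userspace, e.g.
 * with iproute2 (illustrative only; device and namespace names are
 * arbitrary):
 *
 *	ip link add veth0 type veth peer name veth1
 *	ip link set veth1 netns <ns>
 *
 * Every packet transmitted on one device is handed by veth_xmit()
 * straight to its peer: into the peer's ptr_ring when the peer has an
 * XDP program attached, otherwise via netif_rx().
 */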

static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int cpu;

	result->packets = 0;
	result->bytes = 0;
	for_each_possible_cpu(cpu) {
		struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->packets += packets;
		result->bytes += bytes;
	}
	return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_rq_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->xdp_packets = 0;
	result->xdp_bytes = 0;
	result->xdp_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		u64 packets, bytes, drops;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->xdp_packets;
			bytes = stats->xdp_bytes;
			drops = stats->xdp_drops;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->xdp_drops += drops;
	}
}

static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_rq_stats rx;
	struct pcpu_lstats tx;

	tot->tx_dropped = veth_stats_tx(&tx, dev);
	tot->tx_bytes = tx.bytes;
	tot->tx_packets = tx.packets;

	veth_stats_rx(&rx, dev);
	tot->rx_dropped = rx.xdp_drops;
	tot->rx_bytes = rx.xdp_bytes;
	tot->rx_packets = rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		tot->rx_dropped += veth_stats_tx(&tx, peer);
		tot->rx_bytes += tx.bytes;
		tot->rx_packets += tx.packets;

		veth_stats_rx(&rx, peer);
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
				      int buflen)
{
	struct sk_buff *skb;

	if (!buflen) {
		buflen = SKB_DATA_ALIGN(headroom + len) +
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}
	skb = build_skb(head, buflen);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}
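
/* Queue selection for XDP transmit is a plain modulo of the current
 * CPU, so frames sent from a given CPU always land in the same peer
 * rq (and hence the same NAPI instance and ptr_ring).
 */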

static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames, u32 flags)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *rcv;
	int i, ret, drops = n;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		goto drop;
	}

	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		ret = -ENXIO;
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
	 * side. This means an XDP program is loaded on the peer and the peer
	 * device is up.
	 */
	if (!rcu_access_pointer(rq->xdp_prog)) {
		ret = -ENXIO;
		goto drop;
	}

	drops = 0;
	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr))) {
			xdp_return_frame_rx_napi(frame);
			drops++;
		}
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	if (likely(!drops))
		return n;

	ret = n - drops;
drop:
	atomic64_add(drops, &priv->dropped);

	return ret;
}

static void veth_xdp_flush_bq(struct net_device *dev, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0;

	sent = veth_xdp_xmit(dev, bq->count, bq->q, 0);
	if (sent < 0) {
		err = sent;
		sent = 0;
		for (i = 0; i < bq->count; i++)
			xdp_return_frame(bq->q[i]);
	}
	trace_xdp_bulk_tx(dev, sent, bq->count - sent, err);

	bq->count = 0;
}

static void veth_xdp_flush(struct net_device *dev, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *rcv;
	struct veth_rq *rq;

	rcu_read_lock();
	veth_xdp_flush_bq(dev, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* xdp_ring is initialized on receive side? */
	if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rq);
out:
	rcu_read_unlock();
}

static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = convert_to_xdp_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(dev, bq);

	bq->q[bq->count++] = frame;

	return 0;
}
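
/* XDP_TX frames are not produced into the peer ring one at a time;
 * they are staged in the per-poll bulk queue above and flushed either
 * when it fills up (VETH_XDP_TX_BULK_SIZE) or at the end of the NAPI
 * cycle via veth_xdp_flush(), amortizing the ring's producer lock
 * across the whole batch.
 */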

static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
					struct xdp_frame *frame,
					unsigned int *xdp_xmit,
					struct veth_xdp_tx_bq *bq)
{
	void *hard_start = frame->data - frame->headroom;
	void *head = hard_start - sizeof(struct xdp_frame);
	int len = frame->len, delta = 0;
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;
	unsigned int headroom;
	struct sk_buff *skb;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp.data_hard_start = hard_start;
		xdp.data = frame->data;
		xdp.data_end = frame->data + frame->len;
		xdp.data_meta = frame->data - frame->metasize;
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			delta = frame->data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.data_hard_start = head;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				goto err_xdp;
			}
			*xdp_xmit |= VETH_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.data_hard_start = head;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				goto err_xdp;
			}
			*xdp_xmit |= VETH_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			/* fall through */
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
	skb = veth_build_skb(head, headroom, len, 0);
	if (!skb) {
		xdp_return_frame(frame);
		goto err;
	}

	xdp_release_frame(frame);
	xdp_scrub_frame(frame);
	skb->protocol = eth_type_trans(skb, rq->dev);
err:
	return skb;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}

static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
					unsigned int *xdp_xmit,
					struct veth_xdp_tx_bq *bq)
{
	u32 pktlen, headroom, act, metalen;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	skb_orphan(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head,
				      VETH_XDP_HEADROOM + mac_len, skb->len,
				      PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	xdp.data_hard_start = skb->head;
	xdp.data = skb_mac_header(skb);
	xdp.data_end = xdp.data + pktlen;
	xdp.data_meta = xdp.data;
	xdp.rxq = &rq->xdp_rxq;
	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			goto err_xdp;
		}
		*xdp_xmit |= VETH_XDP_TX;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
			goto err_xdp;
		*xdp_xmit |= VETH_XDP_REDIR;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
		goto drop;
	}
	rcu_read_unlock();

	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off);
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}
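
/* Sketch of the skb path above: an skb can be handed to the XDP
 * program in place only if it is linear, exclusively owned, and has at
 * least XDP_PACKET_HEADROOM of headroom. Otherwise the payload is
 * copied into a freshly allocated page laid out like an XDP buffer.
 * Packets too large for one page (minus headroom and skb_shared_info)
 * cannot be made to fit and are dropped while a program is attached.
 */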

static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit,
			struct veth_xdp_tx_bq *bq)
{
	int i, done = 0, drops = 0, bytes = 0;

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);
		unsigned int xdp_xmit_one = 0;
		struct sk_buff *skb;

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			bytes += frame->len;
			skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one, bq);
		} else {
			skb = ptr;
			bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one, bq);
		}
		*xdp_xmit |= xdp_xmit_one;

		if (skb)
			napi_gro_receive(&rq->xdp_napi, skb);
		else if (!xdp_xmit_one)
			drops++;

		done++;
	}

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.xdp_packets += done;
	rq->stats.xdp_bytes += bytes;
	rq->stats.xdp_drops += drops;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}

static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	unsigned int xdp_xmit = 0;
	struct veth_xdp_tx_bq bq;
	int done;

	bq.count = 0;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &xdp_xmit, &bq);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (xdp_xmit & VETH_XDP_TX)
		veth_xdp_flush(rq->dev, &bq);
	if (xdp_xmit & VETH_XDP_REDIR)
		xdp_do_flush_map();
	xdp_clear_return_frame_no_direct();

	return done;
}
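
/* The rx_notify_masked handshake: __veth_xdp_flush() produces to the
 * ptr_ring, issues smp_mb(), then schedules NAPI only if the mask is
 * clear; veth_poll() clears the mask with smp_store_mb() and then
 * re-checks the ring. The paired barriers guarantee that either the
 * producer sees the mask cleared and reschedules, or the poller sees
 * the new entry on its final ring check, so no packet is stranded.
 */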

static int veth_napi_add(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
		napi_enable(&rq->xdp_napi);
	}

	return 0;
err_xdp_ring:
	for (i--; i >= 0; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}
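
/* Note the two-pass setup: every xdp_ring is initialized before any
 * NAPI instance is enabled, so a poller can never observe a
 * half-initialized ring on a neighbouring queue.
 */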

static void veth_napi_del(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_disable(&rq->xdp_napi);
		napi_hash_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_del(&rq->xdp_napi);
		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}

static int veth_enable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			struct veth_rq *rq = &priv->rq[i];

			err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i);
			if (err < 0)
				goto err_rxq_reg;

			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err < 0)
				goto err_reg_mem;

			/* Save original mem info as it can be overwritten */
			rq->xdp_mem = rq->xdp_rxq.mem;
		}

		err = veth_napi_add(dev);
		if (err)
			goto err_rxq_reg;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);

	return 0;
err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= 0; i--)
		xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);

	return err;
}

static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
	veth_napi_del(dev);
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);
	}
}
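
/* Teardown mirrors setup in reverse: clear the program pointers first
 * so no new frames are queued, disable NAPI and drain the rings
 * (veth_napi_del), and only then unregister the rxq info with its
 * original memory model restored.
 */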

static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);

	return 0;
}

static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++) {
		priv->rq[i].dev = dev;
		u64_stats_init(&priv->rq[i].stats.syncp);
	}

	return 0;
}

static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
	int err;

	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	err = veth_alloc_queues(dev);
	if (err) {
		free_percpu(dev->lstats);
		return err;
	}

	return 0;
}

static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
	free_percpu(dev->lstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one.
	 * Since it has nothing to do with disabling irqs, we are guaranteed
	 * never to have pending data when we poll for it, so
	 * there is nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}

	return features;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}
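
/* Both peers advertise the maximum of the two requested headrooms, so
 * whichever side builds an skb is hinted to reserve enough room for
 * the other side's XDP program (IFF_PHONY_HEADROOM is set in
 * veth_setup() so upper devices propagate their requests here).
 */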

static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct bpf_prog *old_prog;
	struct net_device *peer;
	unsigned int max_mtu;
	int err;

	old_prog = priv->_xdp_prog;
	priv->_xdp_prog = prog;
	peer = rtnl_dereference(priv->peer);

	if (prog) {
		if (!peer) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
			err = -ENOTCONN;
			goto err;
		}

		max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
			  peer->hard_header_len -
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (peer->mtu > max_mtu) {
			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
			err = -ERANGE;
			goto err;
		}

		if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
			NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
			err = -ENOSPC;
			goto err;
		}

		if (dev->flags & IFF_UP) {
			err = veth_enable_xdp(dev);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
				goto err;
			}
		}

		if (!old_prog) {
			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
			peer->max_mtu = max_mtu;
		}
	}

	if (old_prog) {
		if (!prog) {
			if (dev->flags & IFF_UP)
				veth_disable_xdp(dev);

			if (peer) {
				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
				peer->max_mtu = ETH_MAX_MTU;
			}
		}
		bpf_prog_put(old_prog);
	}

	if ((!!old_prog ^ !!prog) && peer)
		netdev_update_features(peer);

	return 0;
err:
	priv->_xdp_prog = old_prog;

	return err;
}
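
/* An XDP program reaches this path via the netlink XDP_SETUP_PROG
 * command, e.g. with iproute2 (illustrative only; object file and
 * section names are arbitrary):
 *
 *	ip link set dev veth0 xdp obj prog.o sec xdp
 *
 * Note that the checks above run against the peer: it is the peer's
 * MTU and tx queue count that constrain frames arriving here.
 */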

static u32 veth_xdp_query(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = priv->_xdp_prog;
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return veth_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = veth_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops veth_netdev_ops = {
	.ndo_init		= veth_dev_init,
	.ndo_open		= veth_open,
	.ndo_stop		= veth_close,
	.ndo_start_xmit		= veth_xmit,
	.ndo_get_stats64	= veth_get_stats64,
	.ndo_set_rx_mode	= veth_set_multicast_list,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= veth_poll_controller,
#endif
	.ndo_get_iflink		= veth_get_iflink,
	.ndo_fix_features	= veth_fix_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= veth_set_rx_headroom,
	.ndo_bpf		= veth_xdp,
	.ndo_xdp_xmit		= veth_xdp_xmit,
};

#define VETH_FEATURES	(NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
			 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
			 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
			 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)

static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->needs_free_netdev = true;
	dev->priv_destructor = veth_dev_free;
	dev->max_mtu = ETH_MAX_MTU;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}

/*
 * netlink interface
 */

static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;

static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	peer->gso_max_size = dev->gso_max_size;
	peer->gso_max_segs = dev->gso_max_segs;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note that, since we've registered the new device, the dev's name
	 * should be re-allocated
	 */
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
	.get_link_net	= veth_get_link_net,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);