// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>

#include "igc.h"
#include "igc_xdp.h"

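/* Install or remove the netdev's XDP program. The interface is brought
 * down around the swap so the new program takes effect on a clean ring
 * state, and the netdev XDP feature flags are kept in sync.
 */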
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
		     struct netlink_ext_ack *extack)
{
	struct net_device *dev = adapter->netdev;
	bool if_running = netif_running(dev);
	struct bpf_prog *old_prog;

	if (dev->mtu > ETH_DATA_LEN) {
		/* For now, the driver doesn't support XDP functionality with
		 * jumbo frames, so we return an error.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
		return -EOPNOTSUPP;
	}

	if (if_running)
		igc_close(dev);

	old_prog = xchg(&adapter->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (prog)
		xdp_features_set_redirect_target(dev, true);
	else
		xdp_features_clear_redirect_target(dev);

	if (if_running)
		igc_open(dev);

	return 0;
}

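/* Enable an AF_XDP zero-copy buffer pool on one Rx/Tx queue pair: validate
 * the queue id and frame size, DMA-map the pool, and flag both rings for
 * zero-copy, quiescing them around the switch if the interface is up.
 */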
static int igc_xdp_enable_pool(struct igc_adapter *adapter,
			       struct xsk_buff_pool *pool, u16 queue_id)
{
	struct net_device *ndev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	struct igc_ring *rx_ring, *tx_ring;
	struct napi_struct *napi;
	bool needs_reset;
	u32 frame_size;
	int err;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	frame_size = xsk_pool_get_rx_frame_size(pool);
	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
		/* When XDP is enabled, the driver doesn't support frames that
		 * span over multiple buffers. To avoid that, we check if the
		 * xsk frame size is big enough to fit the max ethernet frame
		 * size plus vlan double tagging.
		 */
		return -EOPNOTSUPP;
	}

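	/* Make the pool's buffers DMA-addressable by the device before any
	 * of them are posted to the rings.
	 */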
	err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
	if (err) {
		netdev_err(ndev, "Failed to map xsk pool\n");
		return err;
	}

	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rx_ring = adapter->rx_ring[queue_id];
	tx_ring = adapter->tx_ring[queue_id];
	/* Rx and Tx rings share the same napi context. */
	napi = &rx_ring->q_vector->napi;

	if (needs_reset) {
		igc_disable_rx_ring(rx_ring);
		igc_disable_tx_ring(tx_ring);
		napi_disable(napi);
	}

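	/* Mark both rings as zero-copy so they pick buffers from the XSK
	 * pool once they are (re)enabled.
	 */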
	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

	if (needs_reset) {
		napi_enable(napi);
		igc_enable_rx_ring(rx_ring);
		igc_enable_tx_ring(tx_ring);

		err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
		if (err) {
			xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

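/* Tear down the AF_XDP zero-copy pool on one queue pair: unmap its DMA
 * mappings and clear the zero-copy ring flags, again quiescing the rings
 * and their shared napi context if the interface is up.
 */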
static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
{
	struct igc_ring *rx_ring, *tx_ring;
	struct xsk_buff_pool *pool;
	struct napi_struct *napi;
	bool needs_reset;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
	if (!pool)
		return -EINVAL;

	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rx_ring = adapter->rx_ring[queue_id];
	tx_ring = adapter->tx_ring[queue_id];
	/* Rx and Tx rings share the same napi context. */
	napi = &rx_ring->q_vector->napi;

	if (needs_reset) {
		igc_disable_rx_ring(rx_ring);
		igc_disable_tx_ring(tx_ring);
		napi_disable(napi);
	}

	xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

	if (needs_reset) {
		napi_enable(napi);
		igc_enable_rx_ring(rx_ring);
		igc_enable_tx_ring(tx_ring);
	}

	return 0;
}

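/* ndo_bpf XDP_SETUP_XSK_POOL entry point: a non-NULL pool binds it to the
 * given queue, a NULL pool unbinds whatever is currently attached.
 */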
int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
		       u16 queue_id)
{
	return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
		      igc_xdp_disable_pool(adapter, queue_id);
}