// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>

#include "igc.h"
#include "igc_xdp.h"

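/**
 * igc_xdp_set_prog - attach or detach an XDP program
 * @adapter: board private structure
 * @prog: program to install, or NULL to remove the current one
 * @extack: netlink extended ack used to report errors to user space
 *
 * Returns 0 on success, negative errno on failure.
 */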
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
                     struct netlink_ext_ack *extack)
{
        struct net_device *dev = adapter->netdev;
        bool if_running = netif_running(dev);
        struct bpf_prog *old_prog;

        if (dev->mtu > ETH_DATA_LEN) {
                /* For now, the driver doesn't support XDP with jumbo
                 * frames, so we return an error.
                 */
                NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
                return -EOPNOTSUPP;
        }

        if (if_running)
                igc_close(dev);

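        /* Swap in the new program atomically and release our reference
         * on the old one, if there was any.
         */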
        old_prog = xchg(&adapter->xdp_prog, prog);
        if (old_prog)
                bpf_prog_put(old_prog);

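        /* Advertise XDP_REDIRECT target support only while a program
         * is attached.
         */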
        if (prog)
                xdp_features_set_redirect_target(dev, true);
        else
                xdp_features_clear_redirect_target(dev);

        if (if_running)
                igc_open(dev);

        return 0;
}

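/**
 * igc_xdp_enable_pool - enable AF_XDP zero-copy on a queue pair
 * @adapter: board private structure
 * @pool: XSK buffer pool to attach
 * @queue_id: Rx/Tx queue pair to run in zero-copy mode
 *
 * Returns 0 on success, negative errno on failure.
 */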
static int igc_xdp_enable_pool(struct igc_adapter *adapter,
                               struct xsk_buff_pool *pool, u16 queue_id)
{
        struct net_device *ndev = adapter->netdev;
        struct device *dev = &adapter->pdev->dev;
        struct igc_ring *rx_ring, *tx_ring;
        struct napi_struct *napi;
        bool needs_reset;
        u32 frame_size;
        int err;

        if (queue_id >= adapter->num_rx_queues ||
            queue_id >= adapter->num_tx_queues)
                return -EINVAL;

        frame_size = xsk_pool_get_rx_frame_size(pool);
        if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
                /* When XDP is enabled, the driver doesn't support frames
                 * that span multiple buffers. To avoid that, check that the
                 * xsk frame size is big enough to fit the maximum ethernet
                 * frame size plus double VLAN tagging.
                 */
                return -EOPNOTSUPP;
        }

        err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
        if (err) {
                netdev_err(ndev, "Failed to map xsk pool\n");
                return err;
        }

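        /* The rings only need to be quiesced and restarted in place when
         * the interface is up and already running the XDP datapath;
         * otherwise, the pool is picked up the next time the rings are
         * configured.
         */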
        needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

        rx_ring = adapter->rx_ring[queue_id];
        tx_ring = adapter->tx_ring[queue_id];
        /* Rx and Tx rings share the same napi context. */
        napi = &rx_ring->q_vector->napi;

        if (needs_reset) {
                igc_disable_rx_ring(rx_ring);
                igc_disable_tx_ring(tx_ring);
                napi_disable(napi);
        }

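        /* Flag both rings as zero-copy so the datapath sets up and uses
         * the XSK buffer pool from now on.
         */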
        set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
        set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

        if (needs_reset) {
                napi_enable(napi);
                igc_enable_rx_ring(rx_ring);
                igc_enable_tx_ring(tx_ring);

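                /* Kick NAPI on this queue so the Rx ring gets refilled
                 * from the newly mapped pool.
                 */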
                err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
                if (err) {
                        xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
                        return err;
                }
        }

        return 0;
}

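/**
 * igc_xdp_disable_pool - disable AF_XDP zero-copy on a queue pair
 * @adapter: board private structure
 * @queue_id: Rx/Tx queue pair to take out of zero-copy mode
 *
 * Returns 0 on success, negative errno on failure.
 */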
static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
{
        struct igc_ring *rx_ring, *tx_ring;
        struct xsk_buff_pool *pool;
        struct napi_struct *napi;
        bool needs_reset;

        if (queue_id >= adapter->num_rx_queues ||
            queue_id >= adapter->num_tx_queues)
                return -EINVAL;

        pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
        if (!pool)
                return -EINVAL;

        needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

        rx_ring = adapter->rx_ring[queue_id];
        tx_ring = adapter->tx_ring[queue_id];
        /* Rx and Tx rings share the same napi context. */
        napi = &rx_ring->q_vector->napi;

        if (needs_reset) {
                igc_disable_rx_ring(rx_ring);
                igc_disable_tx_ring(tx_ring);
                napi_disable(napi);
        }

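        /* Release the DMA mappings and drop the rings back to the regular
         * (copy-based) datapath.
         */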
        xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
        clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
        clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

        if (needs_reset) {
                napi_enable(napi);
                igc_enable_rx_ring(rx_ring);
                igc_enable_tx_ring(tx_ring);
        }

        return 0;
}

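/**
 * igc_xdp_setup_pool - attach or detach an XSK buffer pool
 * @adapter: board private structure
 * @pool: buffer pool to attach, or NULL to detach the current one
 * @queue_id: Rx/Tx queue pair affected
 *
 * Called from the driver's ndo_bpf handler for XDP_SETUP_XSK_POOL.
 *
 * Returns 0 on success, negative errno on failure.
 */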
int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
                       u16 queue_id)
{
        return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
                      igc_xdp_disable_pool(adapter, queue_id);
}