// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 * Copyright (C) 2008-2023, VMware, Inc. All Rights Reserved.
 * Maintained by: pv-drivers@vmware.com
 *
 */

#include "vmxnet3_int.h"
#include "vmxnet3_xdp.h"
static void
vmxnet3_xdp_exchange_program(struct vmxnet3_adapter *adapter,
                             struct bpf_prog *prog)
{
        rcu_assign_pointer(adapter->xdp_bpf_prog, prog);
}
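/*
 * Note: the matching read side runs in the RX handlers below, which
 * execute in NAPI context where RCU read-side guarantees hold.  A
 * minimal sketch of a reader (illustrative only, not driver code):
 *
 *      rcu_read_lock();
 *      prog = rcu_dereference(adapter->xdp_bpf_prog);
 *      if (prog)
 *              act = bpf_prog_run_xdp(prog, &xdp);
 *      rcu_read_unlock();
 */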
static inline struct vmxnet3_tx_queue *
vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
{
        struct vmxnet3_tx_queue *tq;
        int tq_number;
        int cpu;

        tq_number = adapter->num_tx_queues;
        cpu = smp_processor_id();
        if (likely(cpu < tq_number))
                tq = &adapter->tx_queue[cpu];
        else
                tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];

        return tq;
}
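/*
 * reciprocal_scale() maps the full u32 range onto [0, tq_number)
 * without a division, so CPUs beyond the queue count still get a
 * stable per-CPU queue assignment.
 */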
static int
vmxnet3_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf,
                struct netlink_ext_ack *extack)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        struct bpf_prog *new_bpf_prog = bpf->prog;
        struct bpf_prog *old_bpf_prog;
        bool need_update;
        bool running;
        int err;

        if (new_bpf_prog && netdev->mtu > VMXNET3_XDP_MAX_MTU) {
                NL_SET_ERR_MSG_FMT_MOD(extack, "MTU %u too large for XDP",
                                       netdev->mtu);
                return -EOPNOTSUPP;
        }

        if (adapter->netdev->features & NETIF_F_LRO) {
                NL_SET_ERR_MSG_MOD(extack, "LRO is not supported with XDP");
                adapter->netdev->features &= ~NETIF_F_LRO;
        }

        old_bpf_prog = rcu_dereference(adapter->xdp_bpf_prog);
        if (!new_bpf_prog && !old_bpf_prog)
                return 0;

        running = netif_running(netdev);
        need_update = !!old_bpf_prog != !!new_bpf_prog;

        if (running && need_update)
                vmxnet3_quiesce_dev(adapter);

        vmxnet3_xdp_exchange_program(adapter, new_bpf_prog);
        if (old_bpf_prog)
                bpf_prog_put(old_bpf_prog);

        if (!running || !need_update)
                return 0;

        if (new_bpf_prog)
                xdp_features_set_redirect_target(netdev, false);
        else
                xdp_features_clear_redirect_target(netdev);

        vmxnet3_reset_dev(adapter);
        vmxnet3_rq_destroy_all(adapter);
        vmxnet3_adjust_rx_ring_size(adapter);
        err = vmxnet3_rq_create_all(adapter);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "failed to re-create rx queues for XDP.");
                return -EOPNOTSUPP;
        }
        err = vmxnet3_activate_dev(adapter);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "failed to activate device for XDP.");
                return -EOPNOTSUPP;
        }
        clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

        return 0;
}
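/*
 * The quiesce/reset/re-create sequence above is needed because
 * attaching or detaching a program changes the RX buffer layout:
 * with XDP the rings are backed by page-pool buffers that reserve
 * XDP headroom, so the queues must be rebuilt to the new geometry.
 */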
/* This is the main XDP call used by the kernel to set/unset an eBPF program. */
int
vmxnet3_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
        switch (bpf->command) {
        case XDP_SETUP_PROG:
                return vmxnet3_xdp_set(netdev, bpf, bpf->extack);
        default:
                return -EINVAL;
        }
}
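/*
 * Userspace reaches this hook through the netlink XDP interface;
 * e.g. with iproute2 (illustrative only, assuming a compiled
 * program object xdp_prog.o):
 *
 *      ip link set dev ens192 xdpdrv obj xdp_prog.o sec xdp
 */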
static int
vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
                       struct xdp_frame *xdpf,
                       struct vmxnet3_tx_queue *tq, bool dma_map)
{
        struct vmxnet3_tx_buf_info *tbi = NULL;
        union Vmxnet3_GenericDesc *gdesc;
        struct vmxnet3_tx_ctx ctx;
        int tx_num_deferred;
        struct page *page;
        u32 buf_size;
        u32 dw2;

        dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
        dw2 |= xdpf->len;
        ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
        gdesc = ctx.sop_txd;

        buf_size = xdpf->len;
        tbi = tq->buf_info + tq->tx_ring.next2fill;

        if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
                tq->stats.tx_ring_full++;
                return -ENOSPC;
        }

        tbi->map_type = VMXNET3_MAP_XDP;
        if (dma_map) { /* ndo_xdp_xmit */
                tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
                                               xdpf->data, buf_size,
                                               DMA_TO_DEVICE);
                if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
                        return -EFAULT;
                tbi->map_type |= VMXNET3_MAP_SINGLE;
        } else { /* XDP buffer from page pool */
                page = virt_to_page(xdpf->data);
                tbi->dma_addr = page_pool_get_dma_addr(page) +
                                VMXNET3_XDP_HEADROOM;
                dma_sync_single_for_device(&adapter->pdev->dev,
                                           tbi->dma_addr, buf_size,
                                           DMA_TO_DEVICE);
        }
        tbi->xdpf = xdpf;
        tbi->len = buf_size;

        gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
        WARN_ON_ONCE(gdesc->txd.gen == tq->tx_ring.gen);

        gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
        gdesc->dword[2] = cpu_to_le32(dw2);

        /* Setup the EOP desc */
        gdesc->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

        gdesc->txd.om = 0;
        gdesc->txd.msscof = 0;
        gdesc->txd.hlen = 0;
        gdesc->txd.ti = 0;

        tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
        le32_add_cpu(&tq->shared->txNumDeferred, 1);
        tx_num_deferred++;

        vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

        /* set the last buf_info for the pkt */
        tbi->sop_idx = ctx.sop_txd - tq->tx_ring.base;

        dma_wmb();
        gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
                                                  VMXNET3_TXD_GEN);

        /* No need to handle the case when tx_num_deferred doesn't reach
         * threshold. Backend driver at hypervisor side will poll and reset
         * tq->shared->txNumDeferred to 0.
         */
        if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
                tq->shared->txNumDeferred = 0;
                VMXNET3_WRITE_BAR0_REG(adapter,
                                       VMXNET3_REG_TXPROD + tq->qid * 8,
                                       tq->tx_ring.next2fill);
        }

        return 0;
}
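/*
 * The gen-bit handshake above follows the same protocol as the
 * regular TX path: the descriptor is filled while it still carries
 * the inverted generation bit, and only after dma_wmb() is the bit
 * flipped to the ring's current generation, so the device never
 * sees a half-written descriptor.
 */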
static int
vmxnet3_xdp_xmit_back(struct vmxnet3_adapter *adapter,
                      struct xdp_frame *xdpf)
{
        struct vmxnet3_tx_queue *tq;
        struct netdev_queue *nq;
        int err;

        tq = vmxnet3_xdp_get_tq(adapter);
        if (tq->stopped)
                return -ENETDOWN;

        nq = netdev_get_tx_queue(adapter->netdev, tq->qid);

        __netif_tx_lock(nq, smp_processor_id());
        err = vmxnet3_xdp_xmit_frame(adapter, xdpf, tq, false);
        __netif_tx_unlock(nq);

        return err;
}
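/*
 * Taking the netdev TX queue lock serializes XDP_TX frames with the
 * regular transmit path, since the per-CPU queue chosen above may
 * also be in use by the stack.
 */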
int
vmxnet3_xdp_xmit(struct net_device *dev,
                 int n, struct xdp_frame **frames, u32 flags)
{
        struct vmxnet3_adapter *adapter = netdev_priv(dev);
        struct vmxnet3_tx_queue *tq;
        int i;

        if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
                return -ENETDOWN;
        if (unlikely(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)))
                return -EINVAL;

        tq = vmxnet3_xdp_get_tq(adapter);
        if (tq->stopped)
                return -ENETDOWN;

        for (i = 0; i < n; i++) {
                if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
                        tq->stats.xdp_xmit_err++;
                        break;
                }
        }
        tq->stats.xdp_xmit += i;

        return i;
}
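/*
 * Per the ndo_xdp_xmit contract, the return value is the number of
 * frames accepted; the caller frees any frames that were not
 * queued, which is why the loop simply stops at the first failure.
 */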
static int
vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,
                struct bpf_prog *prog)
{
        struct xdp_frame *xdpf;
        struct page *page;
        int err;
        u32 act;

        rq->stats.xdp_packets++;
        act = bpf_prog_run_xdp(prog, xdp);
        page = virt_to_page(xdp->data_hard_start);

        switch (act) {
        case XDP_PASS:
                return act;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);
                if (!err) {
                        rq->stats.xdp_redirects++;
                } else {
                        rq->stats.xdp_drops++;
                        page_pool_recycle_direct(rq->page_pool, page);
                }
                return act;
        case XDP_TX:
                xdpf = xdp_convert_buff_to_frame(xdp);
                if (unlikely(!xdpf ||
                             vmxnet3_xdp_xmit_back(rq->adapter, xdpf))) {
                        rq->stats.xdp_drops++;
                        page_pool_recycle_direct(rq->page_pool, page);
                } else {
                        rq->stats.xdp_tx++;
                }
                return act;
        default:
                bpf_warn_invalid_xdp_action(rq->adapter->netdev, prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rq->adapter->netdev, prog, act);
                rq->stats.xdp_aborted++;
                break;
        case XDP_DROP:
                rq->stats.xdp_drops++;
                break;
        }

        page_pool_recycle_direct(rq->page_pool, page);

        return act;
}
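/*
 * Every outcome other than XDP_PASS either hands the buffer to a
 * new owner (redirect or XDP_TX) or returns it to the page pool.
 * page_pool_recycle_direct() is safe here because this function is
 * only called from NAPI poll context.
 */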
static struct sk_buff *
vmxnet3_build_skb(struct vmxnet3_rx_queue *rq, struct page *page,
                  const struct xdp_buff *xdp)
{
        struct sk_buff *skb;

        skb = build_skb(page_address(page), PAGE_SIZE);
        if (unlikely(!skb)) {
                page_pool_recycle_direct(rq->page_pool, page);
                rq->stats.rx_buf_alloc_failure++;
                return NULL;
        }

        /* bpf prog might change len and data position. */
        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        skb_put(skb, xdp->data_end - xdp->data);
        skb_mark_for_recycle(skb);

        return skb;
}
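/*
 * skb_mark_for_recycle() lets the page-pool page be recycled when
 * the skb is eventually freed, so the PASS path needs no explicit
 * page_pool put.
 */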
/* Handle packets from DataRing. */
int
vmxnet3_process_xdp_small(struct vmxnet3_adapter *adapter,
                          struct vmxnet3_rx_queue *rq,
                          void *data, int len,
                          struct sk_buff **skb_xdp_pass)
{
        struct bpf_prog *xdp_prog;
        struct xdp_buff xdp;
        struct page *page;
        int act;

        page = page_pool_alloc_pages(rq->page_pool, GFP_ATOMIC);
        if (unlikely(!page)) {
                rq->stats.rx_buf_alloc_failure++;
                return XDP_DROP;
        }

        xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
        xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
                         len, false);
        xdp_buff_clear_frags_flag(&xdp);

        /* Must copy the data because it's in the dataring. */
        memcpy(xdp.data, data, len);

        xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
        if (!xdp_prog) {
                act = XDP_PASS;
                goto out_skb;
        }
        act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);
        if (act != XDP_PASS)
                return act;

out_skb:
        *skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);

        /* No need to refill. */
        return likely(*skb_xdp_pass) ? act : XDP_DROP;
}
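/*
 * The data-ring copy above is what makes this path safe for XDP:
 * the original bytes live in device-shared memory, while an XDP
 * program must be free to rewrite the packet and move data/data_end
 * within its own headroom and tailroom.
 */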
int
vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
                    struct vmxnet3_rx_queue *rq,
                    struct Vmxnet3_RxCompDesc *rcd,
                    struct vmxnet3_rx_buf_info *rbi,
                    struct Vmxnet3_RxDesc *rxd,
                    struct sk_buff **skb_xdp_pass)
{
        struct bpf_prog *xdp_prog;
        dma_addr_t new_dma_addr;
        struct xdp_buff xdp;
        struct page *page;
        void *new_data;
        int act;

        page = rbi->page;
        dma_sync_single_for_cpu(&adapter->pdev->dev,
                                page_pool_get_dma_addr(page) +
                                rq->page_pool->p.offset, rcd->len,
                                page_pool_get_dma_dir(rq->page_pool));

        xdp_init_buff(&xdp, rbi->len, &rq->xdp_rxq);
        xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
                         rcd->len, false);
        xdp_buff_clear_frags_flag(&xdp);

        xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
        if (!xdp_prog) {
                act = XDP_PASS;
                goto out_skb;
        }
        act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);

        if (act == XDP_PASS) {
out_skb:
                *skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);
                if (!*skb_xdp_pass)
                        act = XDP_DROP;
        }

        new_data = vmxnet3_pp_get_buff(rq->page_pool, &new_dma_addr,
                                       GFP_ATOMIC);
        if (!new_data) {
                rq->stats.rx_buf_alloc_failure++;
                return XDP_DROP;
        }
        rbi->page = virt_to_page(new_data);
        rbi->dma_addr = new_dma_addr;
        rxd->addr = cpu_to_le64(rbi->dma_addr);
        rxd->len = rbi->len;

        return act;
}
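/*
 * Unlike the data-ring path, the buffer consumed here came from the
 * RX ring itself, so a replacement page must be hooked back into the
 * ring descriptor regardless of the verdict; otherwise the ring
 * would slowly run out of buffers.
 */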