/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"
#include "vmxnet3_xdp.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static const struct pci_device_id vmxnet3_pciid_table[] = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static int enable_mq = 1;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
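/*
 * Illustrative usage sketch (not part of the driver): each IMR register
 * lives at an 8-byte stride in BAR0, so masking or unmasking interrupt N
 * is a single register write. The MSI-X handlers later in this file use
 * these helpers in the usual auto-mask pattern:
 *
 *	vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
 *	napi_schedule(&rq->napi);
 *	... NAPI poll drains the ring, then re-arms:
 *	vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
 */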
/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	if (!VMXNET3_VERSION_GE_6(adapter) ||
	    !adapter->queuesExtEnabled) {
		adapter->shared->devRead.intrConf.intrCtrl &=
					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
	} else {
		adapter->shared->devReadExt.intrConfExt.intrCtrl &=
					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
	}
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	if (!VMXNET3_VERSION_GE_6(adapter) ||
	    !adapter->queuesExtEnabled) {
		adapter->shared->devRead.intrConf.intrCtrl |=
					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	} else {
		adapter->shared->devReadExt.intrConfExt.intrCtrl |=
					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	}
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}

static u64
vmxnet3_get_cycles(int pmc)
{
#ifdef CONFIG_X86
	return native_read_pmc(pmc);
#else
	return 0;
#endif
}

static bool
vmxnet3_apply_timestamp(struct vmxnet3_tx_queue *tq, u16 rate)
{
#ifdef CONFIG_X86
	if (rate > 0) {
		if (tq->tsPktCount == 1) {
			if (rate != 1)
				tq->tsPktCount = rate;
			return true;
		}
		tq->tsPktCount--;
	}
#endif
	return false;
}
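/*
 * Illustrative sketch (hedged, not compiled): with a hypervisor-supplied
 * sampleRate of N, vmxnet3_apply_timestamp() reports a hit for one packet
 * out of every N; tsPktCount counts down from N to 1 and reloads on a hit.
 * The transmit path uses it like this:
 *
 *	if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) {
 *		ctx.ts_txd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
 *		ctx.ts_txd->ts.tsi = 1;
 *	}
 */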
/* Check if capability is supported by UPT device or
 * UPT is even requested
 */
bool
vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
{
	if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
	    cap_supported & (1UL << cap)) {
		return true;
	}

	return false;
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		/*
		 * From vmxnet3 v9, the hypervisor reports the speed in Gbps.
		 * Convert the speed to Mbps before reporting it to the kernel.
		 * Max link speed supported is 10000 Gbps.
		 */
		if (VMXNET3_VERSION_GE_9(adapter) &&
		    adapter->link_speed < 10000)
			adapter->link_speed = adapter->link_speed * 1000;
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}
#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read from and write to the device ABI.
 * The general technique used here is: double-word bitfields are defined in
 * the opposite order for big endian architectures. Before reading them in
 * the driver, the complete double word is translated using le32_to_cpu.
 * Similarly, after the driver writes into the bitfields, cpu_to_le32 is used
 * to translate the double words into the required format.
 * In order to avoid touching bits in the shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
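/*
 * Illustrative sketch: on a big-endian CPU, reading e.g. the gen bit of a
 * Tx descriptor therefore goes through a byte-swapped double word rather
 * than through the bitfield itself:
 *
 *	const __le32 *dw = (const __le32 *)txdesc + VMXNET3_TXD_GEN_DWORD_SHIFT;
 *	u32 dword = le32_to_cpu(*dw);                 swap the whole dword once
 *	u32 gen = (dword >> VMXNET3_TXD_GEN_SHIFT) &
 *		  ((1 << VMXNET3_TXD_GEN_SIZE) - 1);  then extract the field
 *
 * which is exactly what get_bitfield32() below implements.
 */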
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}


#endif /* __BIG_ENDIAN_BITFIELD */
#ifdef __BIG_ENDIAN_BITFIELD

# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */
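/*
 * Illustrative usage sketch: callers always pass a scratch descriptor so the
 * same code compiles for both endiannesses. On little endian the macro is a
 * plain pointer assignment and the scratch copy is unused:
 *
 *	struct Vmxnet3_RxCompDesc rxComp;	scratch, only written on BE
 *	struct Vmxnet3_RxCompDesc *rcd;
 *
 *	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[idx].rcd, &rxComp);
 *	rcd now points at a CPU-endian view of the completion descriptor
 */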
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	u32 map_type = tbi->map_type;

	if (map_type & VMXNET3_MAP_SINGLE)
		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
				 DMA_TO_DEVICE);
	else if (map_type & VMXNET3_MAP_PAGE)
		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
			       DMA_TO_DEVICE);
	else
		BUG_ON(map_type & ~VMXNET3_MAP_XDP);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter,
		  struct xdp_frame_bulk *bq)
{
	struct vmxnet3_tx_buf_info *tbi;
	int entries = 0;
	u32 map_type;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	tbi = &tq->buf_info[eop_idx];
	BUG_ON(!tbi->skb);
	map_type = tbi->map_type;
	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	if (map_type & VMXNET3_MAP_XDP)
		xdp_return_frame_bulk(tbi->xdpf, bq);
	else
		dev_kfree_skb_any(tbi->skb);

	/* xdpf and skb are in an anonymous union. */
	tbi->skb = NULL;

	return entries;
}


static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	union Vmxnet3_GenericDesc *gdesc;
	struct xdp_frame_bulk bq;
	int completed = 0;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock();

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		/* Prevent any &gdesc->tcd field from being (speculatively)
		 * read before (&gdesc->tcd)->gen is read.
		 */
		dma_rmb();

		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter, &bq);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}
	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}
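/*
 * Note on the gen-bit handshake (summary, hedged): every pass over the ring
 * flips the generation bit the driver expects, so "tcd.gen == ring gen"
 * means the device has published a new completion, with no head/tail
 * register read needed:
 *
 *	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
 *		dma_rmb();	pairs with the device's descriptor writes
 *		... consume the completion ...
 *		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
 *	}
 */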
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	struct xdp_frame_bulk bq;
	u32 map_type;
	int i;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock();

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;
		map_type = tbi->map_type;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			if (map_type & VMXNET3_MAP_XDP)
				xdp_return_frame_bulk(tbi->xdpf, &bq);
			else
				dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();

	/* sanity check, verify all buffers are indeed unmapped */
	for (i = 0; i < tq->tx_ring.size; i++)
		BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE);

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
				  sizeof(struct Vmxnet3_TxDesc),
				  tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->data_ring.size * tq->txdata_desc_size,
				  tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->ts_ring.base) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->tx_ring.size * tq->tx_ts_desc_size,
				  tq->ts_ring.base, tq->ts_ring.basePA);
		tq->ts_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
				  sizeof(struct Vmxnet3_TxCompDesc),
				  tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0,
	       tq->data_ring.size * tq->txdata_desc_size);

	if (tq->ts_ring.base)
		memset(tq->ts_ring.base, 0,
		       tq->tx_ring.size * tq->tx_ts_desc_size);

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * tq->txdata_desc_size,
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
		goto err;
	}

	if (tq->tx_ts_desc_size != 0) {
		tq->ts_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
				tq->tx_ring.size * tq->tx_ts_desc_size,
				&tq->ts_ring.basePA, GFP_KERNEL);
		if (!tq->ts_ring.base) {
			netdev_err(adapter->netdev, "failed to allocate tx ts ring\n");
			tq->tx_ts_desc_size = 0;
		}
	} else {
		tq->ts_ring.base = NULL;
	}

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
				    GFP_KERNEL,
				    dev_to_node(&adapter->pdev->dev));
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}


static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}
/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;
		rbi->comp_state = VMXNET3_RXD_COMP_PENDING;

		if (rbi->buf_type == VMXNET3_RX_BUF_XDP) {
			void *data = vmxnet3_pp_get_buff(rq->page_pool,
							 &rbi->dma_addr,
							 GFP_KERNEL);
			if (!data) {
				rq->stats.rx_buf_alloc_failure++;
				break;
			}
			rbi->page = virt_to_page(data);
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = dma_map_single(
						&adapter->pdev->dev,
						rbi->skb->data, rbi->len,
						DMA_FROM_DEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					dev_kfree_skb_any(rbi->skb);
					rbi->skb = NULL;
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = dma_map_page(
						&adapter->pdev->dev,
						rbi->page, 0, PAGE_SIZE,
						DMA_FROM_DEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					put_page(rbi->page);
					rbi->page = NULL;
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but dont mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc) {
			rbi->comp_state = VMXNET3_RXD_COMP_DONE;
			break;
		}

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		   "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		   num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
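/*
 * Illustrative sketch (hedged): the last requested buffer is written but its
 * gen bit is left stale, so next2fill never catches up with next2comp and
 * the device can tell a full ring from an empty one:
 *
 *	empty: next2fill == next2comp, every descriptor owned by the driver
 *	full:  next2fill stops one slot short of next2comp, which is what
 *	       the final BUG_ON() above asserts
 */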
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}
static int
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					tq->txdata_desc_size);
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill,
			   le64_to_cpu(ctx->sop_txd->txd.addr),
			   ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
				skb->data + buf_offset, buf_size,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
			return -EFAULT;

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);
			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
				return -EFAULT;

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				   "txd[%u]: 0x%llx %u %u\n",
				   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
	if (tq->tx_ts_desc_size != 0) {
		ctx->ts_txd = (struct Vmxnet3_TxTSDesc *)((u8 *)tq->ts_ring.base +
							  tbi->sop_idx * tq->tx_ts_desc_size);
		ctx->ts_txd->ts.tsi = 0;
	}

	return 0;
}
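/*
 * Illustrative sketch: the SOP descriptor is deliberately written with the
 * inverted gen bit so the device ignores the packet while the body and frag
 * descriptors are still being filled; vmxnet3_tq_xmit() flips it last:
 *
 *	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;  SOP: stale gen
 *	... body/frag descriptors are written with the live gen bit ...
 *	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
 *				      VMXNET3_TXD_GEN);          publish SOP
 */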
/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 *    parse relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion to be copied is guaranteed to be in the linear part
 */
static int
vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_tx_ctx *ctx,
		  struct vmxnet3_adapter *adapter)
{
	u8 protocol = 0;

	if (ctx->mss) {	/* TSO */
		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
			ctx->l4_offset = skb_inner_transport_offset(skb);
			ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
		} else {
			ctx->l4_offset = skb_transport_offset(skb);
			ctx->l4_hdr_size = tcp_hdrlen(skb);
			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
		}
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			/* For encap packets, skb_checksum_start_offset refers
			 * to inner L4 offset. Thus, below works for encap as
			 * well as non-encap case
			 */
			ctx->l4_offset = skb_checksum_start_offset(skb);

			if (VMXNET3_VERSION_GE_4(adapter) &&
			    skb->encapsulation) {
				struct iphdr *iph = inner_ip_hdr(skb);

				if (iph->version == 4) {
					protocol = iph->protocol;
				} else {
					const struct ipv6hdr *ipv6h;

					ipv6h = inner_ipv6_hdr(skb);
					protocol = ipv6h->nexthdr;
				}
			} else {
				if (ctx->ipv4) {
					const struct iphdr *iph = ip_hdr(skb);

					protocol = iph->protocol;
				} else if (ctx->ipv6) {
					const struct ipv6hdr *ipv6h;

					ipv6h = ipv6_hdr(skb);
					protocol = ipv6h->nexthdr;
				}
			}

			switch (protocol) {
			case IPPROTO_TCP:
				ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
						   tcp_hdrlen(skb);
				break;
			case IPPROTO_UDP:
				ctx->l4_hdr_size = sizeof(struct udphdr);
				break;
			default:
				ctx->l4_hdr_size = 0;
				break;
			}

			ctx->copy_size = min(ctx->l4_offset +
					 ctx->l4_hdr_size, skb->len);
		} else {
			ctx->l4_offset = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min_t(unsigned int,
					       tq->txdata_desc_size,
					       skb_headlen(skb));
		}

		if (skb->len <= tq->txdata_desc_size)
			ctx->copy_size = skb->len;

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	return 1;
err:
	return -1;
}
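/*
 * Worked example (illustrative, assuming a plain TCPv4 TSO skb with no VLAN
 * tag): the copied region is L2 + L3 + L4 including TCP options, e.g.
 *
 *	l4_offset   = 14 (ethernet) + 20 (ipv4)  = 34
 *	l4_hdr_size = tcp_hdrlen(skb), e.g. 32 with timestamps
 *	copy_size   = l4_offset + l4_hdr_size    = 66 bytes into the data ring
 */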
/*
 *    copy relevant protocol headers to the transmit ring:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 *    Note that this requires that vmxnet3_parse_hdr be called first to set the
 *      appropriate bits in ctx first
 */
static void
vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		 struct vmxnet3_tx_ctx *ctx,
		 struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
					    tq->tx_ring.next2fill *
					    tq->txdata_desc_size);

	memcpy(tdd->data, skb->data, ctx->copy_size);
	netdev_dbg(adapter->netdev,
		   "copy %u bytes to dataRing[%u]\n",
		   ctx->copy_size, tq->tx_ring.next2fill);
}
static void
vmxnet3_prepare_inner_tso(struct sk_buff *skb,
			  struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = inner_tcp_hdr(skb);
	struct iphdr *iph = inner_ip_hdr(skb);

	if (iph->version == 4) {
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = inner_ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}

static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else if (ctx->ipv6) {
		tcp_v6_gso_csum_prep(skb);
	}
}
static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}
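/*
 * Worked example (illustrative): VMXNET3_TXD_NEEDED(size) rounds up to the
 * number of 16 KB (VMXNET3_MAX_TX_BUF_SIZE) descriptors the buffer needs.
 * For a 1500-byte linear part with two 4 KB frags:
 *
 *	count = VMXNET3_TXD_NEEDED(1500) + 1	1 + 1 for a possible hdr desc
 *	      + 2 * VMXNET3_TXD_NEEDED(4096)	1 each
 *	      = 4 descriptors
 */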
/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */
static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	int num_pkts;
	int tx_num_deferred;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));

	ctx.mss = skb_shinfo(skb)->gso_size;

	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
			/* tso pkts must not use more than
			 * VMXNET3_MAX_TSO_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
			if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
		}
		if (skb->encapsulation) {
			vmxnet3_prepare_inner_tso(skb, &ctx);
		} else {
			vmxnet3_prepare_tso(skb, &ctx);
		}
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				tq->stats.drop_oversized_hdr++;
				goto drop_pkt;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.l4_offset +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					tq->stats.drop_oversized_hdr++;
					goto drop_pkt;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		netdev_dbg(adapter->netdev,
			   "tx queue stopped on %s, next2comp %u"
			   " next2fill %u\n", adapter->netdev->name,
			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);

	/* fill tx descs related to addr & len */
	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
		goto unlock_drop_pkt;

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
	if (ctx.mss) {
		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
			if (VMXNET3_VERSION_GE_7(adapter)) {
				gdesc->txd.om = VMXNET3_OM_TSO;
				gdesc->txd.ext1 = 1;
			} else {
				gdesc->txd.om = VMXNET3_OM_ENCAP;
			}
			gdesc->txd.msscof = ctx.mss;

			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
				gdesc->txd.oco = 1;
		} else {
			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
			gdesc->txd.om = VMXNET3_OM_TSO;
			gdesc->txd.msscof = ctx.mss;
		}
		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (VMXNET3_VERSION_GE_4(adapter) &&
			    skb->encapsulation) {
				gdesc->txd.hlen = ctx.l4_offset +
						  ctx.l4_hdr_size;
				if (VMXNET3_VERSION_GE_7(adapter)) {
					gdesc->txd.om = VMXNET3_OM_CSUM;
					gdesc->txd.msscof = ctx.l4_offset +
							    skb->csum_offset;
					gdesc->txd.ext1 = 1;
				} else {
					gdesc->txd.om = VMXNET3_OM_ENCAP;
					gdesc->txd.msscof = 0;	/* Reserved */
				}
			} else {
				gdesc->txd.hlen = ctx.l4_offset;
				gdesc->txd.om = VMXNET3_OM_CSUM;
				gdesc->txd.msscof = ctx.l4_offset +
						    skb->csum_offset;
			}
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		num_pkts = 1;
	}
	le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
	tx_num_deferred += num_pkts;

	if (skb_vlan_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = skb_vlan_tag_get(skb);
	}

	if (tq->tx_ts_desc_size != 0 &&
	    adapter->latencyConf->sampleRate != 0) {
		if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) {
			ctx.ts_txd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
			ctx.ts_txd->ts.tsi = 1;
		}
	}

	/* Ensure that the write to (&gdesc->txd)->gen will be observed after
	 * all other writes to &gdesc->txd.
	 */
	dma_wmb();

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	netdev_dbg(adapter->netdev,
		   "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		   (u32)(ctx.sop_txd -
		   tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		   le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       adapter->tx_prod_offset + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
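/*
 * Note (summary, hedged): doorbell writes are batched. txNumDeferred counts
 * packets queued since the last doorbell, and only once it reaches the
 * device-suggested txThreshold does the driver touch the BAR0 producer
 * register, which is a costly VM exit:
 *
 *	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
 *		tq->shared->txNumDeferred = 0;
 *		VMXNET3_WRITE_BAR0_REG(adapter,
 *				       adapter->tx_prod_offset + tq->qid * 8,
 *				       tq->tx_ring.next2fill);
 *	}
 */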
static int
vmxnet3_create_pp(struct vmxnet3_adapter *adapter,
		  struct vmxnet3_rx_queue *rq, int size)
{
	bool xdp_prog = vmxnet3_xdp_enabled(adapter);
	const struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = size,
		.nid = NUMA_NO_NODE,
		.dev = &adapter->pdev->dev,
		.offset = VMXNET3_XDP_RX_OFFSET,
		.max_len = VMXNET3_XDP_MAX_FRSIZE,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
	};
	struct page_pool *pp;
	int err;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp))
		return PTR_ERR(pp);

	err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
			       rq->napi.napi_id);
	if (err < 0)
		goto err_free_pp;

	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_unregister_rxq;

	rq->page_pool = pp;

	return 0;

err_unregister_rxq:
	xdp_rxq_info_unreg(&rq->xdp_rxq);
err_free_pp:
	page_pool_destroy(pp);

	return err;
}

void *
vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
		    gfp_t gfp_mask)
{
	struct page *page;

	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
	if (unlikely(!page))
		return NULL;

	*dma_addr = page_pool_get_dma_addr(page) + pp->p.offset;

	return page_address(page);
}
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}


static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		if (gdesc->rcd.v4 &&
		    (le32_to_cpu(gdesc->dword[3]) &
		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if ((le32_to_cpu(gdesc->dword[0]) &
				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
				skb->csum_level = 1;
			}
			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
				     !(le32_to_cpu(gdesc->dword[0]) &
				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
			WARN_ON_ONCE(gdesc->rcd.frg &&
				     !(le32_to_cpu(gdesc->dword[0]) &
				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if ((le32_to_cpu(gdesc->dword[0]) &
				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
				skb->csum_level = 1;
			}
			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
				     !(le32_to_cpu(gdesc->dword[0]) &
				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
			WARN_ON_ONCE(gdesc->rcd.frg &&
				     !(le32_to_cpu(gdesc->dword[0]) &
				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}


static u32
vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
		    union Vmxnet3_GenericDesc *gdesc)
{
	u32 hlen, maplen;
	union {
		void *ptr;
		struct ethhdr *eth;
		struct vlan_ethhdr *veth;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		struct tcphdr *tcp;
	} hdr;
	BUG_ON(gdesc->rcd.tcp == 0);

	maplen = skb_headlen(skb);
	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
		return 0;

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD))
		hlen = sizeof(struct vlan_ethhdr);
	else
		hlen = sizeof(struct ethhdr);

	hdr.eth = eth_hdr(skb);
	if (gdesc->rcd.v4) {
		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
		hdr.ptr += hlen;
		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
		hlen = hdr.ipv4->ihl << 2;
		hdr.ptr += hdr.ipv4->ihl << 2;
	} else if (gdesc->rcd.v6) {
		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
		hdr.ptr += hlen;
		/* Use an estimated value, since we also need to handle
		 * TSO case.
		 */
		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
		hlen = sizeof(struct ipv6hdr);
		hdr.ptr += sizeof(struct ipv6hdr);
	} else {
		/* Non-IP pkt, dont estimate header length */
		return 0;
	}

	if (hlen + sizeof(struct tcphdr) > maplen)
		return 0;

	return (hlen + (hdr.tcp->doff << 2));
}
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	u32 rxprod_reg[2] = {
		adapter->rx_prod_offset, adapter->rx_prod2_offset
	};
	u32 num_pkts = 0;
	bool skip_page_frags = false;
	bool encap_lro = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
	u16 segCnt = 0, mss = 0;
	int comp_offset, fill_offset;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	bool need_flush = false;

	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb, *new_skb = NULL;
		struct page *new_page = NULL;
		dma_addr_t new_dma_addr;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;
		struct vmxnet3_cmd_ring *ring = NULL;
		if (num_pkts >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}

		/* Prevent any rcd field from being (speculatively) read before
		 * rcd->gen is read.
		 */
		dma_rmb();

		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
		       rcd->rqID != rq->dataRingQid);
		idx = rcd->rxdIdx;
		ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
		ring = rq->rx_ring + ring_idx;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) {
			struct sk_buff *skb_xdp_pass;
			int act;

			if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) {
				ctx->skb = NULL;
				goto skip_xdp; /* Handle it later. */
			}

			if (rbi->buf_type != VMXNET3_RX_BUF_XDP)
				goto rcd_done;

			act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
						  &skb_xdp_pass);
			if (act == XDP_PASS) {
				ctx->skb = skb_xdp_pass;
				goto sop_done;
			}
			ctx->skb = NULL;
			need_flush |= act == XDP_REDIRECT;

			goto rcd_done;
		}
skip_xdp:

		if (rcd->sop) { /* first buf of the pkt */
			bool rxDataRingUsed;
			u16 len;

			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       (rcd->rqID != rq->qid &&
				rcd->rqID != rq->dataRingQid));

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB &&
			       rbi->buf_type != VMXNET3_RX_BUF_XDP);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				netdev_dbg(adapter->netdev,
					   "rxRing[%u][%u] 0 length\n",
					   ring_idx, idx);
				goto rcd_done;
			}

			skip_page_frags = false;
			ctx->skb = rbi->skb;

			if (rq->rx_ts_desc_size != 0 && rcd->ext2) {
				struct Vmxnet3_RxTSDesc *ts_rxd;

				ts_rxd = (struct Vmxnet3_RxTSDesc *)((u8 *)rq->ts_ring.base +
								     idx * rq->rx_ts_desc_size);
				ts_rxd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
				ts_rxd->ts.tsi = 1;
			}

			rxDataRingUsed =
				VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
			len = rxDataRingUsed ? rcd->len : rbi->len;

			if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) {
				struct sk_buff *skb_xdp_pass;
				size_t sz;
				int act;

				sz = rcd->rxdIdx * rq->data_ring.desc_size;
				act = vmxnet3_process_xdp_small(adapter, rq,
								&rq->data_ring.base[sz],
								rcd->len,
								&skb_xdp_pass);
				if (act == XDP_PASS) {
					ctx->skb = skb_xdp_pass;
					goto sop_done;
				}
				need_flush |= act == XDP_REDIRECT;

				goto rcd_done;
			}
			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
							    len);
			if (new_skb == NULL) {
				/* Skb allocation failed, do not handover this
				 * skb to stack. Reuse it. Drop the existing pkt
				 */
				rq->stats.rx_buf_alloc_failure++;
				ctx->skb = NULL;
				rq->stats.drop_total++;
				skip_page_frags = true;
				goto rcd_done;
			}

			if (rxDataRingUsed && adapter->rxdataring_enabled) {
				size_t sz;

				BUG_ON(rcd->len > rq->data_ring.desc_size);

				ctx->skb = new_skb;
				sz = rcd->rxdIdx * rq->data_ring.desc_size;
				memcpy(new_skb->data,
				       &rq->data_ring.base[sz], rcd->len);
			} else {
				ctx->skb = rbi->skb;

				new_dma_addr =
					dma_map_single(&adapter->pdev->dev,
						       new_skb->data, rbi->len,
						       DMA_FROM_DEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      new_dma_addr)) {
					dev_kfree_skb(new_skb);
					/* Skb allocation failed, do not
					 * handover this skb to stack. Reuse
					 * it. Drop the existing pkt.
					 */
					rq->stats.rx_buf_alloc_failure++;
					ctx->skb = NULL;
					rq->stats.drop_total++;
					skip_page_frags = true;
					goto rcd_done;
				}

				dma_unmap_single(&adapter->pdev->dev,
						 rbi->dma_addr,
						 rbi->len,
						 DMA_FROM_DEVICE);

				/* Immediate refill */
				rbi->skb = new_skb;
				rbi->dma_addr = new_dma_addr;
				rxd->addr = cpu_to_le64(rbi->dma_addr);
				rxd->len = rbi->len;
			}

			skb_record_rx_queue(ctx->skb, rq->qid);
			skb_put(ctx->skb, rcd->len);

			if (VMXNET3_VERSION_GE_2(adapter) &&
			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
				struct Vmxnet3_RxCompDescExt *rcdlro;
				union Vmxnet3_GenericDesc *gdesc;

				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
				gdesc = (union Vmxnet3_GenericDesc *)rcd;

				segCnt = rcdlro->segCnt;
				WARN_ON_ONCE(segCnt == 0);
				mss = rcdlro->mss;
				if (unlikely(segCnt <= 1))
					segCnt = 0;
				encap_lro = (le32_to_cpu(gdesc->dword[0]) &
					(1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
			} else {
				segCnt = 0;
			}
		} else {
			BUG_ON(ctx->skb == NULL && !skip_page_frags);

			/* non SOP buffer must be type 1 in most cases */
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

			/* If an sop buffer was dropped, skip all
			 * following non-sop fragments. They will be reused.
			 */
			if (skip_page_frags)
				goto rcd_done;

			if (rcd->len) {
				new_page = alloc_page(GFP_ATOMIC);
				/* Replacement page frag could not be allocated.
				 * Reuse this page. Drop the pkt and free the
				 * skb which contained this page as a frag. Skip
				 * processing all the following non-sop frags.
				 */
				if (unlikely(!new_page)) {
					rq->stats.rx_buf_alloc_failure++;
					dev_kfree_skb(ctx->skb);
					ctx->skb = NULL;
					skip_page_frags = true;
					goto rcd_done;
				}
				new_dma_addr = dma_map_page(&adapter->pdev->dev,
							    new_page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      new_dma_addr)) {
					put_page(new_page);
					rq->stats.rx_buf_alloc_failure++;
					dev_kfree_skb(ctx->skb);
					ctx->skb = NULL;
					skip_page_frags = true;
					goto rcd_done;
				}

				dma_unmap_page(&adapter->pdev->dev,
					       rbi->dma_addr, rbi->len,
					       DMA_FROM_DEVICE);

				vmxnet3_append_frag(ctx->skb, rcd, rbi);

				/* Immediate refill */
				rbi->page = new_page;
				rbi->dma_addr = new_dma_addr;
				rxd->addr = cpu_to_le64(rbi->dma_addr);
				rxd->len = rbi->len;
			}
		}


sop_done:
		skb = ctx->skb;
		if (rcd->eop) {
			u32 mtu = adapter->netdev->mtu;
			skb->len += skb->data_len;

#ifdef VMXNET3_RSS
			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
			    (adapter->netdev->features & NETIF_F_RXHASH)) {
				enum pkt_hash_types hash_type;

				switch (rcd->rssType) {
				case VMXNET3_RCD_RSS_TYPE_IPV4:
				case VMXNET3_RCD_RSS_TYPE_IPV6:
					hash_type = PKT_HASH_TYPE_L3;
					break;
				case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
				case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
				case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
				case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
					hash_type = PKT_HASH_TYPE_L4;
					break;
				default:
					hash_type = PKT_HASH_TYPE_L3;
					break;
				}
				skb_set_hash(skb,
					     le32_to_cpu(rcd->rssHash),
					     hash_type);
			}
#endif
			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);
			if ((!rcd->tcp && !encap_lro) ||
			    !(adapter->netdev->features & NETIF_F_LRO))
				goto not_lro;

			if (segCnt != 0 && mss != 0) {
				skb_shinfo(skb)->gso_type = rcd->v4 ?
					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
				skb_shinfo(skb)->gso_size = mss;
				skb_shinfo(skb)->gso_segs = segCnt;
			} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
				u32 hlen;

				hlen = vmxnet3_get_hdr_len(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
				if (hlen == 0)
					goto not_lro;

				skb_shinfo(skb)->gso_type =
					rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
				if (segCnt != 0) {
					skb_shinfo(skb)->gso_segs = segCnt;
					skb_shinfo(skb)->gso_size =
						DIV_ROUND_UP(skb->len -
							hlen, segCnt);
				} else {
					skb_shinfo(skb)->gso_size = mtu - hlen;
				}
			}
not_lro:
			if (unlikely(rcd->ts))
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);

			/* Use GRO callback if UPT is enabled */
			if ((adapter->netdev->features & NETIF_F_LRO) &&
			    !rq->shared->updateRxProd)
				netif_receive_skb(skb);
			else
				napi_gro_receive(&rq->napi, skb);

			ctx->skb = NULL;
			encap_lro = false;
			num_pkts++;
		}

rcd_done:
		/* device may have skipped some rx descs */
		ring = rq->rx_ring + ring_idx;
		rbi->comp_state = VMXNET3_RXD_COMP_DONE;

		comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
		fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
			      idx - ring->next2fill - 1;
		if (!ring->isOutOfOrder || fill_offset >= comp_offset)
			ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);

		/* Ensure that the writes to rxd->gen bits will be observed
		 * after all other writes to rxd objects.
		 */
		dma_wmb();

		while (num_to_alloc) {
			rbi = rq->buf_info[ring_idx] + ring->next2fill;
			if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
				goto refill_buf;
			if (ring_idx == 0) {
				/* ring0 Type1 buffers can get skipped; re-fill them */
				if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
					goto refill_buf;
			}
			if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
refill_buf:
				vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
						  &rxCmdDesc);
				WARN_ON(!rxd->addr);

				/* Recv desc is ready to be used by the device */
				rxd->gen = ring->gen;
				vmxnet3_cmd_ring_adv_next2fill(ring);
				rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
				num_to_alloc--;
			} else {
				/* rx completion hasn't occurred */
				ring->isOutOfOrder = 1;
				break;
			}
		}

		if (num_to_alloc == 0) {
			ring->isOutOfOrder = 0;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					       rxprod_reg[ring_idx] + rq->qid * 8,
					       ring->next2fill);
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}
	if (need_flush)
		xdp_do_flush();

	return num_pkts;
}
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	/* ring has already been cleaned up */
	if (!rq->rx_ring[0].base)
		return;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
			struct vmxnet3_rx_buf_info *rbi;
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif

			rbi = &rq->buf_info[ring_idx][i];
			vmxnet3_getRxDesc(rxd,
					  &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
			    rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) {
				page_pool_recycle_direct(rq->page_pool,
							 rbi->page);
				rbi->page = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
				   rbi->skb) {
				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
						 rxd->len, DMA_FROM_DEVICE);
				dev_kfree_skb(rbi->skb);
				rbi->skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
				   rbi->page) {
				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
					       rxd->len, DMA_FROM_DEVICE);
				put_page(rbi->page);
				rbi->page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
			rq->rx_ring[ring_idx].next2comp = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}


static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
	rcu_assign_pointer(adapter->xdp_bpf_prog, NULL);
}
static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			       struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			dma_free_coherent(&adapter->pdev->dev,
					  rq->rx_ring[i].size
					  * sizeof(struct Vmxnet3_RxDesc),
					  rq->rx_ring[i].base,
					  rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
	}

	if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
		xdp_rxq_info_unreg(&rq->xdp_rxq);
	page_pool_destroy(rq->page_pool);
	rq->page_pool = NULL;

	if (rq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev,
				  rq->rx_ring[0].size * rq->data_ring.desc_size,
				  rq->data_ring.base, rq->data_ring.basePA);
		rq->data_ring.base = NULL;
	}

	if (rq->ts_ring.base) {
		dma_free_coherent(&adapter->pdev->dev,
				  rq->rx_ring[0].size * rq->rx_ts_desc_size,
				  rq->ts_ring.base, rq->ts_ring.basePA);
		rq->ts_ring.base = NULL;
	}

	if (rq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
				  * sizeof(struct Vmxnet3_RxCompDesc),
				  rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}

	kfree(rq->buf_info[0]);
	rq->buf_info[0] = NULL;
	rq->buf_info[1] = NULL;
}
static void
vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];

		if (rq->data_ring.base) {
			dma_free_coherent(&adapter->pdev->dev,
					  (rq->rx_ring[0].size *
					  rq->data_ring.desc_size),
					  rq->data_ring.base,
					  rq->data_ring.basePA);
			rq->data_ring.base = NULL;
		}
		rq->data_ring.desc_size = 0;
	}
}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i, err;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff or xdp page */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
						      VMXNET3_RX_BUF_XDP :
						      VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt is frag */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[i].isOutOfOrder = 0;
	}

	err = vmxnet3_create_pp(adapter, rq,
				rq->rx_ring[0].size + rq->rx_ring[1].size);
	if (err)
		return err;

	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		xdp_rxq_info_unreg(&rq->xdp_rxq);
		page_pool_destroy(rq->page_pool);
		rq->page_pool = NULL;

		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	if (rq->ts_ring.base)
		memset(rq->ts_ring.base, 0,
		       rq->rx_ring[0].size * rq->rx_ts_desc_size);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;
}
2220 struct vmxnet3_rx_buf_info *bi;
2222 for (i = 0; i < 2; i++) {
2224 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
2225 rq->rx_ring[i].base = dma_alloc_coherent(
2226 &adapter->pdev->dev, sz,
2227 &rq->rx_ring[i].basePA,
2229 if (!rq->rx_ring[i].base) {
2230 netdev_err(adapter->netdev,
2231 "failed to allocate rx ring %d\n", i);
2236 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
2237 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
2238 rq->data_ring.base =
2239 dma_alloc_coherent(&adapter->pdev->dev, sz,
2240 &rq->data_ring.basePA,
2242 if (!rq->data_ring.base) {
2243 netdev_err(adapter->netdev,
2244 "rx data ring will be disabled\n");
2245 adapter->rxdataring_enabled = false;
2248 rq->data_ring.base = NULL;
2249 rq->data_ring.desc_size = 0;
2252 if (rq->rx_ts_desc_size != 0) {
2253 sz = rq->rx_ring[0].size * rq->rx_ts_desc_size;
2255 dma_alloc_coherent(&adapter->pdev->dev, sz,
2256 &rq->ts_ring.basePA,
2258 if (!rq->ts_ring.base) {
2259 netdev_err(adapter->netdev,
2260 "rx ts ring will be disabled\n");
2261 rq->rx_ts_desc_size = 0;
2264 rq->ts_ring.base = NULL;
2267 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
2268 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
2269 &rq->comp_ring.basePA,
2271 if (!rq->comp_ring.base) {
2272 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
2276 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
2277 sizeof(rq->buf_info[0][0]), GFP_KERNEL,
2278 dev_to_node(&adapter->pdev->dev));
2282 rq->buf_info[0] = bi;
2283 rq->buf_info[1] = bi + rq->rx_ring[0].size;
2288 vmxnet3_rq_destroy(rq, adapter);
static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}

	if (!adapter->rxdataring_enabled)
		vmxnet3_rq_destroy_all_rxdataring(adapter);

	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;
}
/* Multiple queue aware polling function for tx and rx */
static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete_done(napi, rxd_done);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}
/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */
static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete_done(napi, rxd_done);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}
#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */
static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocated for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}
2419 * Handle completion interrupts on rx queues. Returns whether or not the intr is handled.
2424 vmxnet3_msix_rx(int irq, void *data)
2426 struct vmxnet3_rx_queue *rq = data;
2427 struct vmxnet3_adapter *adapter = rq->adapter;
2429 /* disable intr if needed */
2430 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2431 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
2432 napi_schedule(&rq->napi);
2438 *----------------------------------------------------------------------------
2440 * vmxnet3_msix_event --
2442 * vmxnet3 msix event intr handler
2444 * Result:
2445 * whether or not the intr is handled
2447 *----------------------------------------------------------------------------
2451 vmxnet3_msix_event(int irq, void *data)
2453 struct net_device *dev = data;
2454 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2456 /* disable intr if needed */
2457 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2458 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
2460 if (adapter->shared->ecr)
2461 vmxnet3_process_events(adapter);
2463 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
2468 #endif /* CONFIG_PCI_MSI */
2471 /* Interrupt handler for vmxnet3 */
2473 vmxnet3_intr(int irq, void *dev_id)
2475 struct net_device *dev = dev_id;
2476 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2478 if (adapter->intr.type == VMXNET3_IT_INTX) {
2479 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2480 if (unlikely(icr == 0))
2486 /* disable intr if needed */
2487 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2488 vmxnet3_disable_all_intrs(adapter);
2490 napi_schedule(&adapter->rx_queue[0].napi);
2495 #ifdef CONFIG_NET_POLL_CONTROLLER
2497 /* netpoll callback. */
2499 vmxnet3_netpoll(struct net_device *netdev)
2501 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2503 switch (adapter->intr.type) {
2504 #ifdef CONFIG_PCI_MSI
2505 case VMXNET3_IT_MSIX: {
2507 for (i = 0; i < adapter->num_rx_queues; i++)
2508 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2512 case VMXNET3_IT_MSI:
2514 vmxnet3_intr(0, adapter->netdev);
2519 #endif /* CONFIG_NET_POLL_CONTROLLER */
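/* Request one irq per vector in use: per-queue tx/rx vectors plus an event
 * vector for MSI-X, or a single shared vector for MSI/INTx. Also assigns
 * each completion ring its interrupt index.
 */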
2522 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2524 struct vmxnet3_intr *intr = &adapter->intr;
2528 #ifdef CONFIG_PCI_MSI
2529 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2530 for (i = 0; i < adapter->num_tx_queues; i++) {
2531 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2532 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2533 adapter->netdev->name, vector);
2535 intr->msix_entries[vector].vector,
2537 adapter->tx_queue[i].name,
2538 &adapter->tx_queue[i]);
2540 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2541 adapter->netdev->name, vector);
2544 dev_err(&adapter->netdev->dev,
2545 "Failed to request irq for MSIX, %s, "
2547 adapter->tx_queue[i].name, err);
2551 /* Handle the case where only 1 MSIx was allocated for all tx queues */
2553 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2554 for (; i < adapter->num_tx_queues; i++)
2555 adapter->tx_queue[i].comp_ring.intr_idx
2560 adapter->tx_queue[i].comp_ring.intr_idx
2564 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2567 for (i = 0; i < adapter->num_rx_queues; i++) {
2568 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2569 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2570 adapter->netdev->name, vector);
2572 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2573 adapter->netdev->name, vector);
2574 err = request_irq(intr->msix_entries[vector].vector,
2576 adapter->rx_queue[i].name,
2577 &(adapter->rx_queue[i]));
2579 netdev_err(adapter->netdev,
2580 "Failed to request irq for MSIX, "
2582 adapter->rx_queue[i].name, err);
2586 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2589 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2590 adapter->netdev->name, vector);
2591 err = request_irq(intr->msix_entries[vector].vector,
2592 vmxnet3_msix_event, 0,
2593 intr->event_msi_vector_name, adapter->netdev);
2594 intr->event_intr_idx = vector;
2596 } else if (intr->type == VMXNET3_IT_MSI) {
2597 adapter->num_rx_queues = 1;
2598 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2599 adapter->netdev->name, adapter->netdev);
2602 adapter->num_rx_queues = 1;
2603 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2604 IRQF_SHARED, adapter->netdev->name,
2606 #ifdef CONFIG_PCI_MSI
2609 intr->num_intrs = vector + 1;
2611 netdev_err(adapter->netdev,
2612 "Failed to request irq (intr type:%d), error %d\n",
2615 /* Number of rx queues will not change after this */
2616 for (i = 0; i < adapter->num_rx_queues; i++) {
2617 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2619 rq->qid2 = i + adapter->num_rx_queues;
2620 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2623 /* init our intr settings */
2624 for (i = 0; i < intr->num_intrs; i++)
2625 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2626 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2627 adapter->intr.event_intr_idx = 0;
2628 for (i = 0; i < adapter->num_tx_queues; i++)
2629 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2630 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2633 netdev_info(adapter->netdev,
2634 "intr type %u, mode %u, %u vectors allocated\n",
2635 intr->type, intr->mask_mode, intr->num_intrs);
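/* Release every irq obtained by vmxnet3_request_irqs(). */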
2643 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2645 struct vmxnet3_intr *intr = &adapter->intr;
2646 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2648 switch (intr->type) {
2649 #ifdef CONFIG_PCI_MSI
2650 case VMXNET3_IT_MSIX:
2654 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2655 for (i = 0; i < adapter->num_tx_queues; i++) {
2656 free_irq(intr->msix_entries[vector++].vector,
2657 &(adapter->tx_queue[i]));
2658 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2663 for (i = 0; i < adapter->num_rx_queues; i++) {
2664 free_irq(intr->msix_entries[vector++].vector,
2665 &(adapter->rx_queue[i]));
2668 free_irq(intr->msix_entries[vector].vector,
2670 BUG_ON(vector >= intr->num_intrs);
2674 case VMXNET3_IT_MSI:
2675 free_irq(adapter->pdev->irq, adapter->netdev);
2677 case VMXNET3_IT_INTX:
2678 free_irq(adapter->pdev->irq, adapter->netdev);
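/* Rebuild the device VLAN filter table from the driver's active_vlans
 * bitmap. Entry 0 is always set so untagged packets keep passing.
 */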
2687 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2689 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2692 /* allow untagged pkts */
2693 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2695 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2696 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2701 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2703 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2705 if (!(netdev->flags & IFF_PROMISC)) {
2706 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2707 unsigned long flags;
2709 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2710 spin_lock_irqsave(&adapter->cmd_lock, flags);
2711 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2712 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2713 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2716 set_bit(vid, adapter->active_vlans);
2723 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2725 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2727 if (!(netdev->flags & IFF_PROMISC)) {
2728 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2729 unsigned long flags;
2731 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2732 spin_lock_irqsave(&adapter->cmd_lock, flags);
2733 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2734 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2735 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2738 clear_bit(vid, adapter->active_vlans);
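/* Copy the netdev multicast list into one contiguous buffer that can be
 * handed to the device. Returns NULL when the list is empty, when it would
 * overflow the u16 table length, or when the allocation fails.
 */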
2745 vmxnet3_copy_mc(struct net_device *netdev)
2748 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2750 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2752 /* We may be called with BH disabled */
2753 buf = kmalloc(sz, GFP_ATOMIC);
2755 struct netdev_hw_addr *ha;
2758 netdev_for_each_mc_addr(ha, netdev)
2759 memcpy(buf + i++ * ETH_ALEN, ha->addr,
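/* Program the device rx filter mode: unicast always, plus broadcast,
 * promiscuous, all-multi and the multicast table depending on the netdev
 * flags. Falls back to ALL_MULTI if the multicast table cannot be copied
 * or DMA mapped.
 */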
2768 vmxnet3_set_mc(struct net_device *netdev)
2770 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2771 unsigned long flags;
2772 struct Vmxnet3_RxFilterConf *rxConf =
2773 &adapter->shared->devRead.rxFilterConf;
2774 u8 *new_table = NULL;
2775 dma_addr_t new_table_pa = 0;
2776 bool new_table_pa_valid = false;
2777 u32 new_mode = VMXNET3_RXM_UCAST;
2779 if (netdev->flags & IFF_PROMISC) {
2780 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2781 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2783 new_mode |= VMXNET3_RXM_PROMISC;
2785 vmxnet3_restore_vlan(adapter);
2788 if (netdev->flags & IFF_BROADCAST)
2789 new_mode |= VMXNET3_RXM_BCAST;
2791 if (netdev->flags & IFF_ALLMULTI)
2792 new_mode |= VMXNET3_RXM_ALL_MULTI;
2794 if (!netdev_mc_empty(netdev)) {
2795 new_table = vmxnet3_copy_mc(netdev);
2797 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2799 rxConf->mfTableLen = cpu_to_le16(sz);
2800 new_table_pa = dma_map_single(
2801 &adapter->pdev->dev,
2805 if (!dma_mapping_error(&adapter->pdev->dev,
2807 new_mode |= VMXNET3_RXM_MCAST;
2808 new_table_pa_valid = true;
2809 rxConf->mfTablePA = cpu_to_le64(
2813 if (!new_table_pa_valid) {
2815 "failed to copy mcast list, setting ALL_MULTI\n");
2816 new_mode |= VMXNET3_RXM_ALL_MULTI;
2820 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2821 rxConf->mfTableLen = 0;
2822 rxConf->mfTablePA = 0;
2825 spin_lock_irqsave(&adapter->cmd_lock, flags);
2826 if (new_mode != rxConf->rxMode) {
2827 rxConf->rxMode = cpu_to_le32(new_mode);
2828 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2829 VMXNET3_CMD_UPDATE_RX_MODE);
2830 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2831 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2834 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2835 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2836 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2838 if (new_table_pa_valid)
2839 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2840 rxConf->mfTableLen, DMA_TO_DEVICE);
2845 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2849 for (i = 0; i < adapter->num_rx_queues; i++)
2850 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2855 * Set up driver_shared based on settings in adapter.
2859 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2861 struct Vmxnet3_DriverShared *shared = adapter->shared;
2862 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2863 struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
2864 struct Vmxnet3_TxQueueConf *tqc;
2865 struct Vmxnet3_RxQueueConf *rqc;
2866 struct Vmxnet3_TxQueueTSConf *tqtsc;
2867 struct Vmxnet3_RxQueueTSConf *rqtsc;
2870 memset(shared, 0, sizeof(*shared));
2872 /* driver settings */
2873 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2874 devRead->misc.driverInfo.version = cpu_to_le32(
2875 VMXNET3_DRIVER_VERSION_NUM);
2876 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2877 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2878 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2879 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2880 *((u32 *)&devRead->misc.driverInfo.gos));
2881 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2882 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2884 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2885 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2887 /* set up feature flags */
2888 if (adapter->netdev->features & NETIF_F_RXCSUM)
2889 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2891 if (adapter->netdev->features & NETIF_F_LRO) {
2892 devRead->misc.uptFeatures |= UPT1_F_LRO;
2893 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2895 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2896 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2898 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2899 NETIF_F_GSO_UDP_TUNNEL_CSUM))
2900 devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
2902 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2903 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2904 devRead->misc.queueDescLen = cpu_to_le32(
2905 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2906 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2908 /* tx queue settings */
2909 devRead->misc.numTxQueues = adapter->num_tx_queues;
2910 for (i = 0; i < adapter->num_tx_queues; i++) {
2911 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2912 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2913 tqc = &adapter->tqd_start[i].conf;
2914 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2915 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2916 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2917 tqc->ddPA = cpu_to_le64(~0ULL);
2918 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2919 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2920 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2921 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2922 tqc->ddLen = cpu_to_le32(0);
2923 tqc->intrIdx = tq->comp_ring.intr_idx;
2924 if (VMXNET3_VERSION_GE_9(adapter)) {
2925 tqtsc = &adapter->tqd_start[i].tsConf;
2926 tqtsc->txTSRingBasePA = cpu_to_le64(tq->ts_ring.basePA);
2927 tqtsc->txTSRingDescSize = cpu_to_le16(tq->tx_ts_desc_size);
2931 /* rx queue settings */
2932 devRead->misc.numRxQueues = adapter->num_rx_queues;
2933 for (i = 0; i < adapter->num_rx_queues; i++) {
2934 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2935 rqc = &adapter->rqd_start[i].conf;
2936 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2937 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2938 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2939 rqc->ddPA = cpu_to_le64(~0ULL);
2940 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2941 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2942 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2943 rqc->ddLen = cpu_to_le32(0);
2944 rqc->intrIdx = rq->comp_ring.intr_idx;
2945 if (VMXNET3_VERSION_GE_3(adapter)) {
2946 rqc->rxDataRingBasePA =
2947 cpu_to_le64(rq->data_ring.basePA);
2948 rqc->rxDataRingDescSize =
2949 cpu_to_le16(rq->data_ring.desc_size);
2951 if (VMXNET3_VERSION_GE_9(adapter)) {
2952 rqtsc = &adapter->rqd_start[i].tsConf;
2953 rqtsc->rxTSRingBasePA = cpu_to_le64(rq->ts_ring.basePA);
2954 rqtsc->rxTSRingDescSize = cpu_to_le16(rq->rx_ts_desc_size);
2959 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2962 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2964 devRead->misc.uptFeatures |= UPT1_F_RSS;
2965 devRead->misc.numRxQueues = adapter->num_rx_queues;
2966 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2967 UPT1_RSS_HASH_TYPE_IPV4 |
2968 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2969 UPT1_RSS_HASH_TYPE_IPV6;
2970 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2971 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2972 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2973 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
2975 for (i = 0; i < rssConf->indTableSize; i++)
2976 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2977 i, adapter->num_rx_queues);
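/* ethtool_rxfh_indir_default(i, n) is simply i % n, so the default
 * indirection table spreads flows round-robin across the rx queues.
 */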
2979 devRead->rssConfDesc.confVer = 1;
2980 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2981 devRead->rssConfDesc.confPA =
2982 cpu_to_le64(adapter->rss_conf_pa);
2985 #endif /* VMXNET3_RSS */
2988 if (!VMXNET3_VERSION_GE_6(adapter) ||
2989 !adapter->queuesExtEnabled) {
2990 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2992 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2993 for (i = 0; i < adapter->intr.num_intrs; i++)
2994 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2996 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2997 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2999 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
3001 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
3002 for (i = 0; i < adapter->intr.num_intrs; i++)
3003 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
3005 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
3006 devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
3009 /* rx filter settings */
3010 devRead->rxFilterConf.rxMode = 0;
3011 vmxnet3_restore_vlan(adapter);
3012 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
3014 /* the rest are already zeroed */
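/* Tell a version 7+ device which rx ring buffer sizes the driver chose. */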
3018 vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
3020 struct Vmxnet3_DriverShared *shared = adapter->shared;
3021 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
3022 unsigned long flags;
3024 if (!VMXNET3_VERSION_GE_7(adapter))
3027 cmdInfo->ringBufSize = adapter->ringBufSize;
3028 spin_lock_irqsave(&adapter->cmd_lock, flags);
3029 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3030 VMXNET3_CMD_SET_RING_BUFFER_SIZE);
3031 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
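/* For version 3+ devices, either read back the default coalescing scheme
 * or push the driver-configured one.
 */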
3035 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
3037 struct Vmxnet3_DriverShared *shared = adapter->shared;
3038 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
3039 unsigned long flags;
3041 if (!VMXNET3_VERSION_GE_3(adapter))
3044 spin_lock_irqsave(&adapter->cmd_lock, flags);
3045 cmdInfo->varConf.confVer = 1;
3046 cmdInfo->varConf.confLen =
3047 cpu_to_le32(sizeof(*adapter->coal_conf));
3048 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
3050 if (adapter->default_coal_mode) {
3051 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3052 VMXNET3_CMD_GET_COALESCE);
3054 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3055 VMXNET3_CMD_SET_COALESCE);
3058 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
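/* Negotiate which header fields feed the RSS hash (version 4+). UDP and
 * ESP RSS additionally require the matching version 7 capability bits.
 */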
3062 vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
3064 struct Vmxnet3_DriverShared *shared = adapter->shared;
3065 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
3066 unsigned long flags;
3068 if (!VMXNET3_VERSION_GE_4(adapter))
3071 spin_lock_irqsave(&adapter->cmd_lock, flags);
3073 if (adapter->default_rss_fields) {
3074 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3075 VMXNET3_CMD_GET_RSS_FIELDS);
3076 adapter->rss_fields =
3077 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3079 if (VMXNET3_VERSION_GE_7(adapter)) {
3080 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
3081 adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
3082 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3083 VMXNET3_CAP_UDP_RSS)) {
3084 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
3086 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
3089 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
3090 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3091 VMXNET3_CAP_ESP_RSS_IPV4)) {
3092 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
3094 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
3097 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
3098 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3099 VMXNET3_CAP_ESP_RSS_IPV6)) {
3100 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
3102 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
3105 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3106 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3107 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3109 cmdInfo->setRssFields = adapter->rss_fields;
3110 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3111 VMXNET3_CMD_SET_RSS_FIELDS);
3112 /* Not all requested RSS may get applied, so get and
3113 * cache what was actually applied.
3115 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3116 VMXNET3_CMD_GET_RSS_FIELDS);
3117 adapter->rss_fields =
3118 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3121 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
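/* Bring the device up: init the queues, request irqs, publish the shared
 * memory area, issue ACTIVATE_DEV, prime the rx producer registers and
 * enable interrupts.
 */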
3125 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
3129 unsigned long flags;
3131 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
3132 " ring sizes %u %u %u\n", adapter->netdev->name,
3133 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
3134 adapter->tx_queue[0].tx_ring.size,
3135 adapter->rx_queue[0].rx_ring[0].size,
3136 adapter->rx_queue[0].rx_ring[1].size);
3138 vmxnet3_tq_init_all(adapter);
3139 err = vmxnet3_rq_init_all(adapter);
3141 netdev_err(adapter->netdev,
3142 "Failed to init rx queue error %d\n", err);
3146 err = vmxnet3_request_irqs(adapter);
3148 netdev_err(adapter->netdev,
3149 "Failed to setup irq for error %d\n", err);
3153 vmxnet3_setup_driver_shared(adapter);
3155 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
3156 adapter->shared_pa));
3157 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
3158 adapter->shared_pa));
3159 spin_lock_irqsave(&adapter->cmd_lock, flags);
3160 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3161 VMXNET3_CMD_ACTIVATE_DEV);
3162 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3163 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3166 netdev_err(adapter->netdev,
3167 "Failed to activate dev: error %u\n", ret);
3172 vmxnet3_init_bufsize(adapter);
3173 vmxnet3_init_coalesce(adapter);
3174 vmxnet3_init_rssfields(adapter);
3176 for (i = 0; i < adapter->num_rx_queues; i++) {
3177 VMXNET3_WRITE_BAR0_REG(adapter,
3178 adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
3179 adapter->rx_queue[i].rx_ring[0].next2fill);
3180 VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
3181 (i * VMXNET3_REG_ALIGN)),
3182 adapter->rx_queue[i].rx_ring[1].next2fill);
3185 /* Apply the rx filter settings last. */
3186 vmxnet3_set_mc(adapter->netdev);
3189 * Check link state when first activating device. It will start the
3190 * tx queue if the link is up.
3192 vmxnet3_check_link(adapter, true);
3193 netif_tx_wake_all_queues(adapter->netdev);
3194 for (i = 0; i < adapter->num_rx_queues; i++)
3195 napi_enable(&adapter->rx_queue[i].napi);
3196 vmxnet3_enable_all_intrs(adapter);
3197 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3201 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
3202 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
3203 vmxnet3_free_irqs(adapter);
3206 /* free up buffers we allocated */
3207 vmxnet3_rq_cleanup_all(adapter);
3213 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
3215 unsigned long flags;
3216 spin_lock_irqsave(&adapter->cmd_lock, flags);
3217 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
3218 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
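/* Quiesce the device and release its irqs; a no-op if the QUIESCED bit is
 * already set.
 */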
3223 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
3226 unsigned long flags;
3227 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
3231 spin_lock_irqsave(&adapter->cmd_lock, flags);
3232 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3233 VMXNET3_CMD_QUIESCE_DEV);
3234 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3235 vmxnet3_disable_all_intrs(adapter);
3237 for (i = 0; i < adapter->num_rx_queues; i++)
3238 napi_disable(&adapter->rx_queue[i].napi);
3239 netif_tx_disable(adapter->netdev);
3240 adapter->link_speed = 0;
3241 netif_carrier_off(adapter->netdev);
3243 vmxnet3_tq_cleanup_all(adapter);
3244 vmxnet3_rq_cleanup_all(adapter);
3245 vmxnet3_free_irqs(adapter);
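/* Program the device MAC registers: mac[0..3] go into MACL and mac[4..5]
 * into the low 16 bits of MACH. For example, on a little-endian guest,
 * 00:11:22:33:44:55 is written as MACL = 0x33221100 and MACH = 0x5544.
 */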
3251 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
3256 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
3258 tmp = (mac[5] << 8) | mac[4];
3259 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
3264 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
3266 struct sockaddr *addr = p;
3267 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3269 dev_addr_set(netdev, addr->sa_data);
3270 vmxnet3_write_mac_addr(adapter, addr->sa_data);
3276 /* ==================== initialization and cleanup routines ============ */
3279 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
3282 unsigned long mmio_start, mmio_len;
3283 struct pci_dev *pdev = adapter->pdev;
3285 err = pci_enable_device(pdev);
3287 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
3291 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
3292 vmxnet3_driver_name);
3295 "Failed to request region for adapter: error %d\n", err);
3296 goto err_enable_device;
3299 pci_set_master(pdev);
3301 mmio_start = pci_resource_start(pdev, 0);
3302 mmio_len = pci_resource_len(pdev, 0);
3303 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
3304 if (!adapter->hw_addr0) {
3305 dev_err(&pdev->dev, "Failed to map bar0\n");
3310 mmio_start = pci_resource_start(pdev, 1);
3311 mmio_len = pci_resource_len(pdev, 1);
3312 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
3313 if (!adapter->hw_addr1) {
3314 dev_err(&pdev->dev, "Failed to map bar1\n");
3321 iounmap(adapter->hw_addr0);
3323 pci_release_selected_regions(pdev, (1 << 2) - 1);
3325 pci_disable_device(pdev);
3331 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
3333 BUG_ON(!adapter->pdev);
3335 iounmap(adapter->hw_addr0);
3336 iounmap(adapter->hw_addr1);
3337 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
3338 pci_disable_device(adapter->pdev);
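/* Derive rx buffer and ring sizes from the MTU: pick the skb buffer size
 * and the number of rx buffers needed per packet, then round the ring
 * sizes to the required alignment (a power of two for version 7+).
 */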
3343 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
3345 size_t sz, i, ring0_size, ring1_size, comp_size;
3346 /* With version 7, ring1 will have only T0 buffers */
3347 if (!VMXNET3_VERSION_GE_7(adapter)) {
3348 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
3349 VMXNET3_MAX_ETH_HDR_SIZE) {
3350 adapter->skb_buf_size = adapter->netdev->mtu +
3351 VMXNET3_MAX_ETH_HDR_SIZE;
3352 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
3353 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
3355 adapter->rx_buf_per_pkt = 1;
3357 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
3358 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
3359 VMXNET3_MAX_ETH_HDR_SIZE;
3360 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
3363 adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
3364 VMXNET3_MAX_SKB_BUF_SIZE);
3365 adapter->rx_buf_per_pkt = 1;
3366 adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
3367 adapter->ringBufSize.ring1BufSizeType1 = 0;
3368 adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
3372 * for simplicity, force the ring0 size to be a multiple of
3373 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
3375 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
3376 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
3377 ring0_size = (ring0_size + sz - 1) / sz * sz;
3378 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
3380 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
3381 ring1_size = (ring1_size + sz - 1) / sz * sz;
3382 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
3384 /* For v7 and later, keep ring size power of 2 for UPT */
3385 if (VMXNET3_VERSION_GE_7(adapter)) {
3386 ring0_size = rounddown_pow_of_two(ring0_size);
3387 ring1_size = rounddown_pow_of_two(ring1_size);
3389 comp_size = ring0_size + ring1_size;
3391 for (i = 0; i < adapter->num_rx_queues; i++) {
3392 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3394 rq->rx_ring[0].size = ring0_size;
3395 rq->rx_ring[1].size = ring1_size;
3396 rq->comp_ring.size = comp_size;
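/* Allocate all tx and rx queues. A tx queue failure is fatal; if an rx
 * queue other than the first fails, the driver continues with however
 * many rx queues were created.
 */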
3402 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
3403 u32 rx_ring_size, u32 rx_ring2_size,
3404 u16 txdata_desc_size, u16 rxdata_desc_size)
3408 for (i = 0; i < adapter->num_tx_queues; i++) {
3409 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
3410 tq->tx_ring.size = tx_ring_size;
3411 tq->data_ring.size = tx_ring_size;
3412 tq->comp_ring.size = tx_ring_size;
3413 tq->txdata_desc_size = txdata_desc_size;
3414 tq->shared = &adapter->tqd_start[i].ctrl;
3416 tq->adapter = adapter;
3418 tq->tx_ts_desc_size = adapter->tx_ts_desc_size;
3420 err = vmxnet3_tq_create(tq, adapter);
3422 * Too late to change num_tx_queues. We cannot make do with
3423 * fewer queues than we asked for
3429 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
3430 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
3431 vmxnet3_adjust_rx_ring_size(adapter);
3433 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
3434 for (i = 0; i < adapter->num_rx_queues; i++) {
3435 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3436 /* qid and qid2 for rx queues will be assigned later when num
3437 * of rx queues is finalized after allocating intrs */
3438 rq->shared = &adapter->rqd_start[i].ctrl;
3439 rq->adapter = adapter;
3440 rq->data_ring.desc_size = rxdata_desc_size;
3441 rq->rx_ts_desc_size = adapter->rx_ts_desc_size;
3442 err = vmxnet3_rq_create(rq, adapter);
3445 netdev_err(adapter->netdev,
3446 "Could not allocate any rx queues. "
3450 netdev_info(adapter->netdev,
3451 "Number of rx queues changed "
3453 adapter->num_rx_queues = i;
3460 if (!adapter->rxdataring_enabled)
3461 vmxnet3_rq_destroy_all_rxdataring(adapter);
3465 vmxnet3_tq_destroy_all(adapter);
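/* ndo_open handler: query the device for the tx data and timestamp ring
 * descriptor sizes, create the queues and activate the device.
 */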
3470 vmxnet3_open(struct net_device *netdev)
3472 struct vmxnet3_adapter *adapter;
3475 adapter = netdev_priv(netdev);
3477 for (i = 0; i < adapter->num_tx_queues; i++)
3478 spin_lock_init(&adapter->tx_queue[i].tx_lock);
3480 if (VMXNET3_VERSION_GE_3(adapter)) {
3481 unsigned long flags;
3482 u16 txdata_desc_size;
3485 spin_lock_irqsave(&adapter->cmd_lock, flags);
3486 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3487 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
3488 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3489 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3491 txdata_desc_size = ret & 0xffff;
3492 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
3493 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
3494 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
3495 adapter->txdata_desc_size =
3496 sizeof(struct Vmxnet3_TxDataDesc);
3498 adapter->txdata_desc_size = txdata_desc_size;
3500 if (VMXNET3_VERSION_GE_9(adapter))
3501 adapter->rxdata_desc_size = (ret >> 16) & 0xffff;
3503 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
3506 if (VMXNET3_VERSION_GE_9(adapter)) {
3507 unsigned long flags;
3508 u16 tx_ts_desc_size = 0;
3509 u16 rx_ts_desc_size = 0;
3512 spin_lock_irqsave(&adapter->cmd_lock, flags);
3513 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3514 VMXNET3_CMD_GET_TSRING_DESC_SIZE);
3515 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3516 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3518 tx_ts_desc_size = (ret & 0xff);
3519 rx_ts_desc_size = ((ret >> 16) & 0xff);
3521 if (tx_ts_desc_size > VMXNET3_TXTS_DESC_MAX_SIZE ||
3522 tx_ts_desc_size & VMXNET3_TXTS_DESC_SIZE_MASK)
3523 tx_ts_desc_size = 0;
3524 if (rx_ts_desc_size > VMXNET3_RXTS_DESC_MAX_SIZE ||
3525 rx_ts_desc_size & VMXNET3_RXTS_DESC_SIZE_MASK)
3526 rx_ts_desc_size = 0;
3527 adapter->tx_ts_desc_size = tx_ts_desc_size;
3528 adapter->rx_ts_desc_size = rx_ts_desc_size;
3530 adapter->tx_ts_desc_size = 0;
3531 adapter->rx_ts_desc_size = 0;
3534 err = vmxnet3_create_queues(adapter,
3535 adapter->tx_ring_size,
3536 adapter->rx_ring_size,
3537 adapter->rx_ring2_size,
3538 adapter->txdata_desc_size,
3539 adapter->rxdata_desc_size);
3543 err = vmxnet3_activate_dev(adapter);
3550 vmxnet3_rq_destroy_all(adapter);
3551 vmxnet3_tq_destroy_all(adapter);
3558 vmxnet3_close(struct net_device *netdev)
3560 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3563 * Reset_work may be in the middle of resetting the device, wait for its completion.
3566 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3567 usleep_range(1000, 2000);
3569 vmxnet3_quiesce_dev(adapter);
3571 vmxnet3_rq_destroy_all(adapter);
3572 vmxnet3_tq_destroy_all(adapter);
3574 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3582 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3587 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3588 * vmxnet3_close() will deadlock.
3590 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3592 /* we need to enable NAPI, otherwise dev_close will deadlock */
3593 for (i = 0; i < adapter->num_rx_queues; i++)
3594 napi_enable(&adapter->rx_queue[i].napi);
3596 * Need to clear the quiesce bit to ensure that vmxnet3_close
3597 * can quiesce the device properly
3599 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3600 dev_close(adapter->netdev);
3605 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3607 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3610 WRITE_ONCE(netdev->mtu, new_mtu);
3613 * Reset_work may be in the middle of resetting the device, wait for its completion.
3616 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3617 usleep_range(1000, 2000);
3619 if (netif_running(netdev)) {
3620 vmxnet3_quiesce_dev(adapter);
3621 vmxnet3_reset_dev(adapter);
3623 /* we need to re-create the rx queue based on the new mtu */
3624 vmxnet3_rq_destroy_all(adapter);
3625 vmxnet3_adjust_rx_ring_size(adapter);
3626 err = vmxnet3_rq_create_all(adapter);
3629 "failed to re-create rx queues, "
3630 " error %d. Closing it.\n", err);
3634 err = vmxnet3_activate_dev(adapter);
3637 "failed to re-activate, error %d. "
3638 "Closing it\n", err);
3644 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3646 vmxnet3_force_close(adapter);
3653 vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
3655 struct net_device *netdev = adapter->netdev;
3656 unsigned long flags;
3658 if (VMXNET3_VERSION_GE_9(adapter)) {
3659 spin_lock_irqsave(&adapter->cmd_lock, flags);
3660 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3661 VMXNET3_CMD_GET_DISABLED_OFFLOADS);
3662 adapter->disabledOffloads = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3663 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3666 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3667 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3668 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3669 NETIF_F_LRO | NETIF_F_HIGHDMA;
3671 if (VMXNET3_VERSION_GE_4(adapter)) {
3672 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3673 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3675 netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
3676 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3677 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3678 NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
3679 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3682 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_TSO) {
3683 netdev->hw_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3684 netdev->hw_enc_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3687 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_LRO) {
3688 netdev->hw_features &= ~(NETIF_F_LRO);
3689 netdev->hw_enc_features &= ~(NETIF_F_LRO);
3692 if (VMXNET3_VERSION_GE_7(adapter)) {
3693 unsigned long flags;
3695 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3696 VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
3697 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
3699 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3700 VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
3701 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
3703 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3704 VMXNET3_CAP_GENEVE_TSO)) {
3705 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
3707 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3708 VMXNET3_CAP_VXLAN_TSO)) {
3709 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
3711 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3712 VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
3713 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
3715 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3716 VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
3717 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
3720 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3721 spin_lock_irqsave(&adapter->cmd_lock, flags);
3722 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3723 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3724 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3726 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
3727 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
3728 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
3729 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
3730 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3731 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3733 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
3734 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
3735 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3736 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3740 netdev->vlan_features = netdev->hw_features &
3741 ~(NETIF_F_HW_VLAN_CTAG_TX |
3742 NETIF_F_HW_VLAN_CTAG_RX);
3743 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3748 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3752 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3755 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3756 mac[4] = tmp & 0xff;
3757 mac[5] = (tmp >> 8) & 0xff;
3760 #ifdef CONFIG_PCI_MSI
3763 * Enable MSIx vectors.
3764 * Returns:
3765 * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of vectors required
3766 * were enabled, or the
3767 * number of vectors which were enabled otherwise (this number is greater
3768 * than VMXNET3_LINUX_MIN_MSIX_VECT).
3772 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3774 int ret = pci_enable_msix_range(adapter->pdev,
3775 adapter->intr.msix_entries, nvec, nvec);
3777 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3778 dev_err(&adapter->netdev->dev,
3779 "Failed to enable %d MSI-X, trying %d\n",
3780 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3782 ret = pci_enable_msix_range(adapter->pdev,
3783 adapter->intr.msix_entries,
3784 VMXNET3_LINUX_MIN_MSIX_VECT,
3785 VMXNET3_LINUX_MIN_MSIX_VECT);
3789 dev_err(&adapter->netdev->dev,
3790 "Failed to enable MSI-X, error: %d\n", ret);
3797 #endif /* CONFIG_PCI_MSI */
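/* Choose the interrupt type (MSI-X, MSI or INTx) from what the device
 * reports via GET_CONF_INTR and what the platform can actually allocate,
 * falling back step by step.
 */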
3800 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3803 unsigned long flags;
3806 spin_lock_irqsave(&adapter->cmd_lock, flags);
3807 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3808 VMXNET3_CMD_GET_CONF_INTR);
3809 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3810 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3811 adapter->intr.type = cfg & 0x3;
3812 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3814 if (adapter->intr.type == VMXNET3_IT_AUTO) {
3815 adapter->intr.type = VMXNET3_IT_MSIX;
3818 #ifdef CONFIG_PCI_MSI
3819 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3820 int i, nvec, nvec_allocated;
3822 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3823 1 : adapter->num_tx_queues;
3824 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3825 0 : adapter->num_rx_queues;
3826 nvec += 1; /* for link event */
3827 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3828 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
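/* For example, 4 tx and 4 rx queues in BUDDYSHARE mode need
 * 4 shared queue vectors + 1 event vector = 5 MSI-X vectors.
 */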
3830 for (i = 0; i < nvec; i++)
3831 adapter->intr.msix_entries[i].entry = i;
3833 nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
3834 if (nvec_allocated < 0)
3837 /* If we cannot allocate one MSIx vector per queue
3838 * then limit the number of rx queues to 1
3840 if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
3841 nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
3842 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3843 || adapter->num_rx_queues != 1) {
3844 adapter->share_intr = VMXNET3_INTR_TXSHARE;
3845 netdev_err(adapter->netdev,
3846 "Number of rx queues : 1\n");
3847 adapter->num_rx_queues = 1;
3851 adapter->intr.num_intrs = nvec_allocated;
3855 /* If we cannot allocate MSIx vectors, use only one rx queue */
3856 dev_info(&adapter->pdev->dev,
3857 "Failed to enable MSI-X, error %d. "
3858 "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
3860 adapter->intr.type = VMXNET3_IT_MSI;
3863 if (adapter->intr.type == VMXNET3_IT_MSI) {
3864 if (!pci_enable_msi(adapter->pdev)) {
3865 adapter->num_rx_queues = 1;
3866 adapter->intr.num_intrs = 1;
3870 #endif /* CONFIG_PCI_MSI */
3872 adapter->num_rx_queues = 1;
3873 dev_info(&adapter->netdev->dev,
3874 "Using INTx interrupt, #Rx queues: 1.\n");
3875 adapter->intr.type = VMXNET3_IT_INTX;
3877 /* INT-X related setting */
3878 adapter->intr.num_intrs = 1;
3883 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3885 if (adapter->intr.type == VMXNET3_IT_MSIX)
3886 pci_disable_msix(adapter->pdev);
3887 else if (adapter->intr.type == VMXNET3_IT_MSI)
3888 pci_disable_msi(adapter->pdev);
3890 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3895 vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3897 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3898 adapter->tx_timeout_count++;
3900 netdev_err(adapter->netdev, "tx hang\n");
3901 schedule_work(&adapter->work);
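/* Work item scheduled from the tx watchdog: quiesce, reset and re-activate
 * the device unless it was closed in the meantime.
 */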
3906 vmxnet3_reset_work(struct work_struct *data)
3908 struct vmxnet3_adapter *adapter;
3910 adapter = container_of(data, struct vmxnet3_adapter, work);
3912 /* if another thread is resetting the device, no need to proceed */
3913 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3916 /* if the device is closed, we must leave it alone */
3918 if (netif_running(adapter->netdev)) {
3919 netdev_notice(adapter->netdev, "resetting\n");
3920 vmxnet3_quiesce_dev(adapter);
3921 vmxnet3_reset_dev(adapter);
3922 vmxnet3_activate_dev(adapter);
3924 netdev_info(adapter->netdev, "already closed\n");
3928 netif_wake_queue(adapter->netdev);
3929 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3934 vmxnet3_probe_device(struct pci_dev *pdev,
3935 const struct pci_device_id *id)
3937 static const struct net_device_ops vmxnet3_netdev_ops = {
3938 .ndo_open = vmxnet3_open,
3939 .ndo_stop = vmxnet3_close,
3940 .ndo_start_xmit = vmxnet3_xmit_frame,
3941 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3942 .ndo_change_mtu = vmxnet3_change_mtu,
3943 .ndo_fix_features = vmxnet3_fix_features,
3944 .ndo_set_features = vmxnet3_set_features,
3945 .ndo_features_check = vmxnet3_features_check,
3946 .ndo_get_stats64 = vmxnet3_get_stats64,
3947 .ndo_tx_timeout = vmxnet3_tx_timeout,
3948 .ndo_set_rx_mode = vmxnet3_set_mc,
3949 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3950 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3951 #ifdef CONFIG_NET_POLL_CONTROLLER
3952 .ndo_poll_controller = vmxnet3_netpoll,
3954 .ndo_bpf = vmxnet3_xdp,
3955 .ndo_xdp_xmit = vmxnet3_xdp_xmit,
3959 struct net_device *netdev;
3960 struct vmxnet3_adapter *adapter;
3966 unsigned long flags;
3968 if (!pci_msi_enabled())
3973 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3974 (int)num_online_cpus());
3980 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3981 (int)num_online_cpus());
3985 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3986 max(num_tx_queues, num_rx_queues));
3990 pci_set_drvdata(pdev, netdev);
3991 adapter = netdev_priv(netdev);
3992 adapter->netdev = netdev;
3993 adapter->pdev = pdev;
3995 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3996 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3997 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3999 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4001 dev_err(&pdev->dev, "dma_set_mask failed\n");
4005 spin_lock_init(&adapter->cmd_lock);
4006 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
4007 sizeof(struct vmxnet3_adapter),
4009 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
4010 dev_err(&pdev->dev, "Failed to map dma\n");
4014 adapter->shared = dma_alloc_coherent(
4015 &adapter->pdev->dev,
4016 sizeof(struct Vmxnet3_DriverShared),
4017 &adapter->shared_pa, GFP_KERNEL);
4018 if (!adapter->shared) {
4019 dev_err(&pdev->dev, "Failed to allocate memory\n");
4021 goto err_alloc_shared;
4024 err = vmxnet3_alloc_pci_resources(adapter);
4028 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
4029 for (i = VMXNET3_REV_9; i >= VMXNET3_REV_1; i--) {
4030 if (ver & (1 << i)) {
4031 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1 << i);
4032 adapter->version = i + 1;
4036 if (i < VMXNET3_REV_1) {
4038 "Incompatible h/w version (0x%x) for adapter\n", ver);
4042 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
4044 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
4046 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
4049 "Incompatible upt version (0x%x) for adapter\n", ver);
4054 if (VMXNET3_VERSION_GE_7(adapter)) {
4055 adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
4056 adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
4057 if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
4058 adapter->dev_caps[0] = adapter->devcap_supported[0] &
4059 (1UL << VMXNET3_CAP_LARGE_BAR);
4061 if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
4062 adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
4063 adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
4064 adapter->dev_caps[0] |= adapter->devcap_supported[0] &
4065 (1UL << VMXNET3_CAP_OOORX_COMP);
4067 if (adapter->dev_caps[0])
4068 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
4070 spin_lock_irqsave(&adapter->cmd_lock, flags);
4071 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
4072 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4073 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4076 if (VMXNET3_VERSION_GE_7(adapter) &&
4077 adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
4078 adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
4079 adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
4080 adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
4082 adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
4083 adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
4084 adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
4087 if (VMXNET3_VERSION_GE_6(adapter)) {
4088 spin_lock_irqsave(&adapter->cmd_lock, flags);
4089 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4090 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
4091 queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4092 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4094 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
4095 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
4097 adapter->num_rx_queues = min(num_rx_queues,
4098 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4099 adapter->num_tx_queues = min(num_tx_queues,
4100 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
4102 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
4103 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
4104 adapter->queuesExtEnabled = true;
4106 adapter->queuesExtEnabled = false;
4109 adapter->queuesExtEnabled = false;
4110 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
4111 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
4112 adapter->num_rx_queues = min(num_rx_queues,
4113 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4114 adapter->num_tx_queues = min(num_tx_queues,
4115 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
4117 dev_info(&pdev->dev,
4118 "# of Tx queues : %d, # of Rx queues : %d\n",
4119 adapter->num_tx_queues, adapter->num_rx_queues);
4121 adapter->rx_buf_per_pkt = 1;
4123 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
4124 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
4125 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
4126 &adapter->queue_desc_pa,
4129 if (!adapter->tqd_start) {
4130 dev_err(&pdev->dev, "Failed to allocate memory\n");
4134 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
4135 adapter->num_tx_queues);
4136 if (VMXNET3_VERSION_GE_9(adapter))
4137 adapter->latencyConf = &adapter->tqd_start->tsConf.latencyConf;
4139 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
4140 sizeof(struct Vmxnet3_PMConf),
4141 &adapter->pm_conf_pa,
4143 if (adapter->pm_conf == NULL) {
4150 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
4151 sizeof(struct UPT1_RSSConf),
4152 &adapter->rss_conf_pa,
4154 if (adapter->rss_conf == NULL) {
4158 #endif /* VMXNET3_RSS */
4160 if (VMXNET3_VERSION_GE_3(adapter)) {
4161 adapter->coal_conf =
4162 dma_alloc_coherent(&adapter->pdev->dev,
4163 sizeof(struct Vmxnet3_CoalesceScheme)
4165 &adapter->coal_conf_pa,
4167 if (!adapter->coal_conf) {
4171 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
4172 adapter->default_coal_mode = true;
4175 if (VMXNET3_VERSION_GE_4(adapter)) {
4176 adapter->default_rss_fields = true;
4177 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
4180 SET_NETDEV_DEV(netdev, &pdev->dev);
4181 vmxnet3_declare_features(adapter);
4182 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
4183 NETDEV_XDP_ACT_NDO_XMIT;
4185 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
4186 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
4188 if (adapter->num_tx_queues == adapter->num_rx_queues)
4189 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
4191 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
4193 vmxnet3_alloc_intr_resources(adapter);
4196 if (adapter->num_rx_queues > 1 &&
4197 adapter->intr.type == VMXNET3_IT_MSIX) {
4198 adapter->rss = true;
4199 netdev->hw_features |= NETIF_F_RXHASH;
4200 netdev->features |= NETIF_F_RXHASH;
4201 dev_dbg(&pdev->dev, "RSS is enabled.\n");
4203 adapter->rss = false;
4207 vmxnet3_read_mac_addr(adapter, mac);
4208 dev_addr_set(netdev, mac);
4210 netdev->netdev_ops = &vmxnet3_netdev_ops;
4211 vmxnet3_set_ethtool_ops(netdev);
4212 netdev->watchdog_timeo = 5 * HZ;
4214 /* MTU range: 60 - 9190 */
4215 netdev->min_mtu = VMXNET3_MIN_MTU;
4216 if (VMXNET3_VERSION_GE_6(adapter))
4217 netdev->max_mtu = VMXNET3_V6_MAX_MTU;
4219 netdev->max_mtu = VMXNET3_MAX_MTU;
4221 INIT_WORK(&adapter->work, vmxnet3_reset_work);
4222 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
4224 if (adapter->intr.type == VMXNET3_IT_MSIX) {
4226 for (i = 0; i < adapter->num_rx_queues; i++) {
4227 netif_napi_add(adapter->netdev,
4228 &adapter->rx_queue[i].napi,
4229 vmxnet3_poll_rx_only);
4232 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
4236 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4237 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
4239 netif_carrier_off(netdev);
4240 err = register_netdev(netdev);
4243 dev_err(&pdev->dev, "Failed to register adapter\n");
4247 vmxnet3_check_link(adapter, false);
4251 if (VMXNET3_VERSION_GE_3(adapter)) {
4252 dma_free_coherent(&adapter->pdev->dev,
4253 sizeof(struct Vmxnet3_CoalesceScheme),
4254 adapter->coal_conf, adapter->coal_conf_pa);
4256 vmxnet3_free_intr_resources(adapter);
4259 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4260 adapter->rss_conf, adapter->rss_conf_pa);
4263 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4264 adapter->pm_conf, adapter->pm_conf_pa);
4266 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4267 adapter->queue_desc_pa);
4269 vmxnet3_free_pci_resources(adapter);
4271 dma_free_coherent(&adapter->pdev->dev,
4272 sizeof(struct Vmxnet3_DriverShared),
4273 adapter->shared, adapter->shared_pa);
4275 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4276 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4278 free_netdev(netdev);
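/* PCI remove: unregister the netdev and free the DMA areas, recomputing
 * the rx queue count the same way probe did so the queue descriptor area
 * is freed with its original size.
 */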
4284 vmxnet3_remove_device(struct pci_dev *pdev)
4286 struct net_device *netdev = pci_get_drvdata(pdev);
4287 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4289 int num_rx_queues, rx_queues;
4290 unsigned long flags;
4294 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
4295 (int)num_online_cpus());
4299 if (!VMXNET3_VERSION_GE_6(adapter)) {
4300 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
4302 if (VMXNET3_VERSION_GE_6(adapter)) {
4303 spin_lock_irqsave(&adapter->cmd_lock, flags);
4304 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4305 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
4306 rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4307 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4309 rx_queues = (rx_queues >> 8) & 0xff;
4311 rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4312 num_rx_queues = min(num_rx_queues, rx_queues);
4314 num_rx_queues = min(num_rx_queues,
4315 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4318 cancel_work_sync(&adapter->work);
4320 unregister_netdev(netdev);
4322 vmxnet3_free_intr_resources(adapter);
4323 vmxnet3_free_pci_resources(adapter);
4324 if (VMXNET3_VERSION_GE_3(adapter)) {
4325 dma_free_coherent(&adapter->pdev->dev,
4326 sizeof(struct Vmxnet3_CoalesceScheme),
4327 adapter->coal_conf, adapter->coal_conf_pa);
4330 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4331 adapter->rss_conf, adapter->rss_conf_pa);
4333 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4334 adapter->pm_conf, adapter->pm_conf_pa);
4336 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
4337 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
4338 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4339 adapter->queue_desc_pa);
4340 dma_free_coherent(&adapter->pdev->dev,
4341 sizeof(struct Vmxnet3_DriverShared),
4342 adapter->shared, adapter->shared_pa);
4343 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4344 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4345 free_netdev(netdev);
4348 static void vmxnet3_shutdown_device(struct pci_dev *pdev)
4350 struct net_device *netdev = pci_get_drvdata(pdev);
4351 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4352 unsigned long flags;
4354 /* Reset_work may be in the middle of resetting the device, wait for its completion. */
4357 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
4358 usleep_range(1000, 2000);
4360 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
4362 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4365 spin_lock_irqsave(&adapter->cmd_lock, flags);
4366 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4367 VMXNET3_CMD_QUIESCE_DEV);
4368 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4369 vmxnet3_disable_all_intrs(adapter);
4371 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
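/* Suspend/freeze handler: quiesce the device, then program wake-up filters
 * (unicast, ARP request for our IP, magic packet) according to the
 * configured WOL flags before powering the device down.
 */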
4378 vmxnet3_suspend(struct device *device)
4380 struct pci_dev *pdev = to_pci_dev(device);
4381 struct net_device *netdev = pci_get_drvdata(pdev);
4382 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4383 struct Vmxnet3_PMConf *pmConf;
4384 struct ethhdr *ehdr;
4385 struct arphdr *ahdr;
4387 struct in_device *in_dev;
4388 struct in_ifaddr *ifa;
4389 unsigned long flags;
4392 if (!netif_running(netdev))
4395 for (i = 0; i < adapter->num_rx_queues; i++)
4396 napi_disable(&adapter->rx_queue[i].napi);
4398 vmxnet3_disable_all_intrs(adapter);
4399 vmxnet3_free_irqs(adapter);
4400 vmxnet3_free_intr_resources(adapter);
4402 netif_device_detach(netdev);
4404 /* Create wake-up filters. */
4405 pmConf = adapter->pm_conf;
4406 memset(pmConf, 0, sizeof(*pmConf));
4408 if (adapter->wol & WAKE_UCAST) {
4409 pmConf->filters[i].patternSize = ETH_ALEN;
4410 pmConf->filters[i].maskSize = 1;
4411 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
4412 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
4414 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4418 if (adapter->wol & WAKE_ARP) {
4421 in_dev = __in_dev_get_rcu(netdev);
4427 ifa = rcu_dereference(in_dev->ifa_list);
4433 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
4434 sizeof(struct arphdr) + /* ARP header */
4435 2 * ETH_ALEN + /* 2 Ethernet addresses*/
4436 2 * sizeof(u32); /*2 IPv4 addresses */
4437 pmConf->filters[i].maskSize =
4438 (pmConf->filters[i].patternSize - 1) / 8 + 1;
4440 /* ETH_P_ARP in Ethernet header. */
4441 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
4442 ehdr->h_proto = htons(ETH_P_ARP);
4444 /* ARPOP_REQUEST in ARP header. */
4445 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
4446 ahdr->ar_op = htons(ARPOP_REQUEST);
4447 arpreq = (u8 *)(ahdr + 1);
4449 /* The Unicast IPv4 address in 'tip' field. */
4450 arpreq += 2 * ETH_ALEN + sizeof(u32);
4451 *(__be32 *)arpreq = ifa->ifa_address;
4455 /* The mask for the relevant bits. */
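/* Each bit of mask[] selects one byte of pattern[]: here bytes 12-13
 * (h_proto), 20-21 (ar_op) and 38-41 (the target IP) must match.
 */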
4456 pmConf->filters[i].mask[0] = 0x00;
4457 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
4458 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
4459 pmConf->filters[i].mask[3] = 0x00;
4460 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
4461 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
4463 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4468 if (adapter->wol & WAKE_MAGIC)
4469 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
4471 pmConf->numFilters = i;
4473 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
4474 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
4476 adapter->shared->devRead.pmConfDesc.confPA =
4477 cpu_to_le64(adapter->pm_conf_pa);
4479 spin_lock_irqsave(&adapter->cmd_lock, flags);
4480 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4481 VMXNET3_CMD_UPDATE_PMCFG);
4482 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4484 pci_save_state(pdev);
4485 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
4487 pci_disable_device(pdev);
4488 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
4495 vmxnet3_resume(struct device *device)
4498 unsigned long flags;
4499 struct pci_dev *pdev = to_pci_dev(device);
4500 struct net_device *netdev = pci_get_drvdata(pdev);
4501 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4503 if (!netif_running(netdev))
4506 pci_set_power_state(pdev, PCI_D0);
4507 pci_restore_state(pdev);
4508 err = pci_enable_device_mem(pdev);
4512 pci_enable_wake(pdev, PCI_D0, 0);
4514 vmxnet3_alloc_intr_resources(adapter);
4516 /* During hibernate and suspend, the device has to be reinitialized as its
4517 * state is not guaranteed to be preserved.
4520 /* Need not check adapter state as other reset tasks cannot run during resume. */
4523 spin_lock_irqsave(&adapter->cmd_lock, flags);
4524 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4525 VMXNET3_CMD_QUIESCE_DEV);
4526 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4527 vmxnet3_tq_cleanup_all(adapter);
4528 vmxnet3_rq_cleanup_all(adapter);
4530 vmxnet3_reset_dev(adapter);
4531 err = vmxnet3_activate_dev(adapter);
4534 "failed to re-activate on resume, error: %d", err);
4535 vmxnet3_force_close(adapter);
4538 netif_device_attach(netdev);
4543 static const struct dev_pm_ops vmxnet3_pm_ops = {
4544 .suspend = vmxnet3_suspend,
4545 .resume = vmxnet3_resume,
4546 .freeze = vmxnet3_suspend,
4547 .restore = vmxnet3_resume,
4551 static struct pci_driver vmxnet3_driver = {
4552 .name = vmxnet3_driver_name,
4553 .id_table = vmxnet3_pciid_table,
4554 .probe = vmxnet3_probe_device,
4555 .remove = vmxnet3_remove_device,
4556 .shutdown = vmxnet3_shutdown_device,
4558 .driver.pm = &vmxnet3_pm_ops,
4564 vmxnet3_init_module(void)
4566 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
4567 VMXNET3_DRIVER_VERSION_REPORT);
4568 return pci_register_driver(&vmxnet3_driver);
4571 module_init(vmxnet3_init_module);
4575 vmxnet3_exit_module(void)
4577 pci_unregister_driver(&vmxnet3_driver);
4580 module_exit(vmxnet3_exit_module);
4582 MODULE_AUTHOR("VMware, Inc.");
4583 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
4584 MODULE_LICENSE("GPL v2");
4585 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);