1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
4 /******************************************************************************
5 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
6 ******************************************************************************/
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/types.h>
11 #include <linux/bitops.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/vmalloc.h>
16 #include <linux/string.h>
19 #include <linux/tcp.h>
20 #include <linux/sctp.h>
21 #include <linux/ipv6.h>
22 #include <linux/slab.h>
23 #include <net/checksum.h>
24 #include <net/ip6_checksum.h>
25 #include <linux/ethtool.h>
27 #include <linux/if_vlan.h>
28 #include <linux/prefetch.h>
30 #include <linux/bpf.h>
31 #include <linux/bpf_trace.h>
32 #include <linux/atomic.h>
37 const char ixgbevf_driver_name[] = "ixgbevf";
38 static const char ixgbevf_driver_string[] =
39 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
41 static char ixgbevf_copyright[] =
42 "Copyright (c) 2009 - 2018 Intel Corporation.";
44 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
45 [board_82599_vf] = &ixgbevf_82599_vf_info,
46 [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info,
47 [board_X540_vf] = &ixgbevf_X540_vf_info,
48 [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info,
49 [board_X550_vf] = &ixgbevf_X550_vf_info,
50 [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
51 [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
52 [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
53 [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
56 /* ixgbevf_pci_tbl - PCI Device ID Table
58 * Wildcard entries (PCI_ANY_ID) should come last
59 * Last entry must be all 0s
61 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
62 * Class, Class Mask, private data (not used) }
64 static const struct pci_device_id ixgbevf_pci_tbl[] = {
65 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
66 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
67 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
68 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
69 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
70 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
72 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
74 /* required last entry */
77 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
79 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
80 MODULE_LICENSE("GPL v2");
82 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
83 static int debug = -1;
84 module_param(debug, int, 0);
85 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
87 static struct workqueue_struct *ixgbevf_wq;
89 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
91 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
92 !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
93 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
94 queue_work(ixgbevf_wq, &adapter->service_task);
97 static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
99 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
101 /* flush memory to make sure state is correct before next watchdog */
102 smp_mb__before_atomic();
103 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
107 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
108 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
109 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
110 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
111 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
112 struct ixgbevf_rx_buffer *old_buff);
114 static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
116 struct ixgbevf_adapter *adapter = hw->back;
121 dev_err(&adapter->pdev->dev, "Adapter removed\n");
122 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
123 ixgbevf_service_event_schedule(adapter);
126 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
130 /* The following check not only avoids a redundant read of
131 * the status register when the read that just failed was
132 * itself a status register read that returned
133 * IXGBE_FAILED_READ_REG, it also blocks any
134 * potential recursion.
136 if (reg == IXGBE_VFSTATUS) {
137 ixgbevf_remove_adapter(hw);
140 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
141 if (value == IXGBE_FAILED_READ_REG)
142 ixgbevf_remove_adapter(hw);
145 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
147 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
150 if (IXGBE_REMOVED(reg_addr))
151 return IXGBE_FAILED_READ_REG;
152 value = readl(reg_addr + reg);
153 if (unlikely(value == IXGBE_FAILED_READ_REG))
154 ixgbevf_check_remove(hw, reg);
159 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
160 * @adapter: pointer to adapter struct
161 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
162 * @queue: queue to map the corresponding interrupt to
163 * @msix_vector: the vector to map to the corresponding queue
165 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
166 u8 queue, u8 msix_vector)
169 struct ixgbe_hw *hw = &adapter->hw;
171 if (direction == -1) {
173 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
174 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
177 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
179 /* Tx or Rx causes */
180 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
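/* each VTIVAR register maps two queues: Rx entries occupy bytes 0
 * and 2, Tx entries bytes 1 and 3, which is where the
 * (16 * (queue & 1)) + (8 * direction) bit offset below comes from
 */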
181 index = ((16 * (queue & 1)) + (8 * direction));
182 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
183 ivar &= ~(0xFF << index);
184 ivar |= (msix_vector << index);
185 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
189 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
191 return ring->stats.packets;
194 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
196 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
197 struct ixgbe_hw *hw = &adapter->hw;
199 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
200 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
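/* if the tail has wrapped back behind the head, add the ring size
 * when working out how many descriptors are still pending
 */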
203 return (head < tail) ?
204 tail - head : (tail + ring->count - head);
209 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
211 u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
212 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
213 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
215 clear_check_for_tx_hang(tx_ring);
217 /* Check for a hung queue, but be thorough. This verifies
218 * that a transmit has been completed since the previous
219 * check AND there is at least one packet pending. The
220 * ARMED bit is set to indicate a potential hang.
222 if ((tx_done_old == tx_done) && tx_pending) {
223 /* make sure it is true for two checks in a row */
224 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
227 /* reset the countdown */
228 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
230 /* update completed stats and continue */
231 tx_ring->tx_stats.tx_done_old = tx_done;
236 static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
238 /* Do the reset outside of interrupt context */
239 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
240 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
241 ixgbevf_service_event_schedule(adapter);
246 * ixgbevf_tx_timeout - Respond to a Tx Hang
247 * @netdev: network interface device structure
248 * @txqueue: transmit queue hanging (unused)
250 static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
252 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
254 ixgbevf_tx_timeout_reset(adapter);
258 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
259 * @q_vector: structure containing interrupt and ring information
260 * @tx_ring: tx ring to clean
261 * @napi_budget: Used to determine if we are in netpoll
263 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
264 struct ixgbevf_ring *tx_ring, int napi_budget)
266 struct ixgbevf_adapter *adapter = q_vector->adapter;
267 struct ixgbevf_tx_buffer *tx_buffer;
268 union ixgbe_adv_tx_desc *tx_desc;
269 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
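/* bound the work done per call to half a ring's worth of completed packets */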
270 unsigned int budget = tx_ring->count / 2;
271 unsigned int i = tx_ring->next_to_clean;
273 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
276 tx_buffer = &tx_ring->tx_buffer_info[i];
277 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
281 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
283 /* if next_to_watch is not set then there is no work pending */
287 /* prevent any other reads prior to eop_desc */
290 /* if DD is not set pending work has not been completed */
291 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
294 /* clear next_to_watch to prevent false hangs */
295 tx_buffer->next_to_watch = NULL;
297 /* update the statistics for this packet */
298 total_bytes += tx_buffer->bytecount;
299 total_packets += tx_buffer->gso_segs;
300 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
304 if (ring_is_xdp(tx_ring))
305 page_frag_free(tx_buffer->data);
307 napi_consume_skb(tx_buffer->skb, napi_budget);
309 /* unmap skb header data */
310 dma_unmap_single(tx_ring->dev,
311 dma_unmap_addr(tx_buffer, dma),
312 dma_unmap_len(tx_buffer, len),
315 /* clear tx_buffer data */
316 dma_unmap_len_set(tx_buffer, len, 0);
318 /* unmap remaining buffers */
319 while (tx_desc != eop_desc) {
325 tx_buffer = tx_ring->tx_buffer_info;
326 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
329 /* unmap any remaining paged data */
330 if (dma_unmap_len(tx_buffer, len)) {
331 dma_unmap_page(tx_ring->dev,
332 dma_unmap_addr(tx_buffer, dma),
333 dma_unmap_len(tx_buffer, len),
335 dma_unmap_len_set(tx_buffer, len, 0);
339 /* move us one more past the eop_desc for start of next pkt */
345 tx_buffer = tx_ring->tx_buffer_info;
346 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
349 /* issue prefetch for next Tx descriptor */
352 /* update budget accounting */
354 } while (likely(budget));
357 tx_ring->next_to_clean = i;
358 u64_stats_update_begin(&tx_ring->syncp);
359 tx_ring->stats.bytes += total_bytes;
360 tx_ring->stats.packets += total_packets;
361 u64_stats_update_end(&tx_ring->syncp);
362 q_vector->tx.total_bytes += total_bytes;
363 q_vector->tx.total_packets += total_packets;
364 adapter->tx_ipsec += total_ipsec;
366 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
367 struct ixgbe_hw *hw = &adapter->hw;
368 union ixgbe_adv_tx_desc *eop_desc;
370 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
372 pr_err("Detected Tx Unit Hang%s\n"
374 " TDH, TDT <%x>, <%x>\n"
375 " next_to_use <%x>\n"
376 " next_to_clean <%x>\n"
377 "tx_buffer_info[next_to_clean]\n"
378 " next_to_watch <%p>\n"
379 " eop_desc->wb.status <%x>\n"
380 " time_stamp <%lx>\n"
382 ring_is_xdp(tx_ring) ? " XDP" : "",
383 tx_ring->queue_index,
384 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
385 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
386 tx_ring->next_to_use, i,
387 eop_desc, (eop_desc ? eop_desc->wb.status : 0),
388 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
390 if (!ring_is_xdp(tx_ring))
391 netif_stop_subqueue(tx_ring->netdev,
392 tx_ring->queue_index);
394 /* schedule immediate reset if we believe we hung */
395 ixgbevf_tx_timeout_reset(adapter);
400 if (ring_is_xdp(tx_ring))
403 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
404 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
405 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
406 /* Make sure that anybody stopping the queue after this
407 * sees the new next_to_clean.
411 if (__netif_subqueue_stopped(tx_ring->netdev,
412 tx_ring->queue_index) &&
413 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
414 netif_wake_subqueue(tx_ring->netdev,
415 tx_ring->queue_index);
416 ++tx_ring->tx_stats.restart_queue;
424 * ixgbevf_rx_skb - Helper function to determine proper Rx method
425 * @q_vector: structure containing interrupt and ring information
426 * @skb: packet to send up
428 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
431 napi_gro_receive(&q_vector->napi, skb);
434 #define IXGBE_RSS_L4_TYPES_MASK \
435 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
436 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
437 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
438 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
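/* RSS types in this mask hash over an L4 header as well, so matching
 * packets are reported to the stack as PKT_HASH_TYPE_L4; all other
 * types cover L3 only and are reported as PKT_HASH_TYPE_L3
 */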
440 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
441 union ixgbe_adv_rx_desc *rx_desc,
446 if (!(ring->netdev->features & NETIF_F_RXHASH))
449 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
450 IXGBE_RXDADV_RSSTYPE_MASK;
455 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
456 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
457 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
461 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
462 * @ring: structure containing ring specific data
463 * @rx_desc: current Rx descriptor being processed
464 * @skb: skb currently being received and modified
466 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
467 union ixgbe_adv_rx_desc *rx_desc,
470 skb_checksum_none_assert(skb);
472 /* Rx csum disabled */
473 if (!(ring->netdev->features & NETIF_F_RXCSUM))
476 /* if IP and error */
477 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
478 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
479 ring->rx_stats.csum_err++;
483 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
486 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
487 ring->rx_stats.csum_err++;
491 /* It must be a TCP or UDP packet with a valid checksum */
492 skb->ip_summed = CHECKSUM_UNNECESSARY;
496 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
497 * @rx_ring: rx descriptor ring packet is being transacted on
498 * @rx_desc: pointer to the EOP Rx descriptor
499 * @skb: pointer to current skb being populated
501 * This function checks the ring, descriptor, and packet information in
502 * order to populate the checksum, VLAN, protocol, and other fields within the skb.
505 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
506 union ixgbe_adv_rx_desc *rx_desc,
509 ixgbevf_rx_hash(rx_ring, rx_desc, skb);
510 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
512 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
513 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
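/* active_vlans is the first member of struct ixgbevf_adapter, so the
 * private area returned by netdev_priv() can be used directly as the
 * VLAN bitmap here
 */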
514 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
516 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
517 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
520 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
521 ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
523 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
527 struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
528 const unsigned int size)
530 struct ixgbevf_rx_buffer *rx_buffer;
532 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
533 prefetchw(rx_buffer->page);
535 /* we are reusing so sync this buffer for CPU use */
536 dma_sync_single_range_for_cpu(rx_ring->dev,
538 rx_buffer->page_offset,
542 rx_buffer->pagecnt_bias--;
547 static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
548 struct ixgbevf_rx_buffer *rx_buffer,
551 if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
552 /* hand second half of page back to the ring */
553 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
556 /* We are not reusing the buffer so unmap it and free
557 * any references we are holding to it
559 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
560 ixgbevf_rx_pg_size(rx_ring),
562 IXGBEVF_RX_DMA_ATTR);
563 __page_frag_cache_drain(rx_buffer->page,
564 rx_buffer->pagecnt_bias);
567 /* clear contents of rx_buffer */
568 rx_buffer->page = NULL;
572 * ixgbevf_is_non_eop - process handling of non-EOP buffers
573 * @rx_ring: Rx ring being processed
574 * @rx_desc: Rx descriptor for current buffer
576 * This function updates next to clean. If the buffer is an EOP buffer
577 * this function exits returning false, otherwise it will place the
578 * sk_buff in the next buffer to be chained and return true indicating
579 * that this is in fact a non-EOP buffer.
581 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
582 union ixgbe_adv_rx_desc *rx_desc)
584 u32 ntc = rx_ring->next_to_clean + 1;
586 /* fetch, update, and store next to clean */
587 ntc = (ntc < rx_ring->count) ? ntc : 0;
588 rx_ring->next_to_clean = ntc;
590 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
592 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
598 static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
600 return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
603 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
604 struct ixgbevf_rx_buffer *bi)
606 struct page *page = bi->page;
609 /* since we are recycling buffers we should seldom need to alloc */
613 /* alloc new page for storage */
614 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
615 if (unlikely(!page)) {
616 rx_ring->rx_stats.alloc_rx_page_failed++;
620 /* map page for use */
621 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
622 ixgbevf_rx_pg_size(rx_ring),
623 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
625 /* if mapping failed free memory back to system since
626 * there isn't much point in holding memory we can't use
628 if (dma_mapping_error(rx_ring->dev, dma)) {
629 __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
631 rx_ring->rx_stats.alloc_rx_page_failed++;
637 bi->page_offset = ixgbevf_rx_offset(rx_ring);
638 bi->pagecnt_bias = 1;
639 rx_ring->rx_stats.alloc_rx_page++;
645 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
646 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
647 * @cleaned_count: number of buffers to replace
649 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
652 union ixgbe_adv_rx_desc *rx_desc;
653 struct ixgbevf_rx_buffer *bi;
654 unsigned int i = rx_ring->next_to_use;
656 /* nothing to do or no valid netdev defined */
657 if (!cleaned_count || !rx_ring->netdev)
660 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
661 bi = &rx_ring->rx_buffer_info[i];
665 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
668 /* sync the buffer for use by the device */
669 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
671 ixgbevf_rx_bufsz(rx_ring),
674 /* Refresh the desc even if pkt_addr didn't change
675 * because each write-back erases this info.
677 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
683 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
684 bi = rx_ring->rx_buffer_info;
688 /* clear the length for the next_to_use descriptor */
689 rx_desc->wb.upper.length = 0;
692 } while (cleaned_count);
696 if (rx_ring->next_to_use != i) {
697 /* record the next descriptor to use */
698 rx_ring->next_to_use = i;
700 /* update next to alloc since we have filled the ring */
701 rx_ring->next_to_alloc = i;
703 /* Force memory writes to complete before letting h/w
704 * know there are new descriptors to fetch. (Only
705 * applicable for weak-ordered memory model archs, such as IA-64).
709 ixgbevf_write_tail(rx_ring, i);
714 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
715 * @rx_ring: rx descriptor ring packet is being transacted on
716 * @rx_desc: pointer to the EOP Rx descriptor
717 * @skb: pointer to current skb being fixed
719 * Check for corrupted packet headers caused by senders on the local L2
720 * embedded NIC switch not setting up their Tx Descriptors right. These
721 * should be very rare.
723 * Also address the case where we are pulling data in on pages only
724 * and as such no data is present in the skb header.
726 * In addition if skb is not at least 60 bytes we need to pad it so that
727 * it is large enough to qualify as a valid Ethernet frame.
729 * Returns true if an error was encountered and skb was freed.
731 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
732 union ixgbe_adv_rx_desc *rx_desc,
735 /* XDP packets use error pointer so abort at this point */
739 /* verify that the packet does not have any known errors */
740 if (unlikely(ixgbevf_test_staterr(rx_desc,
741 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
742 struct net_device *netdev = rx_ring->netdev;
744 if (!(netdev->features & NETIF_F_RXALL)) {
745 dev_kfree_skb_any(skb);
750 /* if eth_skb_pad returns an error the skb was freed */
751 if (eth_skb_pad(skb))
758 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
759 * @rx_ring: rx descriptor ring to store buffers on
760 * @old_buff: donor buffer to have page reused
762 * Synchronizes page for reuse by the adapter
764 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
765 struct ixgbevf_rx_buffer *old_buff)
767 struct ixgbevf_rx_buffer *new_buff;
768 u16 nta = rx_ring->next_to_alloc;
770 new_buff = &rx_ring->rx_buffer_info[nta];
772 /* update, and store next to alloc */
773 nta++;
774 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
776 /* transfer page from old buffer to new buffer */
777 new_buff->page = old_buff->page;
778 new_buff->dma = old_buff->dma;
779 new_buff->page_offset = old_buff->page_offset;
780 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
783 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
785 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
786 struct page *page = rx_buffer->page;
788 /* avoid re-using remote and pfmemalloc pages */
789 if (!dev_page_is_reusable(page))
792 #if (PAGE_SIZE < 8192)
793 /* if we are only owner of page we can reuse it */
794 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
797 #define IXGBEVF_LAST_OFFSET \
798 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
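/* on pages larger than 4K, only reuse the page while the offset still
 * leaves room for another 2048-byte buffer within the usable area
 */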
800 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
805 /* If we have drained the page fragment pool we need to update
806 * the pagecnt_bias and page count so that we fully restock the
807 * number of references the driver holds.
809 if (unlikely(!pagecnt_bias)) {
810 page_ref_add(page, USHRT_MAX);
811 rx_buffer->pagecnt_bias = USHRT_MAX;
818 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
819 * @rx_ring: rx descriptor ring to transact packets on
820 * @rx_buffer: buffer containing page to add
821 * @skb: sk_buff to place the data into
822 * @size: size of buffer to be added
824 * This function will add the data contained in rx_buffer->page to the skb.
826 static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
827 struct ixgbevf_rx_buffer *rx_buffer,
831 #if (PAGE_SIZE < 8192)
832 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
834 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
835 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
836 SKB_DATA_ALIGN(size);
838 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
839 rx_buffer->page_offset, size, truesize);
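/* on 4K pages the two half-page buffers alternate, so flip the offset
 * to the other half; on larger pages simply advance past the space
 * just consumed
 */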
840 #if (PAGE_SIZE < 8192)
841 rx_buffer->page_offset ^= truesize;
843 rx_buffer->page_offset += truesize;
848 struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
849 struct ixgbevf_rx_buffer *rx_buffer,
850 struct xdp_buff *xdp,
851 union ixgbe_adv_rx_desc *rx_desc)
853 unsigned int size = xdp->data_end - xdp->data;
854 #if (PAGE_SIZE < 8192)
855 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
857 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
858 xdp->data_hard_start);
860 unsigned int headlen;
863 /* prefetch first cache line of first page */
864 net_prefetch(xdp->data);
866 /* Note, we get here by enabling legacy-rx via:
868 * ethtool --set-priv-flags <dev> legacy-rx on
870 * In this mode, we currently get 0 extra XDP headroom as
871 * opposed to having legacy-rx off, where we process XDP
872 * packets going to stack via ixgbevf_build_skb().
874 * For ixgbevf_construct_skb() mode it means that the
875 * xdp->data_meta will always point to xdp->data, since
876 * the helper cannot expand the head. Should this ever
877 * change in the future for legacy-rx mode on, then let's also
878 * add xdp->data_meta handling here.
881 /* allocate a skb to store the frags */
882 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
886 /* Determine available headroom for copy */
887 headlen = size;
888 if (headlen > IXGBEVF_RX_HDR_SIZE)
889 headlen = eth_get_headlen(skb->dev, xdp->data,
890 IXGBEVF_RX_HDR_SIZE);
892 /* align pull length to size of long to optimize memcpy performance */
893 memcpy(__skb_put(skb, headlen), xdp->data,
894 ALIGN(headlen, sizeof(long)));
896 /* update all of the pointers */
899 skb_add_rx_frag(skb, 0, rx_buffer->page,
900 (xdp->data + headlen) -
901 page_address(rx_buffer->page),
903 #if (PAGE_SIZE < 8192)
904 rx_buffer->page_offset ^= truesize;
906 rx_buffer->page_offset += truesize;
909 rx_buffer->pagecnt_bias++;
915 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
918 struct ixgbe_hw *hw = &adapter->hw;
920 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
923 static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
924 struct ixgbevf_rx_buffer *rx_buffer,
925 struct xdp_buff *xdp,
926 union ixgbe_adv_rx_desc *rx_desc)
928 unsigned int metasize = xdp->data - xdp->data_meta;
929 #if (PAGE_SIZE < 8192)
930 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
932 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
933 SKB_DATA_ALIGN(xdp->data_end -
934 xdp->data_hard_start);
938 /* Prefetch first cache line of first page. If xdp->data_meta
939 * is unused, this points to xdp->data, otherwise, we likely
940 * have a consumer accessing first few bytes of meta data,
941 * and then actual data.
943 net_prefetch(xdp->data_meta);
945 /* build an skb around the page buffer */
946 skb = napi_build_skb(xdp->data_hard_start, truesize);
950 /* update pointers within the skb to store the data */
951 skb_reserve(skb, xdp->data - xdp->data_hard_start);
952 __skb_put(skb, xdp->data_end - xdp->data);
954 skb_metadata_set(skb, metasize);
956 /* update buffer offset */
957 #if (PAGE_SIZE < 8192)
958 rx_buffer->page_offset ^= truesize;
960 rx_buffer->page_offset += truesize;
966 #define IXGBEVF_XDP_PASS 0
967 #define IXGBEVF_XDP_CONSUMED 1
968 #define IXGBEVF_XDP_TX 2
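/* ixgbevf_run_xdp() hands these codes back negated inside an ERR_PTR():
 * IXGBEVF_XDP_PASS becomes a NULL skb (build one as usual), while
 * CONSUMED and TX tell the caller the frame was dropped or queued on
 * the XDP Tx ring
 */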
970 static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
971 struct xdp_buff *xdp)
973 struct ixgbevf_tx_buffer *tx_buffer;
974 union ixgbe_adv_tx_desc *tx_desc;
979 len = xdp->data_end - xdp->data;
981 if (unlikely(!ixgbevf_desc_unused(ring)))
982 return IXGBEVF_XDP_CONSUMED;
984 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
985 if (dma_mapping_error(ring->dev, dma))
986 return IXGBEVF_XDP_CONSUMED;
988 /* record the location of the first descriptor for this packet */
989 i = ring->next_to_use;
990 tx_buffer = &ring->tx_buffer_info[i];
992 dma_unmap_len_set(tx_buffer, len, len);
993 dma_unmap_addr_set(tx_buffer, dma, dma);
994 tx_buffer->data = xdp->data;
995 tx_buffer->bytecount = len;
996 tx_buffer->gso_segs = 1;
997 tx_buffer->protocol = 0;
999 /* Populate minimal context descriptor that will provide for the
1000 * fact that we are expected to process Ethernet frames.
1002 if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
1003 struct ixgbe_adv_tx_context_desc *context_desc;
1005 set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1007 context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
1008 context_desc->vlan_macip_lens =
1009 cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
1010 context_desc->fceof_saidx = 0;
1011 context_desc->type_tucmd_mlhl =
1012 cpu_to_le32(IXGBE_TXD_CMD_DEXT |
1013 IXGBE_ADVTXD_DTYP_CTXT);
1014 context_desc->mss_l4len_idx = 0;
1019 /* put descriptor type bits */
1020 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
1021 IXGBE_ADVTXD_DCMD_DEXT |
1022 IXGBE_ADVTXD_DCMD_IFCS;
1023 cmd_type |= len | IXGBE_TXD_CMD;
1025 tx_desc = IXGBEVF_TX_DESC(ring, i);
1026 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1028 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1029 tx_desc->read.olinfo_status =
1030 cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
1033 /* Avoid any potential race with cleanup */
1036 /* set next_to_watch value indicating a packet is present */
1038 if (i == ring->count)
1041 tx_buffer->next_to_watch = tx_desc;
1042 ring->next_to_use = i;
1044 return IXGBEVF_XDP_TX;
1047 static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
1048 struct ixgbevf_ring *rx_ring,
1049 struct xdp_buff *xdp)
1051 int result = IXGBEVF_XDP_PASS;
1052 struct ixgbevf_ring *xdp_ring;
1053 struct bpf_prog *xdp_prog;
1056 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1061 act = bpf_prog_run_xdp(xdp_prog, xdp);
1066 xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
1067 result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
1068 if (result == IXGBEVF_XDP_CONSUMED)
1072 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
1076 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1077 fallthrough; /* handle aborts by dropping packet */
1079 result = IXGBEVF_XDP_CONSUMED;
1083 return ERR_PTR(-result);
1086 static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
1089 unsigned int truesize;
1091 #if (PAGE_SIZE < 8192)
1092 truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
1094 truesize = ring_uses_build_skb(rx_ring) ?
1095 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
1096 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
1097 SKB_DATA_ALIGN(size);
1102 static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
1103 struct ixgbevf_rx_buffer *rx_buffer,
1106 unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);
1108 #if (PAGE_SIZE < 8192)
1109 rx_buffer->page_offset ^= truesize;
1111 rx_buffer->page_offset += truesize;
1115 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
1116 struct ixgbevf_ring *rx_ring,
1119 unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
1120 struct ixgbevf_adapter *adapter = q_vector->adapter;
1121 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
1122 struct sk_buff *skb = rx_ring->skb;
1123 bool xdp_xmit = false;
1124 struct xdp_buff xdp;
1126 /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1127 #if (PAGE_SIZE < 8192)
1128 frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
1130 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1132 while (likely(total_rx_packets < budget)) {
1133 struct ixgbevf_rx_buffer *rx_buffer;
1134 union ixgbe_adv_rx_desc *rx_desc;
1137 /* return some buffers to hardware, one at a time is too slow */
1138 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
1139 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
1143 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1144 size = le16_to_cpu(rx_desc->wb.upper.length);
1148 /* This memory barrier is needed to keep us from reading
1149 * any other fields out of the rx_desc until we know the
1150 * RXD_STAT_DD bit is set
1154 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
1156 /* retrieve a buffer from the ring */
1158 unsigned int offset = ixgbevf_rx_offset(rx_ring);
1159 unsigned char *hard_start;
1161 hard_start = page_address(rx_buffer->page) +
1162 rx_buffer->page_offset - offset;
1163 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1164 #if (PAGE_SIZE > 4096)
1165 /* At larger PAGE_SIZE, frame_sz depends on the received frame length */
1166 xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
1168 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
1172 if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
1174 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
1177 rx_buffer->pagecnt_bias++;
1180 total_rx_bytes += size;
1182 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1183 } else if (ring_uses_build_skb(rx_ring)) {
1184 skb = ixgbevf_build_skb(rx_ring, rx_buffer,
1187 skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
1191 /* exit if we failed to retrieve a buffer */
1193 rx_ring->rx_stats.alloc_rx_buff_failed++;
1194 rx_buffer->pagecnt_bias++;
1198 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
1201 /* fetch next buffer in frame if non-eop */
1202 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
1205 /* verify the packet layout is correct */
1206 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
1211 /* probably a little skewed due to removing CRC */
1212 total_rx_bytes += skb->len;
1214 /* Workaround hardware that can't do proper VEPA multicast source pruning */
1217 if ((skb->pkt_type == PACKET_BROADCAST ||
1218 skb->pkt_type == PACKET_MULTICAST) &&
1219 ether_addr_equal(rx_ring->netdev->dev_addr,
1220 eth_hdr(skb)->h_source)) {
1221 dev_kfree_skb_irq(skb);
1225 /* populate checksum, VLAN, and protocol */
1226 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
1228 ixgbevf_rx_skb(q_vector, skb);
1230 /* reset skb pointer */
1233 /* update budget accounting */
1237 /* place incomplete frames back on ring for completion */
1241 struct ixgbevf_ring *xdp_ring =
1242 adapter->xdp_ring[rx_ring->queue_index];
1244 /* Force memory writes to complete before letting h/w
1245 * know there are new descriptors to fetch.
1248 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
1251 u64_stats_update_begin(&rx_ring->syncp);
1252 rx_ring->stats.packets += total_rx_packets;
1253 rx_ring->stats.bytes += total_rx_bytes;
1254 u64_stats_update_end(&rx_ring->syncp);
1255 q_vector->rx.total_packets += total_rx_packets;
1256 q_vector->rx.total_bytes += total_rx_bytes;
1258 return total_rx_packets;
1262 * ixgbevf_poll - NAPI polling callback
1263 * @napi: napi struct with our device's info in it
1264 * @budget: amount of work driver is allowed to do this pass, in packets
1266 * This function will clean one or more rings associated with a q_vector.
1269 static int ixgbevf_poll(struct napi_struct *napi, int budget)
1271 struct ixgbevf_q_vector *q_vector =
1272 container_of(napi, struct ixgbevf_q_vector, napi);
1273 struct ixgbevf_adapter *adapter = q_vector->adapter;
1274 struct ixgbevf_ring *ring;
1275 int per_ring_budget, work_done = 0;
1276 bool clean_complete = true;
1278 ixgbevf_for_each_ring(ring, q_vector->tx) {
1279 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
1280 clean_complete = false;
1286 /* attempt to distribute budget to each queue fairly, but don't allow
1287 * the budget to go below 1 because we'll exit polling
1289 if (q_vector->rx.count > 1)
1290 per_ring_budget = max(budget/q_vector->rx.count, 1);
1292 per_ring_budget = budget;
1294 ixgbevf_for_each_ring(ring, q_vector->rx) {
1295 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1297 work_done += cleaned;
1298 if (cleaned >= per_ring_budget)
1299 clean_complete = false;
1302 /* If all work not completed, return budget and keep polling */
1303 if (!clean_complete)
1306 /* Exit the polling mode, but don't re-enable interrupts if stack might
1307 * poll us due to busy-polling
1309 if (likely(napi_complete_done(napi, work_done))) {
1310 if (adapter->rx_itr_setting == 1)
1311 ixgbevf_set_itr(q_vector);
1312 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1313 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1314 ixgbevf_irq_enable_queues(adapter,
1315 BIT(q_vector->v_idx));
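/* returning the full budget would tell the NAPI core to keep polling,
 * so report at most budget - 1 once the rings are clean
 */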
1318 return min(work_done, budget - 1);
1322 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1323 * @q_vector: structure containing interrupt and ring information
1325 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1327 struct ixgbevf_adapter *adapter = q_vector->adapter;
1328 struct ixgbe_hw *hw = &adapter->hw;
1329 int v_idx = q_vector->v_idx;
1330 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1332 /* set the WDIS bit to not clear the timer bits and cause an
1333 * immediate assertion of the interrupt
1335 itr_reg |= IXGBE_EITR_CNT_WDIS;
1337 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1341 * ixgbevf_configure_msix - Configure MSI-X hardware
1342 * @adapter: board private structure
1344 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
1347 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1349 struct ixgbevf_q_vector *q_vector;
1350 int q_vectors, v_idx;
1352 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1353 adapter->eims_enable_mask = 0;
1355 /* Populate the IVAR table and set the ITR values to the
1356 * corresponding register.
1358 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1359 struct ixgbevf_ring *ring;
1361 q_vector = adapter->q_vector[v_idx];
1363 ixgbevf_for_each_ring(ring, q_vector->rx)
1364 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1366 ixgbevf_for_each_ring(ring, q_vector->tx)
1367 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1369 if (q_vector->tx.ring && !q_vector->rx.ring) {
1370 /* Tx only vector */
1371 if (adapter->tx_itr_setting == 1)
1372 q_vector->itr = IXGBE_12K_ITR;
1374 q_vector->itr = adapter->tx_itr_setting;
1376 /* Rx or Rx/Tx vector */
1377 if (adapter->rx_itr_setting == 1)
1378 q_vector->itr = IXGBE_20K_ITR;
1380 q_vector->itr = adapter->rx_itr_setting;
1383 /* add q_vector eims value to global eims_enable_mask */
1384 adapter->eims_enable_mask |= BIT(v_idx);
1386 ixgbevf_write_eitr(q_vector);
1389 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1390 /* setup eims_other and add value to global eims_enable_mask */
1391 adapter->eims_other = BIT(v_idx);
1392 adapter->eims_enable_mask |= adapter->eims_other;
1395 enum latency_range {
1399 latency_invalid = 255
1403 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
1404 * @q_vector: structure containing interrupt and ring information
1405 * @ring_container: structure containing ring performance data
1407 * Stores a new ITR value based on packets and byte
1408 * counts during the last interrupt. The advantage of per interrupt
1409 * computation is faster updates and more accurate ITR for the current
1410 * traffic pattern. Constants in this function were computed
1411 * based on theoretical maximum wire speed and thresholds were set based
1412 * on testing data as well as attempting to minimize response time
1413 * while increasing bulk throughput.
1415 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1416 struct ixgbevf_ring_container *ring_container)
1418 int bytes = ring_container->total_bytes;
1419 int packets = ring_container->total_packets;
1422 u8 itr_setting = ring_container->itr;
1427 /* simple throttle rate management
1428 * 0-20MB/s lowest (100000 ints/s)
1429 * 20-100MB/s low (20000 ints/s)
1430 * 100-1249MB/s bulk (12000 ints/s)
1432 /* what was last interrupt timeslice? */
1433 timepassed_us = q_vector->itr >> 2;
1434 if (timepassed_us == 0)
1437 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1439 switch (itr_setting) {
1440 case lowest_latency:
1441 if (bytes_perint > 10)
1442 itr_setting = low_latency;
1445 if (bytes_perint > 20)
1446 itr_setting = bulk_latency;
1447 else if (bytes_perint <= 10)
1448 itr_setting = lowest_latency;
1451 if (bytes_perint <= 20)
1452 itr_setting = low_latency;
1456 /* clear work counters since we have the values we need */
1457 ring_container->total_bytes = 0;
1458 ring_container->total_packets = 0;
1460 /* write updated itr to ring container */
1461 ring_container->itr = itr_setting;
1464 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1466 u32 new_itr = q_vector->itr;
1469 ixgbevf_update_itr(q_vector, &q_vector->tx);
1470 ixgbevf_update_itr(q_vector, &q_vector->rx);
1472 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1474 switch (current_itr) {
1475 /* counts and packets in update_itr are dependent on these numbers */
1476 case lowest_latency:
1477 new_itr = IXGBE_100K_ITR;
1480 new_itr = IXGBE_20K_ITR;
1483 new_itr = IXGBE_12K_ITR;
1489 if (new_itr != q_vector->itr) {
1490 /* do an exponential smoothing */
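/* weighted harmonic mean: the previous ITR carries nine times the
 * weight of the new target, so the interrupt rate changes gradually
 */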
1491 new_itr = (10 * new_itr * q_vector->itr) /
1492 ((9 * new_itr) + q_vector->itr);
1494 /* save the algorithm value here */
1495 q_vector->itr = new_itr;
1497 ixgbevf_write_eitr(q_vector);
1501 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1503 struct ixgbevf_adapter *adapter = data;
1504 struct ixgbe_hw *hw = &adapter->hw;
1506 hw->mac.get_link_status = 1;
1508 ixgbevf_service_event_schedule(adapter);
1510 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1516 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
1518 * @data: pointer to our q_vector struct for this interrupt vector
1520 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1522 struct ixgbevf_q_vector *q_vector = data;
1524 /* EIAM disabled interrupts (on this vector) for us */
1525 if (q_vector->rx.ring || q_vector->tx.ring)
1526 napi_schedule_irqoff(&q_vector->napi);
1532 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1533 * @adapter: board private structure
1535 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1536 * interrupts from the kernel.
1538 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1540 struct net_device *netdev = adapter->netdev;
1541 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1542 unsigned int ri = 0, ti = 0;
1545 for (vector = 0; vector < q_vectors; vector++) {
1546 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1547 struct msix_entry *entry = &adapter->msix_entries[vector];
1549 if (q_vector->tx.ring && q_vector->rx.ring) {
1550 snprintf(q_vector->name, sizeof(q_vector->name),
1551 "%s-TxRx-%u", netdev->name, ri++);
1553 } else if (q_vector->rx.ring) {
1554 snprintf(q_vector->name, sizeof(q_vector->name),
1555 "%s-rx-%u", netdev->name, ri++);
1556 } else if (q_vector->tx.ring) {
1557 snprintf(q_vector->name, sizeof(q_vector->name),
1558 "%s-tx-%u", netdev->name, ti++);
1560 /* skip this unused q_vector */
1563 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1564 q_vector->name, q_vector);
1566 hw_dbg(&adapter->hw,
1567 "request_irq failed for MSIX interrupt Error: %d\n",
1569 goto free_queue_irqs;
1573 err = request_irq(adapter->msix_entries[vector].vector,
1574 &ixgbevf_msix_other, 0, netdev->name, adapter);
1576 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1578 goto free_queue_irqs;
1586 free_irq(adapter->msix_entries[vector].vector,
1587 adapter->q_vector[vector]);
1589 /* This failure is non-recoverable - it indicates the system is
1590 * out of MSIX vector resources and the VF driver cannot run
1591 * without them. Set the number of msix vectors to zero
1592 * indicating that not enough can be allocated. The error
1593 * will be returned to the user indicating device open failed.
1594 * Any further attempts to force the driver to open will also
1595 * fail. The only way to recover is to unload the driver and
1596 * reload it again. If the system has recovered some MSIX
1597 * vectors then it may succeed.
1599 adapter->num_msix_vectors = 0;
1604 * ixgbevf_request_irq - initialize interrupts
1605 * @adapter: board private structure
1607 * Attempts to configure interrupts using the best available
1608 * capabilities of the hardware and kernel.
1610 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1612 int err = ixgbevf_request_msix_irqs(adapter);
1615 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1620 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1624 if (!adapter->msix_entries)
1627 q_vectors = adapter->num_msix_vectors;
1630 free_irq(adapter->msix_entries[i].vector, adapter);
1633 for (; i >= 0; i--) {
1634 /* free only the irqs that were actually requested */
1635 if (!adapter->q_vector[i]->rx.ring &&
1636 !adapter->q_vector[i]->tx.ring)
1639 free_irq(adapter->msix_entries[i].vector,
1640 adapter->q_vector[i]);
1645 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1646 * @adapter: board private structure
1648 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1650 struct ixgbe_hw *hw = &adapter->hw;
1653 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1654 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1655 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1657 IXGBE_WRITE_FLUSH(hw);
1659 for (i = 0; i < adapter->num_msix_vectors; i++)
1660 synchronize_irq(adapter->msix_entries[i].vector);
1664 * ixgbevf_irq_enable - Enable default interrupt generation settings
1665 * @adapter: board private structure
1667 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1669 struct ixgbe_hw *hw = &adapter->hw;
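/* enable auto-masking and auto-clearing for the configured vectors,
 * then unmask them via the interrupt mask set register
 */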
1671 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1672 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1673 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1677 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1678 * @adapter: board private structure
1679 * @ring: structure containing ring specific data
1681 * Configure the Tx descriptor ring after a reset.
1683 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1684 struct ixgbevf_ring *ring)
1686 struct ixgbe_hw *hw = &adapter->hw;
1687 u64 tdba = ring->dma;
1689 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1690 u8 reg_idx = ring->reg_idx;
1692 /* disable queue to avoid issues while updating state */
1693 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1694 IXGBE_WRITE_FLUSH(hw);
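/* program the ring base address (split across low and high registers)
 * and the ring length in bytes
 */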
1696 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1697 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1698 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1699 ring->count * sizeof(union ixgbe_adv_tx_desc));
1701 /* disable head writeback */
1702 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1703 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1705 /* enable relaxed ordering */
1706 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1707 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1708 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1710 /* reset head and tail pointers */
1711 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1712 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1713 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1715 /* reset ntu and ntc to place SW in sync with hardware */
1716 ring->next_to_clean = 0;
1717 ring->next_to_use = 0;
1719 /* In order to avoid issues WTHRESH + PTHRESH should always be equal
1720 * to or less than the number of on chip descriptors, which is currently 40.
1723 txdctl |= (8 << 16); /* WTHRESH = 8 */
1725 /* Setting PTHRESH to 32 both improves performance and avoids a TX hang with DFP enabled */
1726 txdctl |= (1u << 8) | /* HTHRESH = 1 */
1727 32; /* PTHRESH = 32 */
1729 /* reinitialize tx_buffer_info */
1730 memset(ring->tx_buffer_info, 0,
1731 sizeof(struct ixgbevf_tx_buffer) * ring->count);
1733 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1734 clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1736 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1738 /* poll to verify queue is enabled */
1740 usleep_range(1000, 2000);
1741 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1742 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1744 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
1748 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1749 * @adapter: board private structure
1751 * Configure the Tx unit of the MAC after a reset.
1753 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1757 /* Setup the HW Tx Head and Tail descriptor pointers */
1758 for (i = 0; i < adapter->num_tx_queues; i++)
1759 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1760 for (i = 0; i < adapter->num_xdp_queues; i++)
1761 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
1764 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1766 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
1767 struct ixgbevf_ring *ring, int index)
1769 struct ixgbe_hw *hw = &adapter->hw;
1772 srrctl = IXGBE_SRRCTL_DROP_EN;
1774 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1775 if (ring_uses_large_buffer(ring))
1776 srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1778 srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1779 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1781 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1784 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1786 struct ixgbe_hw *hw = &adapter->hw;
1788 /* PSRTYPE must be initialized in 82599 */
1789 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1790 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1791 IXGBE_PSRTYPE_L2HDR;
1793 if (adapter->num_rx_queues > 1)
1796 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1799 #define IXGBEVF_MAX_RX_DESC_POLL 10
1800 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1801 struct ixgbevf_ring *ring)
1803 struct ixgbe_hw *hw = &adapter->hw;
1804 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1806 u8 reg_idx = ring->reg_idx;
1808 if (IXGBE_REMOVED(hw->hw_addr))
1810 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1811 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1813 /* write value back with RXDCTL.ENABLE bit cleared */
1814 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1816 /* the hardware may take up to 100us to really disable the Rx queue */
1819 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1820 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1823 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1827 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1828 struct ixgbevf_ring *ring)
1830 struct ixgbe_hw *hw = &adapter->hw;
1831 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1833 u8 reg_idx = ring->reg_idx;
1835 if (IXGBE_REMOVED(hw->hw_addr))
1838 usleep_range(1000, 2000);
1839 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1840 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1843 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1848 * ixgbevf_init_rss_key - Initialize adapter RSS key
1849 * @adapter: device handle
1851 * Allocates and initializes the RSS key if it is not allocated.
1853 static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
1857 if (!adapter->rss_key) {
1858 rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
1859 if (unlikely(!rss_key))
1862 netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
1863 adapter->rss_key = rss_key;
1869 static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1871 struct ixgbe_hw *hw = &adapter->hw;
1872 u32 vfmrqc = 0, vfreta = 0;
1873 u16 rss_i = adapter->num_rx_queues;
1876 /* Fill out hash function seeds */
1877 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1878 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
1880 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1884 adapter->rss_indir_tbl[i] = j;
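/* each 32-bit VFRETA register packs four single-byte table entries */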
1886 vfreta |= j << (i & 0x3) * 8;
1888 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1893 /* Perform hash on these packet types */
1894 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1895 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1896 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1897 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1899 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1901 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1904 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1905 struct ixgbevf_ring *ring)
1907 struct ixgbe_hw *hw = &adapter->hw;
1908 union ixgbe_adv_rx_desc *rx_desc;
1909 u64 rdba = ring->dma;
1911 u8 reg_idx = ring->reg_idx;
1913 /* disable queue to avoid issues while updating state */
1914 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1915 ixgbevf_disable_rx_queue(adapter, ring);
1917 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1918 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1919 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1920 ring->count * sizeof(union ixgbe_adv_rx_desc));
1922 #ifndef CONFIG_SPARC
1923 /* enable relaxed ordering */
1924 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1925 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1927 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1928 IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1929 IXGBE_DCA_RXCTRL_DATA_WRO_EN);
1932 /* reset head and tail pointers */
1933 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1934 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1935 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1937 /* initialize rx_buffer_info */
1938 memset(ring->rx_buffer_info, 0,
1939 sizeof(struct ixgbevf_rx_buffer) * ring->count);
1941 /* initialize Rx descriptor 0 */
1942 rx_desc = IXGBEVF_RX_DESC(ring, 0);
1943 rx_desc->wb.upper.length = 0;
1945 /* reset ntu and ntc to place SW in sync with hardware */
1946 ring->next_to_clean = 0;
1947 ring->next_to_use = 0;
1948 ring->next_to_alloc = 0;
1950 ixgbevf_configure_srrctl(adapter, ring, reg_idx);
1952 /* RXDCTL.RLPML does not work on 82599 */
1953 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
1954 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
1955 IXGBE_RXDCTL_RLPML_EN);
1957 #if (PAGE_SIZE < 8192)
1958 /* Limit the maximum frame size so we don't overrun the skb */
1959 if (ring_uses_build_skb(ring) &&
1960 !ring_uses_large_buffer(ring))
1961 rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
1962 IXGBE_RXDCTL_RLPML_EN;
1966 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1967 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1969 ixgbevf_rx_desc_queue_enable(adapter, ring);
1970 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1973 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
1974 struct ixgbevf_ring *rx_ring)
1976 struct net_device *netdev = adapter->netdev;
1977 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1979 /* set build_skb and buffer size flags */
1980 clear_ring_build_skb_enabled(rx_ring);
1981 clear_ring_uses_large_buffer(rx_ring);
1983 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
1986 if (PAGE_SIZE < 8192)
1987 if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
1988 set_ring_uses_large_buffer(rx_ring);
1990 /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
1991 if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
1994 set_ring_build_skb_enabled(rx_ring);
1998 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1999 * @adapter: board private structure
2001 * Configure the Rx unit of the MAC after a reset.
2003 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
2005 struct ixgbe_hw *hw = &adapter->hw;
2006 struct net_device *netdev = adapter->netdev;
2009 ixgbevf_setup_psrtype(adapter);
2010 if (hw->mac.type >= ixgbe_mac_X550_vf)
2011 ixgbevf_setup_vfmrqc(adapter);
2013 spin_lock_bh(&adapter->mbx_lock);
2014 /* notify the PF of our intent to use this size of frame */
2015 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
2016 spin_unlock_bh(&adapter->mbx_lock);
2018 dev_err(&adapter->pdev->dev,
2019 "Failed to set MTU at %d\n", netdev->mtu);
2021 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2022 * the Base and Length of the Rx Descriptor Ring
2024 for (i = 0; i < adapter->num_rx_queues; i++) {
2025 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
2027 ixgbevf_set_rx_buffer_len(adapter, rx_ring);
2028 ixgbevf_configure_rx_ring(adapter, rx_ring);
2032 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
2033 __be16 proto, u16 vid)
2035 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2036 struct ixgbe_hw *hw = &adapter->hw;
2039 spin_lock_bh(&adapter->mbx_lock);
2041 /* add VID to filter table */
2042 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
2044 spin_unlock_bh(&adapter->mbx_lock);
2047 netdev_err(netdev, "VF could not set VLAN %d\n", vid);
2049 /* translate error return types so error makes sense */
2050 if (err == IXGBE_ERR_MBX)
2053 if (err == IXGBE_ERR_INVALID_ARGUMENT)
2057 set_bit(vid, adapter->active_vlans);
2062 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
2063 __be16 proto, u16 vid)
2065 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2066 struct ixgbe_hw *hw = &adapter->hw;
2069 spin_lock_bh(&adapter->mbx_lock);
2071 /* remove VID from filter table */
2072 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
2074 spin_unlock_bh(&adapter->mbx_lock);
2077 netdev_err(netdev, "Could not remove VLAN %d\n", vid);
2079 clear_bit(vid, adapter->active_vlans);
2084 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
2088 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2089 ixgbevf_vlan_rx_add_vid(adapter->netdev,
2090 htons(ETH_P_8021Q), vid);
2093 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
2095 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2096 struct ixgbe_hw *hw = &adapter->hw;
2099 if (!netdev_uc_empty(netdev)) {
2100 struct netdev_hw_addr *ha;
2102 netdev_for_each_uc_addr(ha, netdev) {
2103 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
2107 /* If the list is empty then send message to PF driver to
2108 * clear all MAC VLANs on this VF.
2110 hw->mac.ops.set_uc_addr(hw, 0, NULL);
2117 * ixgbevf_set_rx_mode - Multicast and unicast set
2118 * @netdev: network interface device structure
 * The set_rx_mode entry point is called whenever the multicast address
2121 * list, unicast address list or the network interface flags are updated.
2122 * This routine is responsible for configuring the hardware for proper
2123 * multicast mode and configuring requested unicast filters.
2125 static void ixgbevf_set_rx_mode(struct net_device *netdev)
2127 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2128 struct ixgbe_hw *hw = &adapter->hw;
2129 unsigned int flags = netdev->flags;
2132 /* request the most inclusive mode we need */
2133 if (flags & IFF_PROMISC)
2134 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
2135 else if (flags & IFF_ALLMULTI)
2136 xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
2137 else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
2138 xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
	else
		xcast_mode = IXGBEVF_XCAST_MODE_NONE;
2142 spin_lock_bh(&adapter->mbx_lock);
2144 hw->mac.ops.update_xcast_mode(hw, xcast_mode);
2146 /* reprogram multicast list */
2147 hw->mac.ops.update_mc_addr_list(hw, netdev);
2149 ixgbevf_write_uc_addr_list(netdev);
2151 spin_unlock_bh(&adapter->mbx_lock);
2154 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
2157 struct ixgbevf_q_vector *q_vector;
2158 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2160 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2161 q_vector = adapter->q_vector[q_idx];
2162 napi_enable(&q_vector->napi);
2166 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
2169 struct ixgbevf_q_vector *q_vector;
2170 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2172 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2173 q_vector = adapter->q_vector[q_idx];
2174 napi_disable(&q_vector->napi);
2178 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
2180 struct ixgbe_hw *hw = &adapter->hw;
2181 unsigned int def_q = 0;
2182 unsigned int num_tcs = 0;
2183 unsigned int num_rx_queues = adapter->num_rx_queues;
2184 unsigned int num_tx_queues = adapter->num_tx_queues;
2187 spin_lock_bh(&adapter->mbx_lock);
2189 /* fetch queue configuration from the PF */
2190 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2192 spin_unlock_bh(&adapter->mbx_lock);
	if (num_tcs > 1) {
		/* we need only one Tx queue */
		num_tx_queues = 1;
		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;
		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}
	/* if we have a bad config, abort and request a queue reset */
2209 if ((adapter->num_rx_queues != num_rx_queues) ||
2210 (adapter->num_tx_queues != num_tx_queues)) {
2211 /* force mailbox timeout to prevent further messages */
2212 hw->mbx.timeout = 0;
2214 /* wait for watchdog to come around and bail us out */
2215 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
2221 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
2223 ixgbevf_configure_dcb(adapter);
2225 ixgbevf_set_rx_mode(adapter->netdev);
2227 ixgbevf_restore_vlan(adapter);
2228 ixgbevf_ipsec_restore(adapter);
2230 ixgbevf_configure_tx(adapter);
2231 ixgbevf_configure_rx(adapter);
2234 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2236 /* Only save pre-reset stats if there are some */
2237 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2238 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2239 adapter->stats.base_vfgprc;
2240 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2241 adapter->stats.base_vfgptc;
2242 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2243 adapter->stats.base_vfgorc;
2244 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2245 adapter->stats.base_vfgotc;
2246 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2247 adapter->stats.base_vfmprc;
2251 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2253 struct ixgbe_hw *hw = &adapter->hw;
2255 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2256 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2257 adapter->stats.last_vfgorc |=
2258 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2259 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2260 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2261 adapter->stats.last_vfgotc |=
2262 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2263 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2265 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2266 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2267 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2268 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2269 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2272 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2274 struct ixgbe_hw *hw = &adapter->hw;
2275 static const int api[] = {
2282 ixgbe_mbox_api_unknown
	spin_lock_bh(&adapter->mbx_lock);
	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}
	if (hw->api_version >= ixgbe_mbox_api_15) {
2296 hw->mbx.ops.init_params(hw);
2297 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
2298 sizeof(struct ixgbe_mbx_operations));
2301 spin_unlock_bh(&adapter->mbx_lock);
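/* The api[] table lists mailbox API versions newest first, so the loop
 * settles on the most recent version the PF will accept. Once API 1.5 or
 * later is negotiated, the mailbox parameters are re-initialized and the
 * ops are overridden with the ixgbevf_mbx_ops table used for the newer
 * PF/VF mailbox handshake.
 */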
2304 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2306 struct net_device *netdev = adapter->netdev;
2307 struct pci_dev *pdev = adapter->pdev;
2308 struct ixgbe_hw *hw = &adapter->hw;
2311 ixgbevf_configure_msix(adapter);
2313 spin_lock_bh(&adapter->mbx_lock);
2315 if (is_valid_ether_addr(hw->mac.addr))
2316 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2320 spin_unlock_bh(&adapter->mbx_lock);
2322 state = adapter->link_state;
2323 hw->mac.ops.get_link_state(hw, &adapter->link_state);
2324 if (state && state != adapter->link_state)
2325 dev_info(&pdev->dev, "VF is administratively disabled\n");
2327 smp_mb__before_atomic();
2328 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2329 ixgbevf_napi_enable_all(adapter);
2331 /* clear any pending interrupts, may auto mask */
2332 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2333 ixgbevf_irq_enable(adapter);
2335 /* enable transmits */
2336 netif_tx_start_all_queues(netdev);
2338 ixgbevf_save_reset_stats(adapter);
2339 ixgbevf_init_last_counter_stats(adapter);
2341 hw->mac.get_link_status = 1;
2342 mod_timer(&adapter->service_timer, jiffies);
2345 void ixgbevf_up(struct ixgbevf_adapter *adapter)
2347 ixgbevf_configure(adapter);
2349 ixgbevf_up_complete(adapter);
2353 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2354 * @rx_ring: ring to free buffers from
2356 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2358 u16 i = rx_ring->next_to_clean;
2360 /* Free Rx ring sk_buff */
2362 dev_kfree_skb(rx_ring->skb);
2363 rx_ring->skb = NULL;
2366 /* Free all the Rx ring pages */
2367 while (i != rx_ring->next_to_alloc) {
2368 struct ixgbevf_rx_buffer *rx_buffer;
2370 rx_buffer = &rx_ring->rx_buffer_info[i];
2372 /* Invalidate cache lines that may have been written to by
2373 * device so that we avoid corrupting memory.
2375 dma_sync_single_range_for_cpu(rx_ring->dev,
2377 rx_buffer->page_offset,
2378 ixgbevf_rx_bufsz(rx_ring),
2381 /* free resources associated with mapping */
2382 dma_unmap_page_attrs(rx_ring->dev,
2384 ixgbevf_rx_pg_size(rx_ring),
2386 IXGBEVF_RX_DMA_ATTR);
2388 __page_frag_cache_drain(rx_buffer->page,
2389 rx_buffer->pagecnt_bias);
2392 if (i == rx_ring->count)
2396 rx_ring->next_to_alloc = 0;
2397 rx_ring->next_to_clean = 0;
2398 rx_ring->next_to_use = 0;
2402 * ixgbevf_clean_tx_ring - Free Tx Buffers
2403 * @tx_ring: ring to be cleaned
2405 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2407 u16 i = tx_ring->next_to_clean;
2408 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
2410 while (i != tx_ring->next_to_use) {
2411 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
2413 /* Free all the Tx ring sk_buffs */
2414 if (ring_is_xdp(tx_ring))
2415 page_frag_free(tx_buffer->data);
		else
			dev_kfree_skb_any(tx_buffer->skb);
2419 /* unmap skb header data */
2420 dma_unmap_single(tx_ring->dev,
2421 dma_unmap_addr(tx_buffer, dma),
2422 dma_unmap_len(tx_buffer, len),
2425 /* check for eop_desc to determine the end of the packet */
2426 eop_desc = tx_buffer->next_to_watch;
2427 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2429 /* unmap remaining buffers */
2430 while (tx_desc != eop_desc) {
2434 if (unlikely(i == tx_ring->count)) {
2436 tx_buffer = tx_ring->tx_buffer_info;
2437 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
2440 /* unmap any remaining paged data */
2441 if (dma_unmap_len(tx_buffer, len))
2442 dma_unmap_page(tx_ring->dev,
2443 dma_unmap_addr(tx_buffer, dma),
2444 dma_unmap_len(tx_buffer, len),
2448 /* move us one more past the eop_desc for start of next pkt */
2451 if (unlikely(i == tx_ring->count)) {
2453 tx_buffer = tx_ring->tx_buffer_info;
2457 /* reset next_to_use and next_to_clean */
2458 tx_ring->next_to_use = 0;
2459 tx_ring->next_to_clean = 0;
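/* Note on the walk above: next_to_clean..next_to_use covers every buffer
 * the hardware may still own. The first buffer of each packet carries the
 * skb (or XDP frame data) and the header mapping, so it is released with
 * dma_unmap_single(); the remaining buffers up to the eop descriptor only
 * hold paged fragments and are released with dma_unmap_page().
 */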
2464 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2465 * @adapter: board private structure
2467 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2471 for (i = 0; i < adapter->num_rx_queues; i++)
2472 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2476 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2477 * @adapter: board private structure
2479 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2483 for (i = 0; i < adapter->num_tx_queues; i++)
2484 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2485 for (i = 0; i < adapter->num_xdp_queues; i++)
2486 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
2489 void ixgbevf_down(struct ixgbevf_adapter *adapter)
2491 struct net_device *netdev = adapter->netdev;
2492 struct ixgbe_hw *hw = &adapter->hw;
2495 /* signal that we are down to the interrupt handler */
2496 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2497 return; /* do nothing if already down */
2499 /* disable all enabled Rx queues */
2500 for (i = 0; i < adapter->num_rx_queues; i++)
2501 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2503 usleep_range(10000, 20000);
2505 netif_tx_stop_all_queues(netdev);
2507 /* call carrier off first to avoid false dev_watchdog timeouts */
2508 netif_carrier_off(netdev);
2509 netif_tx_disable(netdev);
2511 ixgbevf_irq_disable(adapter);
2513 ixgbevf_napi_disable_all(adapter);
2515 del_timer_sync(&adapter->service_timer);
2517 /* disable transmits in the hardware now that interrupts are off */
2518 for (i = 0; i < adapter->num_tx_queues; i++) {
2519 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2521 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2522 IXGBE_TXDCTL_SWFLSH);
2525 for (i = 0; i < adapter->num_xdp_queues; i++) {
2526 u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
2528 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2529 IXGBE_TXDCTL_SWFLSH);
2532 if (!pci_channel_offline(adapter->pdev))
2533 ixgbevf_reset(adapter);
2535 ixgbevf_clean_all_tx_rings(adapter);
2536 ixgbevf_clean_all_rx_rings(adapter);
2539 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2541 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2544 ixgbevf_down(adapter);
2545 pci_set_master(adapter->pdev);
2546 ixgbevf_up(adapter);
2548 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2551 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2553 struct ixgbe_hw *hw = &adapter->hw;
2554 struct net_device *netdev = adapter->netdev;
2556 if (hw->mac.ops.reset_hw(hw)) {
2557 hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
2560 ixgbevf_negotiate_api(adapter);
2563 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2564 eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2565 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2568 adapter->last_reset = jiffies;
2571 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2574 int vector_threshold;
2576 /* We'll want at least 2 (vector_threshold):
2577 * 1) TxQ[0] + RxQ[0] handler
2578 * 2) Other (Link Status Change, etc.)
2580 vector_threshold = MIN_MSIX_COUNT;
2582 /* The more we get, the more we will assign to Tx/Rx Cleanup
2583 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2584 * Right now, we simply care about how many we'll get; we'll
2585 * set them up later while requesting irq's.
2587 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2588 vector_threshold, vectors);
2591 dev_err(&adapter->pdev->dev,
2592 "Unable to allocate MSI-X interrupts\n");
2593 kfree(adapter->msix_entries);
2594 adapter->msix_entries = NULL;
2598 /* Adjust for only the vectors we'll use, which is minimum
2599 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2600 * vectors we were allocated.
2602 adapter->num_msix_vectors = vectors;
2608 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2609 * @adapter: board private structure to initialize
2611 * This is the top level queue allocation routine. The order here is very
 * important, starting with the largest set of features turned on at once,
2613 * and ending with the smallest set of features. This way large combinations
2614 * can be allocated if they're turned on, and smaller combinations are the
2615 * fall through conditions.
2618 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2620 struct ixgbe_hw *hw = &adapter->hw;
2621 unsigned int def_q = 0;
2622 unsigned int num_tcs = 0;
2625 /* Start with base case */
2626 adapter->num_rx_queues = 1;
2627 adapter->num_tx_queues = 1;
2628 adapter->num_xdp_queues = 0;
2630 spin_lock_bh(&adapter->mbx_lock);
2632 /* fetch queue configuration from the PF */
2633 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2635 spin_unlock_bh(&adapter->mbx_lock);
	/* we need as many queues as traffic classes */
	if (num_tcs > 1) {
		adapter->num_rx_queues = num_tcs;
	} else {
		u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2646 switch (hw->api_version) {
2647 case ixgbe_mbox_api_11:
2648 case ixgbe_mbox_api_12:
2649 case ixgbe_mbox_api_13:
2650 case ixgbe_mbox_api_14:
2651 case ixgbe_mbox_api_15:
2652 if (adapter->xdp_prog &&
2653 hw->mac.max_tx_queues == rss)
2654 rss = rss > 3 ? 2 : 1;
2656 adapter->num_rx_queues = rss;
2657 adapter->num_tx_queues = rss;
2658 adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
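/* Example of the math above (a sketch, assuming IXGBEVF_MAX_RSS_QUEUES is 2
 * and no traffic classes): on an 8-CPU host rss = min(8, 2) = 2, giving
 * 2 Rx and 2 Tx queues. If an XDP program is loaded and the PF only grants
 * rss Tx queues, rss is reduced so the XDP Tx rings get slots of their own.
 */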
2667 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2668 * @adapter: board private structure to initialize
2670 * Attempt to configure the interrupts using the best available
2671 * capabilities of the hardware and the kernel.
2673 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2675 int vector, v_budget;
2677 /* It's easy to be greedy for MSI-X vectors, but it really
2678 * doesn't do us much good if we have a lot more vectors
	 * than CPUs. So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
2681 * The default is to use pairs of vectors.
2683 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2684 v_budget = min_t(int, v_budget, num_online_cpus());
2685 v_budget += NON_Q_VECTORS;
2687 adapter->msix_entries = kcalloc(v_budget,
2688 sizeof(struct msix_entry), GFP_KERNEL);
2689 if (!adapter->msix_entries)
2692 for (vector = 0; vector < v_budget; vector++)
2693 adapter->msix_entries[vector].entry = vector;
2695 /* A failure in MSI-X entry allocation isn't fatal, but the VF driver
2696 * does not support any other modes, so we will simply fail here. Note
	 * that we clean up the msix_entries pointer elsewhere.
2699 return ixgbevf_acquire_msix_vectors(adapter, v_budget);
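/* Rough vector budget example (assuming NON_Q_VECTORS is 1): with 2 Rx and
 * 2 Tx queues on a 4-CPU host, v_budget = min(max(2, 2), 4) + 1 = 3, i.e.
 * one MSI-X vector per Tx/Rx queue pair plus one for mailbox/link events.
 */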
2702 static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
2703 struct ixgbevf_ring_container *head)
2705 ring->next = head->ring;
2711 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
2712 * @adapter: board private structure to initialize
2713 * @v_idx: index of vector in adapter struct
2714 * @txr_count: number of Tx rings for q vector
2715 * @txr_idx: index of first Tx ring to assign
2716 * @xdp_count: total number of XDP rings to allocate
2717 * @xdp_idx: index of first XDP ring to allocate
2718 * @rxr_count: number of Rx rings for q vector
2719 * @rxr_idx: index of first Rx ring to assign
2721 * We allocate one q_vector. If allocation fails we return -ENOMEM.
2723 static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
2724 int txr_count, int txr_idx,
2725 int xdp_count, int xdp_idx,
2726 int rxr_count, int rxr_idx)
2728 struct ixgbevf_q_vector *q_vector;
2729 int reg_idx = txr_idx + xdp_idx;
2730 struct ixgbevf_ring *ring;
2731 int ring_count, size;
2733 ring_count = txr_count + xdp_count + rxr_count;
2734 size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
2736 /* allocate q_vector and rings */
2737 q_vector = kzalloc(size, GFP_KERNEL);
2741 /* initialize NAPI */
2742 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll);
2744 /* tie q_vector and adapter together */
2745 adapter->q_vector[v_idx] = q_vector;
2746 q_vector->adapter = adapter;
2747 q_vector->v_idx = v_idx;
2749 /* initialize pointer to rings */
2750 ring = q_vector->ring;
2753 /* assign generic ring traits */
2754 ring->dev = &adapter->pdev->dev;
2755 ring->netdev = adapter->netdev;
2757 /* configure backlink on ring */
2758 ring->q_vector = q_vector;
2760 /* update q_vector Tx values */
2761 ixgbevf_add_ring(ring, &q_vector->tx);
2763 /* apply Tx specific ring traits */
2764 ring->count = adapter->tx_ring_count;
2765 ring->queue_index = txr_idx;
2766 ring->reg_idx = reg_idx;
2768 /* assign ring to adapter */
2769 adapter->tx_ring[txr_idx] = ring;
2771 /* update count and index */
2776 /* push pointer to next ring */
2781 /* assign generic ring traits */
2782 ring->dev = &adapter->pdev->dev;
2783 ring->netdev = adapter->netdev;
2785 /* configure backlink on ring */
2786 ring->q_vector = q_vector;
2788 /* update q_vector Tx values */
2789 ixgbevf_add_ring(ring, &q_vector->tx);
2791 /* apply Tx specific ring traits */
2792 ring->count = adapter->tx_ring_count;
2793 ring->queue_index = xdp_idx;
2794 ring->reg_idx = reg_idx;
2797 /* assign ring to adapter */
2798 adapter->xdp_ring[xdp_idx] = ring;
2800 /* update count and index */
2805 /* push pointer to next ring */
2810 /* assign generic ring traits */
2811 ring->dev = &adapter->pdev->dev;
2812 ring->netdev = adapter->netdev;
2814 /* configure backlink on ring */
2815 ring->q_vector = q_vector;
2817 /* update q_vector Rx values */
2818 ixgbevf_add_ring(ring, &q_vector->rx);
2820 /* apply Rx specific ring traits */
2821 ring->count = adapter->rx_ring_count;
2822 ring->queue_index = rxr_idx;
2823 ring->reg_idx = rxr_idx;
2825 /* assign ring to adapter */
2826 adapter->rx_ring[rxr_idx] = ring;
2828 /* update count and index */
2832 /* push pointer to next ring */
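/* Memory layout note: the q_vector and all of its rings come from the single
 * kzalloc() above, with the Tx rings first, then the XDP rings, then the Rx
 * rings laid out back to back; advancing the ring pointer simply walks that
 * trailing array, which is why ring_count feeds directly into the allocation
 * size.
 */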
2840 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
2841 * @adapter: board private structure to initialize
2842 * @v_idx: index of vector in adapter struct
2844 * This function frees the memory allocated to the q_vector. In addition if
2845 * NAPI is enabled it will delete any references to the NAPI struct prior
2846 * to freeing the q_vector.
2848 static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
2850 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
2851 struct ixgbevf_ring *ring;
2853 ixgbevf_for_each_ring(ring, q_vector->tx) {
2854 if (ring_is_xdp(ring))
2855 adapter->xdp_ring[ring->queue_index] = NULL;
		else
			adapter->tx_ring[ring->queue_index] = NULL;
2860 ixgbevf_for_each_ring(ring, q_vector->rx)
2861 adapter->rx_ring[ring->queue_index] = NULL;
2863 adapter->q_vector[v_idx] = NULL;
2864 netif_napi_del(&q_vector->napi);
2866 /* ixgbevf_get_stats() might access the rings on this vector,
2867 * we must wait a grace period before freeing it.
2869 kfree_rcu(q_vector, rcu);
2873 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2874 * @adapter: board private structure to initialize
2876 * We allocate one q_vector per queue interrupt. If allocation fails we
2879 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2881 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2882 int rxr_remaining = adapter->num_rx_queues;
2883 int txr_remaining = adapter->num_tx_queues;
2884 int xdp_remaining = adapter->num_xdp_queues;
2885 int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
2888 if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
2889 for (; rxr_remaining; v_idx++, q_vectors--) {
2890 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2892 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2893 0, 0, 0, 0, rqpv, rxr_idx);
2897 /* update counts and index */
2898 rxr_remaining -= rqpv;
2903 for (; q_vectors; v_idx++, q_vectors--) {
2904 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2905 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
2906 int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);
2908 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2916 /* update counts and index */
2917 rxr_remaining -= rqpv;
2919 txr_remaining -= tqpv;
2921 xdp_remaining -= xqpv;
2930 ixgbevf_free_q_vector(adapter, v_idx);
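/* Distribution example for the DIV_ROUND_UP() logic above: with 3 q_vectors
 * and 4 Rx / 4 Tx / 0 XDP rings, vector 0 gets ceil(4/3) = 2 Rx and 2 Tx
 * rings while vectors 1 and 2 get one of each, so any remainder is always
 * absorbed by the earliest vectors.
 */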
2937 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2938 * @adapter: board private structure to initialize
2940 * This function frees the memory allocated to the q_vectors. In addition if
2941 * NAPI is enabled it will delete any references to the NAPI struct prior
2942 * to freeing the q_vector.
2944 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2946 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2950 ixgbevf_free_q_vector(adapter, q_vectors);
2955 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2956 * @adapter: board private structure
2959 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2961 if (!adapter->msix_entries)
2964 pci_disable_msix(adapter->pdev);
2965 kfree(adapter->msix_entries);
2966 adapter->msix_entries = NULL;
2970 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2971 * @adapter: board private structure to initialize
2974 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2978 /* Number of supported queues */
2979 ixgbevf_set_num_queues(adapter);
2981 err = ixgbevf_set_interrupt_capability(adapter);
2983 hw_dbg(&adapter->hw,
2984 "Unable to setup interrupt capabilities\n");
2985 goto err_set_interrupt;
2988 err = ixgbevf_alloc_q_vectors(adapter);
2990 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2991 goto err_alloc_q_vectors;
2994 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n",
2995 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
2996 adapter->num_rx_queues, adapter->num_tx_queues,
2997 adapter->num_xdp_queues);
2999 set_bit(__IXGBEVF_DOWN, &adapter->state);
3002 err_alloc_q_vectors:
3003 ixgbevf_reset_interrupt_capability(adapter);
3009 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
3010 * @adapter: board private structure to clear interrupt scheme on
3012 * We go through and clear interrupt specific resources and reset the structure
3013 * to pre-load conditions
3015 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
3017 adapter->num_tx_queues = 0;
3018 adapter->num_xdp_queues = 0;
3019 adapter->num_rx_queues = 0;
3021 ixgbevf_free_q_vectors(adapter);
3022 ixgbevf_reset_interrupt_capability(adapter);
3026 * ixgbevf_sw_init - Initialize general software structures
3027 * @adapter: board private structure to initialize
3029 * ixgbevf_sw_init initializes the Adapter private data structure.
3030 * Fields are initialized based on PCI device information and
3031 * OS network device settings (MTU size).
3033 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
3035 struct ixgbe_hw *hw = &adapter->hw;
3036 struct pci_dev *pdev = adapter->pdev;
3037 struct net_device *netdev = adapter->netdev;
3040 /* PCI config space info */
3041 hw->vendor_id = pdev->vendor;
3042 hw->device_id = pdev->device;
3043 hw->revision_id = pdev->revision;
3044 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3045 hw->subsystem_device_id = pdev->subsystem_device;
3047 hw->mbx.ops.init_params(hw);
3049 if (hw->mac.type >= ixgbe_mac_X550_vf) {
3050 err = ixgbevf_init_rss_key(adapter);
3055 /* assume legacy case in which PF would only give VF 2 queues */
3056 hw->mac.max_tx_queues = 2;
3057 hw->mac.max_rx_queues = 2;
3059 /* lock to protect mailbox accesses */
3060 spin_lock_init(&adapter->mbx_lock);
3062 err = hw->mac.ops.reset_hw(hw);
3064 dev_info(&pdev->dev,
3065 "PF still in reset state. Is the PF interface up?\n");
3067 err = hw->mac.ops.init_hw(hw);
3069 pr_err("init_shared_code failed: %d\n", err);
3072 ixgbevf_negotiate_api(adapter);
3073 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
3075 dev_info(&pdev->dev, "Error reading MAC address\n");
3076 else if (is_zero_ether_addr(adapter->hw.mac.addr))
3077 dev_info(&pdev->dev,
3078 "MAC address not assigned by administrator.\n");
3079 eth_hw_addr_set(netdev, hw->mac.addr);
3082 if (!is_valid_ether_addr(netdev->dev_addr)) {
3083 dev_info(&pdev->dev, "Assigning random MAC address\n");
3084 eth_hw_addr_random(netdev);
3085 ether_addr_copy(hw->mac.addr, netdev->dev_addr);
3086 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
3089 /* Enable dynamic interrupt throttling rates */
3090 adapter->rx_itr_setting = 1;
3091 adapter->tx_itr_setting = 1;
3093 /* set default ring sizes */
3094 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
3095 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
3097 adapter->link_state = true;
3099 set_bit(__IXGBEVF_DOWN, &adapter->state);
3106 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
3108 u32 current_counter = IXGBE_READ_REG(hw, reg); \
3109 if (current_counter < last_counter) \
3110 counter += 0x100000000LL; \
3111 last_counter = current_counter; \
3112 counter &= 0xFFFFFFFF00000000LL; \
3113 counter |= current_counter; \
3116 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
3118 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
3119 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
3120 u64 current_counter = (current_counter_msb << 32) | \
3121 current_counter_lsb; \
3122 if (current_counter < last_counter) \
3123 counter += 0x1000000000LL; \
3124 last_counter = current_counter; \
3125 counter &= 0xFFFFFFF000000000LL; \
3126 counter |= current_counter; \
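/* These helpers splice a free-running hardware count into a monotonically
 * increasing 64-bit software counter. Worked example for the 32-bit case:
 * if last_counter was 0xFFFFFFF0 and the register now reads 0x00000010,
 * the wrap is detected (current < last) and 2^32 is added before the low
 * 32 bits are replaced, so the counter advances by 0x20 instead of going
 * backwards. The 36-bit variant does the same with a 2^36 modulus for the
 * octet counters that are split across LSB/MSB registers.
 */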
3129 * ixgbevf_update_stats - Update the board statistics counters.
3130 * @adapter: board private structure
3132 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
3134 struct ixgbe_hw *hw = &adapter->hw;
3135 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
3136 u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
3139 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3140 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3143 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3144 adapter->stats.vfgprc);
3145 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3146 adapter->stats.vfgptc);
3147 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3148 adapter->stats.last_vfgorc,
3149 adapter->stats.vfgorc);
3150 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3151 adapter->stats.last_vfgotc,
3152 adapter->stats.vfgotc);
3153 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3154 adapter->stats.vfmprc);
3156 for (i = 0; i < adapter->num_rx_queues; i++) {
3157 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
3159 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
3160 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
3161 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
3162 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
3165 adapter->hw_csum_rx_error = hw_csum_rx_error;
3166 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
3167 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
3168 adapter->alloc_rx_page = alloc_rx_page;
3172 * ixgbevf_service_timer - Timer Call-back
3173 * @t: pointer to timer_list struct
3175 static void ixgbevf_service_timer(struct timer_list *t)
3177 struct ixgbevf_adapter *adapter = from_timer(adapter, t,
3180 /* Reset the timer */
3181 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
3183 ixgbevf_service_event_schedule(adapter);
3186 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
3188 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
3192 /* If we're already down or resetting, just bail */
3193 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3194 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
3195 test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
3200 adapter->tx_timeout_count++;
3202 ixgbevf_reinit_locked(adapter);
3207 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
3208 * @adapter: pointer to the device adapter structure
3210 * This function serves two purposes. First it strobes the interrupt lines
3211 * in order to make certain interrupts are occurring. Secondly it sets the
3212 * bits needed to check for TX hangs. As a result we should immediately
3213 * determine if a hang has occurred.
3215 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
3217 struct ixgbe_hw *hw = &adapter->hw;
3221 /* If we're down or resetting, just bail */
3222 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3223 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3226 /* Force detection of hung controller */
3227 if (netif_carrier_ok(adapter->netdev)) {
3228 for (i = 0; i < adapter->num_tx_queues; i++)
3229 set_check_for_tx_hang(adapter->tx_ring[i]);
3230 for (i = 0; i < adapter->num_xdp_queues; i++)
3231 set_check_for_tx_hang(adapter->xdp_ring[i]);
3234 /* get one bit for every active Tx/Rx interrupt vector */
3235 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
3236 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
3238 if (qv->rx.ring || qv->tx.ring)
3242 /* Cause software interrupt to ensure rings are cleaned */
3243 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
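/* Writing the accumulated bits to VTEICS raises a software-triggered
 * interrupt on every vector that owns at least one ring, so even if a
 * hardware interrupt was lost the corresponding NAPI poll runs again and
 * the Tx hang check armed above gets a chance to report.
 */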
3247 * ixgbevf_watchdog_update_link - update the link status
3248 * @adapter: pointer to the device adapter structure
3250 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
3252 struct ixgbe_hw *hw = &adapter->hw;
3253 u32 link_speed = adapter->link_speed;
3254 bool link_up = adapter->link_up;
3257 spin_lock_bh(&adapter->mbx_lock);
3259 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3261 spin_unlock_bh(&adapter->mbx_lock);
3263 /* if check for link returns error we will need to reset */
3264 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
3265 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
3269 adapter->link_up = link_up;
3270 adapter->link_speed = link_speed;
3274 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
3275 * print link up message
3276 * @adapter: pointer to the device adapter structure
3278 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
3280 struct net_device *netdev = adapter->netdev;
3282 /* only continue if link was previously down */
3283 if (netif_carrier_ok(netdev))
3286 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
3287 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
3289 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
3291 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
3295 netif_carrier_on(netdev);
3299 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
3300 * print link down message
3301 * @adapter: pointer to the adapter structure
3303 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
3305 struct net_device *netdev = adapter->netdev;
3307 adapter->link_speed = 0;
3309 /* only continue if link was up previously */
3310 if (!netif_carrier_ok(netdev))
3313 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
3315 netif_carrier_off(netdev);
3319 * ixgbevf_watchdog_subtask - worker thread to bring link up
3320 * @adapter: board private structure
3322 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
3324 /* if interface is down do nothing */
3325 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3326 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3329 ixgbevf_watchdog_update_link(adapter);
3331 if (adapter->link_up && adapter->link_state)
3332 ixgbevf_watchdog_link_is_up(adapter);
	else
		ixgbevf_watchdog_link_is_down(adapter);
3336 ixgbevf_update_stats(adapter);
3340 * ixgbevf_service_task - manages and runs subtasks
3341 * @work: pointer to work_struct containing our data
3343 static void ixgbevf_service_task(struct work_struct *work)
3345 struct ixgbevf_adapter *adapter = container_of(work,
3346 struct ixgbevf_adapter,
3348 struct ixgbe_hw *hw = &adapter->hw;
3350 if (IXGBE_REMOVED(hw->hw_addr)) {
3351 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
3353 ixgbevf_down(adapter);
3359 ixgbevf_queue_reset_subtask(adapter);
3360 ixgbevf_reset_subtask(adapter);
3361 ixgbevf_watchdog_subtask(adapter);
3362 ixgbevf_check_hang_subtask(adapter);
3364 ixgbevf_service_event_complete(adapter);
3368 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
3369 * @tx_ring: Tx descriptor ring for a specific queue
3371 * Free all transmit software resources
3373 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
3375 ixgbevf_clean_tx_ring(tx_ring);
3377 vfree(tx_ring->tx_buffer_info);
3378 tx_ring->tx_buffer_info = NULL;
3380 /* if not set, then don't free */
3384 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
3387 tx_ring->desc = NULL;
3391 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
3392 * @adapter: board private structure
3394 * Free all transmit software resources
3396 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
3400 for (i = 0; i < adapter->num_tx_queues; i++)
3401 if (adapter->tx_ring[i]->desc)
3402 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3403 for (i = 0; i < adapter->num_xdp_queues; i++)
3404 if (adapter->xdp_ring[i]->desc)
3405 ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
3409 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
3410 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
3412 * Return 0 on success, negative on failure
3414 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
3416 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3419 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
3420 tx_ring->tx_buffer_info = vmalloc(size);
3421 if (!tx_ring->tx_buffer_info)
3424 u64_stats_init(&tx_ring->syncp);
3426 /* round up to nearest 4K */
3427 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3428 tx_ring->size = ALIGN(tx_ring->size, 4096);
3430 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
3431 &tx_ring->dma, GFP_KERNEL);
3438 vfree(tx_ring->tx_buffer_info);
3439 tx_ring->tx_buffer_info = NULL;
3440 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3445 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3446 * @adapter: board private structure
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
3452 * Return 0 on success, negative on failure
3454 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3456 int i, j = 0, err = 0;
3458 for (i = 0; i < adapter->num_tx_queues; i++) {
3459 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3462 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3466 for (j = 0; j < adapter->num_xdp_queues; j++) {
3467 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
3470 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
3476 /* rewind the index freeing the rings as we go */
3478 ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
3480 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3486 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3487 * @adapter: board private structure
3488 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3490 * Returns 0 on success, negative on failure
3492 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
3493 struct ixgbevf_ring *rx_ring)
3497 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3498 rx_ring->rx_buffer_info = vmalloc(size);
3499 if (!rx_ring->rx_buffer_info)
3502 u64_stats_init(&rx_ring->syncp);
3504 /* Round up to nearest 4K */
3505 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3506 rx_ring->size = ALIGN(rx_ring->size, 4096);
3508 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3509 &rx_ring->dma, GFP_KERNEL);
3514 /* XDP RX-queue info */
3515 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
3516 rx_ring->queue_index, 0) < 0)
3519 rx_ring->xdp_prog = adapter->xdp_prog;
3523 vfree(rx_ring->rx_buffer_info);
3524 rx_ring->rx_buffer_info = NULL;
3525 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3530 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3531 * @adapter: board private structure
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
3537 * Return 0 on success, negative on failure
3539 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3543 for (i = 0; i < adapter->num_rx_queues; i++) {
3544 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
3547 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3553 /* rewind the index freeing the rings as we go */
3555 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3560 * ixgbevf_free_rx_resources - Free Rx Resources
3561 * @rx_ring: ring to clean the resources from
3563 * Free all receive software resources
3565 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3567 ixgbevf_clean_rx_ring(rx_ring);
3569 rx_ring->xdp_prog = NULL;
3570 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
3571 vfree(rx_ring->rx_buffer_info);
3572 rx_ring->rx_buffer_info = NULL;
3574 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3577 rx_ring->desc = NULL;
3581 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3582 * @adapter: board private structure
3584 * Free all receive software resources
3586 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3590 for (i = 0; i < adapter->num_rx_queues; i++)
3591 if (adapter->rx_ring[i]->desc)
3592 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3596 * ixgbevf_open - Called when a network interface is made active
3597 * @netdev: network interface device structure
3599 * Returns 0 on success, negative value on failure
3601 * The open entry point is called when a network interface is made
3602 * active by the system (IFF_UP). At this point all resources needed
3603 * for transmit and receive operations are allocated, the interrupt
3604 * handler is registered with the OS, the watchdog timer is started,
3605 * and the stack is notified that the interface is ready.
3607 int ixgbevf_open(struct net_device *netdev)
3609 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3610 struct ixgbe_hw *hw = &adapter->hw;
3613 /* A previous failure to open the device because of a lack of
3614 * available MSIX vector resources may have reset the number
3615 * of msix vectors variable to zero. The only way to recover
3616 * is to unload/reload the driver and hope that the system has
3617 * been able to recover some MSIX vector resources.
3619 if (!adapter->num_msix_vectors)
3622 if (hw->adapter_stopped) {
3623 ixgbevf_reset(adapter);
3624 /* if adapter is still stopped then PF isn't up and
3625 * the VF can't start.
3627 if (hw->adapter_stopped) {
3628 err = IXGBE_ERR_MBX;
3629 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3630 goto err_setup_reset;
3634 /* disallow open during test */
3635 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3638 netif_carrier_off(netdev);
3640 /* allocate transmit descriptors */
3641 err = ixgbevf_setup_all_tx_resources(adapter);
3645 /* allocate receive descriptors */
3646 err = ixgbevf_setup_all_rx_resources(adapter);
3650 ixgbevf_configure(adapter);
3652 err = ixgbevf_request_irq(adapter);
3656 /* Notify the stack of the actual queue counts. */
3657 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3659 goto err_set_queues;
3661 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
3663 goto err_set_queues;
3665 ixgbevf_up_complete(adapter);
3670 ixgbevf_free_irq(adapter);
3672 ixgbevf_free_all_rx_resources(adapter);
3674 ixgbevf_free_all_tx_resources(adapter);
3676 ixgbevf_reset(adapter);
3683 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
3684 * @adapter: the private adapter struct
3686 * This function should contain the necessary work common to both suspending
3687 * and closing of the device.
3689 static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
3691 ixgbevf_down(adapter);
3692 ixgbevf_free_irq(adapter);
3693 ixgbevf_free_all_tx_resources(adapter);
3694 ixgbevf_free_all_rx_resources(adapter);
3698 * ixgbevf_close - Disables a network interface
3699 * @netdev: network interface device structure
3701 * Returns 0, this is not allowed to fail
3703 * The close entry point is called when an interface is de-activated
3704 * by the OS. The hardware is still under the drivers control, but
3705 * needs to be disabled. A global MAC reset is issued to stop the
3706 * hardware, and all transmit and receive resources are freed.
3708 int ixgbevf_close(struct net_device *netdev)
3710 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3712 if (netif_device_present(netdev))
3713 ixgbevf_close_suspend(adapter);
3718 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3720 struct net_device *dev = adapter->netdev;
3722 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3726 /* if interface is down do nothing */
3727 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3728 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3731 /* Hardware has to reinitialize queues and interrupts to
3732 * match packet buffer alignment. Unfortunately, the
3733 * hardware is not flexible enough to do this dynamically.
3737 if (netif_running(dev))
3740 ixgbevf_clear_interrupt_scheme(adapter);
3741 ixgbevf_init_interrupt_scheme(adapter);
3743 if (netif_running(dev))
3749 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3750 u32 vlan_macip_lens, u32 fceof_saidx,
3751 u32 type_tucmd, u32 mss_l4len_idx)
3753 struct ixgbe_adv_tx_context_desc *context_desc;
3754 u16 i = tx_ring->next_to_use;
3756 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3759 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3761 /* set bits to identify this as an advanced context descriptor */
3762 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3764 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3765 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
3766 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3767 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3770 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3771 struct ixgbevf_tx_buffer *first,
3773 struct ixgbevf_ipsec_tx_data *itd)
3775 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3776 struct sk_buff *skb = first->skb;
3786 u32 paylen, l4_offset;
3787 u32 fceof_saidx = 0;
3790 if (skb->ip_summed != CHECKSUM_PARTIAL)
3793 if (!skb_is_gso(skb))
3796 err = skb_cow_head(skb, 0);
3800 if (eth_p_mpls(first->protocol))
3801 ip.hdr = skb_inner_network_header(skb);
	else
		ip.hdr = skb_network_header(skb);
3804 l4.hdr = skb_checksum_start(skb);
3806 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3807 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3809 /* initialize outer IP header fields */
3810 if (ip.v4->version == 4) {
3811 unsigned char *csum_start = skb_checksum_start(skb);
3812 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
3813 int len = csum_start - trans_start;
3815 /* IP header will have to cancel out any data that
3816 * is not a part of the outer IP header, so set to
3817 * a reverse csum if needed, else init check to 0.
		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
					   csum_fold(csum_partial(trans_start,
								  len, 0)) : 0;
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}
3834 /* determine offset of inner transport header */
3835 l4_offset = l4.hdr - skb->data;
3837 /* compute length of segmentation header */
3838 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3840 /* remove payload length from inner checksum */
3841 paylen = skb->len - l4_offset;
3842 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
3844 /* update gso size and bytecount with header size */
3845 first->gso_segs = skb_shinfo(skb)->gso_segs;
3846 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3848 /* mss_l4len_id: use 1 as index for TSO */
3849 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
3850 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3851 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
3853 fceof_saidx |= itd->pfsa;
3854 type_tucmd |= itd->flags | itd->trailer_len;
3856 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3857 vlan_macip_lens = l4.hdr - ip.hdr;
3858 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3859 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
			    mss_l4len_idx);
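/* Illustration of the context descriptor built above (a sketch for a plain
 * IPv4/TCP TSO frame with a 1460 byte MSS and no VLAN tag): hdr_len covers
 * the Ethernet + IP + TCP headers, mss_l4len_idx packs the TCP header length
 * and the MSS (1460 << IXGBE_ADVTXD_MSS_SHIFT) together with context index 1,
 * and vlan_macip_lens packs the IP header length in its low bits with the
 * MAC header length (14 << IXGBE_ADVTXD_MACLEN_SHIFT).
 */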
3867 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3868 struct ixgbevf_tx_buffer *first,
3869 struct ixgbevf_ipsec_tx_data *itd)
3871 struct sk_buff *skb = first->skb;
3872 u32 vlan_macip_lens = 0;
3873 u32 fceof_saidx = 0;
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;
	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto no_csum;
	}
3897 if (first->protocol == htons(ETH_P_IP))
3898 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3900 /* update TX checksum flag */
3901 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3902 vlan_macip_lens = skb_checksum_start_offset(skb) -
3903 skb_network_offset(skb);
no_csum:
	/* vlan_macip_lens: MACLEN, VLAN tag */
3906 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3907 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3909 fceof_saidx |= itd->pfsa;
3910 type_tucmd |= itd->flags | itd->trailer_len;
3912 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3913 fceof_saidx, type_tucmd, 0);
3916 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3918 /* set type for advanced descriptor with frame checksum insertion */
3919 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3920 IXGBE_ADVTXD_DCMD_IFCS |
3921 IXGBE_ADVTXD_DCMD_DEXT);
3923 /* set HW VLAN bit if VLAN is present */
3924 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3925 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3927 /* set segmentation enable bits for TSO/FSO */
3928 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3929 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3934 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3935 u32 tx_flags, unsigned int paylen)
3937 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3939 /* enable L4 checksum for TSO and TX checksum offload */
3940 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3941 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
	/* enable IPv4 checksum for TSO */
3944 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3945 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3948 if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
3949 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
3951 /* use index 1 context for TSO/FSO/FCOE/IPSEC */
3952 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
3953 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
	/* Check Context must be set if Tx switch is enabled, which it
	 * always is when virtual functions are running.
	 */
3958 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3960 tx_desc->read.olinfo_status = olinfo_status;
3963 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3964 struct ixgbevf_tx_buffer *first,
3967 struct sk_buff *skb = first->skb;
3968 struct ixgbevf_tx_buffer *tx_buffer;
3969 union ixgbe_adv_tx_desc *tx_desc;
3972 unsigned int data_len, size;
3973 u32 tx_flags = first->tx_flags;
3974 __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3975 u16 i = tx_ring->next_to_use;
3977 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3979 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
3981 size = skb_headlen(skb);
3982 data_len = skb->data_len;
3984 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3988 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3989 if (dma_mapping_error(tx_ring->dev, dma))
3992 /* record length, and DMA address */
3993 dma_unmap_len_set(tx_buffer, len, size);
3994 dma_unmap_addr_set(tx_buffer, dma, dma);
3996 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3998 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3999 tx_desc->read.cmd_type_len =
4000 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
4004 if (i == tx_ring->count) {
4005 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
4008 tx_desc->read.olinfo_status = 0;
4010 dma += IXGBE_MAX_DATA_PER_TXD;
4011 size -= IXGBE_MAX_DATA_PER_TXD;
4013 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4016 if (likely(!data_len))
4019 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
4023 if (i == tx_ring->count) {
4024 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
4027 tx_desc->read.olinfo_status = 0;
4029 size = skb_frag_size(frag);
4032 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
4035 tx_buffer = &tx_ring->tx_buffer_info[i];
4038 /* write last descriptor with RS and EOP bits */
4039 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
4040 tx_desc->read.cmd_type_len = cmd_type;
4042 /* set the timestamp */
4043 first->time_stamp = jiffies;
4045 skb_tx_timestamp(skb);
4047 /* Force memory writes to complete before letting h/w know there
4048 * are new descriptors to fetch. (Only applicable for weak-ordered
4049 * memory model archs, such as IA-64).
4051 * We also need this memory barrier (wmb) to make certain all of the
4052 * status bits have been updated before next_to_watch is written.
4056 /* set next_to_watch value indicating a packet is present */
4057 first->next_to_watch = tx_desc;
4060 if (i == tx_ring->count)
4063 tx_ring->next_to_use = i;
4065 /* notify HW of packet */
4066 ixgbevf_write_tail(tx_ring, i);
4070 dev_err(tx_ring->dev, "TX DMA map failed\n");
4071 tx_buffer = &tx_ring->tx_buffer_info[i];
4073 /* clear dma mappings for failed tx_buffer_info map */
4074 while (tx_buffer != first) {
4075 if (dma_unmap_len(tx_buffer, len))
4076 dma_unmap_page(tx_ring->dev,
4077 dma_unmap_addr(tx_buffer, dma),
4078 dma_unmap_len(tx_buffer, len),
4080 dma_unmap_len_set(tx_buffer, len, 0);
4083 i += tx_ring->count;
4084 tx_buffer = &tx_ring->tx_buffer_info[i];
4087 if (dma_unmap_len(tx_buffer, len))
4088 dma_unmap_single(tx_ring->dev,
4089 dma_unmap_addr(tx_buffer, dma),
4090 dma_unmap_len(tx_buffer, len),
4092 dma_unmap_len_set(tx_buffer, len, 0);
4094 dev_kfree_skb_any(tx_buffer->skb);
4095 tx_buffer->skb = NULL;
4097 tx_ring->next_to_use = i;
4100 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4102 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
4103 /* Herbert's original patch had:
4104 * smp_mb__after_netif_stop_queue();
4105 * but since that doesn't exist yet, just open code it.
	/* We need to check again in case another CPU has just
	 * made room available.
	 */
4112 if (likely(ixgbevf_desc_unused(tx_ring) < size))
4115 /* A reprieve! - use start_queue because it doesn't call schedule */
4116 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
4117 ++tx_ring->tx_stats.restart_queue;
4122 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4124 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
4126 return __ixgbevf_maybe_stop_tx(tx_ring, size);
4129 static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
4130 struct ixgbevf_ring *tx_ring)
4132 struct ixgbevf_tx_buffer *first;
4135 u16 count = TXD_USE_COUNT(skb_headlen(skb));
4136 struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
4137 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4141 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
4143 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
4144 dev_kfree_skb_any(skb);
4145 return NETDEV_TX_OK;
4148 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
4149 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
4150 * + 2 desc gap to keep tail from touching head,
4151 * + 1 desc for context descriptor,
4152 * otherwise try next time
4154 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4155 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
4156 skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
4158 count += TXD_USE_COUNT(skb_frag_size(frag));
4161 count += skb_shinfo(skb)->nr_frags;
4163 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
4164 tx_ring->tx_stats.tx_busy++;
4165 return NETDEV_TX_BUSY;
4168 /* record the location of the first descriptor for this packet */
4169 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4171 first->bytecount = skb->len;
4172 first->gso_segs = 1;
4174 if (skb_vlan_tag_present(skb)) {
4175 tx_flags |= skb_vlan_tag_get(skb);
4176 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4177 tx_flags |= IXGBE_TX_FLAGS_VLAN;
4180 /* record initial flags and protocol */
4181 first->tx_flags = tx_flags;
4182 first->protocol = vlan_get_protocol(skb);
4184 #ifdef CONFIG_IXGBEVF_IPSEC
4185 if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
4188 tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
4192 ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
4194 ixgbevf_tx_map(tx_ring, first, hdr_len);
4196 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
4198 return NETDEV_TX_OK;
4201 dev_kfree_skb_any(first->skb);
4204 return NETDEV_TX_OK;
4207 static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4209 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4210 struct ixgbevf_ring *tx_ring;
4212 if (skb->len <= 0) {
4213 dev_kfree_skb_any(skb);
4214 return NETDEV_TX_OK;
4217 /* The minimum packet size for olinfo paylen is 17 so pad the skb
4218 * in order to meet this minimum size requirement.
4220 if (skb->len < 17) {
4221 if (skb_padto(skb, 17))
4222 return NETDEV_TX_OK;
4226 tx_ring = adapter->tx_ring[skb->queue_mapping];
4227 return ixgbevf_xmit_frame_ring(skb, tx_ring);
4231 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
4232 * @netdev: network interface device structure
4233 * @p: pointer to an address structure
4235 * Returns 0 on success, negative on failure
4237 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
4239 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4240 struct ixgbe_hw *hw = &adapter->hw;
4241 struct sockaddr *addr = p;
4244 if (!is_valid_ether_addr(addr->sa_data))
4245 return -EADDRNOTAVAIL;
4247 spin_lock_bh(&adapter->mbx_lock);
4249 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
4251 spin_unlock_bh(&adapter->mbx_lock);
4256 ether_addr_copy(hw->mac.addr, addr->sa_data);
4257 ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
4258 eth_hw_addr_set(netdev, addr->sa_data);
4264 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
4265 * @netdev: network interface device structure
4266 * @new_mtu: new value for maximum frame size
4268 * Returns 0 on success, negative on failure
4270 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4272 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4273 struct ixgbe_hw *hw = &adapter->hw;
4274 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4277 /* prevent MTU being changed to a size unsupported by XDP */
4278 if (adapter->xdp_prog) {
4279 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
4283 spin_lock_bh(&adapter->mbx_lock);
4284 /* notify the PF of our intent to use this size of frame */
4285 ret = hw->mac.ops.set_rlpml(hw, max_frame);
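/* the PF may refuse a frame size it cannot support, in which case the MTU change fails below */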
4286 spin_unlock_bh(&adapter->mbx_lock);
4288 if (ret)
4289 return -EINVAL;
4290 hw_dbg(hw, "changing MTU from %d to %d\n",
4291 netdev->mtu, new_mtu);
4293 /* must set new MTU before calling down or up */
4294 WRITE_ONCE(netdev->mtu, new_mtu);
4296 if (netif_running(netdev))
4297 ixgbevf_reinit_locked(adapter);
4299 return 0;
4300 }
4302 static int ixgbevf_suspend(struct device *dev_d)
4303 {
4304 struct net_device *netdev = dev_get_drvdata(dev_d);
4305 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4307 rtnl_lock();
4308 netif_device_detach(netdev);
4310 if (netif_running(netdev))
4311 ixgbevf_close_suspend(adapter);
4313 ixgbevf_clear_interrupt_scheme(adapter);
4314 rtnl_unlock();
4316 return 0;
4317 }
4319 static int ixgbevf_resume(struct device *dev_d)
4320 {
4321 struct pci_dev *pdev = to_pci_dev(dev_d);
4322 struct net_device *netdev = pci_get_drvdata(pdev);
4323 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4324 u32 err;
4326 adapter->hw.hw_addr = adapter->io_addr;
4327 smp_mb__before_atomic();
4328 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4329 pci_set_master(pdev);
4331 ixgbevf_reset(adapter);
4333 rtnl_lock();
4334 err = ixgbevf_init_interrupt_scheme(adapter);
4335 if (!err && netif_running(netdev))
4336 err = ixgbevf_open(netdev);
4337 rtnl_unlock();
4338 if (err)
4339 return err;
4341 netif_device_attach(netdev);
4343 return err;
4344 }
4346 static void ixgbevf_shutdown(struct pci_dev *pdev)
4347 {
4348 ixgbevf_suspend(&pdev->dev);
4349 }
4351 static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
4352 const struct ixgbevf_ring *ring)
4353 {
4354 u64 bytes, packets;
4355 unsigned int start;
4357 if (ring) {
4358 do {
4359 start = u64_stats_fetch_begin(&ring->syncp);
4360 bytes = ring->stats.bytes;
4361 packets = ring->stats.packets;
4362 } while (u64_stats_fetch_retry(&ring->syncp, start));
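/* the fetch/retry pair above re-reads the counters until the u64_stats seqcount is stable, which matters on 32-bit hosts where 64-bit counters cannot be read atomically */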
4363 stats->tx_bytes += bytes;
4364 stats->tx_packets += packets;
4365 }
4366 }
4368 static void ixgbevf_get_stats(struct net_device *netdev,
4369 struct rtnl_link_stats64 *stats)
4370 {
4371 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4372 unsigned int start;
4373 u64 bytes, packets;
4374 const struct ixgbevf_ring *ring;
4375 int i;
4377 ixgbevf_update_stats(adapter);
4379 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
4381 rcu_read_lock();
4382 for (i = 0; i < adapter->num_rx_queues; i++) {
4383 ring = adapter->rx_ring[i];
4384 do {
4385 start = u64_stats_fetch_begin(&ring->syncp);
4386 bytes = ring->stats.bytes;
4387 packets = ring->stats.packets;
4388 } while (u64_stats_fetch_retry(&ring->syncp, start));
4389 stats->rx_bytes += bytes;
4390 stats->rx_packets += packets;
4391 }
4393 for (i = 0; i < adapter->num_tx_queues; i++) {
4394 ring = adapter->tx_ring[i];
4395 ixgbevf_get_tx_ring_stats(stats, ring);
4396 }
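/* XDP_TX rings transmit frames as well, so their byte/packet counters are folded into the same tx totals */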
4398 for (i = 0; i < adapter->num_xdp_queues; i++) {
4399 ring = adapter->xdp_ring[i];
4400 ixgbevf_get_tx_ring_stats(stats, ring);
4401 }
4402 rcu_read_unlock();
4403 }
4405 #define IXGBEVF_MAX_MAC_HDR_LEN 127
4406 #define IXGBEVF_MAX_NETWORK_HDR_LEN 511
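/* the advanced Tx context descriptor encodes MACLEN in 7 bits and IPLEN in 9 bits, which is where the 127 and 511 byte ceilings above come from */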
4408 static netdev_features_t
4409 ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
4410 netdev_features_t features)
4411 {
4412 unsigned int network_hdr_len, mac_hdr_len;
4414 /* Make certain the headers can be described by a context descriptor */
4415 mac_hdr_len = skb_network_offset(skb);
4416 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
4417 return features & ~(NETIF_F_HW_CSUM |
4418 NETIF_F_SCTP_CRC |
4419 NETIF_F_HW_VLAN_CTAG_TX |
4420 NETIF_F_TSO |
4421 NETIF_F_TSO6);
4423 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4424 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
4425 return features & ~(NETIF_F_HW_CSUM |
4426 NETIF_F_SCTP_CRC |
4427 NETIF_F_TSO |
4428 NETIF_F_TSO6);
4430 /* We can only support IPV4 TSO in tunnels if we can mangle the
4431 * inner IP ID field, so strip TSO if MANGLEID is not supported.
4432 */
4433 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4434 features &= ~NETIF_F_TSO;
4436 return features;
4437 }
4439 static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
4440 {
4441 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4442 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4443 struct bpf_prog *old_prog;
4445 /* verify ixgbevf ring attributes are sufficient for XDP */
4446 for (i = 0; i < adapter->num_rx_queues; i++) {
4447 struct ixgbevf_ring *ring = adapter->rx_ring[i];
4449 if (frame_size > ixgbevf_rx_bufsz(ring))
4450 return -EINVAL;
4451 }
4453 old_prog = xchg(&adapter->xdp_prog, prog);
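/* xchg() swaps in the new program pointer atomically; the old program is only released further down, once the rings no longer reference it */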
4455 /* If transitioning XDP modes reconfigure rings */
4456 if (!!prog != !!old_prog) {
4457 /* Hardware has to reinitialize queues and interrupts to
4458 * match packet buffer alignment. Unfortunately, the
4459 * hardware is not flexible enough to do this dynamically.
4460 */
4461 if (netif_running(dev))
4462 ixgbevf_close(dev);
4464 ixgbevf_clear_interrupt_scheme(adapter);
4465 ixgbevf_init_interrupt_scheme(adapter);
4467 if (netif_running(dev))
4468 ixgbevf_open(dev);
4469 } else {
4470 for (i = 0; i < adapter->num_rx_queues; i++)
4471 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
4472 }
4474 if (old_prog)
4475 bpf_prog_put(old_prog);
4477 return 0;
4478 }
4480 static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4481 {
4482 switch (xdp->command) {
4483 case XDP_SETUP_PROG:
4484 return ixgbevf_xdp_setup(dev, xdp->prog);
4485 default:
4486 return -EINVAL;
4487 }
4488 }
4490 static const struct net_device_ops ixgbevf_netdev_ops = {
4491 .ndo_open = ixgbevf_open,
4492 .ndo_stop = ixgbevf_close,
4493 .ndo_start_xmit = ixgbevf_xmit_frame,
4494 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4495 .ndo_get_stats64 = ixgbevf_get_stats,
4496 .ndo_validate_addr = eth_validate_addr,
4497 .ndo_set_mac_address = ixgbevf_set_mac,
4498 .ndo_change_mtu = ixgbevf_change_mtu,
4499 .ndo_tx_timeout = ixgbevf_tx_timeout,
4500 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
4501 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
4502 .ndo_features_check = ixgbevf_features_check,
4503 .ndo_bpf = ixgbevf_xdp,
4504 };
4506 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
4507 {
4508 dev->netdev_ops = &ixgbevf_netdev_ops;
4509 ixgbevf_set_ethtool_ops(dev);
4510 dev->watchdog_timeo = 5 * HZ;
4511 }
4513 /**
4514 * ixgbevf_probe - Device Initialization Routine
4515 * @pdev: PCI device information struct
4516 * @ent: entry in ixgbevf_pci_tbl
4517 *
4518 * Returns 0 on success, negative on failure
4519 *
4520 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
4521 * The OS initialization, configuring of the adapter private structure,
4522 * and a hardware reset occur.
4523 **/
4524 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4525 {
4526 struct net_device *netdev;
4527 struct ixgbevf_adapter *adapter = NULL;
4528 struct ixgbe_hw *hw = NULL;
4529 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
4530 bool disable_dev = false;
4531 int err;
4533 err = pci_enable_device(pdev);
4534 if (err)
4535 return err;
4537 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4538 if (err) {
4539 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4540 goto err_dma;
4541 }
4543 err = pci_request_regions(pdev, ixgbevf_driver_name);
4544 if (err) {
4545 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4546 goto err_pci_reg;
4547 }
4549 pci_set_master(pdev);
4551 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
4552 MAX_TX_QUEUES);
4553 if (!netdev) {
4554 err = -ENOMEM;
4555 goto err_alloc_etherdev;
4556 }
4558 SET_NETDEV_DEV(netdev, &pdev->dev);
4560 adapter = netdev_priv(netdev);
4562 adapter->netdev = netdev;
4563 adapter->pdev = pdev;
4564 hw = &adapter->hw;
4565 hw->back = adapter;
4566 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4568 /* call save state here in standalone driver because it relies on
4569 * adapter struct to exist, and needs to call netdev_priv
4570 */
4571 pci_save_state(pdev);
4573 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4574 pci_resource_len(pdev, 0));
4575 adapter->io_addr = hw->hw_addr;
4576 if (!hw->hw_addr) {
4577 err = -EIO;
4578 goto err_ioremap;
4579 }
4581 ixgbevf_assign_netdev_ops(netdev);
4583 /* Setup HW API */
4584 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
4585 hw->mac.type = ii->mac;
4587 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
4588 sizeof(struct ixgbe_mbx_operations));
4590 /* setup the private structure */
4591 err = ixgbevf_sw_init(adapter);
4592 if (err)
4593 goto err_sw_init;
4595 /* The HW MAC address was set and/or determined in sw_init */
4596 if (!is_valid_ether_addr(netdev->dev_addr)) {
4597 pr_err("invalid MAC address\n");
4598 err = -EIO;
4599 goto err_sw_init;
4600 }
4602 netdev->hw_features = NETIF_F_SG |
4603 NETIF_F_TSO |
4604 NETIF_F_TSO6 |
4605 NETIF_F_RXCSUM |
4606 NETIF_F_HW_CSUM |
4607 NETIF_F_SCTP_CRC;
4609 #define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
4610 NETIF_F_GSO_GRE_CSUM | \
4611 NETIF_F_GSO_IPXIP4 | \
4612 NETIF_F_GSO_IPXIP6 | \
4613 NETIF_F_GSO_UDP_TUNNEL | \
4614 NETIF_F_GSO_UDP_TUNNEL_CSUM)
4616 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
4617 netdev->hw_features |= NETIF_F_GSO_PARTIAL |
4618 IXGBEVF_GSO_PARTIAL_FEATURES;
4620 netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
4622 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
4623 netdev->mpls_features |= NETIF_F_SG |
4624 NETIF_F_TSO |
4625 NETIF_F_TSO6 |
4626 NETIF_F_HW_CSUM;
4627 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
4628 netdev->hw_enc_features |= netdev->vlan_features;
4630 /* set this bit last since it cannot be part of vlan_features */
4631 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4632 NETIF_F_HW_VLAN_CTAG_RX |
4633 NETIF_F_HW_VLAN_CTAG_TX;
4635 netdev->priv_flags |= IFF_UNICAST_FLT;
4636 netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
4638 /* MTU range: 68 - 1504 or 9710 */
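/* with the legacy mailbox API only X540 and newer VFs advertise jumbo frames; an 82599 VF stays at the standard 1500-byte MTU unless mailbox API 1.1 or later was negotiated */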
4639 netdev->min_mtu = ETH_MIN_MTU;
4640 switch (adapter->hw.api_version) {
4641 case ixgbe_mbox_api_11:
4642 case ixgbe_mbox_api_12:
4643 case ixgbe_mbox_api_13:
4644 case ixgbe_mbox_api_14:
4645 case ixgbe_mbox_api_15:
4646 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4647 (ETH_HLEN + ETH_FCS_LEN);
4648 break;
4649 default:
4650 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
4651 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4652 (ETH_HLEN + ETH_FCS_LEN);
4653 else
4654 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
4655 break;
4656 }
4658 if (IXGBE_REMOVED(hw->hw_addr)) {
4659 err = -EIO;
4660 goto err_sw_init;
4661 }
4663 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);
4665 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4666 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4667 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4669 err = ixgbevf_init_interrupt_scheme(adapter);
4670 if (err)
4671 goto err_sw_init;
4673 strcpy(netdev->name, "eth%d");
4675 err = register_netdev(netdev);
4676 if (err)
4677 goto err_register;
4679 pci_set_drvdata(pdev, netdev);
4680 netif_carrier_off(netdev);
4681 ixgbevf_init_ipsec_offload(adapter);
4683 ixgbevf_init_last_counter_stats(adapter);
4685 /* print the VF info */
4686 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4687 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4689 switch (hw->mac.type) {
4690 case ixgbe_mac_X550_vf:
4691 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4692 break;
4693 case ixgbe_mac_X540_vf:
4694 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4695 break;
4696 case ixgbe_mac_82599_vf:
4697 default:
4698 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4699 break;
4700 }
4702 return 0;
4704 err_register:
4705 ixgbevf_clear_interrupt_scheme(adapter);
4706 err_sw_init:
4707 ixgbevf_reset_interrupt_capability(adapter);
4708 iounmap(adapter->io_addr);
4709 kfree(adapter->rss_key);
4710 err_ioremap:
4711 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4712 free_netdev(netdev);
4713 err_alloc_etherdev:
4714 pci_release_regions(pdev);
4715 err_pci_reg:
4716 err_dma:
4717 if (!adapter || disable_dev)
4718 pci_disable_device(pdev);
4719 return err;
4720 }
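/* the err_* labels above unwind probe in reverse order of setup: interrupt scheme, I/O mapping and netdev allocation, then PCI regions and finally the device enable */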
4722 /**
4723 * ixgbevf_remove - Device Removal Routine
4724 * @pdev: PCI device information struct
4725 *
4726 * ixgbevf_remove is called by the PCI subsystem to alert the driver
4727 * that it should release a PCI device. This could be caused by a
4728 * Hot-Plug event, or because the driver is going to be removed from
4729 * memory.
4730 **/
4731 static void ixgbevf_remove(struct pci_dev *pdev)
4732 {
4733 struct net_device *netdev = pci_get_drvdata(pdev);
4734 struct ixgbevf_adapter *adapter;
4735 bool disable_dev;
4737 if (!netdev)
4738 return;
4740 adapter = netdev_priv(netdev);
4742 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4743 cancel_work_sync(&adapter->service_task);
4745 if (netdev->reg_state == NETREG_REGISTERED)
4746 unregister_netdev(netdev);
4748 ixgbevf_stop_ipsec_offload(adapter);
4749 ixgbevf_clear_interrupt_scheme(adapter);
4750 ixgbevf_reset_interrupt_capability(adapter);
4752 iounmap(adapter->io_addr);
4753 pci_release_regions(pdev);
4755 hw_dbg(&adapter->hw, "Remove complete\n");
4757 kfree(adapter->rss_key);
4758 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4759 free_netdev(netdev);
4761 if (disable_dev)
4762 pci_disable_device(pdev);
4763 }
4765 /**
4766 * ixgbevf_io_error_detected - called when PCI error is detected
4767 * @pdev: Pointer to PCI device
4768 * @state: The current pci connection state
4769 *
4770 * This function is called after a PCI bus error affecting
4771 * this device has been detected.
4772 */
4773 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4774 pci_channel_state_t state)
4775 {
4776 struct net_device *netdev = pci_get_drvdata(pdev);
4777 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4779 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4780 return PCI_ERS_RESULT_DISCONNECT;
4782 rtnl_lock();
4783 netif_device_detach(netdev);
4785 if (netif_running(netdev))
4786 ixgbevf_close_suspend(adapter);
4788 if (state == pci_channel_io_perm_failure) {
4789 rtnl_unlock();
4790 return PCI_ERS_RESULT_DISCONNECT;
4791 }
4793 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4794 pci_disable_device(pdev);
4795 rtnl_unlock();
4797 /* Request a slot reset. */
4798 return PCI_ERS_RESULT_NEED_RESET;
4799 }
4801 /**
4802 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4803 * @pdev: Pointer to PCI device
4804 *
4805 * Restart the card from scratch, as if from a cold boot. Implementation
4806 * resembles the first half of the ixgbevf_resume routine.
4807 */
4808 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4809 {
4810 struct net_device *netdev = pci_get_drvdata(pdev);
4811 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4813 if (pci_enable_device_mem(pdev)) {
4814 dev_err(&pdev->dev,
4815 "Cannot re-enable PCI device after reset.\n");
4816 return PCI_ERS_RESULT_DISCONNECT;
4817 }
4819 adapter->hw.hw_addr = adapter->io_addr;
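/* order the hw_addr restore before clearing the DISABLED bit, so no other path sees the bit clear while the mapping is still stale */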
4820 smp_mb__before_atomic();
4821 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4822 pci_set_master(pdev);
4824 ixgbevf_reset(adapter);
4826 return PCI_ERS_RESULT_RECOVERED;
4827 }
4829 /**
4830 * ixgbevf_io_resume - called when traffic can start flowing again.
4831 * @pdev: Pointer to PCI device
4832 *
4833 * This callback is called when the error recovery driver tells us that
4834 * it's OK to resume normal operation. Implementation resembles the
4835 * second half of the ixgbevf_resume routine.
4836 */
4837 static void ixgbevf_io_resume(struct pci_dev *pdev)
4838 {
4839 struct net_device *netdev = pci_get_drvdata(pdev);
4841 rtnl_lock();
4842 if (netif_running(netdev))
4843 ixgbevf_open(netdev);
4845 netif_device_attach(netdev);
4846 rtnl_unlock();
4847 }
4849 /* PCI Error Recovery (ERS) */
4850 static const struct pci_error_handlers ixgbevf_err_handler = {
4851 .error_detected = ixgbevf_io_error_detected,
4852 .slot_reset = ixgbevf_io_slot_reset,
4853 .resume = ixgbevf_io_resume,
4854 };
4856 static DEFINE_SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
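/* DEFINE_SIMPLE_DEV_PM_OPS() wires the suspend/resume callbacks into system sleep; pm_sleep_ptr() compiles the reference out when CONFIG_PM_SLEEP is not set */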
4858 static struct pci_driver ixgbevf_driver = {
4859 .name = ixgbevf_driver_name,
4860 .id_table = ixgbevf_pci_tbl,
4861 .probe = ixgbevf_probe,
4862 .remove = ixgbevf_remove,
4864 /* Power Management Hooks */
4865 .driver.pm = pm_sleep_ptr(&ixgbevf_pm_ops),
4867 .shutdown = ixgbevf_shutdown,
4868 .err_handler = &ixgbevf_err_handler
4869 };
4871 /**
4872 * ixgbevf_init_module - Driver Registration Routine
4873 *
4874 * ixgbevf_init_module is the first routine called when the driver is
4875 * loaded. All it does is register with the PCI subsystem.
4876 **/
4877 static int __init ixgbevf_init_module(void)
4878 {
4879 int err;
4881 pr_info("%s\n", ixgbevf_driver_string);
4882 pr_info("%s\n", ixgbevf_copyright);
4883 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
4884 if (!ixgbevf_wq) {
4885 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
4886 return -ENOMEM;
4887 }
4889 err = pci_register_driver(&ixgbevf_driver);
4890 if (err) {
4891 destroy_workqueue(ixgbevf_wq);
4892 return err;
4893 }
4895 return 0;
4896 }
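/* the driver's service task runs on this private single-threaded workqueue, so it never depends on the shared system workqueue */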
4898 module_init(ixgbevf_init_module);
4900 /**
4901 * ixgbevf_exit_module - Driver Exit Cleanup Routine
4902 *
4903 * ixgbevf_exit_module is called just before the driver is removed
4904 * from memory.
4905 **/
4906 static void __exit ixgbevf_exit_module(void)
4907 {
4908 pci_unregister_driver(&ixgbevf_driver);
4909 if (ixgbevf_wq) {
4910 destroy_workqueue(ixgbevf_wq);
4911 ixgbevf_wq = NULL;
4912 }
4913 }
4915 #ifdef DEBUG
4916 /**
4917 * ixgbevf_get_hw_dev_name - return device name string
4918 * used by hardware layer to print debugging information
4919 * @hw: pointer to private hardware struct
4920 **/
4921 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4922 {
4923 struct ixgbevf_adapter *adapter = hw->back;
4925 return adapter->netdev->name;
4926 }
4928 #endif
4929 module_exit(ixgbevf_exit_module);
4931 /* ixgbevf_main.c */