1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2017 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
27 #include <linux/etherdevice.h>
28 #include <linux/of_net.h>
29 #include <linux/pci.h>
30 #include <linux/bpf.h>
34 #include "i40e_diag.h"
35 #include <net/udp_tunnel.h>
36 /* All i40e tracepoints are defined by the include below, which
37 * must be included exactly once across the whole kernel with
38 * CREATE_TRACE_POINTS defined
40 #define CREATE_TRACE_POINTS
41 #include "i40e_trace.h"
43 const char i40e_driver_name[] = "i40e";
44 static const char i40e_driver_string[] =
45 "Intel(R) Ethernet Connection XL710 Network Driver";
49 #define DRV_VERSION_MAJOR 2
50 #define DRV_VERSION_MINOR 1
51 #define DRV_VERSION_BUILD 14
52 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
53 __stringify(DRV_VERSION_MINOR) "." \
54 __stringify(DRV_VERSION_BUILD) DRV_KERN
55 const char i40e_driver_version_str[] = DRV_VERSION;
56 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
58 /* a bit of forward declarations */
59 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
60 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
61 static int i40e_add_vsi(struct i40e_vsi *vsi);
62 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
63 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
64 static int i40e_setup_misc_vector(struct i40e_pf *pf);
65 static void i40e_determine_queue_usage(struct i40e_pf *pf);
66 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
67 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
68 static int i40e_reset(struct i40e_pf *pf);
69 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
70 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
71 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
72 static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
73 struct i40e_cloud_filter *filter,
75 static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
76 struct i40e_cloud_filter *filter,
78 static int i40e_get_capabilities(struct i40e_pf *pf,
79 enum i40e_admin_queue_opc list_type);
82 /* i40e_pci_tbl - PCI Device ID Table
84 * Last entry must be all 0s
86 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
87 * Class, Class Mask, private data (not used) }
89 static const struct pci_device_id i40e_pci_tbl[] = {
90 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
91 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
92 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
93 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
94 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
95 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
96 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
97 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
98 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
99 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
100 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
101 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
102 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
103 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
104 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
105 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
106 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
107 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
108 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
109 /* required last entry */
112 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
114 #define I40E_MAX_VF_COUNT 128
115 static int debug = -1;
116 module_param(debug, uint, 0);
117 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
120 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
121 MODULE_LICENSE("GPL");
122 MODULE_VERSION(DRV_VERSION);
124 static struct workqueue_struct *i40e_wq;
127 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
128 * @hw: pointer to the HW structure
129 * @mem: ptr to mem struct to fill out
130 * @size: size of memory requested
131 * @alignment: what to align the allocation to
133 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
134 u64 size, u32 alignment)
136 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
138 mem->size = ALIGN(size, alignment);
139 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
140 &mem->pa, GFP_KERNEL);
148 * i40e_free_dma_mem_d - OS specific memory free for shared code
149 * @hw: pointer to the HW structure
150 * @mem: ptr to mem struct to free
152 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
154 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
156 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
165 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
166 * @hw: pointer to the HW structure
167 * @mem: ptr to mem struct to fill out
168 * @size: size of memory requested
170 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
174 mem->va = kzalloc(size, GFP_KERNEL);
183 * i40e_free_virt_mem_d - OS specific memory free for shared code
184 * @hw: pointer to the HW structure
185 * @mem: ptr to mem struct to free
187 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
189 /* it's ok to kfree a NULL pointer */
198 * i40e_get_lump - find a lump of free generic resource
199 * @pf: board private structure
200 * @pile: the pile of resource to search
201 * @needed: the number of items needed
202 * @id: an owner id to stick on the items assigned
204 * Returns the base item index of the lump, or negative for error
206 * The search_hint trick and lack of advanced fit-finding only work
207 * because we're highly likely to have all the same size lump requests.
208 * Linear search time and any fragmentation should be minimal.
210 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
216 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
217 dev_info(&pf->pdev->dev,
218 "param err: pile=%p needed=%d id=0x%04x\n",
223 /* start the linear search with an imperfect hint */
224 i = pile->search_hint;
225 while (i < pile->num_entries) {
226 /* skip already allocated entries */
227 if (pile->list[i] & I40E_PILE_VALID_BIT) {
232 /* do we have enough in this lump? */
233 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
234 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
239 /* there was enough, so assign it to the requestor */
240 for (j = 0; j < needed; j++)
241 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
243 pile->search_hint = i + j;
247 /* not enough, so skip over it and continue looking */
255 * i40e_put_lump - return a lump of generic resource
256 * @pile: the pile of resource to search
257 * @index: the base item index
258 * @id: the owner id of the items assigned
260 * Returns the count of items in the lump
262 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
264 int valid_id = (id | I40E_PILE_VALID_BIT);
268 if (!pile || index >= pile->num_entries)
272 i < pile->num_entries && pile->list[i] == valid_id;
278 if (count && index < pile->search_hint)
279 pile->search_hint = index;
285 * i40e_find_vsi_from_id - searches for the vsi with the given id
286 * @pf - the pf structure to search for the vsi
287 * @id - id of the vsi it is searching for
289 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
293 for (i = 0; i < pf->num_alloc_vsi; i++)
294 if (pf->vsi[i] && (pf->vsi[i]->id == id))
301 * i40e_service_event_schedule - Schedule the service task to wake up
302 * @pf: board private structure
304 * If not already scheduled, this puts the task into the work queue
306 void i40e_service_event_schedule(struct i40e_pf *pf)
308 if (!test_bit(__I40E_DOWN, pf->state) &&
309 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
310 queue_work(i40e_wq, &pf->service_task);
314 * i40e_tx_timeout - Respond to a Tx Hang
315 * @netdev: network interface device structure
317 * If any port has noticed a Tx timeout, it is likely that the whole
318 * device is munged, not just the one netdev port, so go for the full
321 static void i40e_tx_timeout(struct net_device *netdev)
323 struct i40e_netdev_priv *np = netdev_priv(netdev);
324 struct i40e_vsi *vsi = np->vsi;
325 struct i40e_pf *pf = vsi->back;
326 struct i40e_ring *tx_ring = NULL;
327 unsigned int i, hung_queue = 0;
330 pf->tx_timeout_count++;
332 /* find the stopped queue the same way the stack does */
333 for (i = 0; i < netdev->num_tx_queues; i++) {
334 struct netdev_queue *q;
335 unsigned long trans_start;
337 q = netdev_get_tx_queue(netdev, i);
338 trans_start = q->trans_start;
339 if (netif_xmit_stopped(q) &&
341 (trans_start + netdev->watchdog_timeo))) {
347 if (i == netdev->num_tx_queues) {
348 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
350 /* now that we have an index, find the tx_ring struct */
351 for (i = 0; i < vsi->num_queue_pairs; i++) {
352 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
354 vsi->tx_rings[i]->queue_index) {
355 tx_ring = vsi->tx_rings[i];
362 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
363 pf->tx_timeout_recovery_level = 1; /* reset after some time */
364 else if (time_before(jiffies,
365 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
366 return; /* don't do any new action before the next timeout */
369 head = i40e_get_head(tx_ring);
370 /* Read interrupt register */
371 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
373 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
374 tx_ring->vsi->base_vector - 1));
376 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
378 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
379 vsi->seid, hung_queue, tx_ring->next_to_clean,
380 head, tx_ring->next_to_use,
381 readl(tx_ring->tail), val);
384 pf->tx_timeout_last_recovery = jiffies;
385 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
386 pf->tx_timeout_recovery_level, hung_queue);
388 switch (pf->tx_timeout_recovery_level) {
390 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
393 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
396 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
399 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
403 i40e_service_event_schedule(pf);
404 pf->tx_timeout_recovery_level++;
408 * i40e_get_vsi_stats_struct - Get System Network Statistics
409 * @vsi: the VSI we care about
411 * Returns the address of the device statistics structure.
412 * The statistics are actually updated from the service task.
414 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
416 return &vsi->net_stats;
420 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
421 * @ring: Tx ring to get statistics from
422 * @stats: statistics entry to be updated
424 static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
425 struct rtnl_link_stats64 *stats)
431 start = u64_stats_fetch_begin_irq(&ring->syncp);
432 packets = ring->stats.packets;
433 bytes = ring->stats.bytes;
434 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
436 stats->tx_packets += packets;
437 stats->tx_bytes += bytes;
441 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
442 * @netdev: network interface device structure
444 * Returns the address of the device statistics structure.
445 * The statistics are actually updated from the service task.
447 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
448 struct rtnl_link_stats64 *stats)
450 struct i40e_netdev_priv *np = netdev_priv(netdev);
451 struct i40e_ring *tx_ring, *rx_ring;
452 struct i40e_vsi *vsi = np->vsi;
453 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
456 if (test_bit(__I40E_VSI_DOWN, vsi->state))
463 for (i = 0; i < vsi->num_queue_pairs; i++) {
467 tx_ring = READ_ONCE(vsi->tx_rings[i]);
470 i40e_get_netdev_stats_struct_tx(tx_ring, stats);
472 rx_ring = &tx_ring[1];
475 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
476 packets = rx_ring->stats.packets;
477 bytes = rx_ring->stats.bytes;
478 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
480 stats->rx_packets += packets;
481 stats->rx_bytes += bytes;
483 if (i40e_enabled_xdp_vsi(vsi))
484 i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
488 /* following stats updated by i40e_watchdog_subtask() */
489 stats->multicast = vsi_stats->multicast;
490 stats->tx_errors = vsi_stats->tx_errors;
491 stats->tx_dropped = vsi_stats->tx_dropped;
492 stats->rx_errors = vsi_stats->rx_errors;
493 stats->rx_dropped = vsi_stats->rx_dropped;
494 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
495 stats->rx_length_errors = vsi_stats->rx_length_errors;
499 * i40e_vsi_reset_stats - Resets all stats of the given vsi
500 * @vsi: the VSI to have its stats reset
502 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
504 struct rtnl_link_stats64 *ns;
510 ns = i40e_get_vsi_stats_struct(vsi);
511 memset(ns, 0, sizeof(*ns));
512 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
513 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
514 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
515 if (vsi->rx_rings && vsi->rx_rings[0]) {
516 for (i = 0; i < vsi->num_queue_pairs; i++) {
517 memset(&vsi->rx_rings[i]->stats, 0,
518 sizeof(vsi->rx_rings[i]->stats));
519 memset(&vsi->rx_rings[i]->rx_stats, 0,
520 sizeof(vsi->rx_rings[i]->rx_stats));
521 memset(&vsi->tx_rings[i]->stats, 0,
522 sizeof(vsi->tx_rings[i]->stats));
523 memset(&vsi->tx_rings[i]->tx_stats, 0,
524 sizeof(vsi->tx_rings[i]->tx_stats));
527 vsi->stat_offsets_loaded = false;
531 * i40e_pf_reset_stats - Reset all of the stats for the given PF
532 * @pf: the PF to be reset
534 void i40e_pf_reset_stats(struct i40e_pf *pf)
538 memset(&pf->stats, 0, sizeof(pf->stats));
539 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
540 pf->stat_offsets_loaded = false;
542 for (i = 0; i < I40E_MAX_VEB; i++) {
544 memset(&pf->veb[i]->stats, 0,
545 sizeof(pf->veb[i]->stats));
546 memset(&pf->veb[i]->stats_offsets, 0,
547 sizeof(pf->veb[i]->stats_offsets));
548 pf->veb[i]->stat_offsets_loaded = false;
551 pf->hw_csum_rx_error = 0;
555 * i40e_stat_update48 - read and update a 48 bit stat from the chip
556 * @hw: ptr to the hardware info
557 * @hireg: the high 32 bit reg to read
558 * @loreg: the low 32 bit reg to read
559 * @offset_loaded: has the initial offset been loaded yet
560 * @offset: ptr to current offset value
561 * @stat: ptr to the stat
563 * Since the device stats are not reset at PFReset, they likely will not
564 * be zeroed when the driver starts. We'll save the first values read
565 * and use them as offsets to be subtracted from the raw values in order
566 * to report stats that count from zero. In the process, we also manage
567 * the potential roll-over.
569 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
570 bool offset_loaded, u64 *offset, u64 *stat)
574 if (hw->device_id == I40E_DEV_ID_QEMU) {
575 new_data = rd32(hw, loreg);
576 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
578 new_data = rd64(hw, loreg);
582 if (likely(new_data >= *offset))
583 *stat = new_data - *offset;
585 *stat = (new_data + BIT_ULL(48)) - *offset;
586 *stat &= 0xFFFFFFFFFFFFULL;
590 * i40e_stat_update32 - read and update a 32 bit stat from the chip
591 * @hw: ptr to the hardware info
592 * @reg: the hw reg to read
593 * @offset_loaded: has the initial offset been loaded yet
594 * @offset: ptr to current offset value
595 * @stat: ptr to the stat
597 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
598 bool offset_loaded, u64 *offset, u64 *stat)
602 new_data = rd32(hw, reg);
605 if (likely(new_data >= *offset))
606 *stat = (u32)(new_data - *offset);
608 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
612 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
613 * @hw: ptr to the hardware info
614 * @reg: the hw reg to read and clear
615 * @stat: ptr to the stat
617 static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
619 u32 new_data = rd32(hw, reg);
621 wr32(hw, reg, 1); /* must write a nonzero value to clear register */
626 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
627 * @vsi: the VSI to be updated
629 void i40e_update_eth_stats(struct i40e_vsi *vsi)
631 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
632 struct i40e_pf *pf = vsi->back;
633 struct i40e_hw *hw = &pf->hw;
634 struct i40e_eth_stats *oes;
635 struct i40e_eth_stats *es; /* device's eth stats */
637 es = &vsi->eth_stats;
638 oes = &vsi->eth_stats_offsets;
640 /* Gather up the stats that the hw collects */
641 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
642 vsi->stat_offsets_loaded,
643 &oes->tx_errors, &es->tx_errors);
644 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
645 vsi->stat_offsets_loaded,
646 &oes->rx_discards, &es->rx_discards);
647 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
648 vsi->stat_offsets_loaded,
649 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
650 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
651 vsi->stat_offsets_loaded,
652 &oes->tx_errors, &es->tx_errors);
654 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
655 I40E_GLV_GORCL(stat_idx),
656 vsi->stat_offsets_loaded,
657 &oes->rx_bytes, &es->rx_bytes);
658 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
659 I40E_GLV_UPRCL(stat_idx),
660 vsi->stat_offsets_loaded,
661 &oes->rx_unicast, &es->rx_unicast);
662 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
663 I40E_GLV_MPRCL(stat_idx),
664 vsi->stat_offsets_loaded,
665 &oes->rx_multicast, &es->rx_multicast);
666 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
667 I40E_GLV_BPRCL(stat_idx),
668 vsi->stat_offsets_loaded,
669 &oes->rx_broadcast, &es->rx_broadcast);
671 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
672 I40E_GLV_GOTCL(stat_idx),
673 vsi->stat_offsets_loaded,
674 &oes->tx_bytes, &es->tx_bytes);
675 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
676 I40E_GLV_UPTCL(stat_idx),
677 vsi->stat_offsets_loaded,
678 &oes->tx_unicast, &es->tx_unicast);
679 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
680 I40E_GLV_MPTCL(stat_idx),
681 vsi->stat_offsets_loaded,
682 &oes->tx_multicast, &es->tx_multicast);
683 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
684 I40E_GLV_BPTCL(stat_idx),
685 vsi->stat_offsets_loaded,
686 &oes->tx_broadcast, &es->tx_broadcast);
687 vsi->stat_offsets_loaded = true;
691 * i40e_update_veb_stats - Update Switch component statistics
692 * @veb: the VEB being updated
694 static void i40e_update_veb_stats(struct i40e_veb *veb)
696 struct i40e_pf *pf = veb->pf;
697 struct i40e_hw *hw = &pf->hw;
698 struct i40e_eth_stats *oes;
699 struct i40e_eth_stats *es; /* device's eth stats */
700 struct i40e_veb_tc_stats *veb_oes;
701 struct i40e_veb_tc_stats *veb_es;
704 idx = veb->stats_idx;
706 oes = &veb->stats_offsets;
707 veb_es = &veb->tc_stats;
708 veb_oes = &veb->tc_stats_offsets;
710 /* Gather up the stats that the hw collects */
711 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
712 veb->stat_offsets_loaded,
713 &oes->tx_discards, &es->tx_discards);
714 if (hw->revision_id > 0)
715 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
716 veb->stat_offsets_loaded,
717 &oes->rx_unknown_protocol,
718 &es->rx_unknown_protocol);
719 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
720 veb->stat_offsets_loaded,
721 &oes->rx_bytes, &es->rx_bytes);
722 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
723 veb->stat_offsets_loaded,
724 &oes->rx_unicast, &es->rx_unicast);
725 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
726 veb->stat_offsets_loaded,
727 &oes->rx_multicast, &es->rx_multicast);
728 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
729 veb->stat_offsets_loaded,
730 &oes->rx_broadcast, &es->rx_broadcast);
732 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
733 veb->stat_offsets_loaded,
734 &oes->tx_bytes, &es->tx_bytes);
735 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
736 veb->stat_offsets_loaded,
737 &oes->tx_unicast, &es->tx_unicast);
738 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
739 veb->stat_offsets_loaded,
740 &oes->tx_multicast, &es->tx_multicast);
741 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
742 veb->stat_offsets_loaded,
743 &oes->tx_broadcast, &es->tx_broadcast);
744 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
745 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
746 I40E_GLVEBTC_RPCL(i, idx),
747 veb->stat_offsets_loaded,
748 &veb_oes->tc_rx_packets[i],
749 &veb_es->tc_rx_packets[i]);
750 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
751 I40E_GLVEBTC_RBCL(i, idx),
752 veb->stat_offsets_loaded,
753 &veb_oes->tc_rx_bytes[i],
754 &veb_es->tc_rx_bytes[i]);
755 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
756 I40E_GLVEBTC_TPCL(i, idx),
757 veb->stat_offsets_loaded,
758 &veb_oes->tc_tx_packets[i],
759 &veb_es->tc_tx_packets[i]);
760 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
761 I40E_GLVEBTC_TBCL(i, idx),
762 veb->stat_offsets_loaded,
763 &veb_oes->tc_tx_bytes[i],
764 &veb_es->tc_tx_bytes[i]);
766 veb->stat_offsets_loaded = true;
770 * i40e_update_vsi_stats - Update the vsi statistics counters.
771 * @vsi: the VSI to be updated
773 * There are a few instances where we store the same stat in a
774 * couple of different structs. This is partly because we have
775 * the netdev stats that need to be filled out, which is slightly
776 * different from the "eth_stats" defined by the chip and used in
777 * VF communications. We sort it out here.
779 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
781 struct i40e_pf *pf = vsi->back;
782 struct rtnl_link_stats64 *ons;
783 struct rtnl_link_stats64 *ns; /* netdev stats */
784 struct i40e_eth_stats *oes;
785 struct i40e_eth_stats *es; /* device's eth stats */
786 u32 tx_restart, tx_busy;
797 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
798 test_bit(__I40E_CONFIG_BUSY, pf->state))
801 ns = i40e_get_vsi_stats_struct(vsi);
802 ons = &vsi->net_stats_offsets;
803 es = &vsi->eth_stats;
804 oes = &vsi->eth_stats_offsets;
806 /* Gather up the netdev and vsi stats that the driver collects
807 * on the fly during packet processing
811 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
815 for (q = 0; q < vsi->num_queue_pairs; q++) {
817 p = READ_ONCE(vsi->tx_rings[q]);
820 start = u64_stats_fetch_begin_irq(&p->syncp);
821 packets = p->stats.packets;
822 bytes = p->stats.bytes;
823 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
826 tx_restart += p->tx_stats.restart_queue;
827 tx_busy += p->tx_stats.tx_busy;
828 tx_linearize += p->tx_stats.tx_linearize;
829 tx_force_wb += p->tx_stats.tx_force_wb;
831 /* Rx queue is part of the same block as Tx queue */
834 start = u64_stats_fetch_begin_irq(&p->syncp);
835 packets = p->stats.packets;
836 bytes = p->stats.bytes;
837 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
840 rx_buf += p->rx_stats.alloc_buff_failed;
841 rx_page += p->rx_stats.alloc_page_failed;
844 vsi->tx_restart = tx_restart;
845 vsi->tx_busy = tx_busy;
846 vsi->tx_linearize = tx_linearize;
847 vsi->tx_force_wb = tx_force_wb;
848 vsi->rx_page_failed = rx_page;
849 vsi->rx_buf_failed = rx_buf;
851 ns->rx_packets = rx_p;
853 ns->tx_packets = tx_p;
856 /* update netdev stats from eth stats */
857 i40e_update_eth_stats(vsi);
858 ons->tx_errors = oes->tx_errors;
859 ns->tx_errors = es->tx_errors;
860 ons->multicast = oes->rx_multicast;
861 ns->multicast = es->rx_multicast;
862 ons->rx_dropped = oes->rx_discards;
863 ns->rx_dropped = es->rx_discards;
864 ons->tx_dropped = oes->tx_discards;
865 ns->tx_dropped = es->tx_discards;
867 /* pull in a couple PF stats if this is the main vsi */
868 if (vsi == pf->vsi[pf->lan_vsi]) {
869 ns->rx_crc_errors = pf->stats.crc_errors;
870 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
871 ns->rx_length_errors = pf->stats.rx_length_errors;
876 * i40e_update_pf_stats - Update the PF statistics counters.
877 * @pf: the PF to be updated
879 static void i40e_update_pf_stats(struct i40e_pf *pf)
881 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
882 struct i40e_hw_port_stats *nsd = &pf->stats;
883 struct i40e_hw *hw = &pf->hw;
887 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
888 I40E_GLPRT_GORCL(hw->port),
889 pf->stat_offsets_loaded,
890 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
891 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
892 I40E_GLPRT_GOTCL(hw->port),
893 pf->stat_offsets_loaded,
894 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
895 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
896 pf->stat_offsets_loaded,
897 &osd->eth.rx_discards,
898 &nsd->eth.rx_discards);
899 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
900 I40E_GLPRT_UPRCL(hw->port),
901 pf->stat_offsets_loaded,
902 &osd->eth.rx_unicast,
903 &nsd->eth.rx_unicast);
904 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
905 I40E_GLPRT_MPRCL(hw->port),
906 pf->stat_offsets_loaded,
907 &osd->eth.rx_multicast,
908 &nsd->eth.rx_multicast);
909 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
910 I40E_GLPRT_BPRCL(hw->port),
911 pf->stat_offsets_loaded,
912 &osd->eth.rx_broadcast,
913 &nsd->eth.rx_broadcast);
914 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
915 I40E_GLPRT_UPTCL(hw->port),
916 pf->stat_offsets_loaded,
917 &osd->eth.tx_unicast,
918 &nsd->eth.tx_unicast);
919 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
920 I40E_GLPRT_MPTCL(hw->port),
921 pf->stat_offsets_loaded,
922 &osd->eth.tx_multicast,
923 &nsd->eth.tx_multicast);
924 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
925 I40E_GLPRT_BPTCL(hw->port),
926 pf->stat_offsets_loaded,
927 &osd->eth.tx_broadcast,
928 &nsd->eth.tx_broadcast);
930 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
931 pf->stat_offsets_loaded,
932 &osd->tx_dropped_link_down,
933 &nsd->tx_dropped_link_down);
935 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
936 pf->stat_offsets_loaded,
937 &osd->crc_errors, &nsd->crc_errors);
939 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
940 pf->stat_offsets_loaded,
941 &osd->illegal_bytes, &nsd->illegal_bytes);
943 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
944 pf->stat_offsets_loaded,
945 &osd->mac_local_faults,
946 &nsd->mac_local_faults);
947 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
948 pf->stat_offsets_loaded,
949 &osd->mac_remote_faults,
950 &nsd->mac_remote_faults);
952 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
953 pf->stat_offsets_loaded,
954 &osd->rx_length_errors,
955 &nsd->rx_length_errors);
957 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
958 pf->stat_offsets_loaded,
959 &osd->link_xon_rx, &nsd->link_xon_rx);
960 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
961 pf->stat_offsets_loaded,
962 &osd->link_xon_tx, &nsd->link_xon_tx);
963 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
964 pf->stat_offsets_loaded,
965 &osd->link_xoff_rx, &nsd->link_xoff_rx);
966 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
967 pf->stat_offsets_loaded,
968 &osd->link_xoff_tx, &nsd->link_xoff_tx);
970 for (i = 0; i < 8; i++) {
971 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
972 pf->stat_offsets_loaded,
973 &osd->priority_xoff_rx[i],
974 &nsd->priority_xoff_rx[i]);
975 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
976 pf->stat_offsets_loaded,
977 &osd->priority_xon_rx[i],
978 &nsd->priority_xon_rx[i]);
979 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
980 pf->stat_offsets_loaded,
981 &osd->priority_xon_tx[i],
982 &nsd->priority_xon_tx[i]);
983 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
984 pf->stat_offsets_loaded,
985 &osd->priority_xoff_tx[i],
986 &nsd->priority_xoff_tx[i]);
987 i40e_stat_update32(hw,
988 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
989 pf->stat_offsets_loaded,
990 &osd->priority_xon_2_xoff[i],
991 &nsd->priority_xon_2_xoff[i]);
994 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
995 I40E_GLPRT_PRC64L(hw->port),
996 pf->stat_offsets_loaded,
997 &osd->rx_size_64, &nsd->rx_size_64);
998 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
999 I40E_GLPRT_PRC127L(hw->port),
1000 pf->stat_offsets_loaded,
1001 &osd->rx_size_127, &nsd->rx_size_127);
1002 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1003 I40E_GLPRT_PRC255L(hw->port),
1004 pf->stat_offsets_loaded,
1005 &osd->rx_size_255, &nsd->rx_size_255);
1006 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1007 I40E_GLPRT_PRC511L(hw->port),
1008 pf->stat_offsets_loaded,
1009 &osd->rx_size_511, &nsd->rx_size_511);
1010 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1011 I40E_GLPRT_PRC1023L(hw->port),
1012 pf->stat_offsets_loaded,
1013 &osd->rx_size_1023, &nsd->rx_size_1023);
1014 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1015 I40E_GLPRT_PRC1522L(hw->port),
1016 pf->stat_offsets_loaded,
1017 &osd->rx_size_1522, &nsd->rx_size_1522);
1018 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1019 I40E_GLPRT_PRC9522L(hw->port),
1020 pf->stat_offsets_loaded,
1021 &osd->rx_size_big, &nsd->rx_size_big);
1023 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1024 I40E_GLPRT_PTC64L(hw->port),
1025 pf->stat_offsets_loaded,
1026 &osd->tx_size_64, &nsd->tx_size_64);
1027 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1028 I40E_GLPRT_PTC127L(hw->port),
1029 pf->stat_offsets_loaded,
1030 &osd->tx_size_127, &nsd->tx_size_127);
1031 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1032 I40E_GLPRT_PTC255L(hw->port),
1033 pf->stat_offsets_loaded,
1034 &osd->tx_size_255, &nsd->tx_size_255);
1035 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1036 I40E_GLPRT_PTC511L(hw->port),
1037 pf->stat_offsets_loaded,
1038 &osd->tx_size_511, &nsd->tx_size_511);
1039 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1040 I40E_GLPRT_PTC1023L(hw->port),
1041 pf->stat_offsets_loaded,
1042 &osd->tx_size_1023, &nsd->tx_size_1023);
1043 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1044 I40E_GLPRT_PTC1522L(hw->port),
1045 pf->stat_offsets_loaded,
1046 &osd->tx_size_1522, &nsd->tx_size_1522);
1047 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1048 I40E_GLPRT_PTC9522L(hw->port),
1049 pf->stat_offsets_loaded,
1050 &osd->tx_size_big, &nsd->tx_size_big);
1052 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1053 pf->stat_offsets_loaded,
1054 &osd->rx_undersize, &nsd->rx_undersize);
1055 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1056 pf->stat_offsets_loaded,
1057 &osd->rx_fragments, &nsd->rx_fragments);
1058 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1059 pf->stat_offsets_loaded,
1060 &osd->rx_oversize, &nsd->rx_oversize);
1061 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1062 pf->stat_offsets_loaded,
1063 &osd->rx_jabber, &nsd->rx_jabber);
1066 i40e_stat_update_and_clear32(hw,
1067 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1068 &nsd->fd_atr_match);
1069 i40e_stat_update_and_clear32(hw,
1070 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1072 i40e_stat_update_and_clear32(hw,
1073 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1074 &nsd->fd_atr_tunnel_match);
1076 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1077 nsd->tx_lpi_status =
1078 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1079 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1080 nsd->rx_lpi_status =
1081 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1082 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1083 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1084 pf->stat_offsets_loaded,
1085 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1086 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1087 pf->stat_offsets_loaded,
1088 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1090 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1091 !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
1092 nsd->fd_sb_status = true;
1094 nsd->fd_sb_status = false;
1096 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1097 !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
1098 nsd->fd_atr_status = true;
1100 nsd->fd_atr_status = false;
1102 pf->stat_offsets_loaded = true;
1106 * i40e_update_stats - Update the various statistics counters.
1107 * @vsi: the VSI to be updated
1109 * Update the various stats for this VSI and its related entities.
1111 void i40e_update_stats(struct i40e_vsi *vsi)
1113 struct i40e_pf *pf = vsi->back;
1115 if (vsi == pf->vsi[pf->lan_vsi])
1116 i40e_update_pf_stats(pf);
1118 i40e_update_vsi_stats(vsi);
1122 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1123 * @vsi: the VSI to be searched
1124 * @macaddr: the MAC address
1127 * Returns ptr to the filter object or NULL
1129 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1130 const u8 *macaddr, s16 vlan)
1132 struct i40e_mac_filter *f;
1135 if (!vsi || !macaddr)
1138 key = i40e_addr_to_hkey(macaddr);
1139 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1140 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1148 * i40e_find_mac - Find a mac addr in the macvlan filters list
1149 * @vsi: the VSI to be searched
1150 * @macaddr: the MAC address we are searching for
1152 * Returns the first filter with the provided MAC address or NULL if
1153 * MAC address was not found
1155 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1157 struct i40e_mac_filter *f;
1160 if (!vsi || !macaddr)
1163 key = i40e_addr_to_hkey(macaddr);
1164 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1165 if ((ether_addr_equal(macaddr, f->macaddr)))
1172 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1173 * @vsi: the VSI to be searched
1175 * Returns true if VSI is in vlan mode or false otherwise
1177 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1179 /* If we have a PVID, always operate in VLAN mode */
1183 /* We need to operate in VLAN mode whenever we have any filters with
1184 * a VLAN other than I40E_VLAN_ALL. We could check the table each
1185 * time, incurring search cost repeatedly. However, we can notice two
1188 * 1) the only place where we can gain a VLAN filter is in
1191 * 2) the only place where filters are actually removed is in
1192 * i40e_sync_filters_subtask.
1194 * Thus, we can simply use a boolean value, has_vlan_filters which we
1195 * will set to true when we add a VLAN filter in i40e_add_filter. Then
1196 * we have to perform the full search after deleting filters in
1197 * i40e_sync_filters_subtask, but we already have to search
1198 * filters here and can perform the check at the same time. This
1199 * results in avoiding embedding a loop for VLAN mode inside another
1200 * loop over all the filters, and should maintain correctness as noted
1203 return vsi->has_vlan_filter;
1207 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1208 * @vsi: the VSI to configure
1209 * @tmp_add_list: list of filters ready to be added
1210 * @tmp_del_list: list of filters ready to be deleted
1211 * @vlan_filters: the number of active VLAN filters
1213 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1214 * behave as expected. If we have any active VLAN filters remaining or about
1215 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1216 * so that they only match against untagged traffic. If we no longer have any
1217 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1218 * so that they match against both tagged and untagged traffic. In this way,
1219 * we ensure that we correctly receive the desired traffic. This ensures that
1220 * when we have an active VLAN we will receive only untagged traffic and
1221 * traffic matching active VLANs. If we have no active VLANs then we will
1222 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1224 * Finally, in a similar fashion, this function also corrects filters when
1225 * there is an active PVID assigned to this VSI.
1227 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1229 * This function is only expected to be called from within
1230 * i40e_sync_vsi_filters.
1232 * NOTE: This function expects to be called while under the
1233 * mac_filter_hash_lock
1235 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1236 struct hlist_head *tmp_add_list,
1237 struct hlist_head *tmp_del_list,
1240 s16 pvid = le16_to_cpu(vsi->info.pvid);
1241 struct i40e_mac_filter *f, *add_head;
1242 struct i40e_new_mac_filter *new;
1243 struct hlist_node *h;
1246 /* To determine if a particular filter needs to be replaced we
1247 * have the three following conditions:
1249 * a) if we have a PVID assigned, then all filters which are
1250 * not marked as VLAN=PVID must be replaced with filters that
1252 * b) otherwise, if we have any active VLANS, all filters
1253 * which are marked as VLAN=-1 must be replaced with
1254 * filters marked as VLAN=0
1255 * c) finally, if we do not have any active VLANS, all filters
1256 * which are marked as VLAN=0 must be replaced with filters
1260 /* Update the filters about to be added in place */
1261 hlist_for_each_entry(new, tmp_add_list, hlist) {
1262 if (pvid && new->f->vlan != pvid)
1263 new->f->vlan = pvid;
1264 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1266 else if (!vlan_filters && new->f->vlan == 0)
1267 new->f->vlan = I40E_VLAN_ANY;
1270 /* Update the remaining active filters */
1271 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1272 /* Combine the checks for whether a filter needs to be changed
1273 * and then determine the new VLAN inside the if block, in
1274 * order to avoid duplicating code for adding the new filter
1275 * then deleting the old filter.
1277 if ((pvid && f->vlan != pvid) ||
1278 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1279 (!vlan_filters && f->vlan == 0)) {
1280 /* Determine the new vlan we will be adding */
1283 else if (vlan_filters)
1286 new_vlan = I40E_VLAN_ANY;
1288 /* Create the new filter */
1289 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1293 /* Create a temporary i40e_new_mac_filter */
1294 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1299 new->state = add_head->state;
1301 /* Add the new filter to the tmp list */
1302 hlist_add_head(&new->hlist, tmp_add_list);
1304 /* Put the original filter into the delete list */
1305 f->state = I40E_FILTER_REMOVE;
1306 hash_del(&f->hlist);
1307 hlist_add_head(&f->hlist, tmp_del_list);
1311 vsi->has_vlan_filter = !!vlan_filters;
1317 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1318 * @vsi: the PF Main VSI - inappropriate for any other VSI
1319 * @macaddr: the MAC address
1321 * Remove whatever filter the firmware set up so the driver can manage
1322 * its own filtering intelligently.
1324 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1326 struct i40e_aqc_remove_macvlan_element_data element;
1327 struct i40e_pf *pf = vsi->back;
1329 /* Only appropriate for the PF main VSI */
1330 if (vsi->type != I40E_VSI_MAIN)
1333 memset(&element, 0, sizeof(element));
1334 ether_addr_copy(element.mac_addr, macaddr);
1335 element.vlan_tag = 0;
1336 /* Ignore error returns, some firmware does it this way... */
1337 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1338 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1340 memset(&element, 0, sizeof(element));
1341 ether_addr_copy(element.mac_addr, macaddr);
1342 element.vlan_tag = 0;
1343 /* ...and some firmware does it this way. */
1344 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1345 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1346 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1350 * i40e_add_filter - Add a mac/vlan filter to the VSI
1351 * @vsi: the VSI to be searched
1352 * @macaddr: the MAC address
1355 * Returns ptr to the filter object or NULL when no memory available.
1357 * NOTE: This function is expected to be called with mac_filter_hash_lock
1360 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1361 const u8 *macaddr, s16 vlan)
1363 struct i40e_mac_filter *f;
1366 if (!vsi || !macaddr)
1369 f = i40e_find_filter(vsi, macaddr, vlan);
1371 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1375 /* Update the boolean indicating if we need to function in
1379 vsi->has_vlan_filter = true;
1381 ether_addr_copy(f->macaddr, macaddr);
1383 /* If we're in overflow promisc mode, set the state directly
1384 * to failed, so we don't bother to try sending the filter
1387 if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
1388 f->state = I40E_FILTER_FAILED;
1390 f->state = I40E_FILTER_NEW;
1391 INIT_HLIST_NODE(&f->hlist);
1393 key = i40e_addr_to_hkey(macaddr);
1394 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1396 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1397 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1400 /* If we're asked to add a filter that has been marked for removal, it
1401 * is safe to simply restore it to active state. __i40e_del_filter
1402 * will have simply deleted any filters which were previously marked
1403 * NEW or FAILED, so if it is currently marked REMOVE it must have
1404 * previously been ACTIVE. Since we haven't yet run the sync filters
1405 * task, just restore this filter to the ACTIVE state so that the
1406 * sync task leaves it in place
1408 if (f->state == I40E_FILTER_REMOVE)
1409 f->state = I40E_FILTER_ACTIVE;
1415 * __i40e_del_filter - Remove a specific filter from the VSI
1416 * @vsi: VSI to remove from
1417 * @f: the filter to remove from the list
1419 * This function should be called instead of i40e_del_filter only if you know
1420 * the exact filter you will remove already, such as via i40e_find_filter or
1423 * NOTE: This function is expected to be called with mac_filter_hash_lock
1425 * ANOTHER NOTE: This function MUST be called from within the context of
1426 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1427 * instead of list_for_each_entry().
1429 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1434 /* If the filter was never added to firmware then we can just delete it
1435 * directly and we don't want to set the status to remove or else an
1436 * admin queue command will unnecessarily fire.
1438 if ((f->state == I40E_FILTER_FAILED) ||
1439 (f->state == I40E_FILTER_NEW)) {
1440 hash_del(&f->hlist);
1443 f->state = I40E_FILTER_REMOVE;
1446 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1447 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1451 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1452 * @vsi: the VSI to be searched
1453 * @macaddr: the MAC address
1456 * NOTE: This function is expected to be called with mac_filter_hash_lock
1458 * ANOTHER NOTE: This function MUST be called from within the context of
1459 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1460 * instead of list_for_each_entry().
1462 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1464 struct i40e_mac_filter *f;
1466 if (!vsi || !macaddr)
1469 f = i40e_find_filter(vsi, macaddr, vlan);
1470 __i40e_del_filter(vsi, f);
1474 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1475 * @vsi: the VSI to be searched
1476 * @macaddr: the mac address to be filtered
1478 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
1479 * go through all the macvlan filters and add a macvlan filter for each
1480 * unique vlan that already exists. If a PVID has been assigned, instead only
1481 * add the macaddr to that VLAN.
1483 * Returns last filter added on success, else NULL
1485 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1488 struct i40e_mac_filter *f, *add = NULL;
1489 struct hlist_node *h;
1493 return i40e_add_filter(vsi, macaddr,
1494 le16_to_cpu(vsi->info.pvid));
1496 if (!i40e_is_vsi_in_vlan(vsi))
1497 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1499 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1500 if (f->state == I40E_FILTER_REMOVE)
1502 add = i40e_add_filter(vsi, macaddr, f->vlan);
1511 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1512 * @vsi: the VSI to be searched
1513 * @macaddr: the mac address to be removed
1515 * Removes a given MAC address from a VSI regardless of what VLAN it has been
1518 * Returns 0 for success, or error
1520 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1522 struct i40e_mac_filter *f;
1523 struct hlist_node *h;
1527 WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
1528 "Missing mac_filter_hash_lock\n");
1529 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1530 if (ether_addr_equal(macaddr, f->macaddr)) {
1531 __i40e_del_filter(vsi, f);
1543 * i40e_set_mac - NDO callback to set mac address
1544 * @netdev: network interface device structure
1545 * @p: pointer to an address structure
1547 * Returns 0 on success, negative on failure
1549 static int i40e_set_mac(struct net_device *netdev, void *p)
1551 struct i40e_netdev_priv *np = netdev_priv(netdev);
1552 struct i40e_vsi *vsi = np->vsi;
1553 struct i40e_pf *pf = vsi->back;
1554 struct i40e_hw *hw = &pf->hw;
1555 struct sockaddr *addr = p;
1557 if (!is_valid_ether_addr(addr->sa_data))
1558 return -EADDRNOTAVAIL;
1560 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1561 netdev_info(netdev, "already using mac address %pM\n",
1566 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
1567 test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
1568 return -EADDRNOTAVAIL;
1570 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1571 netdev_info(netdev, "returning to hw mac address %pM\n",
1574 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1576 spin_lock_bh(&vsi->mac_filter_hash_lock);
1577 i40e_del_mac_filter(vsi, netdev->dev_addr);
1578 i40e_add_mac_filter(vsi, addr->sa_data);
1579 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1580 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1581 if (vsi->type == I40E_VSI_MAIN) {
1584 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1585 I40E_AQC_WRITE_TYPE_LAA_WOL,
1586 addr->sa_data, NULL);
1588 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1589 i40e_stat_str(hw, ret),
1590 i40e_aq_str(hw, hw->aq.asq_last_status));
1593 /* schedule our worker thread which will take care of
1594 * applying the new filter changes
1596 i40e_service_event_schedule(vsi->back);
1601 * i40e_config_rss_aq - Prepare for RSS using AQ commands
1602 * @vsi: vsi structure
1603 * @seed: RSS hash seed
1605 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1606 u8 *lut, u16 lut_size)
1608 struct i40e_pf *pf = vsi->back;
1609 struct i40e_hw *hw = &pf->hw;
1613 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1614 (struct i40e_aqc_get_set_rss_key_data *)seed;
1615 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1617 dev_info(&pf->pdev->dev,
1618 "Cannot set RSS key, err %s aq_err %s\n",
1619 i40e_stat_str(hw, ret),
1620 i40e_aq_str(hw, hw->aq.asq_last_status));
1625 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
1627 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1629 dev_info(&pf->pdev->dev,
1630 "Cannot set RSS lut, err %s aq_err %s\n",
1631 i40e_stat_str(hw, ret),
1632 i40e_aq_str(hw, hw->aq.asq_last_status));
1640 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
1641 * @vsi: VSI structure
1643 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1645 struct i40e_pf *pf = vsi->back;
1646 u8 seed[I40E_HKEY_ARRAY_SIZE];
1650 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1653 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1654 vsi->num_queue_pairs);
1657 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1661 /* Use the user configured hash keys and lookup table if there is one,
1662 * otherwise use default
1664 if (vsi->rss_lut_user)
1665 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1667 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1668 if (vsi->rss_hkey_user)
1669 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1671 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1672 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1678 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1679 * @vsi: the VSI being configured,
1680 * @ctxt: VSI context structure
1681 * @enabled_tc: number of traffic classes to enable
1683 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1685 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1686 struct i40e_vsi_context *ctxt,
1689 u16 qcount = 0, max_qcount, qmap, sections = 0;
1690 int i, override_q, pow, num_qps, ret;
1691 u8 netdev_tc = 0, offset = 0;
1693 if (vsi->type != I40E_VSI_MAIN)
1695 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1696 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1697 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1698 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1699 num_qps = vsi->mqprio_qopt.qopt.count[0];
1701 /* find the next higher power-of-2 of num queue pairs */
1702 pow = ilog2(num_qps);
1703 if (!is_power_of_2(num_qps))
1705 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1706 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1708 /* Setup queue offset/count for all TCs for given VSI */
1709 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1710 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1711 /* See if the given TC is enabled for the given VSI */
1712 if (vsi->tc_config.enabled_tc & BIT(i)) {
1713 offset = vsi->mqprio_qopt.qopt.offset[i];
1714 qcount = vsi->mqprio_qopt.qopt.count[i];
1715 if (qcount > max_qcount)
1716 max_qcount = qcount;
1717 vsi->tc_config.tc_info[i].qoffset = offset;
1718 vsi->tc_config.tc_info[i].qcount = qcount;
1719 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1721 /* TC is not enabled so set the offset to
1722 * default queue and allocate one queue
1725 vsi->tc_config.tc_info[i].qoffset = 0;
1726 vsi->tc_config.tc_info[i].qcount = 1;
1727 vsi->tc_config.tc_info[i].netdev_tc = 0;
1731 /* Set actual Tx/Rx queue pairs */
1732 vsi->num_queue_pairs = offset + qcount;
1734 /* Setup queue TC[0].qmap for given VSI context */
1735 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1736 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1737 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1738 ctxt->info.valid_sections |= cpu_to_le16(sections);
1740 /* Reconfigure RSS for main VSI with max queue count */
1741 vsi->rss_size = max_qcount;
1742 ret = i40e_vsi_config_rss(vsi);
1744 dev_info(&vsi->back->pdev->dev,
1745 "Failed to reconfig rss for num_queues (%u)\n",
1749 vsi->reconfig_rss = true;
1750 dev_dbg(&vsi->back->pdev->dev,
1751 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1753 /* Find queue count available for channel VSIs and starting offset
1756 override_q = vsi->mqprio_qopt.qopt.count[0];
1757 if (override_q && override_q < vsi->num_queue_pairs) {
1758 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1759 vsi->next_base_queue = override_q;
1765 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1766 * @vsi: the VSI being setup
1767 * @ctxt: VSI context structure
1768 * @enabled_tc: Enabled TCs bitmap
1769 * @is_add: True if called before Add VSI
1771 * Setup VSI queue mapping for enabled traffic classes.
1773 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1774 struct i40e_vsi_context *ctxt,
1778 struct i40e_pf *pf = vsi->back;
1788 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1791 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1792 /* Find numtc from enabled TC bitmap */
1793 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1794 if (enabled_tc & BIT(i)) /* TC is enabled */
1798 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1802 /* At least TC0 is enabled in non-DCB, non-MQPRIO case */
1806 vsi->tc_config.numtc = numtc;
1807 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1808 /* Number of queues per enabled TC */
1809 qcount = vsi->alloc_queue_pairs;
1811 num_tc_qps = qcount / numtc;
1812 num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
1814 /* Setup queue offset/count for all TCs for given VSI */
1815 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1816 /* See if the given TC is enabled for the given VSI */
1817 if (vsi->tc_config.enabled_tc & BIT(i)) {
1821 switch (vsi->type) {
1823 qcount = min_t(int, pf->alloc_rss_size,
1827 case I40E_VSI_SRIOV:
1828 case I40E_VSI_VMDQ2:
1830 qcount = num_tc_qps;
1834 vsi->tc_config.tc_info[i].qoffset = offset;
1835 vsi->tc_config.tc_info[i].qcount = qcount;
1837 /* find the next higher power-of-2 of num queue pairs */
1840 while (num_qps && (BIT_ULL(pow) < qcount)) {
1845 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1847 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1848 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1852 /* TC is not enabled so set the offset to
1853 * default queue and allocate one queue
1856 vsi->tc_config.tc_info[i].qoffset = 0;
1857 vsi->tc_config.tc_info[i].qcount = 1;
1858 vsi->tc_config.tc_info[i].netdev_tc = 0;
1862 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1865 /* Set actual Tx/Rx queue pairs */
1866 vsi->num_queue_pairs = offset;
1867 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1868 if (vsi->req_queue_pairs > 0)
1869 vsi->num_queue_pairs = vsi->req_queue_pairs;
1870 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1871 vsi->num_queue_pairs = pf->num_lan_msix;
1874 /* Scheduler section valid can only be set for ADD VSI */
1876 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1878 ctxt->info.up_enable_bits = enabled_tc;
1880 if (vsi->type == I40E_VSI_SRIOV) {
1881 ctxt->info.mapping_flags |=
1882 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1883 for (i = 0; i < vsi->num_queue_pairs; i++)
1884 ctxt->info.queue_mapping[i] =
1885 cpu_to_le16(vsi->base_queue + i);
1887 ctxt->info.mapping_flags |=
1888 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1889 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1891 ctxt->info.valid_sections |= cpu_to_le16(sections);
1895 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1896 * @netdev: the netdevice
1897 * @addr: address to add
1899 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1900 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1902 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1904 struct i40e_netdev_priv *np = netdev_priv(netdev);
1905 struct i40e_vsi *vsi = np->vsi;
1907 if (i40e_add_mac_filter(vsi, addr))
1914 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1915 * @netdev: the netdevice
1916 * @addr: address to add
1918 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1919 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1921 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1923 struct i40e_netdev_priv *np = netdev_priv(netdev);
1924 struct i40e_vsi *vsi = np->vsi;
1926 i40e_del_mac_filter(vsi, addr);
1932 * i40e_set_rx_mode - NDO callback to set the netdev filters
1933 * @netdev: network interface device structure
1935 static void i40e_set_rx_mode(struct net_device *netdev)
1937 struct i40e_netdev_priv *np = netdev_priv(netdev);
1938 struct i40e_vsi *vsi = np->vsi;
1940 spin_lock_bh(&vsi->mac_filter_hash_lock);
1942 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1943 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1945 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1947 /* check for other flag changes */
1948 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1949 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1950 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1955 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1956 * @vsi: Pointer to VSI struct
1957 * @from: Pointer to list which contains MAC filter entries - changes to
1958 * those entries needs to be undone.
1960 * MAC filter entries from this list were slated for deletion.
1962 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1963 struct hlist_head *from)
1965 struct i40e_mac_filter *f;
1966 struct hlist_node *h;
1968 hlist_for_each_entry_safe(f, h, from, hlist) {
1969 u64 key = i40e_addr_to_hkey(f->macaddr);
1971 /* Move the element back into MAC filter list*/
1972 hlist_del(&f->hlist);
1973 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1978 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1979 * @vsi: Pointer to vsi struct
1980 * @from: Pointer to list which contains MAC filter entries - changes to
1981 * those entries needs to be undone.
1983 * MAC filter entries from this list were slated for addition.
1985 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1986 struct hlist_head *from)
1988 struct i40e_new_mac_filter *new;
1989 struct hlist_node *h;
1991 hlist_for_each_entry_safe(new, h, from, hlist) {
1992 /* We can simply free the wrapper structure */
1993 hlist_del(&new->hlist);
1999 * i40e_next_entry - Get the next non-broadcast filter from a list
2000 * @next: pointer to filter in list
2002 * Returns the next non-broadcast filter in the list. Required so that we
2003 * ignore broadcast filters within the list, since these are not handled via
2004 * the normal firmware update path.
2007 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2009 hlist_for_each_entry_continue(next, hlist) {
2010 if (!is_broadcast_ether_addr(next->f->macaddr))
2018 * i40e_update_filter_state - Update filter state based on return data
2020 * @count: Number of filters added
2021 * @add_list: return data from fw
2022 * @head: pointer to first filter in current batch
2024 * MAC filter entries from list were slated to be added to device. Returns
2025 * number of successful filters. Note that 0 does NOT mean success!
2028 i40e_update_filter_state(int count,
2029 struct i40e_aqc_add_macvlan_element_data *add_list,
2030 struct i40e_new_mac_filter *add_head)
2035 for (i = 0; i < count; i++) {
2036 /* Always check status of each filter. We don't need to check
2037 * the firmware return status because we pre-set the filter
2038 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2039 * request to the adminq. Thus, if it no longer matches then
2040 * we know the filter is active.
2042 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2043 add_head->state = I40E_FILTER_FAILED;
2045 add_head->state = I40E_FILTER_ACTIVE;
2049 add_head = i40e_next_filter(add_head);
2058 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2059 * @vsi: ptr to the VSI
2060 * @vsi_name: name to display in messages
2061 * @list: the list of filters to send to firmware
2062 * @num_del: the number of filters to delete
2063 * @retval: Set to -EIO on failure to delete
2065 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2066 * *retval instead of a return value so that success does not force ret_val to
2067 * be set to 0. This ensures that a sequence of calls to this function
2068 * preserve the previous value of *retval on successful delete.
2071 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2072 struct i40e_aqc_remove_macvlan_element_data *list,
2073 int num_del, int *retval)
2075 struct i40e_hw *hw = &vsi->back->hw;
2079 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2080 aq_err = hw->aq.asq_last_status;
2082 /* Explicitly ignore and do not report when firmware returns ENOENT */
2083 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2085 dev_info(&vsi->back->pdev->dev,
2086 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2087 vsi_name, i40e_stat_str(hw, aq_ret),
2088 i40e_aq_str(hw, aq_err));
2093 * i40e_aqc_add_filters - Request firmware to add a set of filters
2094 * @vsi: ptr to the VSI
2095 * @vsi_name: name to display in messages
2096 * @list: the list of filters to send to firmware
2097 * @add_head: Position in the add hlist
2098 * @num_add: the number of filters to add
2099 * @promisc_change: set to true on exit if promiscuous mode was forced on
2101 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2102 * promisc_changed to true if the firmware has run out of space for more
2106 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2107 struct i40e_aqc_add_macvlan_element_data *list,
2108 struct i40e_new_mac_filter *add_head,
2109 int num_add, bool *promisc_changed)
2111 struct i40e_hw *hw = &vsi->back->hw;
2114 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2115 aq_err = hw->aq.asq_last_status;
2116 fcnt = i40e_update_filter_state(num_add, list, add_head);
2118 if (fcnt != num_add) {
2119 *promisc_changed = true;
2120 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2121 dev_warn(&vsi->back->pdev->dev,
2122 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2123 i40e_aq_str(hw, aq_err),
2129 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2130 * @vsi: pointer to the VSI
2133 * This function sets or clears the promiscuous broadcast flags for VLAN
2134 * filters in order to properly receive broadcast frames. Assumes that only
2135 * broadcast filters are passed.
2137 * Returns status indicating success or failure;
2140 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2141 struct i40e_mac_filter *f)
2143 bool enable = f->state == I40E_FILTER_NEW;
2144 struct i40e_hw *hw = &vsi->back->hw;
2147 if (f->vlan == I40E_VLAN_ANY) {
2148 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2153 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2161 dev_warn(&vsi->back->pdev->dev,
2162 "Error %s setting broadcast promiscuous mode on %s\n",
2163 i40e_aq_str(hw, hw->aq.asq_last_status),
2170 * i40e_set_promiscuous - set promiscuous mode
2171 * @pf: board private structure
2172 * @promisc: promisc on or off
2174 * There are different ways of setting promiscuous mode on a PF depending on
2175 * what state/environment we're in. This identifies and sets it appropriately.
2176 * Returns 0 on success.
2178 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2180 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2181 struct i40e_hw *hw = &pf->hw;
2184 if (vsi->type == I40E_VSI_MAIN &&
2185 pf->lan_veb != I40E_NO_VEB &&
2186 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2187 /* set defport ON for Main VSI instead of true promisc
2188 * this way we will get all unicast/multicast and VLAN
2189 * promisc behavior but will not get VF or VMDq traffic
2190 * replicated on the Main VSI.
2193 aq_ret = i40e_aq_set_default_vsi(hw,
2197 aq_ret = i40e_aq_clear_default_vsi(hw,
2201 dev_info(&pf->pdev->dev,
2202 "Set default VSI failed, err %s, aq_err %s\n",
2203 i40e_stat_str(hw, aq_ret),
2204 i40e_aq_str(hw, hw->aq.asq_last_status));
2207 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2213 dev_info(&pf->pdev->dev,
2214 "set unicast promisc failed, err %s, aq_err %s\n",
2215 i40e_stat_str(hw, aq_ret),
2216 i40e_aq_str(hw, hw->aq.asq_last_status));
2218 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2223 dev_info(&pf->pdev->dev,
2224 "set multicast promisc failed, err %s, aq_err %s\n",
2225 i40e_stat_str(hw, aq_ret),
2226 i40e_aq_str(hw, hw->aq.asq_last_status));
2231 pf->cur_promisc = promisc;
2237 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2238 * @vsi: ptr to the VSI
2240 * Push any outstanding VSI filter changes through the AdminQ.
2242 * Returns 0 or error value
2244 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2246 struct hlist_head tmp_add_list, tmp_del_list;
2247 struct i40e_mac_filter *f;
2248 struct i40e_new_mac_filter *new, *add_head = NULL;
2249 struct i40e_hw *hw = &vsi->back->hw;
2250 unsigned int failed_filters = 0;
2251 unsigned int vlan_filters = 0;
2252 bool promisc_changed = false;
2253 char vsi_name[16] = "PF";
2254 int filter_list_len = 0;
2255 i40e_status aq_ret = 0;
2256 u32 changed_flags = 0;
2257 struct hlist_node *h;
2266 /* empty array typed pointers, kcalloc later */
2267 struct i40e_aqc_add_macvlan_element_data *add_list;
2268 struct i40e_aqc_remove_macvlan_element_data *del_list;
2270 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2271 usleep_range(1000, 2000);
2275 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2276 vsi->current_netdev_flags = vsi->netdev->flags;
2279 INIT_HLIST_HEAD(&tmp_add_list);
2280 INIT_HLIST_HEAD(&tmp_del_list);
2282 if (vsi->type == I40E_VSI_SRIOV)
2283 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2284 else if (vsi->type != I40E_VSI_MAIN)
2285 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2287 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2288 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2290 spin_lock_bh(&vsi->mac_filter_hash_lock);
2291 /* Create a list of filters to delete. */
2292 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2293 if (f->state == I40E_FILTER_REMOVE) {
2294 /* Move the element into temporary del_list */
2295 hash_del(&f->hlist);
2296 hlist_add_head(&f->hlist, &tmp_del_list);
2298 /* Avoid counting removed filters */
2301 if (f->state == I40E_FILTER_NEW) {
2302 /* Create a temporary i40e_new_mac_filter */
2303 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2305 goto err_no_memory_locked;
2307 /* Store pointer to the real filter */
2309 new->state = f->state;
2311 /* Add it to the hash list */
2312 hlist_add_head(&new->hlist, &tmp_add_list);
2315 /* Count the number of active (current and new) VLAN
2316 * filters we have now. Does not count filters which
2317 * are marked for deletion.
2323 retval = i40e_correct_mac_vlan_filters(vsi,
2328 goto err_no_memory_locked;
2330 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2333 /* Now process 'del_list' outside the lock */
2334 if (!hlist_empty(&tmp_del_list)) {
2335 filter_list_len = hw->aq.asq_buf_size /
2336 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2337 list_size = filter_list_len *
2338 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2339 del_list = kzalloc(list_size, GFP_ATOMIC);
2343 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2346 /* handle broadcast filters by updating the broadcast
2347 * promiscuous flag and release filter list.
2349 if (is_broadcast_ether_addr(f->macaddr)) {
2350 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2352 hlist_del(&f->hlist);
2357 /* add to delete list */
2358 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2359 if (f->vlan == I40E_VLAN_ANY) {
2360 del_list[num_del].vlan_tag = 0;
2361 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2363 del_list[num_del].vlan_tag =
2364 cpu_to_le16((u16)(f->vlan));
2367 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2368 del_list[num_del].flags = cmd_flags;
2371 /* flush a full buffer */
2372 if (num_del == filter_list_len) {
2373 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2375 memset(del_list, 0, list_size);
2378 /* Release memory for MAC filter entries which were
2379 * synced up with HW.
2381 hlist_del(&f->hlist);
2386 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2394 if (!hlist_empty(&tmp_add_list)) {
2395 /* Do all the adds now. */
2396 filter_list_len = hw->aq.asq_buf_size /
2397 sizeof(struct i40e_aqc_add_macvlan_element_data);
2398 list_size = filter_list_len *
2399 sizeof(struct i40e_aqc_add_macvlan_element_data);
2400 add_list = kzalloc(list_size, GFP_ATOMIC);
2405 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2406 if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
2408 new->state = I40E_FILTER_FAILED;
2412 /* handle broadcast filters by updating the broadcast
2413 * promiscuous flag instead of adding a MAC filter.
2415 if (is_broadcast_ether_addr(new->f->macaddr)) {
2416 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2418 new->state = I40E_FILTER_FAILED;
2420 new->state = I40E_FILTER_ACTIVE;
2424 /* add to add array */
2428 ether_addr_copy(add_list[num_add].mac_addr,
2430 if (new->f->vlan == I40E_VLAN_ANY) {
2431 add_list[num_add].vlan_tag = 0;
2432 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2434 add_list[num_add].vlan_tag =
2435 cpu_to_le16((u16)(new->f->vlan));
2437 add_list[num_add].queue_number = 0;
2438 /* set invalid match method for later detection */
2439 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2440 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2441 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2444 /* flush a full buffer */
2445 if (num_add == filter_list_len) {
2446 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2449 memset(add_list, 0, list_size);
2454 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2455 num_add, &promisc_changed);
2457 /* Now move all of the filters from the temp add list back to
2460 spin_lock_bh(&vsi->mac_filter_hash_lock);
2461 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2462 /* Only update the state if we're still NEW */
2463 if (new->f->state == I40E_FILTER_NEW)
2464 new->f->state = new->state;
2465 hlist_del(&new->hlist);
2468 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2473 /* Determine the number of active and failed filters. */
2474 spin_lock_bh(&vsi->mac_filter_hash_lock);
2475 vsi->active_filters = 0;
2476 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2477 if (f->state == I40E_FILTER_ACTIVE)
2478 vsi->active_filters++;
2479 else if (f->state == I40E_FILTER_FAILED)
2482 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2484 /* If promiscuous mode has changed, we need to calculate a new
2485 * threshold for when we are safe to exit
2487 if (promisc_changed)
2488 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2490 /* Check if we are able to exit overflow promiscuous mode. We can
2491 * safely exit if we didn't just enter, we no longer have any failed
2492 * filters, and we have reduced filters below the threshold value.
2494 if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
2495 !promisc_changed && !failed_filters &&
2496 (vsi->active_filters < vsi->promisc_threshold)) {
2497 dev_info(&pf->pdev->dev,
2498 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2500 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2501 promisc_changed = true;
2502 vsi->promisc_threshold = 0;
2505 /* if the VF is not trusted do not do promisc */
2506 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2507 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2511 /* check for changes in promiscuous modes */
2512 if (changed_flags & IFF_ALLMULTI) {
2513 bool cur_multipromisc;
2515 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2516 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2521 retval = i40e_aq_rc_to_posix(aq_ret,
2522 hw->aq.asq_last_status);
2523 dev_info(&pf->pdev->dev,
2524 "set multi promisc failed on %s, err %s aq_err %s\n",
2526 i40e_stat_str(hw, aq_ret),
2527 i40e_aq_str(hw, hw->aq.asq_last_status));
2531 if ((changed_flags & IFF_PROMISC) || promisc_changed) {
2534 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2535 test_bit(__I40E_VSI_OVERFLOW_PROMISC,
2537 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2539 retval = i40e_aq_rc_to_posix(aq_ret,
2540 hw->aq.asq_last_status);
2541 dev_info(&pf->pdev->dev,
2542 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2543 cur_promisc ? "on" : "off",
2545 i40e_stat_str(hw, aq_ret),
2546 i40e_aq_str(hw, hw->aq.asq_last_status));
2550 /* if something went wrong then set the changed flag so we try again */
2552 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2554 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2558 /* Restore elements on the temporary add and delete lists */
2559 spin_lock_bh(&vsi->mac_filter_hash_lock);
2560 err_no_memory_locked:
2561 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2562 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2563 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2565 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2566 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2571 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2572 * @pf: board private structure
2574 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2578 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2580 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2582 for (v = 0; v < pf->num_alloc_vsi; v++) {
2584 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2585 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2588 /* come back and try again later */
2589 pf->flags |= I40E_FLAG_FILTER_SYNC;
2597 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2600 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2602 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2603 return I40E_RXBUFFER_2048;
2605 return I40E_RXBUFFER_3072;
2609 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2610 * @netdev: network interface device structure
2611 * @new_mtu: new value for maximum frame size
2613 * Returns 0 on success, negative on failure
2615 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2617 struct i40e_netdev_priv *np = netdev_priv(netdev);
2618 struct i40e_vsi *vsi = np->vsi;
2619 struct i40e_pf *pf = vsi->back;
2621 if (i40e_enabled_xdp_vsi(vsi)) {
2622 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2624 if (frame_size > i40e_max_xdp_frame_size(vsi))
2628 netdev_info(netdev, "changing MTU from %d to %d\n",
2629 netdev->mtu, new_mtu);
2630 netdev->mtu = new_mtu;
2631 if (netif_running(netdev))
2632 i40e_vsi_reinit_locked(vsi);
2633 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
2634 I40E_FLAG_CLIENT_L2_CHANGE);
2639 * i40e_ioctl - Access the hwtstamp interface
2640 * @netdev: network interface device structure
2641 * @ifr: interface request data
2642 * @cmd: ioctl command
2644 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2646 struct i40e_netdev_priv *np = netdev_priv(netdev);
2647 struct i40e_pf *pf = np->vsi->back;
2651 return i40e_ptp_get_ts_config(pf, ifr);
2653 return i40e_ptp_set_ts_config(pf, ifr);
2660 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2661 * @vsi: the vsi being adjusted
2663 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2665 struct i40e_vsi_context ctxt;
2668 if ((vsi->info.valid_sections &
2669 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2670 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2671 return; /* already enabled */
2673 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2674 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2675 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2677 ctxt.seid = vsi->seid;
2678 ctxt.info = vsi->info;
2679 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2681 dev_info(&vsi->back->pdev->dev,
2682 "update vlan stripping failed, err %s aq_err %s\n",
2683 i40e_stat_str(&vsi->back->hw, ret),
2684 i40e_aq_str(&vsi->back->hw,
2685 vsi->back->hw.aq.asq_last_status));
2690 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2691 * @vsi: the vsi being adjusted
2693 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2695 struct i40e_vsi_context ctxt;
2698 if ((vsi->info.valid_sections &
2699 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2700 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2701 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2702 return; /* already disabled */
2704 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2705 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2706 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2708 ctxt.seid = vsi->seid;
2709 ctxt.info = vsi->info;
2710 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2712 dev_info(&vsi->back->pdev->dev,
2713 "update vlan stripping failed, err %s aq_err %s\n",
2714 i40e_stat_str(&vsi->back->hw, ret),
2715 i40e_aq_str(&vsi->back->hw,
2716 vsi->back->hw.aq.asq_last_status));
2721 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2722 * @netdev: network interface to be adjusted
2723 * @features: netdev features to test if VLAN offload is enabled or not
2725 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2727 struct i40e_netdev_priv *np = netdev_priv(netdev);
2728 struct i40e_vsi *vsi = np->vsi;
2730 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2731 i40e_vlan_stripping_enable(vsi);
2733 i40e_vlan_stripping_disable(vsi);
2737 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2738 * @vsi: the vsi being configured
2739 * @vid: vlan id to be added (0 = untagged only , -1 = any)
2741 * This is a helper function for adding a new MAC/VLAN filter with the
2742 * specified VLAN for each existing MAC address already in the hash table.
2743 * This function does *not* perform any accounting to update filters based on
2746 * NOTE: this function expects to be called while under the
2747 * mac_filter_hash_lock
2749 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2751 struct i40e_mac_filter *f, *add_f;
2752 struct hlist_node *h;
2755 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2756 if (f->state == I40E_FILTER_REMOVE)
2758 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2760 dev_info(&vsi->back->pdev->dev,
2761 "Could not add vlan filter %d for %pM\n",
2771 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2772 * @vsi: the VSI being configured
2773 * @vid: VLAN id to be added
2775 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2782 /* The network stack will attempt to add VID=0, with the intention to
2783 * receive priority tagged packets with a VLAN of 0. Our HW receives
2784 * these packets by default when configured to receive untagged
2785 * packets, so we don't need to add a filter for this case.
2786 * Additionally, HW interprets adding a VID=0 filter as meaning to
2787 * receive *only* tagged traffic and stops receiving untagged traffic.
2788 * Thus, we do not want to actually add a filter for VID=0
2793 /* Locked once because all functions invoked below iterates list*/
2794 spin_lock_bh(&vsi->mac_filter_hash_lock);
2795 err = i40e_add_vlan_all_mac(vsi, vid);
2796 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2800 /* schedule our worker thread which will take care of
2801 * applying the new filter changes
2803 i40e_service_event_schedule(vsi->back);
2808 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2809 * @vsi: the vsi being configured
2810 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
2812 * This function should be used to remove all VLAN filters which match the
2813 * given VID. It does not schedule the service event and does not take the
2814 * mac_filter_hash_lock so it may be combined with other operations under
2815 * a single invocation of the mac_filter_hash_lock.
2817 * NOTE: this function expects to be called while under the
2818 * mac_filter_hash_lock
2820 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2822 struct i40e_mac_filter *f;
2823 struct hlist_node *h;
2826 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2828 __i40e_del_filter(vsi, f);
2833 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2834 * @vsi: the VSI being configured
2835 * @vid: VLAN id to be removed
2837 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2839 if (!vid || vsi->info.pvid)
2842 spin_lock_bh(&vsi->mac_filter_hash_lock);
2843 i40e_rm_vlan_all_mac(vsi, vid);
2844 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2846 /* schedule our worker thread which will take care of
2847 * applying the new filter changes
2849 i40e_service_event_schedule(vsi->back);
2853 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2854 * @netdev: network interface to be adjusted
2855 * @vid: vlan id to be added
2857 * net_device_ops implementation for adding vlan ids
2859 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2860 __always_unused __be16 proto, u16 vid)
2862 struct i40e_netdev_priv *np = netdev_priv(netdev);
2863 struct i40e_vsi *vsi = np->vsi;
2866 if (vid >= VLAN_N_VID)
2869 ret = i40e_vsi_add_vlan(vsi, vid);
2871 set_bit(vid, vsi->active_vlans);
2877 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2878 * @netdev: network interface to be adjusted
2879 * @vid: vlan id to be removed
2881 * net_device_ops implementation for removing vlan ids
2883 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2884 __always_unused __be16 proto, u16 vid)
2886 struct i40e_netdev_priv *np = netdev_priv(netdev);
2887 struct i40e_vsi *vsi = np->vsi;
2889 /* return code is ignored as there is nothing a user
2890 * can do about failure to remove and a log message was
2891 * already printed from the other function
2893 i40e_vsi_kill_vlan(vsi, vid);
2895 clear_bit(vid, vsi->active_vlans);
2901 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2902 * @vsi: the vsi being brought back up
2904 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2911 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2913 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2914 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2919 * i40e_vsi_add_pvid - Add pvid for the VSI
2920 * @vsi: the vsi being adjusted
2921 * @vid: the vlan id to set as a PVID
2923 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2925 struct i40e_vsi_context ctxt;
2928 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2929 vsi->info.pvid = cpu_to_le16(vid);
2930 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2931 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2932 I40E_AQ_VSI_PVLAN_EMOD_STR;
2934 ctxt.seid = vsi->seid;
2935 ctxt.info = vsi->info;
2936 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2938 dev_info(&vsi->back->pdev->dev,
2939 "add pvid failed, err %s aq_err %s\n",
2940 i40e_stat_str(&vsi->back->hw, ret),
2941 i40e_aq_str(&vsi->back->hw,
2942 vsi->back->hw.aq.asq_last_status));
2950 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2951 * @vsi: the vsi being adjusted
2953 * Just use the vlan_rx_register() service to put it back to normal
2955 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2957 i40e_vlan_stripping_disable(vsi);
2963 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2964 * @vsi: ptr to the VSI
2966 * If this function returns with an error, then it's possible one or
2967 * more of the rings is populated (while the rest are not). It is the
2968 * callers duty to clean those orphaned rings.
2970 * Return 0 on success, negative on failure
2972 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2976 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2977 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2979 if (!i40e_enabled_xdp_vsi(vsi))
2982 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2983 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
2989 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2990 * @vsi: ptr to the VSI
2992 * Free VSI's transmit software resources
2994 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2998 if (vsi->tx_rings) {
2999 for (i = 0; i < vsi->num_queue_pairs; i++)
3000 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3001 i40e_free_tx_resources(vsi->tx_rings[i]);
3004 if (vsi->xdp_rings) {
3005 for (i = 0; i < vsi->num_queue_pairs; i++)
3006 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3007 i40e_free_tx_resources(vsi->xdp_rings[i]);
3012 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3013 * @vsi: ptr to the VSI
3015 * If this function returns with an error, then it's possible one or
3016 * more of the rings is populated (while the rest are not). It is the
3017 * callers duty to clean those orphaned rings.
3019 * Return 0 on success, negative on failure
3021 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3025 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3026 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3031 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3032 * @vsi: ptr to the VSI
3034 * Free all receive software resources
3036 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3043 for (i = 0; i < vsi->num_queue_pairs; i++)
3044 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3045 i40e_free_rx_resources(vsi->rx_rings[i]);
3049 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3050 * @ring: The Tx ring to configure
3052 * This enables/disables XPS for a given Tx descriptor ring
3053 * based on the TCs enabled for the VSI that ring belongs to.
3055 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3059 if (!ring->q_vector || !ring->netdev || ring->ch)
3062 /* We only initialize XPS once, so as not to overwrite user settings */
3063 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3066 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3067 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3072 * i40e_configure_tx_ring - Configure a transmit ring context and rest
3073 * @ring: The Tx ring to configure
3075 * Configure the Tx descriptor ring in the HMC context.
3077 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3079 struct i40e_vsi *vsi = ring->vsi;
3080 u16 pf_q = vsi->base_queue + ring->queue_index;
3081 struct i40e_hw *hw = &vsi->back->hw;
3082 struct i40e_hmc_obj_txq tx_ctx;
3083 i40e_status err = 0;
3086 /* some ATR related tx ring init */
3087 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3088 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3089 ring->atr_count = 0;
3091 ring->atr_sample_rate = 0;
3095 i40e_config_xps_tx_ring(ring);
3097 /* clear the context structure first */
3098 memset(&tx_ctx, 0, sizeof(tx_ctx));
3100 tx_ctx.new_context = 1;
3101 tx_ctx.base = (ring->dma / 128);
3102 tx_ctx.qlen = ring->count;
3103 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3104 I40E_FLAG_FD_ATR_ENABLED));
3105 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3106 /* FDIR VSI tx ring can still use RS bit and writebacks */
3107 if (vsi->type != I40E_VSI_FDIR)
3108 tx_ctx.head_wb_ena = 1;
3109 tx_ctx.head_wb_addr = ring->dma +
3110 (ring->count * sizeof(struct i40e_tx_desc));
3112 /* As part of VSI creation/update, FW allocates certain
3113 * Tx arbitration queue sets for each TC enabled for
3114 * the VSI. The FW returns the handles to these queue
3115 * sets as part of the response buffer to Add VSI,
3116 * Update VSI, etc. AQ commands. It is expected that
3117 * these queue set handles be associated with the Tx
3118 * queues by the driver as part of the TX queue context
3119 * initialization. This has to be done regardless of
3120 * DCB as by default everything is mapped to TC0.
3125 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3128 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3130 tx_ctx.rdylist_act = 0;
3132 /* clear the context in the HMC */
3133 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3135 dev_info(&vsi->back->pdev->dev,
3136 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3137 ring->queue_index, pf_q, err);
3141 /* set the context in the HMC */
3142 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3144 dev_info(&vsi->back->pdev->dev,
3145 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3146 ring->queue_index, pf_q, err);
3150 /* Now associate this queue with this PCI function */
3152 if (ring->ch->type == I40E_VSI_VMDQ2)
3153 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3157 qtx_ctl |= (ring->ch->vsi_number <<
3158 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3159 I40E_QTX_CTL_VFVM_INDX_MASK;
3161 if (vsi->type == I40E_VSI_VMDQ2) {
3162 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3163 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3164 I40E_QTX_CTL_VFVM_INDX_MASK;
3166 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3170 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3171 I40E_QTX_CTL_PF_INDX_MASK);
3172 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3175 /* cache tail off for easier writes later */
3176 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3182 * i40e_configure_rx_ring - Configure a receive ring context
3183 * @ring: The Rx ring to configure
3185 * Configure the Rx descriptor ring in the HMC context.
3187 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3189 struct i40e_vsi *vsi = ring->vsi;
3190 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3191 u16 pf_q = vsi->base_queue + ring->queue_index;
3192 struct i40e_hw *hw = &vsi->back->hw;
3193 struct i40e_hmc_obj_rxq rx_ctx;
3194 i40e_status err = 0;
3196 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3198 /* clear the context structure first */
3199 memset(&rx_ctx, 0, sizeof(rx_ctx));
3201 ring->rx_buf_len = vsi->rx_buf_len;
3203 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3204 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3206 rx_ctx.base = (ring->dma / 128);
3207 rx_ctx.qlen = ring->count;
3209 /* use 32 byte descriptors */
3212 /* descriptor type is always zero
3215 rx_ctx.hsplit_0 = 0;
3217 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3218 if (hw->revision_id == 0)
3219 rx_ctx.lrxqthresh = 0;
3221 rx_ctx.lrxqthresh = 1;
3222 rx_ctx.crcstrip = 1;
3224 /* this controls whether VLAN is stripped from inner headers */
3226 /* set the prefena field to 1 because the manual says to */
3229 /* clear the context in the HMC */
3230 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3232 dev_info(&vsi->back->pdev->dev,
3233 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3234 ring->queue_index, pf_q, err);
3238 /* set the context in the HMC */
3239 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3241 dev_info(&vsi->back->pdev->dev,
3242 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3243 ring->queue_index, pf_q, err);
3247 /* configure Rx buffer alignment */
3248 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3249 clear_ring_build_skb_enabled(ring);
3251 set_ring_build_skb_enabled(ring);
3253 /* cache tail for quicker writes, and clear the reg before use */
3254 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3255 writel(0, ring->tail);
3257 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3263 * i40e_vsi_configure_tx - Configure the VSI for Tx
3264 * @vsi: VSI structure describing this set of rings and resources
3266 * Configure the Tx VSI for operation.
3268 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3273 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3274 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3276 if (!i40e_enabled_xdp_vsi(vsi))
3279 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3280 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3286 * i40e_vsi_configure_rx - Configure the VSI for Rx
3287 * @vsi: the VSI being configured
3289 * Configure the Rx VSI for operation.
3291 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3296 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3297 vsi->max_frame = I40E_MAX_RXBUFFER;
3298 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3299 #if (PAGE_SIZE < 8192)
3300 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3301 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3302 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3303 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3306 vsi->max_frame = I40E_MAX_RXBUFFER;
3307 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3311 /* set up individual rings */
3312 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3313 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3319 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3320 * @vsi: ptr to the VSI
3322 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3324 struct i40e_ring *tx_ring, *rx_ring;
3325 u16 qoffset, qcount;
3328 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3329 /* Reset the TC information */
3330 for (i = 0; i < vsi->num_queue_pairs; i++) {
3331 rx_ring = vsi->rx_rings[i];
3332 tx_ring = vsi->tx_rings[i];
3333 rx_ring->dcb_tc = 0;
3334 tx_ring->dcb_tc = 0;
3339 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3340 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3343 qoffset = vsi->tc_config.tc_info[n].qoffset;
3344 qcount = vsi->tc_config.tc_info[n].qcount;
3345 for (i = qoffset; i < (qoffset + qcount); i++) {
3346 rx_ring = vsi->rx_rings[i];
3347 tx_ring = vsi->tx_rings[i];
3348 rx_ring->dcb_tc = n;
3349 tx_ring->dcb_tc = n;
3355 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3356 * @vsi: ptr to the VSI
3358 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3361 i40e_set_rx_mode(vsi->netdev);
3365 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3366 * @vsi: Pointer to the targeted VSI
3368 * This function replays the hlist on the hw where all the SB Flow Director
3369 * filters were saved.
3371 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3373 struct i40e_fdir_filter *filter;
3374 struct i40e_pf *pf = vsi->back;
3375 struct hlist_node *node;
3377 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3380 /* Reset FDir counters as we're replaying all existing filters */
3381 pf->fd_tcp4_filter_cnt = 0;
3382 pf->fd_udp4_filter_cnt = 0;
3383 pf->fd_sctp4_filter_cnt = 0;
3384 pf->fd_ip4_filter_cnt = 0;
3386 hlist_for_each_entry_safe(filter, node,
3387 &pf->fdir_filter_list, fdir_node) {
3388 i40e_add_del_fdir(vsi, filter, true);
3393 * i40e_vsi_configure - Set up the VSI for action
3394 * @vsi: the VSI being configured
3396 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3400 i40e_set_vsi_rx_mode(vsi);
3401 i40e_restore_vlan(vsi);
3402 i40e_vsi_config_dcb_rings(vsi);
3403 err = i40e_vsi_configure_tx(vsi);
3405 err = i40e_vsi_configure_rx(vsi);
3411 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3412 * @vsi: the VSI being configured
3414 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3416 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3417 struct i40e_pf *pf = vsi->back;
3418 struct i40e_hw *hw = &pf->hw;
3423 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3424 * and PFINT_LNKLSTn registers, e.g.:
3425 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3427 qp = vsi->base_queue;
3428 vector = vsi->base_vector;
3429 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3430 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3432 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3433 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
3434 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3435 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3437 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
3438 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3439 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3441 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3442 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3444 /* Linked list for the queuepairs assigned to this vector */
3445 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3446 for (q = 0; q < q_vector->num_ringpairs; q++) {
3447 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3450 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3451 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3452 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3453 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3454 (I40E_QUEUE_TYPE_TX <<
3455 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3457 wr32(hw, I40E_QINT_RQCTL(qp), val);
3460 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3461 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3462 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3463 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3464 (I40E_QUEUE_TYPE_TX <<
3465 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3467 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3470 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3471 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3472 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3473 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3474 (I40E_QUEUE_TYPE_RX <<
3475 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3477 /* Terminate the linked list */
3478 if (q == (q_vector->num_ringpairs - 1))
3479 val |= (I40E_QUEUE_END_OF_LIST <<
3480 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3482 wr32(hw, I40E_QINT_TQCTL(qp), val);
3491 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3492 * @hw: ptr to the hardware info
3494 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3496 struct i40e_hw *hw = &pf->hw;
3499 /* clear things first */
3500 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3501 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3503 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3504 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3505 I40E_PFINT_ICR0_ENA_GRST_MASK |
3506 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3507 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3508 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3509 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3510 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3512 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3513 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3515 if (pf->flags & I40E_FLAG_PTP)
3516 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3518 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3520 /* SW_ITR_IDX = 0, but don't change INTENA */
3521 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3522 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3524 /* OTHER_ITR_IDX = 0 */
3525 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3529 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3530 * @vsi: the VSI being configured
3532 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3534 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3535 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3536 struct i40e_pf *pf = vsi->back;
3537 struct i40e_hw *hw = &pf->hw;
3540 /* set the ITR configuration */
3541 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3542 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
3543 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3544 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
3545 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
3546 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3547 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3549 i40e_enable_misc_int_causes(pf);
3551 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3552 wr32(hw, I40E_PFINT_LNKLST0, 0);
3554 /* Associate the queue pair to the vector and enable the queue int */
3555 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3556 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3557 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3558 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3560 wr32(hw, I40E_QINT_RQCTL(0), val);
3562 if (i40e_enabled_xdp_vsi(vsi)) {
3563 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3564 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3566 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3568 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3571 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3572 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3573 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3575 wr32(hw, I40E_QINT_TQCTL(0), val);
3580 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3581 * @pf: board private structure
3583 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3585 struct i40e_hw *hw = &pf->hw;
3587 wr32(hw, I40E_PFINT_DYN_CTL0,
3588 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3593 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3594 * @pf: board private structure
3596 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3598 struct i40e_hw *hw = &pf->hw;
3601 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3602 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3603 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3605 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3610 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3611 * @irq: interrupt number
3612 * @data: pointer to a q_vector
3614 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3616 struct i40e_q_vector *q_vector = data;
3618 if (!q_vector->tx.ring && !q_vector->rx.ring)
3621 napi_schedule_irqoff(&q_vector->napi);
3627 * i40e_irq_affinity_notify - Callback for affinity changes
3628 * @notify: context as to what irq was changed
3629 * @mask: the new affinity mask
3631 * This is a callback function used by the irq_set_affinity_notifier function
3632 * so that we may register to receive changes to the irq affinity masks.
3634 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3635 const cpumask_t *mask)
3637 struct i40e_q_vector *q_vector =
3638 container_of(notify, struct i40e_q_vector, affinity_notify);
3640 cpumask_copy(&q_vector->affinity_mask, mask);
3644 * i40e_irq_affinity_release - Callback for affinity notifier release
3645 * @ref: internal core kernel usage
3647 * This is a callback function used by the irq_set_affinity_notifier function
3648 * to inform the current notification subscriber that they will no longer
3649 * receive notifications.
3651 static void i40e_irq_affinity_release(struct kref *ref) {}
3654 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3655 * @vsi: the VSI being configured
3656 * @basename: name for the vector
3658 * Allocates MSI-X vectors and requests interrupts from the kernel.
3660 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3662 int q_vectors = vsi->num_q_vectors;
3663 struct i40e_pf *pf = vsi->back;
3664 int base = vsi->base_vector;
3671 for (vector = 0; vector < q_vectors; vector++) {
3672 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3674 irq_num = pf->msix_entries[base + vector].vector;
3676 if (q_vector->tx.ring && q_vector->rx.ring) {
3677 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3678 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3680 } else if (q_vector->rx.ring) {
3681 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3682 "%s-%s-%d", basename, "rx", rx_int_idx++);
3683 } else if (q_vector->tx.ring) {
3684 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3685 "%s-%s-%d", basename, "tx", tx_int_idx++);
3687 /* skip this unused q_vector */
3690 err = request_irq(irq_num,
3696 dev_info(&pf->pdev->dev,
3697 "MSIX request_irq failed, error: %d\n", err);
3698 goto free_queue_irqs;
3701 /* register for affinity change notifications */
3702 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3703 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3704 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3705 /* Spread affinity hints out across online CPUs.
3707 * get_cpu_mask returns a static constant mask with
3708 * a permanent lifetime so it's ok to pass to
3709 * irq_set_affinity_hint without making a copy.
3711 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3712 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3715 vsi->irqs_ready = true;
3721 irq_num = pf->msix_entries[base + vector].vector;
3722 irq_set_affinity_notifier(irq_num, NULL);
3723 irq_set_affinity_hint(irq_num, NULL);
3724 free_irq(irq_num, &vsi->q_vectors[vector]);
3730 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3731 * @vsi: the VSI being un-configured
3733 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3735 struct i40e_pf *pf = vsi->back;
3736 struct i40e_hw *hw = &pf->hw;
3737 int base = vsi->base_vector;
3740 /* disable interrupt causation from each queue */
3741 for (i = 0; i < vsi->num_queue_pairs; i++) {
3744 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3745 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3746 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3748 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3749 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3750 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3752 if (!i40e_enabled_xdp_vsi(vsi))
3754 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3757 /* disable each interrupt */
3758 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3759 for (i = vsi->base_vector;
3760 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3761 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3764 for (i = 0; i < vsi->num_q_vectors; i++)
3765 synchronize_irq(pf->msix_entries[i + base].vector);
3767 /* Legacy and MSI mode - this stops all interrupt handling */
3768 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3769 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3771 synchronize_irq(pf->pdev->irq);
3776 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3777 * @vsi: the VSI being configured
3779 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3781 struct i40e_pf *pf = vsi->back;
3784 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3785 for (i = 0; i < vsi->num_q_vectors; i++)
3786 i40e_irq_dynamic_enable(vsi, i);
3788 i40e_irq_dynamic_enable_icr0(pf);
3791 i40e_flush(&pf->hw);
3796 * i40e_free_misc_vector - Free the vector that handles non-queue events
3797 * @pf: board private structure
3799 static void i40e_free_misc_vector(struct i40e_pf *pf)
3802 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3803 i40e_flush(&pf->hw);
3805 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3806 synchronize_irq(pf->msix_entries[0].vector);
3807 free_irq(pf->msix_entries[0].vector, pf);
3808 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3813 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3814 * @irq: interrupt number
3815 * @data: pointer to a q_vector
3817 * This is the handler used for all MSI/Legacy interrupts, and deals
3818 * with both queue and non-queue interrupts. This is also used in
3819 * MSIX mode to handle the non-queue interrupts.
3821 static irqreturn_t i40e_intr(int irq, void *data)
3823 struct i40e_pf *pf = (struct i40e_pf *)data;
3824 struct i40e_hw *hw = &pf->hw;
3825 irqreturn_t ret = IRQ_NONE;
3826 u32 icr0, icr0_remaining;
3829 icr0 = rd32(hw, I40E_PFINT_ICR0);
3830 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3832 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3833 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3836 /* if interrupt but no bits showing, must be SWINT */
3837 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3838 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3841 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3842 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3843 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3844 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3845 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3848 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3849 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3850 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3851 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3853 /* We do not have a way to disarm Queue causes while leaving
3854 * interrupt enabled for all other causes, ideally
3855 * interrupt should be disabled while we are in NAPI but
3856 * this is not a performance path and napi_schedule()
3857 * can deal with rescheduling.
3859 if (!test_bit(__I40E_DOWN, pf->state))
3860 napi_schedule_irqoff(&q_vector->napi);
3863 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3864 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3865 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3866 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3869 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3870 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3871 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3874 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3875 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3876 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3879 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3880 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3881 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3882 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3883 val = rd32(hw, I40E_GLGEN_RSTAT);
3884 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3885 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3886 if (val == I40E_RESET_CORER) {
3888 } else if (val == I40E_RESET_GLOBR) {
3890 } else if (val == I40E_RESET_EMPR) {
3892 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
3896 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3897 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3898 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3899 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3900 rd32(hw, I40E_PFHMC_ERRORINFO),
3901 rd32(hw, I40E_PFHMC_ERRORDATA));
3904 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3905 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3907 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3908 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3909 i40e_ptp_tx_hwtstamp(pf);
3913 /* If a critical error is pending we have no choice but to reset the
3915 * Report and mask out any remaining unexpected interrupts.
3917 icr0_remaining = icr0 & ena_mask;
3918 if (icr0_remaining) {
3919 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3921 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3922 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3923 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3924 dev_info(&pf->pdev->dev, "device will be reset\n");
3925 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
3926 i40e_service_event_schedule(pf);
3928 ena_mask &= ~icr0_remaining;
3933 /* re-enable interrupt causes */
3934 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3935 if (!test_bit(__I40E_DOWN, pf->state)) {
3936 i40e_service_event_schedule(pf);
3937 i40e_irq_dynamic_enable_icr0(pf);
3944 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3945 * @tx_ring: tx ring to clean
3946 * @budget: how many cleans we're allowed
3948 * Returns true if there's any budget left (e.g. the clean is finished)
3950 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3952 struct i40e_vsi *vsi = tx_ring->vsi;
3953 u16 i = tx_ring->next_to_clean;
3954 struct i40e_tx_buffer *tx_buf;
3955 struct i40e_tx_desc *tx_desc;
3957 tx_buf = &tx_ring->tx_bi[i];
3958 tx_desc = I40E_TX_DESC(tx_ring, i);
3959 i -= tx_ring->count;
3962 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3964 /* if next_to_watch is not set then there is no work pending */
3968 /* prevent any other reads prior to eop_desc */
3971 /* if the descriptor isn't done, no work yet to do */
3972 if (!(eop_desc->cmd_type_offset_bsz &
3973 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3976 /* clear next_to_watch to prevent false hangs */
3977 tx_buf->next_to_watch = NULL;
3979 tx_desc->buffer_addr = 0;
3980 tx_desc->cmd_type_offset_bsz = 0;
3981 /* move past filter desc */
3986 i -= tx_ring->count;
3987 tx_buf = tx_ring->tx_bi;
3988 tx_desc = I40E_TX_DESC(tx_ring, 0);
3990 /* unmap skb header data */
3991 dma_unmap_single(tx_ring->dev,
3992 dma_unmap_addr(tx_buf, dma),
3993 dma_unmap_len(tx_buf, len),
3995 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3996 kfree(tx_buf->raw_buf);
3998 tx_buf->raw_buf = NULL;
3999 tx_buf->tx_flags = 0;
4000 tx_buf->next_to_watch = NULL;
4001 dma_unmap_len_set(tx_buf, len, 0);
4002 tx_desc->buffer_addr = 0;
4003 tx_desc->cmd_type_offset_bsz = 0;
4005 /* move us past the eop_desc for start of next FD desc */
4010 i -= tx_ring->count;
4011 tx_buf = tx_ring->tx_bi;
4012 tx_desc = I40E_TX_DESC(tx_ring, 0);
4015 /* update budget accounting */
4017 } while (likely(budget));
4019 i += tx_ring->count;
4020 tx_ring->next_to_clean = i;
4022 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4023 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4029 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4030 * @irq: interrupt number
4031 * @data: pointer to a q_vector
4033 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4035 struct i40e_q_vector *q_vector = data;
4036 struct i40e_vsi *vsi;
4038 if (!q_vector->tx.ring)
4041 vsi = q_vector->tx.ring->vsi;
4042 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4048 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4049 * @vsi: the VSI being configured
4050 * @v_idx: vector index
4051 * @qp_idx: queue pair index
4053 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4055 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4056 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4057 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4059 tx_ring->q_vector = q_vector;
4060 tx_ring->next = q_vector->tx.ring;
4061 q_vector->tx.ring = tx_ring;
4062 q_vector->tx.count++;
4064 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4065 if (i40e_enabled_xdp_vsi(vsi)) {
4066 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4068 xdp_ring->q_vector = q_vector;
4069 xdp_ring->next = q_vector->tx.ring;
4070 q_vector->tx.ring = xdp_ring;
4071 q_vector->tx.count++;
4074 rx_ring->q_vector = q_vector;
4075 rx_ring->next = q_vector->rx.ring;
4076 q_vector->rx.ring = rx_ring;
4077 q_vector->rx.count++;
4081 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4082 * @vsi: the VSI being configured
4084 * This function maps descriptor rings to the queue-specific vectors
4085 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4086 * one vector per queue pair, but on a constrained vector budget, we
4087 * group the queue pairs as "efficiently" as possible.
4089 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4091 int qp_remaining = vsi->num_queue_pairs;
4092 int q_vectors = vsi->num_q_vectors;
4097 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4098 * group them so there are multiple queues per vector.
4099 * It is also important to go through all the vectors available to be
4100 * sure that if we don't use all the vectors, that the remaining vectors
4101 * are cleared. This is especially important when decreasing the
4102 * number of queues in use.
4104 for (; v_start < q_vectors; v_start++) {
4105 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4107 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4109 q_vector->num_ringpairs = num_ringpairs;
4111 q_vector->rx.count = 0;
4112 q_vector->tx.count = 0;
4113 q_vector->rx.ring = NULL;
4114 q_vector->tx.ring = NULL;
4116 while (num_ringpairs--) {
4117 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4125 * i40e_vsi_request_irq - Request IRQ from the OS
4126 * @vsi: the VSI being configured
4127 * @basename: name for the vector
4129 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4131 struct i40e_pf *pf = vsi->back;
4134 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4135 err = i40e_vsi_request_irq_msix(vsi, basename);
4136 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4137 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4140 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4144 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4149 #ifdef CONFIG_NET_POLL_CONTROLLER
4151 * i40e_netpoll - A Polling 'interrupt' handler
4152 * @netdev: network interface device structure
4154 * This is used by netconsole to send skbs without having to re-enable
4155 * interrupts. It's not called while the normal interrupt routine is executing.
4157 static void i40e_netpoll(struct net_device *netdev)
4159 struct i40e_netdev_priv *np = netdev_priv(netdev);
4160 struct i40e_vsi *vsi = np->vsi;
4161 struct i40e_pf *pf = vsi->back;
4164 /* if interface is down do nothing */
4165 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4168 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4169 for (i = 0; i < vsi->num_q_vectors; i++)
4170 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4172 i40e_intr(pf->pdev->irq, netdev);
4177 #define I40E_QTX_ENA_WAIT_COUNT 50
4180 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4181 * @pf: the PF being configured
4182 * @pf_q: the PF queue
4183 * @enable: enable or disable state of the queue
4185 * This routine will wait for the given Tx queue of the PF to reach the
4186 * enabled or disabled state.
4187 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4188 * multiple retries; else will return 0 in case of success.
4190 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4195 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4196 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4197 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4200 usleep_range(10, 20);
4202 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4209 * i40e_control_tx_q - Start or stop a particular Tx queue
4210 * @pf: the PF structure
4211 * @pf_q: the PF queue to configure
4212 * @enable: start or stop the queue
4214 * This function enables or disables a single queue. Note that any delay
4215 * required after the operation is expected to be handled by the caller of
4218 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4220 struct i40e_hw *hw = &pf->hw;
4224 /* warn the TX unit of coming changes */
4225 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4227 usleep_range(10, 20);
4229 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4230 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4231 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4232 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4234 usleep_range(1000, 2000);
4237 /* Skip if the queue is already in the requested state */
4238 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4241 /* turn on/off the queue */
4243 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4244 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4246 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4249 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4253 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4255 * @pf: the PF structure
4256 * @pf_q: the PF queue to configure
4257 * @is_xdp: true if the queue is used for XDP
4258 * @enable: start or stop the queue
4260 static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4261 bool is_xdp, bool enable)
4265 i40e_control_tx_q(pf, pf_q, enable);
4267 /* wait for the change to finish */
4268 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4270 dev_info(&pf->pdev->dev,
4271 "VSI seid %d %sTx ring %d %sable timeout\n",
4272 seid, (is_xdp ? "XDP " : ""), pf_q,
4273 (enable ? "en" : "dis"));
4280 * i40e_vsi_control_tx - Start or stop a VSI's rings
4281 * @vsi: the VSI being configured
4282 * @enable: start or stop the rings
4284 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4286 struct i40e_pf *pf = vsi->back;
4287 int i, pf_q, ret = 0;
4289 pf_q = vsi->base_queue;
4290 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4291 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4293 false /*is xdp*/, enable);
4297 if (!i40e_enabled_xdp_vsi(vsi))
4300 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4301 pf_q + vsi->alloc_queue_pairs,
4302 true /*is xdp*/, enable);
4311 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4312 * @pf: the PF being configured
4313 * @pf_q: the PF queue
4314 * @enable: enable or disable state of the queue
4316 * This routine will wait for the given Rx queue of the PF to reach the
4317 * enabled or disabled state.
4318 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4319 * multiple retries; else will return 0 in case of success.
4321 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4326 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4327 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4328 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4331 usleep_range(10, 20);
4333 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4340 * i40e_control_rx_q - Start or stop a particular Rx queue
4341 * @pf: the PF structure
4342 * @pf_q: the PF queue to configure
4343 * @enable: start or stop the queue
4345 * This function enables or disables a single queue. Note that any delay
4346 * required after the operation is expected to be handled by the caller of
4349 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4351 struct i40e_hw *hw = &pf->hw;
4355 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4356 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4357 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4358 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4360 usleep_range(1000, 2000);
4363 /* Skip if the queue is already in the requested state */
4364 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4367 /* turn on/off the queue */
4369 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4371 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4373 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4377 * i40e_vsi_control_rx - Start or stop a VSI's rings
4378 * @vsi: the VSI being configured
4379 * @enable: start or stop the rings
4381 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4383 struct i40e_pf *pf = vsi->back;
4384 int i, pf_q, ret = 0;
4386 pf_q = vsi->base_queue;
4387 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4388 i40e_control_rx_q(pf, pf_q, enable);
4390 /* wait for the change to finish */
4391 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4393 dev_info(&pf->pdev->dev,
4394 "VSI seid %d Rx ring %d %sable timeout\n",
4395 vsi->seid, pf_q, (enable ? "en" : "dis"));
4400 /* Due to HW errata, on Rx disable only, the register can indicate done
4401 * before it really is. Needs 50ms to be sure
4410 * i40e_vsi_start_rings - Start a VSI's rings
4411 * @vsi: the VSI being configured
4413 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4417 /* do rx first for enable and last for disable */
4418 ret = i40e_vsi_control_rx(vsi, true);
4421 ret = i40e_vsi_control_tx(vsi, true);
4427 * i40e_vsi_stop_rings - Stop a VSI's rings
4428 * @vsi: the VSI being configured
4430 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4432 /* When port TX is suspended, don't wait */
4433 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4434 return i40e_vsi_stop_rings_no_wait(vsi);
4436 /* do rx first for enable and last for disable
4437 * Ignore return value, we need to shutdown whatever we can
4439 i40e_vsi_control_tx(vsi, false);
4440 i40e_vsi_control_rx(vsi, false);
4444 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4445 * @vsi: the VSI being shutdown
4447 * This function stops all the rings for a VSI but does not delay to verify
4448 * that rings have been disabled. It is expected that the caller is shutting
4449 * down multiple VSIs at once and will delay together for all the VSIs after
4450 * initiating the shutdown. This is particularly useful for shutting down lots
4451 * of VFs together. Otherwise, a large delay can be incurred while configuring
4452 * each VSI in serial.
4454 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4456 struct i40e_pf *pf = vsi->back;
4459 pf_q = vsi->base_queue;
4460 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4461 i40e_control_tx_q(pf, pf_q, false);
4462 i40e_control_rx_q(pf, pf_q, false);
4467 * i40e_vsi_free_irq - Free the irq association with the OS
4468 * @vsi: the VSI being configured
4470 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4472 struct i40e_pf *pf = vsi->back;
4473 struct i40e_hw *hw = &pf->hw;
4474 int base = vsi->base_vector;
4478 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4479 if (!vsi->q_vectors)
4482 if (!vsi->irqs_ready)
4485 vsi->irqs_ready = false;
4486 for (i = 0; i < vsi->num_q_vectors; i++) {
4491 irq_num = pf->msix_entries[vector].vector;
4493 /* free only the irqs that were actually requested */
4494 if (!vsi->q_vectors[i] ||
4495 !vsi->q_vectors[i]->num_ringpairs)
4498 /* clear the affinity notifier in the IRQ descriptor */
4499 irq_set_affinity_notifier(irq_num, NULL);
4500 /* remove our suggested affinity mask for this IRQ */
4501 irq_set_affinity_hint(irq_num, NULL);
4502 synchronize_irq(irq_num);
4503 free_irq(irq_num, vsi->q_vectors[i]);
4505 /* Tear down the interrupt queue link list
4507 * We know that they come in pairs and always
4508 * the Rx first, then the Tx. To clear the
4509 * link list, stick the EOL value into the
4510 * next_q field of the registers.
4512 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4513 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4514 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4515 val |= I40E_QUEUE_END_OF_LIST
4516 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4517 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4519 while (qp != I40E_QUEUE_END_OF_LIST) {
4522 val = rd32(hw, I40E_QINT_RQCTL(qp));
4524 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4525 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4526 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4527 I40E_QINT_RQCTL_INTEVENT_MASK);
4529 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4530 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4532 wr32(hw, I40E_QINT_RQCTL(qp), val);
4534 val = rd32(hw, I40E_QINT_TQCTL(qp));
4536 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4537 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4539 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4540 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4541 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4542 I40E_QINT_TQCTL_INTEVENT_MASK);
4544 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4545 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4547 wr32(hw, I40E_QINT_TQCTL(qp), val);
4552 free_irq(pf->pdev->irq, pf);
4554 val = rd32(hw, I40E_PFINT_LNKLST0);
4555 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4556 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4557 val |= I40E_QUEUE_END_OF_LIST
4558 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4559 wr32(hw, I40E_PFINT_LNKLST0, val);
4561 val = rd32(hw, I40E_QINT_RQCTL(qp));
4562 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4563 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4564 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4565 I40E_QINT_RQCTL_INTEVENT_MASK);
4567 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4568 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4570 wr32(hw, I40E_QINT_RQCTL(qp), val);
4572 val = rd32(hw, I40E_QINT_TQCTL(qp));
4574 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4575 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4576 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4577 I40E_QINT_TQCTL_INTEVENT_MASK);
4579 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4580 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4582 wr32(hw, I40E_QINT_TQCTL(qp), val);
4587 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4588 * @vsi: the VSI being configured
4589 * @v_idx: Index of vector to be freed
4591 * This function frees the memory allocated to the q_vector. In addition if
4592 * NAPI is enabled it will delete any references to the NAPI struct prior
4593 * to freeing the q_vector.
4595 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4597 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4598 struct i40e_ring *ring;
4603 /* disassociate q_vector from rings */
4604 i40e_for_each_ring(ring, q_vector->tx)
4605 ring->q_vector = NULL;
4607 i40e_for_each_ring(ring, q_vector->rx)
4608 ring->q_vector = NULL;
4610 /* only VSI w/ an associated netdev is set up w/ NAPI */
4612 netif_napi_del(&q_vector->napi);
4614 vsi->q_vectors[v_idx] = NULL;
4616 kfree_rcu(q_vector, rcu);
4620 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4621 * @vsi: the VSI being un-configured
4623 * This frees the memory allocated to the q_vectors and
4624 * deletes references to the NAPI struct.
4626 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4630 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4631 i40e_free_q_vector(vsi, v_idx);
4635 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4636 * @pf: board private structure
4638 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4640 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4641 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4642 pci_disable_msix(pf->pdev);
4643 kfree(pf->msix_entries);
4644 pf->msix_entries = NULL;
4645 kfree(pf->irq_pile);
4646 pf->irq_pile = NULL;
4647 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4648 pci_disable_msi(pf->pdev);
4650 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4654 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4655 * @pf: board private structure
4657 * We go through and clear interrupt specific resources and reset the structure
4658 * to pre-load conditions
4660 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4664 i40e_free_misc_vector(pf);
4666 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4667 I40E_IWARP_IRQ_PILE_ID);
4669 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4670 for (i = 0; i < pf->num_alloc_vsi; i++)
4672 i40e_vsi_free_q_vectors(pf->vsi[i]);
4673 i40e_reset_interrupt_capability(pf);
4677 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4678 * @vsi: the VSI being configured
4680 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4687 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4688 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4690 if (q_vector->rx.ring || q_vector->tx.ring)
4691 napi_enable(&q_vector->napi);
4696 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4697 * @vsi: the VSI being configured
4699 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4706 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4707 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4709 if (q_vector->rx.ring || q_vector->tx.ring)
4710 napi_disable(&q_vector->napi);
4715 * i40e_vsi_close - Shut down a VSI
4716 * @vsi: the vsi to be quelled
4718 static void i40e_vsi_close(struct i40e_vsi *vsi)
4720 struct i40e_pf *pf = vsi->back;
4721 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4723 i40e_vsi_free_irq(vsi);
4724 i40e_vsi_free_tx_resources(vsi);
4725 i40e_vsi_free_rx_resources(vsi);
4726 vsi->current_netdev_flags = 0;
4727 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
4728 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4729 pf->flags |= I40E_FLAG_CLIENT_RESET;
4733 * i40e_quiesce_vsi - Pause a given VSI
4734 * @vsi: the VSI being paused
4736 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4738 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4741 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4742 if (vsi->netdev && netif_running(vsi->netdev))
4743 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4745 i40e_vsi_close(vsi);
4749 * i40e_unquiesce_vsi - Resume a given VSI
4750 * @vsi: the VSI being resumed
4752 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4754 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4757 if (vsi->netdev && netif_running(vsi->netdev))
4758 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4760 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4764 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4767 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4771 for (v = 0; v < pf->num_alloc_vsi; v++) {
4773 i40e_quiesce_vsi(pf->vsi[v]);
4778 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4781 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4785 for (v = 0; v < pf->num_alloc_vsi; v++) {
4787 i40e_unquiesce_vsi(pf->vsi[v]);
4792 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4793 * @vsi: the VSI being configured
4795 * Wait until all queues on a given VSI have been disabled.
4797 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4799 struct i40e_pf *pf = vsi->back;
4802 pf_q = vsi->base_queue;
4803 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4804 /* Check and wait for the Tx queue */
4805 ret = i40e_pf_txq_wait(pf, pf_q, false);
4807 dev_info(&pf->pdev->dev,
4808 "VSI seid %d Tx ring %d disable timeout\n",
4813 if (!i40e_enabled_xdp_vsi(vsi))
4816 /* Check and wait for the XDP Tx queue */
4817 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4820 dev_info(&pf->pdev->dev,
4821 "VSI seid %d XDP Tx ring %d disable timeout\n",
4826 /* Check and wait for the Rx queue */
4827 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4829 dev_info(&pf->pdev->dev,
4830 "VSI seid %d Rx ring %d disable timeout\n",
4839 #ifdef CONFIG_I40E_DCB
4841 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4844 * This function waits for the queues to be in disabled state for all the
4845 * VSIs that are managed by this PF.
4847 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4851 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4853 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4865 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
4866 * @q_idx: TX queue number
4867 * @vsi: Pointer to VSI struct
4869 * This function checks specified queue for given VSI. Detects hung condition.
4870 * We proactively detect hung TX queues by checking if interrupts are disabled
4871 * but there are pending descriptors. If it appears hung, attempt to recover
4872 * by triggering a SW interrupt.
4874 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4876 struct i40e_ring *tx_ring = NULL;
4878 u32 val, tx_pending;
4883 /* now that we have an index, find the tx_ring struct */
4884 for (i = 0; i < vsi->num_queue_pairs; i++) {
4885 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4886 if (q_idx == vsi->tx_rings[i]->queue_index) {
4887 tx_ring = vsi->tx_rings[i];
4896 /* Read interrupt register */
4897 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4899 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4900 tx_ring->vsi->base_vector - 1));
4902 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4904 tx_pending = i40e_get_tx_pending(tx_ring);
4906 /* Interrupts are disabled and TX pending is non-zero,
4907 * trigger the SW interrupt (don't wait). Worst case
4908 * there will be one extra interrupt which may result
4909 * into not cleaning any queues because queues are cleaned.
4911 if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
4912 i40e_force_wb(vsi, tx_ring->q_vector);
4916 * i40e_detect_recover_hung - Function to detect and recover hung_queues
4917 * @pf: pointer to PF struct
4919 * LAN VSI has netdev and netdev has TX queues. This function is to check
4920 * each of those TX queues if they are hung, trigger recovery by issuing
4923 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4925 struct net_device *netdev;
4926 struct i40e_vsi *vsi;
4929 /* Only for LAN VSI */
4930 vsi = pf->vsi[pf->lan_vsi];
4935 /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4936 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
4937 test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
4940 /* Make sure type is MAIN VSI */
4941 if (vsi->type != I40E_VSI_MAIN)
4944 netdev = vsi->netdev;
4948 /* Bail out if netif_carrier is not OK */
4949 if (!netif_carrier_ok(netdev))
4952 /* Go thru' TX queues for netdev */
4953 for (i = 0; i < netdev->num_tx_queues; i++) {
4954 struct netdev_queue *q;
4956 q = netdev_get_tx_queue(netdev, i);
4958 i40e_detect_recover_hung_queue(i, vsi);
4963 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4964 * @pf: pointer to PF
4966 * Get TC map for ISCSI PF type that will include iSCSI TC
4969 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4971 struct i40e_dcb_app_priority_table app;
4972 struct i40e_hw *hw = &pf->hw;
4973 u8 enabled_tc = 1; /* TC0 is always enabled */
4975 /* Get the iSCSI APP TLV */
4976 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4978 for (i = 0; i < dcbcfg->numapps; i++) {
4979 app = dcbcfg->app[i];
4980 if (app.selector == I40E_APP_SEL_TCPIP &&
4981 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4982 tc = dcbcfg->etscfg.prioritytable[app.priority];
4983 enabled_tc |= BIT(tc);
4992 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4993 * @dcbcfg: the corresponding DCBx configuration structure
4995 * Return the number of TCs from given DCBx configuration
4997 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4999 int i, tc_unused = 0;
5003 /* Scan the ETS Config Priority Table to find
5004 * traffic class enabled for a given priority
5005 * and create a bitmask of enabled TCs
5007 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5008 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5010 /* Now scan the bitmask to check for
5011 * contiguous TCs starting with TC0
5013 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5014 if (num_tc & BIT(i)) {
5018 pr_err("Non-contiguous TC - Disabling DCB\n");
5026 /* There is always at least TC0 */
5034 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5035 * @dcbcfg: the corresponding DCBx configuration structure
5037 * Query the current DCB configuration and return the number of
5038 * traffic classes enabled from the given DCBX config
5040 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5042 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5046 for (i = 0; i < num_tc; i++)
5047 enabled_tc |= BIT(i);
5053 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5054 * @pf: PF being queried
5056 * Query the current MQPRIO configuration and return the number of
5057 * traffic classes enabled.
5059 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5061 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5062 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5063 u8 enabled_tc = 1, i;
5065 for (i = 1; i < num_tc; i++)
5066 enabled_tc |= BIT(i);
5071 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5072 * @pf: PF being queried
5074 * Return number of traffic classes enabled for the given PF
5076 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5078 struct i40e_hw *hw = &pf->hw;
5079 u8 i, enabled_tc = 1;
5081 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5083 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5084 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5086 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5087 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5090 /* SFP mode will be enabled for all TCs on port */
5091 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5092 return i40e_dcb_get_num_tc(dcbcfg);
5094 /* MFP mode return count of enabled TCs for this PF */
5095 if (pf->hw.func_caps.iscsi)
5096 enabled_tc = i40e_get_iscsi_tc_map(pf);
5098 return 1; /* Only TC0 */
5100 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5101 if (enabled_tc & BIT(i))
5108 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
5109 * @pf: PF being queried
5111 * Return a bitmap for enabled traffic classes for this PF.
5113 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5115 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5116 return i40e_mqprio_get_enabled_tc(pf);
5118 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5121 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5122 return I40E_DEFAULT_TRAFFIC_CLASS;
5124 /* SFP mode we want PF to be enabled for all TCs */
5125 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5126 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5128 /* MFP enabled and iSCSI PF type */
5129 if (pf->hw.func_caps.iscsi)
5130 return i40e_get_iscsi_tc_map(pf);
5132 return I40E_DEFAULT_TRAFFIC_CLASS;
5136 * i40e_vsi_get_bw_info - Query VSI BW Information
5137 * @vsi: the VSI being queried
5139 * Returns 0 on success, negative value on failure
5141 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5143 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5144 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5145 struct i40e_pf *pf = vsi->back;
5146 struct i40e_hw *hw = &pf->hw;
5151 /* Get the VSI level BW configuration */
5152 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5154 dev_info(&pf->pdev->dev,
5155 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5156 i40e_stat_str(&pf->hw, ret),
5157 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5161 /* Get the VSI level BW configuration per TC */
5162 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5165 dev_info(&pf->pdev->dev,
5166 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5167 i40e_stat_str(&pf->hw, ret),
5168 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5172 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5173 dev_info(&pf->pdev->dev,
5174 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5175 bw_config.tc_valid_bits,
5176 bw_ets_config.tc_valid_bits);
5177 /* Still continuing */
5180 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5181 vsi->bw_max_quanta = bw_config.max_bw;
5182 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5183 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5184 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5185 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5186 vsi->bw_ets_limit_credits[i] =
5187 le16_to_cpu(bw_ets_config.credits[i]);
5188 /* 3 bits out of 4 for each TC */
5189 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5196 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5197 * @vsi: the VSI being configured
5198 * @enabled_tc: TC bitmap
5199 * @bw_credits: BW shared credits per TC
5201 * Returns 0 on success, negative value on failure
5203 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5206 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5210 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
5212 if (!vsi->mqprio_qopt.qopt.hw) {
5213 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5215 dev_info(&vsi->back->pdev->dev,
5216 "Failed to reset tx rate for vsi->seid %u\n",
5220 bw_data.tc_valid_bits = enabled_tc;
5221 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5222 bw_data.tc_bw_credits[i] = bw_share[i];
5224 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
5227 dev_info(&vsi->back->pdev->dev,
5228 "AQ command Config VSI BW allocation per TC failed = %d\n",
5229 vsi->back->hw.aq.asq_last_status);
5233 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5234 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5240 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5241 * @vsi: the VSI being configured
5242 * @enabled_tc: TC map to be enabled
5245 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5247 struct net_device *netdev = vsi->netdev;
5248 struct i40e_pf *pf = vsi->back;
5249 struct i40e_hw *hw = &pf->hw;
5252 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5258 netdev_reset_tc(netdev);
5262 /* Set up actual enabled TCs on the VSI */
5263 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5266 /* set per TC queues for the VSI */
5267 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5268 /* Only set TC queues for enabled tcs
5270 * e.g. For a VSI that has TC0 and TC3 enabled the
5271 * enabled_tc bitmap would be 0x00001001; the driver
5272 * will set the numtc for netdev as 2 that will be
5273 * referenced by the netdev layer as TC 0 and 1.
5275 if (vsi->tc_config.enabled_tc & BIT(i))
5276 netdev_set_tc_queue(netdev,
5277 vsi->tc_config.tc_info[i].netdev_tc,
5278 vsi->tc_config.tc_info[i].qcount,
5279 vsi->tc_config.tc_info[i].qoffset);
5282 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5285 /* Assign UP2TC map for the VSI */
5286 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5287 /* Get the actual TC# for the UP */
5288 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5289 /* Get the mapped netdev TC# for the UP */
5290 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5291 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5296 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
5297 * @vsi: the VSI being configured
5298 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5300 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5301 struct i40e_vsi_context *ctxt)
5303 /* copy just the sections touched not the entire info
5304 * since not all sections are valid as returned by
5307 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5308 memcpy(&vsi->info.queue_mapping,
5309 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5310 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5311 sizeof(vsi->info.tc_mapping));
5315 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5316 * @vsi: VSI to be configured
5317 * @enabled_tc: TC bitmap
5319 * This configures a particular VSI for TCs that are mapped to the
5320 * given TC bitmap. It uses default bandwidth share for TCs across
5321 * VSIs to configure TC for a particular VSI.
5324 * It is expected that the VSI queues have been quisced before calling
5327 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5329 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5330 struct i40e_vsi_context ctxt;
5334 /* Check if enabled_tc is same as existing or new TCs */
5335 if (vsi->tc_config.enabled_tc == enabled_tc &&
5336 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5339 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5340 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5341 if (enabled_tc & BIT(i))
5345 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5347 dev_info(&vsi->back->pdev->dev,
5348 "Failed configuring TC map %d for VSI %d\n",
5349 enabled_tc, vsi->seid);
5353 /* Update Queue Pairs Mapping for currently enabled UPs */
5354 ctxt.seid = vsi->seid;
5355 ctxt.pf_num = vsi->back->hw.pf_id;
5357 ctxt.uplink_seid = vsi->uplink_seid;
5358 ctxt.info = vsi->info;
5359 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5360 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5364 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5367 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled
5370 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5371 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5372 vsi->num_queue_pairs);
5373 ret = i40e_vsi_config_rss(vsi);
5375 dev_info(&vsi->back->pdev->dev,
5376 "Failed to reconfig rss for num_queues\n");
5379 vsi->reconfig_rss = false;
5381 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5382 ctxt.info.valid_sections |=
5383 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5384 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5387 /* Update the VSI after updating the VSI queue-mapping
5390 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
5392 dev_info(&vsi->back->pdev->dev,
5393 "Update vsi tc config failed, err %s aq_err %s\n",
5394 i40e_stat_str(&vsi->back->hw, ret),
5395 i40e_aq_str(&vsi->back->hw,
5396 vsi->back->hw.aq.asq_last_status));
5399 /* update the local VSI info with updated queue map */
5400 i40e_vsi_update_queue_map(vsi, &ctxt);
5401 vsi->info.valid_sections = 0;
5403 /* Update current VSI BW information */
5404 ret = i40e_vsi_get_bw_info(vsi);
5406 dev_info(&vsi->back->pdev->dev,
5407 "Failed updating vsi bw info, err %s aq_err %s\n",
5408 i40e_stat_str(&vsi->back->hw, ret),
5409 i40e_aq_str(&vsi->back->hw,
5410 vsi->back->hw.aq.asq_last_status));
5414 /* Update the netdev TC setup */
5415 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5421 * i40e_get_link_speed - Returns link speed for the interface
5422 * @vsi: VSI to be configured
5425 int i40e_get_link_speed(struct i40e_vsi *vsi)
5427 struct i40e_pf *pf = vsi->back;
5429 switch (pf->hw.phy.link_info.link_speed) {
5430 case I40E_LINK_SPEED_40GB:
5432 case I40E_LINK_SPEED_25GB:
5434 case I40E_LINK_SPEED_20GB:
5436 case I40E_LINK_SPEED_10GB:
5438 case I40E_LINK_SPEED_1GB:
5446 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5447 * @vsi: VSI to be configured
5448 * @seid: seid of the channel/VSI
5449 * @max_tx_rate: max TX rate to be configured as BW limit
5451 * Helper function to set BW limit for a given VSI
5453 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5455 struct i40e_pf *pf = vsi->back;
5460 speed = i40e_get_link_speed(vsi);
5461 if (max_tx_rate > speed) {
5462 dev_err(&pf->pdev->dev,
5463 "Invalid max tx rate %llu specified for VSI seid %d.",
5467 if (max_tx_rate && max_tx_rate < 50) {
5468 dev_warn(&pf->pdev->dev,
5469 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5473 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5474 credits = max_tx_rate;
5475 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5476 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5477 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5479 dev_err(&pf->pdev->dev,
5480 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5481 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5482 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5487 * i40e_remove_queue_channels - Remove queue channels for the TCs
5488 * @vsi: VSI to be configured
5490 * Remove queue channels for the TCs
5492 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5494 enum i40e_admin_queue_err last_aq_status;
5495 struct i40e_cloud_filter *cfilter;
5496 struct i40e_channel *ch, *ch_tmp;
5497 struct i40e_pf *pf = vsi->back;
5498 struct hlist_node *node;
5501 /* Reset rss size that was stored when reconfiguring rss for
5502 * channel VSIs with non-power-of-2 queue count.
5504 vsi->current_rss_size = 0;
5506 /* perform cleanup for channels if they exist */
5507 if (list_empty(&vsi->ch_list))
5510 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5511 struct i40e_vsi *p_vsi;
5513 list_del(&ch->list);
5514 p_vsi = ch->parent_vsi;
5515 if (!p_vsi || !ch->initialized) {
5519 /* Reset queue contexts */
5520 for (i = 0; i < ch->num_queue_pairs; i++) {
5521 struct i40e_ring *tx_ring, *rx_ring;
5524 pf_q = ch->base_queue + i;
5525 tx_ring = vsi->tx_rings[pf_q];
5528 rx_ring = vsi->rx_rings[pf_q];
5532 /* Reset BW configured for this VSI via mqprio */
5533 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5535 dev_info(&vsi->back->pdev->dev,
5536 "Failed to reset tx rate for ch->seid %u\n",
5539 /* delete cloud filters associated with this channel */
5540 hlist_for_each_entry_safe(cfilter, node,
5541 &pf->cloud_filter_list, cloud_node) {
5542 if (cfilter->seid != ch->seid)
5545 hash_del(&cfilter->cloud_node);
5546 if (cfilter->dst_port)
5547 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5551 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5553 last_aq_status = pf->hw.aq.asq_last_status;
5555 dev_info(&pf->pdev->dev,
5556 "Failed to delete cloud filter, err %s aq_err %s\n",
5557 i40e_stat_str(&pf->hw, ret),
5558 i40e_aq_str(&pf->hw, last_aq_status));
5562 /* delete VSI from FW */
5563 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5566 dev_err(&vsi->back->pdev->dev,
5567 "unable to remove channel (%d) for parent VSI(%d)\n",
5568 ch->seid, p_vsi->seid);
5571 INIT_LIST_HEAD(&vsi->ch_list);
5575 * i40e_is_any_channel - channel exist or not
5576 * @vsi: ptr to VSI to which channels are associated with
5578 * Returns true or false if channel(s) exist for associated VSI or not
5580 static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5582 struct i40e_channel *ch, *ch_tmp;
5584 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5585 if (ch->initialized)
5593 * i40e_get_max_queues_for_channel
5594 * @vsi: ptr to VSI to which channels are associated with
5596 * Helper function which returns max value among the queue counts set on the
5597 * channels/TCs created.
5599 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5601 struct i40e_channel *ch, *ch_tmp;
5604 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5605 if (!ch->initialized)
5607 if (ch->num_queue_pairs > max)
5608 max = ch->num_queue_pairs;
5615 * i40e_validate_num_queues - validate num_queues w.r.t channel
5616 * @pf: ptr to PF device
5617 * @num_queues: number of queues
5618 * @vsi: the parent VSI
5619 * @reconfig_rss: indicates should the RSS be reconfigured or not
5621 * This function validates number of queues in the context of new channel
5622 * which is being established and determines if RSS should be reconfigured
5623 * or not for parent VSI.
5625 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5626 struct i40e_vsi *vsi, bool *reconfig_rss)
5633 *reconfig_rss = false;
5634 if (vsi->current_rss_size) {
5635 if (num_queues > vsi->current_rss_size) {
5636 dev_dbg(&pf->pdev->dev,
5637 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5638 num_queues, vsi->current_rss_size);
5640 } else if ((num_queues < vsi->current_rss_size) &&
5641 (!is_power_of_2(num_queues))) {
5642 dev_dbg(&pf->pdev->dev,
5643 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5644 num_queues, vsi->current_rss_size);
5649 if (!is_power_of_2(num_queues)) {
5650 /* Find the max num_queues configured for channel if channel
5652 * if channel exist, then enforce 'num_queues' to be more than
5653 * max ever queues configured for channel.
5655 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5656 if (num_queues < max_ch_queues) {
5657 dev_dbg(&pf->pdev->dev,
5658 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5659 num_queues, max_ch_queues);
5662 *reconfig_rss = true;
5669 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5670 * @vsi: the VSI being setup
5671 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
5673 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5675 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5677 struct i40e_pf *pf = vsi->back;
5678 u8 seed[I40E_HKEY_ARRAY_SIZE];
5679 struct i40e_hw *hw = &pf->hw;
5687 if (rss_size > vsi->rss_size)
5690 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5691 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5695 /* Ignoring user configured lut if there is one */
5696 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5698 /* Use user configured hash key if there is one, otherwise
5701 if (vsi->rss_hkey_user)
5702 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5704 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5706 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5708 dev_info(&pf->pdev->dev,
5709 "Cannot set RSS lut, err %s aq_err %s\n",
5710 i40e_stat_str(hw, ret),
5711 i40e_aq_str(hw, hw->aq.asq_last_status));
5717 /* Do the update w.r.t. storing rss_size */
5718 if (!vsi->orig_rss_size)
5719 vsi->orig_rss_size = vsi->rss_size;
5720 vsi->current_rss_size = local_rss_size;
5726 * i40e_channel_setup_queue_map - Setup a channel queue map
5727 * @pf: ptr to PF device
5728 * @vsi: the VSI being setup
5729 * @ctxt: VSI context structure
5730 * @ch: ptr to channel structure
5732 * Setup queue map for a specific channel
5734 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5735 struct i40e_vsi_context *ctxt,
5736 struct i40e_channel *ch)
5738 u16 qcount, qmap, sections = 0;
5742 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5743 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5745 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5746 ch->num_queue_pairs = qcount;
5748 /* find the next higher power-of-2 of num queue pairs */
5749 pow = ilog2(qcount);
5750 if (!is_power_of_2(qcount))
5753 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5754 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5756 /* Setup queue TC[0].qmap for given VSI context */
5757 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5759 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
5760 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5761 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5762 ctxt->info.valid_sections |= cpu_to_le16(sections);
5766 * i40e_add_channel - add a channel by adding VSI
5767 * @pf: ptr to PF device
5768 * @uplink_seid: underlying HW switching element (VEB) ID
5769 * @ch: ptr to channel structure
5771 * Add a channel (VSI) using add_vsi and queue_map
5773 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5774 struct i40e_channel *ch)
5776 struct i40e_hw *hw = &pf->hw;
5777 struct i40e_vsi_context ctxt;
5778 u8 enabled_tc = 0x1; /* TC0 enabled */
5781 if (ch->type != I40E_VSI_VMDQ2) {
5782 dev_info(&pf->pdev->dev,
5783 "add new vsi failed, ch->type %d\n", ch->type);
5787 memset(&ctxt, 0, sizeof(ctxt));
5788 ctxt.pf_num = hw->pf_id;
5790 ctxt.uplink_seid = uplink_seid;
5791 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5792 if (ch->type == I40E_VSI_VMDQ2)
5793 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5795 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5796 ctxt.info.valid_sections |=
5797 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5798 ctxt.info.switch_id =
5799 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5802 /* Set queue map for a given VSI context */
5803 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5805 /* Now time to create VSI */
5806 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5808 dev_info(&pf->pdev->dev,
5809 "add new vsi failed, err %s aq_err %s\n",
5810 i40e_stat_str(&pf->hw, ret),
5811 i40e_aq_str(&pf->hw,
5812 pf->hw.aq.asq_last_status));
5816 /* Success, update channel */
5817 ch->enabled_tc = enabled_tc;
5818 ch->seid = ctxt.seid;
5819 ch->vsi_number = ctxt.vsi_number;
5820 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5822 /* copy just the sections touched not the entire info
5823 * since not all sections are valid as returned by
5826 ch->info.mapping_flags = ctxt.info.mapping_flags;
5827 memcpy(&ch->info.queue_mapping,
5828 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5829 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5830 sizeof(ctxt.info.tc_mapping));
5835 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5838 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5842 bw_data.tc_valid_bits = ch->enabled_tc;
5843 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5844 bw_data.tc_bw_credits[i] = bw_share[i];
5846 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5849 dev_info(&vsi->back->pdev->dev,
5850 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5851 vsi->back->hw.aq.asq_last_status, ch->seid);
5855 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5856 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5862 * i40e_channel_config_tx_ring - config TX ring associated with new channel
5863 * @pf: ptr to PF device
5864 * @vsi: the VSI being setup
5865 * @ch: ptr to channel structure
5867 * Configure TX rings associated with channel (VSI) since queues are being
5870 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5871 struct i40e_vsi *vsi,
5872 struct i40e_channel *ch)
5876 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5878 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5879 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5880 if (ch->enabled_tc & BIT(i))
5884 /* configure BW for new VSI */
5885 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5887 dev_info(&vsi->back->pdev->dev,
5888 "Failed configuring TC map %d for channel (seid %u)\n",
5889 ch->enabled_tc, ch->seid);
5893 for (i = 0; i < ch->num_queue_pairs; i++) {
5894 struct i40e_ring *tx_ring, *rx_ring;
5897 pf_q = ch->base_queue + i;
5899 /* Get to TX ring ptr of main VSI, for re-setup TX queue
5902 tx_ring = vsi->tx_rings[pf_q];
5905 /* Get the RX ring ptr */
5906 rx_ring = vsi->rx_rings[pf_q];
5914 * i40e_setup_hw_channel - setup new channel
5915 * @pf: ptr to PF device
5916 * @vsi: the VSI being setup
5917 * @ch: ptr to channel structure
5918 * @uplink_seid: underlying HW switching element (VEB) ID
5919 * @type: type of channel to be created (VMDq2/VF)
5921 * Setup new channel (VSI) based on specified type (VMDq2/VF)
5922 * and configures TX rings accordingly
5924 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
5925 struct i40e_vsi *vsi,
5926 struct i40e_channel *ch,
5927 u16 uplink_seid, u8 type)
5931 ch->initialized = false;
5932 ch->base_queue = vsi->next_base_queue;
5935 /* Proceed with creation of channel (VMDq2) VSI */
5936 ret = i40e_add_channel(pf, uplink_seid, ch);
5938 dev_info(&pf->pdev->dev,
5939 "failed to add_channel using uplink_seid %u\n",
5944 /* Mark the successful creation of channel */
5945 ch->initialized = true;
5947 /* Reconfigure TX queues using QTX_CTL register */
5948 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
5950 dev_info(&pf->pdev->dev,
5951 "failed to configure TX rings for channel %u\n",
5956 /* update 'next_base_queue' */
5957 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
5958 dev_dbg(&pf->pdev->dev,
5959 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
5960 ch->seid, ch->vsi_number, ch->stat_counter_idx,
5961 ch->num_queue_pairs,
5962 vsi->next_base_queue);
5967 * i40e_setup_channel - setup new channel using uplink element
5968 * @pf: ptr to PF device
5969 * @type: type of channel to be created (VMDq2/VF)
5970 * @uplink_seid: underlying HW switching element (VEB) ID
5971 * @ch: ptr to channel structure
5973 * Setup new channel (VSI) based on specified type (VMDq2/VF)
5974 * and uplink switching element (uplink_seid)
5976 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
5977 struct i40e_channel *ch)
5983 if (vsi->type == I40E_VSI_MAIN) {
5984 vsi_type = I40E_VSI_VMDQ2;
5986 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
5991 /* underlying switching element */
5992 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
5994 /* create channel (VSI), configure TX rings */
5995 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
5997 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6001 return ch->initialized ? true : false;
6005 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6006 * @vsi: ptr to VSI which has PF backing
6008 * Sets up switch mode correctly if it needs to be changed and perform
6009 * what are allowed modes.
6011 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6014 struct i40e_pf *pf = vsi->back;
6015 struct i40e_hw *hw = &pf->hw;
6018 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6022 if (hw->dev_caps.switch_mode) {
6023 /* if switch mode is set, support mode2 (non-tunneled for
6024 * cloud filter) for now
6026 u32 switch_mode = hw->dev_caps.switch_mode &
6027 I40E_SWITCH_MODE_MASK;
6028 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6029 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6031 dev_err(&pf->pdev->dev,
6032 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6033 hw->dev_caps.switch_mode);
6038 /* Set Bit 7 to be valid */
6039 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6041 /* Set L4type to both TCP and UDP support */
6042 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
6044 /* Set cloud filter mode */
6045 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6047 /* Prep mode field for set_switch_config */
6048 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6049 pf->last_sw_conf_valid_flags,
6051 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6052 dev_err(&pf->pdev->dev,
6053 "couldn't set switch config bits, err %s aq_err %s\n",
6054 i40e_stat_str(hw, ret),
6056 hw->aq.asq_last_status));
6062 * i40e_create_queue_channel - function to create channel
6063 * @vsi: VSI to be configured
6064 * @ch: ptr to channel (it contains channel specific params)
6066 * This function creates channel (VSI) using num_queues specified by user,
6067 * reconfigs RSS if needed.
6069 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6070 struct i40e_channel *ch)
6072 struct i40e_pf *pf = vsi->back;
6079 if (!ch->num_queue_pairs) {
6080 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6081 ch->num_queue_pairs);
6085 /* validate user requested num_queues for channel */
6086 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6089 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6090 ch->num_queue_pairs);
6094 /* By default we are in VEPA mode, if this is the first VF/VMDq
6095 * VSI to be added switch to VEB mode.
6097 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6098 (!i40e_is_any_channel(vsi))) {
6099 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6100 dev_dbg(&pf->pdev->dev,
6101 "Failed to create channel. Override queues (%u) not power of 2\n",
6102 vsi->tc_config.tc_info[0].qcount);
6106 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6107 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6109 if (vsi->type == I40E_VSI_MAIN) {
6110 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6111 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6114 i40e_do_reset_safe(pf,
6115 I40E_PF_RESET_FLAG);
6118 /* now onwards for main VSI, number of queues will be value
6119 * of TC0's queue count
6123 /* By this time, vsi->cnt_q_avail shall be set to non-zero and
6124 * it should be more than num_queues
6126 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6127 dev_dbg(&pf->pdev->dev,
6128 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6129 vsi->cnt_q_avail, ch->num_queue_pairs);
6133 /* reconfig_rss only if vsi type is MAIN_VSI */
6134 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6135 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6137 dev_info(&pf->pdev->dev,
6138 "Error: unable to reconfig rss for num_queues (%u)\n",
6139 ch->num_queue_pairs);
6144 if (!i40e_setup_channel(pf, vsi, ch)) {
6145 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6149 dev_info(&pf->pdev->dev,
6150 "Setup channel (id:%u) utilizing num_queues %d\n",
6151 ch->seid, ch->num_queue_pairs);
6153 /* configure VSI for BW limit */
6154 if (ch->max_tx_rate) {
6155 u64 credits = ch->max_tx_rate;
6157 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6160 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6161 dev_dbg(&pf->pdev->dev,
6162 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6168 /* in case of VF, this will be main SRIOV VSI */
6169 ch->parent_vsi = vsi;
6171 /* and update main_vsi's count for queue_available to use */
6172 vsi->cnt_q_avail -= ch->num_queue_pairs;
6178 * i40e_configure_queue_channels - Add queue channel for the given TCs
6179 * @vsi: VSI to be configured
6181 * Configures queue channel mapping to the given TCs
6183 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6185 struct i40e_channel *ch;
6189 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6190 vsi->tc_seid_map[0] = vsi->seid;
6191 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6192 if (vsi->tc_config.enabled_tc & BIT(i)) {
6193 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6199 INIT_LIST_HEAD(&ch->list);
6200 ch->num_queue_pairs =
6201 vsi->tc_config.tc_info[i].qcount;
6203 vsi->tc_config.tc_info[i].qoffset;
6205 /* Bandwidth limit through tc interface is in bytes/s,
6208 max_rate = vsi->mqprio_qopt.max_rate[i];
6209 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6210 ch->max_tx_rate = max_rate;
6212 list_add_tail(&ch->list, &vsi->ch_list);
6214 ret = i40e_create_queue_channel(vsi, ch);
6216 dev_err(&vsi->back->pdev->dev,
6217 "Failed creating queue channel with TC%d: queues %d\n",
6218 i, ch->num_queue_pairs);
6221 vsi->tc_seid_map[i] = ch->seid;
6227 i40e_remove_queue_channels(vsi);
6232 * i40e_veb_config_tc - Configure TCs for given VEB
6234 * @enabled_tc: TC bitmap
6236 * Configures given TC bitmap for VEB (switching) element
6238 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6240 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6241 struct i40e_pf *pf = veb->pf;
6245 /* No TCs or already enabled TCs just return */
6246 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6249 bw_data.tc_valid_bits = enabled_tc;
6250 /* bw_data.absolute_credits is not set (relative) */
6252 /* Enable ETS TCs with equal BW Share for now */
6253 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6254 if (enabled_tc & BIT(i))
6255 bw_data.tc_bw_share_credits[i] = 1;
6258 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6261 dev_info(&pf->pdev->dev,
6262 "VEB bw config failed, err %s aq_err %s\n",
6263 i40e_stat_str(&pf->hw, ret),
6264 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6268 /* Update the BW information */
6269 ret = i40e_veb_get_bw_info(veb);
6271 dev_info(&pf->pdev->dev,
6272 "Failed getting veb bw config, err %s aq_err %s\n",
6273 i40e_stat_str(&pf->hw, ret),
6274 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6281 #ifdef CONFIG_I40E_DCB
6283 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6286 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6287 * the caller would've quiesce all the VSIs before calling
6290 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6296 /* Enable the TCs available on PF to all VEBs */
6297 tc_map = i40e_pf_get_tc_map(pf);
6298 for (v = 0; v < I40E_MAX_VEB; v++) {
6301 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6303 dev_info(&pf->pdev->dev,
6304 "Failed configuring TC for VEB seid=%d\n",
6306 /* Will try to configure as many components */
6310 /* Update each VSI */
6311 for (v = 0; v < pf->num_alloc_vsi; v++) {
6315 /* - Enable all TCs for the LAN VSI
6316 * - For all others keep them at TC0 for now
6318 if (v == pf->lan_vsi)
6319 tc_map = i40e_pf_get_tc_map(pf);
6321 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6323 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6325 dev_info(&pf->pdev->dev,
6326 "Failed configuring TC for VSI seid=%d\n",
6328 /* Will try to configure as many components */
6330 /* Re-configure VSI vectors based on updated TC map */
6331 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6332 if (pf->vsi[v]->netdev)
6333 i40e_dcbnl_set_all(pf->vsi[v]);
6339 * i40e_resume_port_tx - Resume port Tx
6342 * Resume a port's Tx and issue a PF reset in case of failure to
6345 static int i40e_resume_port_tx(struct i40e_pf *pf)
6347 struct i40e_hw *hw = &pf->hw;
6350 ret = i40e_aq_resume_port_tx(hw, NULL);
6352 dev_info(&pf->pdev->dev,
6353 "Resume Port Tx failed, err %s aq_err %s\n",
6354 i40e_stat_str(&pf->hw, ret),
6355 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6356 /* Schedule PF reset to recover */
6357 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6358 i40e_service_event_schedule(pf);
6365 * i40e_init_pf_dcb - Initialize DCB configuration
6366 * @pf: PF being configured
6368 * Query the current DCB configuration and cache it
6369 * in the hardware structure
6371 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6373 struct i40e_hw *hw = &pf->hw;
6376 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
6377 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
6380 /* Get the initial DCB configuration */
6381 err = i40e_init_dcb(hw);
6383 /* Device/Function is not DCBX capable */
6384 if ((!hw->func_caps.dcb) ||
6385 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6386 dev_info(&pf->pdev->dev,
6387 "DCBX offload is not supported or is disabled for this PF.\n");
6389 /* When status is not DISABLED then DCBX in FW */
6390 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6391 DCB_CAP_DCBX_VER_IEEE;
6393 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6394 /* Enable DCB tagging only when more than one TC
6395 * or explicitly disable if only one TC
6397 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6398 pf->flags |= I40E_FLAG_DCB_ENABLED;
6400 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6401 dev_dbg(&pf->pdev->dev,
6402 "DCBX offload is supported for this PF.\n");
6405 dev_info(&pf->pdev->dev,
6406 "Query for DCB configuration failed, err %s aq_err %s\n",
6407 i40e_stat_str(&pf->hw, err),
6408 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6414 #endif /* CONFIG_I40E_DCB */
6415 #define SPEED_SIZE 14
6418 * i40e_print_link_message - print link up or down
6419 * @vsi: the VSI for which link needs a message
6421 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6423 enum i40e_aq_link_speed new_speed;
6424 struct i40e_pf *pf = vsi->back;
6425 char *speed = "Unknown";
6426 char *fc = "Unknown";
6431 new_speed = pf->hw.phy.link_info.link_speed;
6433 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6435 vsi->current_isup = isup;
6436 vsi->current_speed = new_speed;
6438 netdev_info(vsi->netdev, "NIC Link is Down\n");
6442 /* Warn user if link speed on NPAR enabled partition is not at
6445 if (pf->hw.func_caps.npar_enable &&
6446 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6447 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6448 netdev_warn(vsi->netdev,
6449 "The partition detected link speed that is less than 10Gbps\n");
6451 switch (pf->hw.phy.link_info.link_speed) {
6452 case I40E_LINK_SPEED_40GB:
6455 case I40E_LINK_SPEED_20GB:
6458 case I40E_LINK_SPEED_25GB:
6461 case I40E_LINK_SPEED_10GB:
6464 case I40E_LINK_SPEED_1GB:
6467 case I40E_LINK_SPEED_100MB:
6474 switch (pf->hw.fc.current_mode) {
6478 case I40E_FC_TX_PAUSE:
6481 case I40E_FC_RX_PAUSE:
6489 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6490 req_fec = ", Requested FEC: None";
6491 fec = ", FEC: None";
6492 an = ", Autoneg: False";
6494 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6495 an = ", Autoneg: True";
6497 if (pf->hw.phy.link_info.fec_info &
6498 I40E_AQ_CONFIG_FEC_KR_ENA)
6499 fec = ", FEC: CL74 FC-FEC/BASE-R";
6500 else if (pf->hw.phy.link_info.fec_info &
6501 I40E_AQ_CONFIG_FEC_RS_ENA)
6502 fec = ", FEC: CL108 RS-FEC";
6504 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
6505 * both RS and FC are requested
6507 if (vsi->back->hw.phy.link_info.req_fec_info &
6508 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6509 if (vsi->back->hw.phy.link_info.req_fec_info &
6510 I40E_AQ_REQUEST_FEC_RS)
6511 req_fec = ", Requested FEC: CL108 RS-FEC";
6513 req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
6517 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
6518 speed, req_fec, fec, an, fc);
6522 * i40e_up_complete - Finish the last steps of bringing up a connection
6523 * @vsi: the VSI being configured
6525 static int i40e_up_complete(struct i40e_vsi *vsi)
6527 struct i40e_pf *pf = vsi->back;
6530 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6531 i40e_vsi_configure_msix(vsi);
6533 i40e_configure_msi_and_legacy(vsi);
6536 err = i40e_vsi_start_rings(vsi);
6540 clear_bit(__I40E_VSI_DOWN, vsi->state);
6541 i40e_napi_enable_all(vsi);
6542 i40e_vsi_enable_irq(vsi);
6544 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6546 i40e_print_link_message(vsi, true);
6547 netif_tx_start_all_queues(vsi->netdev);
6548 netif_carrier_on(vsi->netdev);
6551 /* replay FDIR SB filters */
6552 if (vsi->type == I40E_VSI_FDIR) {
6553 /* reset fd counters */
6556 i40e_fdir_filter_restore(vsi);
6559 /* On the next run of the service_task, notify any clients of the new
6562 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
6563 i40e_service_event_schedule(pf);
6569 * i40e_vsi_reinit_locked - Reset the VSI
6570 * @vsi: the VSI being configured
6572 * Rebuild the ring structs after some configuration
6573 * has changed, e.g. MTU size.
6575 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6577 struct i40e_pf *pf = vsi->back;
6579 WARN_ON(in_interrupt());
6580 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6581 usleep_range(1000, 2000);
6585 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6589 * i40e_up - Bring the connection back up after being down
6590 * @vsi: the VSI being configured
6592 int i40e_up(struct i40e_vsi *vsi)
6596 err = i40e_vsi_configure(vsi);
6598 err = i40e_up_complete(vsi);
6604 * i40e_down - Shutdown the connection processing
6605 * @vsi: the VSI being stopped
6607 void i40e_down(struct i40e_vsi *vsi)
6611 /* It is assumed that the caller of this function
6612 * sets the vsi->state __I40E_VSI_DOWN bit.
6615 netif_carrier_off(vsi->netdev);
6616 netif_tx_disable(vsi->netdev);
6618 i40e_vsi_disable_irq(vsi);
6619 i40e_vsi_stop_rings(vsi);
6620 i40e_napi_disable_all(vsi);
6622 for (i = 0; i < vsi->num_queue_pairs; i++) {
6623 i40e_clean_tx_ring(vsi->tx_rings[i]);
6624 if (i40e_enabled_xdp_vsi(vsi))
6625 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6626 i40e_clean_rx_ring(vsi->rx_rings[i]);
6632 * i40e_validate_mqprio_qopt- validate queue mapping info
6633 * @vsi: the VSI being configured
6634 * @mqprio_qopt: queue parametrs
6636 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6637 struct tc_mqprio_qopt_offload *mqprio_qopt)
6639 u64 sum_max_rate = 0;
6643 if (mqprio_qopt->qopt.offset[0] != 0 ||
6644 mqprio_qopt->qopt.num_tc < 1 ||
6645 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6647 for (i = 0; ; i++) {
6648 if (!mqprio_qopt->qopt.count[i])
6650 if (mqprio_qopt->min_rate[i]) {
6651 dev_err(&vsi->back->pdev->dev,
6652 "Invalid min tx rate (greater than 0) specified\n");
6655 max_rate = mqprio_qopt->max_rate[i];
6656 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6657 sum_max_rate += max_rate;
6659 if (i >= mqprio_qopt->qopt.num_tc - 1)
6661 if (mqprio_qopt->qopt.offset[i + 1] !=
6662 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6665 if (vsi->num_queue_pairs <
6666 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6669 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6670 dev_err(&vsi->back->pdev->dev,
6671 "Invalid max tx rate specified\n");
6678 * i40e_vsi_set_default_tc_config - set default values for tc configuration
6679 * @vsi: the VSI being configured
6681 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6686 /* Only TC0 is enabled */
6687 vsi->tc_config.numtc = 1;
6688 vsi->tc_config.enabled_tc = 1;
6689 qcount = min_t(int, vsi->alloc_queue_pairs,
6690 i40e_pf_get_max_q_per_tc(vsi->back));
6691 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6692 /* For the TC that is not enabled set the offset to to default
6693 * queue and allocate one queue for the given TC.
6695 vsi->tc_config.tc_info[i].qoffset = 0;
6697 vsi->tc_config.tc_info[i].qcount = qcount;
6699 vsi->tc_config.tc_info[i].qcount = 1;
6700 vsi->tc_config.tc_info[i].netdev_tc = 0;
6705 * i40e_setup_tc - configure multiple traffic classes
6706 * @netdev: net device to configure
6707 * @type_data: tc offload data
6709 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
6711 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
6712 struct i40e_netdev_priv *np = netdev_priv(netdev);
6713 struct i40e_vsi *vsi = np->vsi;
6714 struct i40e_pf *pf = vsi->back;
6715 u8 enabled_tc = 0, num_tc, hw;
6716 bool need_reset = false;
6721 num_tc = mqprio_qopt->qopt.num_tc;
6722 hw = mqprio_qopt->qopt.hw;
6723 mode = mqprio_qopt->mode;
6725 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6726 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
6730 /* Check if MFP enabled */
6731 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6733 "Configuring TC not supported in MFP mode\n");
6737 case TC_MQPRIO_MODE_DCB:
6738 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6740 /* Check if DCB enabled to continue */
6741 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6743 "DCB is not enabled for adapter\n");
6747 /* Check whether tc count is within enabled limit */
6748 if (num_tc > i40e_pf_get_num_tc(pf)) {
6750 "TC count greater than enabled on link for adapter\n");
6754 case TC_MQPRIO_MODE_CHANNEL:
6755 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
6757 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
6760 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6762 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
6765 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
6766 sizeof(*mqprio_qopt));
6767 pf->flags |= I40E_FLAG_TC_MQPRIO;
6768 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6775 /* Generate TC map for number of tc requested */
6776 for (i = 0; i < num_tc; i++)
6777 enabled_tc |= BIT(i);
6779 /* Requesting same TC configuration as already enabled */
6780 if (enabled_tc == vsi->tc_config.enabled_tc &&
6781 mode != TC_MQPRIO_MODE_CHANNEL)
6784 /* Quiesce VSI queues */
6785 i40e_quiesce_vsi(vsi);
6787 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
6788 i40e_remove_queue_channels(vsi);
6790 /* Configure VSI for enabled TCs */
6791 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6793 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
6799 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
6800 if (vsi->mqprio_qopt.max_rate[0]) {
6801 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
6803 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
6804 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
6806 u64 credits = max_tx_rate;
6808 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6809 dev_dbg(&vsi->back->pdev->dev,
6810 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6819 ret = i40e_configure_queue_channels(vsi);
6822 "Failed configuring queue channels\n");
6829 /* Reset the configuration data to defaults, only TC0 is enabled */
6831 i40e_vsi_set_default_tc_config(vsi);
6836 i40e_unquiesce_vsi(vsi);
6841 * i40e_set_cld_element - sets cloud filter element data
6842 * @filter: cloud filter rule
6843 * @cld: ptr to cloud filter element data
6845 * This is helper function to copy data into cloud filter element
6848 i40e_set_cld_element(struct i40e_cloud_filter *filter,
6849 struct i40e_aqc_cloud_filters_element_data *cld)
6854 memset(cld, 0, sizeof(*cld));
6855 ether_addr_copy(cld->outer_mac, filter->dst_mac);
6856 ether_addr_copy(cld->inner_mac, filter->src_mac);
6858 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
6861 if (filter->n_proto == ETH_P_IPV6) {
6862 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
6863 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
6865 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
6866 ipa = cpu_to_le32(ipa);
6867 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
6870 ipa = be32_to_cpu(filter->dst_ipv4);
6871 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
6874 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
6876 /* tenant_id is not supported by FW now, once the support is enabled
6877 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
6879 if (filter->tenant_id)
6884 * i40e_add_del_cloud_filter - Add/del cloud filter
6885 * @vsi: pointer to VSI
6886 * @filter: cloud filter rule
6887 * @add: if true, add, if false, delete
6889 * Add or delete a cloud filter for a specific flow spec.
6890 * Returns 0 if the filter were successfully added.
6892 static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
6893 struct i40e_cloud_filter *filter, bool add)
6895 struct i40e_aqc_cloud_filters_element_data cld_filter;
6896 struct i40e_pf *pf = vsi->back;
6898 static const u16 flag_table[128] = {
6899 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
6900 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
6901 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
6902 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
6903 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
6904 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
6905 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
6906 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
6907 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
6908 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
6909 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
6910 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
6911 [I40E_CLOUD_FILTER_FLAGS_IIP] =
6912 I40E_AQC_ADD_CLOUD_FILTER_IIP,
6915 if (filter->flags >= ARRAY_SIZE(flag_table))
6916 return I40E_ERR_CONFIG;
6918 /* copy element needed to add cloud filter from filter */
6919 i40e_set_cld_element(filter, &cld_filter);
6921 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
6922 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
6923 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
6925 if (filter->n_proto == ETH_P_IPV6)
6926 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
6927 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
6929 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
6930 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
6933 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
6936 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
6939 dev_dbg(&pf->pdev->dev,
6940 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
6941 add ? "add" : "delete", filter->dst_port, ret,
6942 pf->hw.aq.asq_last_status);
6944 dev_info(&pf->pdev->dev,
6945 "%s cloud filter for VSI: %d\n",
6946 add ? "Added" : "Deleted", filter->seid);
6951 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
6952 * @vsi: pointer to VSI
6953 * @filter: cloud filter rule
6954 * @add: if true, add, if false, delete
6956 * Add or delete a cloud filter for a specific flow spec using big buffer.
6957 * Returns 0 if the filter were successfully added.
6959 static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6960 struct i40e_cloud_filter *filter,
6963 struct i40e_aqc_cloud_filters_element_bb cld_filter;
6964 struct i40e_pf *pf = vsi->back;
6967 /* Both (src/dst) valid mac_addr are not supported */
6968 if ((is_valid_ether_addr(filter->dst_mac) &&
6969 is_valid_ether_addr(filter->src_mac)) ||
6970 (is_multicast_ether_addr(filter->dst_mac) &&
6971 is_multicast_ether_addr(filter->src_mac)))
6974 /* Make sure port is specified, otherwise bail out, for channel
6975 * specific cloud filter needs 'L4 port' to be non-zero
6977 if (!filter->dst_port)
6980 /* adding filter using src_port/src_ip is not supported at this stage */
6981 if (filter->src_port || filter->src_ipv4 ||
6982 !ipv6_addr_any(&filter->ip.v6.src_ip6))
6985 /* copy element needed to add cloud filter from filter */
6986 i40e_set_cld_element(filter, &cld_filter.element);
6988 if (is_valid_ether_addr(filter->dst_mac) ||
6989 is_valid_ether_addr(filter->src_mac) ||
6990 is_multicast_ether_addr(filter->dst_mac) ||
6991 is_multicast_ether_addr(filter->src_mac)) {
6992 /* MAC + IP : unsupported mode */
6993 if (filter->dst_ipv4)
6996 /* since we validated that L4 port must be valid before
6997 * we get here, start with respective "flags" value
6998 * and update if vlan is present or not
7000 cld_filter.element.flags =
7001 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7003 if (filter->vlan_id) {
7004 cld_filter.element.flags =
7005 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7008 } else if (filter->dst_ipv4 ||
7009 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7010 cld_filter.element.flags =
7011 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7012 if (filter->n_proto == ETH_P_IPV6)
7013 cld_filter.element.flags |=
7014 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7016 cld_filter.element.flags |=
7017 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7019 dev_err(&pf->pdev->dev,
7020 "either mac or ip has to be valid for cloud filter\n");
7024 /* Now copy L4 port in Byte 6..7 in general fields */
7025 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7026 be16_to_cpu(filter->dst_port);
7029 /* Validate current device switch mode, change if necessary */
7030 ret = i40e_validate_and_set_switch_mode(vsi);
7032 dev_err(&pf->pdev->dev,
7033 "failed to set switch mode, ret %d\n",
7038 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7041 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7046 dev_dbg(&pf->pdev->dev,
7047 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7048 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7050 dev_info(&pf->pdev->dev,
7051 "%s cloud filter for VSI: %d, L4 port: %d\n",
7052 add ? "add" : "delete", filter->seid,
7053 ntohs(filter->dst_port));
7058 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
7059 * @vsi: Pointer to VSI
7060 * @cls_flower: Pointer to struct tc_cls_flower_offload
7061 * @filter: Pointer to cloud filter structure
7064 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7065 struct tc_cls_flower_offload *f,
7066 struct i40e_cloud_filter *filter)
7068 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7069 struct i40e_pf *pf = vsi->back;
7072 if (f->dissector->used_keys &
7073 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7074 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7075 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7076 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7077 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7078 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7079 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7080 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7081 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7082 f->dissector->used_keys);
7086 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7087 struct flow_dissector_key_keyid *key =
7088 skb_flow_dissector_target(f->dissector,
7089 FLOW_DISSECTOR_KEY_ENC_KEYID,
7092 struct flow_dissector_key_keyid *mask =
7093 skb_flow_dissector_target(f->dissector,
7094 FLOW_DISSECTOR_KEY_ENC_KEYID,
7097 if (mask->keyid != 0)
7098 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7100 filter->tenant_id = be32_to_cpu(key->keyid);
7103 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
7104 struct flow_dissector_key_basic *key =
7105 skb_flow_dissector_target(f->dissector,
7106 FLOW_DISSECTOR_KEY_BASIC,
7109 struct flow_dissector_key_basic *mask =
7110 skb_flow_dissector_target(f->dissector,
7111 FLOW_DISSECTOR_KEY_BASIC,
7114 n_proto_key = ntohs(key->n_proto);
7115 n_proto_mask = ntohs(mask->n_proto);
7117 if (n_proto_key == ETH_P_ALL) {
7121 filter->n_proto = n_proto_key & n_proto_mask;
7122 filter->ip_proto = key->ip_proto;
7125 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7126 struct flow_dissector_key_eth_addrs *key =
7127 skb_flow_dissector_target(f->dissector,
7128 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7131 struct flow_dissector_key_eth_addrs *mask =
7132 skb_flow_dissector_target(f->dissector,
7133 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7136 /* use is_broadcast and is_zero to check for all 0xf or 0 */
7137 if (!is_zero_ether_addr(mask->dst)) {
7138 if (is_broadcast_ether_addr(mask->dst)) {
7139 field_flags |= I40E_CLOUD_FIELD_OMAC;
7141 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7143 return I40E_ERR_CONFIG;
7147 if (!is_zero_ether_addr(mask->src)) {
7148 if (is_broadcast_ether_addr(mask->src)) {
7149 field_flags |= I40E_CLOUD_FIELD_IMAC;
7151 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7153 return I40E_ERR_CONFIG;
7156 ether_addr_copy(filter->dst_mac, key->dst);
7157 ether_addr_copy(filter->src_mac, key->src);
7160 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
7161 struct flow_dissector_key_vlan *key =
7162 skb_flow_dissector_target(f->dissector,
7163 FLOW_DISSECTOR_KEY_VLAN,
7165 struct flow_dissector_key_vlan *mask =
7166 skb_flow_dissector_target(f->dissector,
7167 FLOW_DISSECTOR_KEY_VLAN,
7170 if (mask->vlan_id) {
7171 if (mask->vlan_id == VLAN_VID_MASK) {
7172 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7175 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7177 return I40E_ERR_CONFIG;
7181 filter->vlan_id = cpu_to_be16(key->vlan_id);
7184 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
7185 struct flow_dissector_key_control *key =
7186 skb_flow_dissector_target(f->dissector,
7187 FLOW_DISSECTOR_KEY_CONTROL,
7190 addr_type = key->addr_type;
7193 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7194 struct flow_dissector_key_ipv4_addrs *key =
7195 skb_flow_dissector_target(f->dissector,
7196 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7198 struct flow_dissector_key_ipv4_addrs *mask =
7199 skb_flow_dissector_target(f->dissector,
7200 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7204 if (mask->dst == cpu_to_be32(0xffffffff)) {
7205 field_flags |= I40E_CLOUD_FIELD_IIP;
7207 mask->dst = be32_to_cpu(mask->dst);
7208 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n",
7210 return I40E_ERR_CONFIG;
7215 if (mask->src == cpu_to_be32(0xffffffff)) {
7216 field_flags |= I40E_CLOUD_FIELD_IIP;
7218 mask->src = be32_to_cpu(mask->src);
7219 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n",
7221 return I40E_ERR_CONFIG;
7225 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7226 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7227 return I40E_ERR_CONFIG;
7229 filter->dst_ipv4 = key->dst;
7230 filter->src_ipv4 = key->src;
7233 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7234 struct flow_dissector_key_ipv6_addrs *key =
7235 skb_flow_dissector_target(f->dissector,
7236 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7238 struct flow_dissector_key_ipv6_addrs *mask =
7239 skb_flow_dissector_target(f->dissector,
7240 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7243 /* src and dest IPV6 address should not be LOOPBACK
7244 * (0:0:0:0:0:0:0:1), which can be represented as ::1
7246 if (ipv6_addr_loopback(&key->dst) ||
7247 ipv6_addr_loopback(&key->src)) {
7248 dev_err(&pf->pdev->dev,
7249 "Bad ipv6, addr is LOOPBACK\n");
7250 return I40E_ERR_CONFIG;
7252 if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
7253 field_flags |= I40E_CLOUD_FIELD_IIP;
7255 memcpy(&filter->src_ipv6, &key->src.s6_addr32,
7256 sizeof(filter->src_ipv6));
7257 memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
7258 sizeof(filter->dst_ipv6));
7261 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
7262 struct flow_dissector_key_ports *key =
7263 skb_flow_dissector_target(f->dissector,
7264 FLOW_DISSECTOR_KEY_PORTS,
7266 struct flow_dissector_key_ports *mask =
7267 skb_flow_dissector_target(f->dissector,
7268 FLOW_DISSECTOR_KEY_PORTS,
7272 if (mask->src == cpu_to_be16(0xffff)) {
7273 field_flags |= I40E_CLOUD_FIELD_IIP;
7275 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7276 be16_to_cpu(mask->src));
7277 return I40E_ERR_CONFIG;
7282 if (mask->dst == cpu_to_be16(0xffff)) {
7283 field_flags |= I40E_CLOUD_FIELD_IIP;
7285 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7286 be16_to_cpu(mask->dst));
7287 return I40E_ERR_CONFIG;
7291 filter->dst_port = key->dst;
7292 filter->src_port = key->src;
7294 switch (filter->ip_proto) {
7299 dev_err(&pf->pdev->dev,
7300 "Only UDP and TCP transport are supported\n");
7304 filter->flags = field_flags;
7309 * i40e_handle_tclass: Forward to a traffic class on the device
7310 * @vsi: Pointer to VSI
7311 * @tc: traffic class index on the device
7312 * @filter: Pointer to cloud filter structure
7315 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7316 struct i40e_cloud_filter *filter)
7318 struct i40e_channel *ch, *ch_tmp;
7320 /* direct to a traffic class on the same device */
7322 filter->seid = vsi->seid;
7324 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7325 if (!filter->dst_port) {
7326 dev_err(&vsi->back->pdev->dev,
7327 "Specify destination port to direct to traffic class that is not default\n");
7330 if (list_empty(&vsi->ch_list))
7332 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
7334 if (ch->seid == vsi->tc_seid_map[tc])
7335 filter->seid = ch->seid;
7339 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
7344 * i40e_configure_clsflower - Configure tc flower filters
7345 * @vsi: Pointer to VSI
7346 * @cls_flower: Pointer to struct tc_cls_flower_offload
7349 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7350 struct tc_cls_flower_offload *cls_flower)
7352 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
7353 struct i40e_cloud_filter *filter = NULL;
7354 struct i40e_pf *pf = vsi->back;
7358 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7362 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
7363 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
7366 if (pf->fdir_pf_active_filters ||
7367 (!hlist_empty(&pf->fdir_filter_list))) {
7368 dev_err(&vsi->back->pdev->dev,
7369 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
7373 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
7374 dev_err(&vsi->back->pdev->dev,
7375 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
7376 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7377 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7380 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
7384 filter->cookie = cls_flower->cookie;
7386 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
7390 err = i40e_handle_tclass(vsi, tc, filter);
7394 /* Add cloud filter */
7395 if (filter->dst_port)
7396 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
7398 err = i40e_add_del_cloud_filter(vsi, filter, true);
7401 dev_err(&pf->pdev->dev,
7402 "Failed to add cloud filter, err %s\n",
7403 i40e_stat_str(&pf->hw, err));
7407 /* add filter to the ordered list */
7408 INIT_HLIST_NODE(&filter->cloud_node);
7410 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
7412 pf->num_cloud_filters++;
7421 * i40e_find_cloud_filter - Find the could filter in the list
7422 * @vsi: Pointer to VSI
7423 * @cookie: filter specific cookie
7426 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
7427 unsigned long *cookie)
7429 struct i40e_cloud_filter *filter = NULL;
7430 struct hlist_node *node2;
7432 hlist_for_each_entry_safe(filter, node2,
7433 &vsi->back->cloud_filter_list, cloud_node)
7434 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
7440 * i40e_delete_clsflower - Remove tc flower filters
7441 * @vsi: Pointer to VSI
7442 * @cls_flower: Pointer to struct tc_cls_flower_offload
7445 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
7446 struct tc_cls_flower_offload *cls_flower)
7448 struct i40e_cloud_filter *filter = NULL;
7449 struct i40e_pf *pf = vsi->back;
7452 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
7457 hash_del(&filter->cloud_node);
7459 if (filter->dst_port)
7460 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
7462 err = i40e_add_del_cloud_filter(vsi, filter, false);
7466 dev_err(&pf->pdev->dev,
7467 "Failed to delete cloud filter, err %s\n",
7468 i40e_stat_str(&pf->hw, err));
7469 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
7472 pf->num_cloud_filters--;
7473 if (!pf->num_cloud_filters)
7474 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7475 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7476 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7477 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7478 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7484 * i40e_setup_tc_cls_flower - flower classifier offloads
7485 * @netdev: net device to configure
7486 * @type_data: offload data
7488 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7489 struct tc_cls_flower_offload *cls_flower)
7491 struct i40e_vsi *vsi = np->vsi;
7493 if (cls_flower->common.chain_index)
7496 switch (cls_flower->command) {
7497 case TC_CLSFLOWER_REPLACE:
7498 return i40e_configure_clsflower(vsi, cls_flower);
7499 case TC_CLSFLOWER_DESTROY:
7500 return i40e_delete_clsflower(vsi, cls_flower);
7501 case TC_CLSFLOWER_STATS:
7508 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
7511 struct i40e_netdev_priv *np = cb_priv;
7514 case TC_SETUP_CLSFLOWER:
7515 return i40e_setup_tc_cls_flower(np, type_data);
7522 static int i40e_setup_tc_block(struct net_device *dev,
7523 struct tc_block_offload *f)
7525 struct i40e_netdev_priv *np = netdev_priv(dev);
7527 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
7530 switch (f->command) {
7532 return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
7534 case TC_BLOCK_UNBIND:
7535 tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
7542 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
7546 case TC_SETUP_QDISC_MQPRIO:
7547 return i40e_setup_tc(netdev, type_data);
7548 case TC_SETUP_BLOCK:
7549 return i40e_setup_tc_block(netdev, type_data);
7556 * i40e_open - Called when a network interface is made active
7557 * @netdev: network interface device structure
7559 * The open entry point is called when a network interface is made
7560 * active by the system (IFF_UP). At this point all resources needed
7561 * for transmit and receive operations are allocated, the interrupt
7562 * handler is registered with the OS, the netdev watchdog subtask is
7563 * enabled, and the stack is notified that the interface is ready.
7565 * Returns 0 on success, negative value on failure
7567 int i40e_open(struct net_device *netdev)
7569 struct i40e_netdev_priv *np = netdev_priv(netdev);
7570 struct i40e_vsi *vsi = np->vsi;
7571 struct i40e_pf *pf = vsi->back;
7574 /* disallow open during test or if eeprom is broken */
7575 if (test_bit(__I40E_TESTING, pf->state) ||
7576 test_bit(__I40E_BAD_EEPROM, pf->state))
7579 netif_carrier_off(netdev);
7581 err = i40e_vsi_open(vsi);
7585 /* configure global TSO hardware offload settings */
7586 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
7587 TCP_FLAG_FIN) >> 16);
7588 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
7590 TCP_FLAG_CWR) >> 16);
7591 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
7593 udp_tunnel_get_rx_info(netdev);
7600 * @vsi: the VSI to open
7602 * Finish initialization of the VSI.
7604 * Returns 0 on success, negative value on failure
7606 * Note: expects to be called while under rtnl_lock()
7608 int i40e_vsi_open(struct i40e_vsi *vsi)
7610 struct i40e_pf *pf = vsi->back;
7611 char int_name[I40E_INT_NAME_STR_LEN];
7614 /* allocate descriptors */
7615 err = i40e_vsi_setup_tx_resources(vsi);
7618 err = i40e_vsi_setup_rx_resources(vsi);
7622 err = i40e_vsi_configure(vsi);
7627 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7628 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
7629 err = i40e_vsi_request_irq(vsi, int_name);
7633 /* Notify the stack of the actual queue counts. */
7634 err = netif_set_real_num_tx_queues(vsi->netdev,
7635 vsi->num_queue_pairs);
7637 goto err_set_queues;
7639 err = netif_set_real_num_rx_queues(vsi->netdev,
7640 vsi->num_queue_pairs);
7642 goto err_set_queues;
7644 } else if (vsi->type == I40E_VSI_FDIR) {
7645 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
7646 dev_driver_string(&pf->pdev->dev),
7647 dev_name(&pf->pdev->dev));
7648 err = i40e_vsi_request_irq(vsi, int_name);
7655 err = i40e_up_complete(vsi);
7657 goto err_up_complete;
7664 i40e_vsi_free_irq(vsi);
7666 i40e_vsi_free_rx_resources(vsi);
7668 i40e_vsi_free_tx_resources(vsi);
7669 if (vsi == pf->vsi[pf->lan_vsi])
7670 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
7676 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
7677 * @pf: Pointer to PF
7679 * This function destroys the hlist where all the Flow Director
7680 * filters were saved.
7682 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
7684 struct i40e_fdir_filter *filter;
7685 struct i40e_flex_pit *pit_entry, *tmp;
7686 struct hlist_node *node2;
7688 hlist_for_each_entry_safe(filter, node2,
7689 &pf->fdir_filter_list, fdir_node) {
7690 hlist_del(&filter->fdir_node);
7694 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
7695 list_del(&pit_entry->list);
7698 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
7700 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
7701 list_del(&pit_entry->list);
7704 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
7706 pf->fdir_pf_active_filters = 0;
7707 pf->fd_tcp4_filter_cnt = 0;
7708 pf->fd_udp4_filter_cnt = 0;
7709 pf->fd_sctp4_filter_cnt = 0;
7710 pf->fd_ip4_filter_cnt = 0;
7712 /* Reprogram the default input set for TCP/IPv4 */
7713 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
7714 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7715 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7717 /* Reprogram the default input set for UDP/IPv4 */
7718 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
7719 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7720 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7722 /* Reprogram the default input set for SCTP/IPv4 */
7723 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
7724 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7725 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7727 /* Reprogram the default input set for Other/IPv4 */
7728 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
7729 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7733 * i40e_cloud_filter_exit - Cleans up the cloud filters
7734 * @pf: Pointer to PF
7736 * This function destroys the hlist where all the cloud filters
7739 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
7741 struct i40e_cloud_filter *cfilter;
7742 struct hlist_node *node;
7744 hlist_for_each_entry_safe(cfilter, node,
7745 &pf->cloud_filter_list, cloud_node) {
7746 hlist_del(&cfilter->cloud_node);
7749 pf->num_cloud_filters = 0;
7751 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7752 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7753 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7754 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7755 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7760 * i40e_close - Disables a network interface
7761 * @netdev: network interface device structure
7763 * The close entry point is called when an interface is de-activated
7764 * by the OS. The hardware is still under the driver's control, but
7765 * this netdev interface is disabled.
7767 * Returns 0, this is not allowed to fail
7769 int i40e_close(struct net_device *netdev)
7771 struct i40e_netdev_priv *np = netdev_priv(netdev);
7772 struct i40e_vsi *vsi = np->vsi;
7774 i40e_vsi_close(vsi);
7780 * i40e_do_reset - Start a PF or Core Reset sequence
7781 * @pf: board private structure
7782 * @reset_flags: which reset is requested
7783 * @lock_acquired: indicates whether or not the lock has been acquired
7784 * before this function was called.
7786 * The essential difference in resets is that the PF Reset
7787 * doesn't clear the packet buffers, doesn't reset the PE
7788 * firmware, and doesn't bother the other PFs on the chip.
7790 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
7794 WARN_ON(in_interrupt());
7797 /* do the biggest reset indicated */
7798 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
7800 /* Request a Global Reset
7802 * This will start the chip's countdown to the actual full
7803 * chip reset event, and a warning interrupt to be sent
7804 * to all PFs, including the requestor. Our handler
7805 * for the warning interrupt will deal with the shutdown
7806 * and recovery of the switch setup.
7808 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
7809 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7810 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
7811 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7813 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
7815 /* Request a Core Reset
7817 * Same as Global Reset, except does *not* include the MAC/PHY
7819 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
7820 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7821 val |= I40E_GLGEN_RTRIG_CORER_MASK;
7822 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7823 i40e_flush(&pf->hw);
7825 } else if (reset_flags & I40E_PF_RESET_FLAG) {
7827 /* Request a PF Reset
7829 * Resets only the PF-specific registers
7831 * This goes directly to the tear-down and rebuild of
7832 * the switch, since we need to do all the recovery as
7833 * for the Core Reset.
7835 dev_dbg(&pf->pdev->dev, "PFR requested\n");
7836 i40e_handle_reset_warning(pf, lock_acquired);
7838 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
7841 /* Find the VSI(s) that requested a re-init */
7842 dev_info(&pf->pdev->dev,
7843 "VSI reinit requested\n");
7844 for (v = 0; v < pf->num_alloc_vsi; v++) {
7845 struct i40e_vsi *vsi = pf->vsi[v];
7848 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
7850 i40e_vsi_reinit_locked(pf->vsi[v]);
7852 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
7855 /* Find the VSI(s) that needs to be brought down */
7856 dev_info(&pf->pdev->dev, "VSI down requested\n");
7857 for (v = 0; v < pf->num_alloc_vsi; v++) {
7858 struct i40e_vsi *vsi = pf->vsi[v];
7861 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
7863 set_bit(__I40E_VSI_DOWN, vsi->state);
7868 dev_info(&pf->pdev->dev,
7869 "bad reset request 0x%08x\n", reset_flags);
7873 #ifdef CONFIG_I40E_DCB
7875 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
7876 * @pf: board private structure
7877 * @old_cfg: current DCB config
7878 * @new_cfg: new DCB config
7880 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
7881 struct i40e_dcbx_config *old_cfg,
7882 struct i40e_dcbx_config *new_cfg)
7884 bool need_reconfig = false;
7886 /* Check if ETS configuration has changed */
7887 if (memcmp(&new_cfg->etscfg,
7889 sizeof(new_cfg->etscfg))) {
7890 /* If Priority Table has changed reconfig is needed */
7891 if (memcmp(&new_cfg->etscfg.prioritytable,
7892 &old_cfg->etscfg.prioritytable,
7893 sizeof(new_cfg->etscfg.prioritytable))) {
7894 need_reconfig = true;
7895 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
7898 if (memcmp(&new_cfg->etscfg.tcbwtable,
7899 &old_cfg->etscfg.tcbwtable,
7900 sizeof(new_cfg->etscfg.tcbwtable)))
7901 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
7903 if (memcmp(&new_cfg->etscfg.tsatable,
7904 &old_cfg->etscfg.tsatable,
7905 sizeof(new_cfg->etscfg.tsatable)))
7906 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
7909 /* Check if PFC configuration has changed */
7910 if (memcmp(&new_cfg->pfc,
7912 sizeof(new_cfg->pfc))) {
7913 need_reconfig = true;
7914 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
7917 /* Check if APP Table has changed */
7918 if (memcmp(&new_cfg->app,
7920 sizeof(new_cfg->app))) {
7921 need_reconfig = true;
7922 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
7925 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
7926 return need_reconfig;
7930 * i40e_handle_lldp_event - Handle LLDP Change MIB event
7931 * @pf: board private structure
7932 * @e: event info posted on ARQ
7934 static int i40e_handle_lldp_event(struct i40e_pf *pf,
7935 struct i40e_arq_event_info *e)
7937 struct i40e_aqc_lldp_get_mib *mib =
7938 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
7939 struct i40e_hw *hw = &pf->hw;
7940 struct i40e_dcbx_config tmp_dcbx_cfg;
7941 bool need_reconfig = false;
7945 /* Not DCB capable or capability disabled */
7946 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
7949 /* Ignore if event is not for Nearest Bridge */
7950 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
7951 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
7952 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
7953 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
7956 /* Check MIB Type and return if event for Remote MIB update */
7957 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
7958 dev_dbg(&pf->pdev->dev,
7959 "LLDP event mib type %s\n", type ? "remote" : "local");
7960 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
7961 /* Update the remote cached instance and return */
7962 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
7963 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
7964 &hw->remote_dcbx_config);
7968 /* Store the old configuration */
7969 tmp_dcbx_cfg = hw->local_dcbx_config;
7971 /* Reset the old DCBx configuration data */
7972 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
7973 /* Get updated DCBX data from firmware */
7974 ret = i40e_get_dcb_config(&pf->hw);
7976 dev_info(&pf->pdev->dev,
7977 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
7978 i40e_stat_str(&pf->hw, ret),
7979 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7983 /* No change detected in DCBX configs */
7984 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
7985 sizeof(tmp_dcbx_cfg))) {
7986 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
7990 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
7991 &hw->local_dcbx_config);
7993 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
7998 /* Enable DCB tagging only when more than one TC */
7999 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8000 pf->flags |= I40E_FLAG_DCB_ENABLED;
8002 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8004 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8005 /* Reconfiguration needed quiesce all VSIs */
8006 i40e_pf_quiesce_all_vsi(pf);
8008 /* Changes in configuration update VEB/VSI */
8009 i40e_dcb_reconfigure(pf);
8011 ret = i40e_resume_port_tx(pf);
8013 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8014 /* In case of error no point in resuming VSIs */
8018 /* Wait for the PF's queues to be disabled */
8019 ret = i40e_pf_wait_queues_disabled(pf);
8021 /* Schedule PF reset to recover */
8022 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8023 i40e_service_event_schedule(pf);
8025 i40e_pf_unquiesce_all_vsi(pf);
8026 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
8027 I40E_FLAG_CLIENT_L2_CHANGE);
8033 #endif /* CONFIG_I40E_DCB */
8036 * i40e_do_reset_safe - Protected reset path for userland calls.
8037 * @pf: board private structure
8038 * @reset_flags: which reset is requested
8041 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8044 i40e_do_reset(pf, reset_flags, true);
8049 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
8050 * @pf: board private structure
8051 * @e: event info posted on ARQ
8053 * Handler for LAN Queue Overflow Event generated by the firmware for PF
8056 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8057 struct i40e_arq_event_info *e)
8059 struct i40e_aqc_lan_overflow *data =
8060 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8061 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8062 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8063 struct i40e_hw *hw = &pf->hw;
8067 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8070 /* Queue belongs to VF, find the VF and issue VF reset */
8071 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8072 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8073 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8074 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
8075 vf_id -= hw->func_caps.vf_base_id;
8076 vf = &pf->vf[vf_id];
8077 i40e_vc_notify_vf_reset(vf);
8078 /* Allow VF to process pending reset notification */
8080 i40e_reset_vf(vf, false);
8085 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
8086 * @pf: board private structure
8088 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8092 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8093 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8098 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
8099 * @pf: board private structure
8101 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8105 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8106 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8107 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8108 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8113 * i40e_get_global_fd_count - Get total FD filters programmed on device
8114 * @pf: board private structure
8116 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8120 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8121 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8122 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8123 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8128 * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
8129 * @pf: board private structure
8131 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8133 struct i40e_fdir_filter *filter;
8134 u32 fcnt_prog, fcnt_avail;
8135 struct hlist_node *node;
8137 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8140 /* Check if we have enough room to re-enable FDir SB capability. */
8141 fcnt_prog = i40e_get_global_fd_count(pf);
8142 fcnt_avail = pf->fdir_pf_filter_count;
8143 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8144 (pf->fd_add_err == 0) ||
8145 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
8146 if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
8147 pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
8148 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8149 (I40E_DEBUG_FD & pf->hw.debug_mask))
8150 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8154 /* We should wait for even more space before re-enabling ATR.
8155 * Additionally, we cannot enable ATR as long as we still have TCP SB
8158 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8159 (pf->fd_tcp4_filter_cnt == 0)) {
8160 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
8161 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
8162 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8163 (I40E_DEBUG_FD & pf->hw.debug_mask))
8164 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8168 /* if hw had a problem adding a filter, delete it */
8169 if (pf->fd_inv > 0) {
8170 hlist_for_each_entry_safe(filter, node,
8171 &pf->fdir_filter_list, fdir_node) {
8172 if (filter->fd_id == pf->fd_inv) {
8173 hlist_del(&filter->fdir_node);
8175 pf->fdir_pf_active_filters--;
8182 #define I40E_MIN_FD_FLUSH_INTERVAL 10
8183 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8185 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
8186 * @pf: board private structure
8188 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8190 unsigned long min_flush_time;
8191 int flush_wait_retry = 50;
8192 bool disable_atr = false;
8196 if (!time_after(jiffies, pf->fd_flush_timestamp +
8197 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8200 /* If the flush is happening too quick and we have mostly SB rules we
8201 * should not re-enable ATR for some time.
8203 min_flush_time = pf->fd_flush_timestamp +
8204 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8205 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8207 if (!(time_after(jiffies, min_flush_time)) &&
8208 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8209 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8210 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8214 pf->fd_flush_timestamp = jiffies;
8215 pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
8216 /* flush all filters */
8217 wr32(&pf->hw, I40E_PFQF_CTL_1,
8218 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8219 i40e_flush(&pf->hw);
8223 /* Check FD flush status every 5-6msec */
8224 usleep_range(5000, 6000);
8225 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8226 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8228 } while (flush_wait_retry--);
8229 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8230 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8232 /* replay sideband filters */
8233 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8234 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8235 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
8236 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8237 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8238 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8243 * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
8244 * @pf: board private structure
8246 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8248 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8251 /* We can see up to 256 filter programming desc in transit if the filters are
8252 * being applied really fast; before we see the first
8253 * filter miss error on Rx queue 0. Accumulating enough error messages before
8254 * reacting will make sure we don't cause flush too often.
8256 #define I40E_MAX_FD_PROGRAM_ERROR 256
8259 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
8260 * @pf: board private structure
8262 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
8265 /* if interface is down do nothing */
8266 if (test_bit(__I40E_DOWN, pf->state))
8269 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8270 i40e_fdir_flush_and_replay(pf);
8272 i40e_fdir_check_and_reenable(pf);
8277 * i40e_vsi_link_event - notify VSI of a link event
8278 * @vsi: vsi to be notified
8279 * @link_up: link up or down
8281 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
8283 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
8286 switch (vsi->type) {
8288 if (!vsi->netdev || !vsi->netdev_registered)
8292 netif_carrier_on(vsi->netdev);
8293 netif_tx_wake_all_queues(vsi->netdev);
8295 netif_carrier_off(vsi->netdev);
8296 netif_tx_stop_all_queues(vsi->netdev);
8300 case I40E_VSI_SRIOV:
8301 case I40E_VSI_VMDQ2:
8303 case I40E_VSI_IWARP:
8304 case I40E_VSI_MIRROR:
8306 /* there is no notification for other VSIs */
8312 * i40e_veb_link_event - notify elements on the veb of a link event
8313 * @veb: veb to be notified
8314 * @link_up: link up or down
8316 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
8321 if (!veb || !veb->pf)
8325 /* depth first... */
8326 for (i = 0; i < I40E_MAX_VEB; i++)
8327 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
8328 i40e_veb_link_event(pf->veb[i], link_up);
8330 /* ... now the local VSIs */
8331 for (i = 0; i < pf->num_alloc_vsi; i++)
8332 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
8333 i40e_vsi_link_event(pf->vsi[i], link_up);
8337 * i40e_link_event - Update netif_carrier status
8338 * @pf: board private structure
8340 static void i40e_link_event(struct i40e_pf *pf)
8342 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8343 u8 new_link_speed, old_link_speed;
8345 bool new_link, old_link;
8347 /* save off old link status information */
8348 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
8350 /* set this to force the get_link_status call to refresh state */
8351 pf->hw.phy.get_link_info = true;
8353 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
8355 status = i40e_get_link_status(&pf->hw, &new_link);
8357 /* On success, disable temp link polling */
8358 if (status == I40E_SUCCESS) {
8359 if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
8360 pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
8362 /* Enable link polling temporarily until i40e_get_link_status
8363 * returns I40E_SUCCESS
8365 pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
8366 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
8371 old_link_speed = pf->hw.phy.link_info_old.link_speed;
8372 new_link_speed = pf->hw.phy.link_info.link_speed;
8374 if (new_link == old_link &&
8375 new_link_speed == old_link_speed &&
8376 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
8377 new_link == netif_carrier_ok(vsi->netdev)))
8380 i40e_print_link_message(vsi, new_link);
8382 /* Notify the base of the switch tree connected to
8383 * the link. Floating VEBs are not notified.
8385 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8386 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
8388 i40e_vsi_link_event(vsi, new_link);
8391 i40e_vc_notify_link_state(pf);
8393 if (pf->flags & I40E_FLAG_PTP)
8394 i40e_ptp_set_increment(pf);
8398 * i40e_watchdog_subtask - periodic checks not using event driven response
8399 * @pf: board private structure
8401 static void i40e_watchdog_subtask(struct i40e_pf *pf)
8405 /* if interface is down do nothing */
8406 if (test_bit(__I40E_DOWN, pf->state) ||
8407 test_bit(__I40E_CONFIG_BUSY, pf->state))
8410 /* make sure we don't do these things too often */
8411 if (time_before(jiffies, (pf->service_timer_previous +
8412 pf->service_timer_period)))
8414 pf->service_timer_previous = jiffies;
8416 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
8417 (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
8418 i40e_link_event(pf);
8420 /* Update the stats for active netdevs so the network stack
8421 * can look at updated numbers whenever it cares to
8423 for (i = 0; i < pf->num_alloc_vsi; i++)
8424 if (pf->vsi[i] && pf->vsi[i]->netdev)
8425 i40e_update_stats(pf->vsi[i]);
8427 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
8428 /* Update the stats for the active switching components */
8429 for (i = 0; i < I40E_MAX_VEB; i++)
8431 i40e_update_veb_stats(pf->veb[i]);
8434 i40e_ptp_rx_hang(pf);
8435 i40e_ptp_tx_hang(pf);
8439 * i40e_reset_subtask - Set up for resetting the device and driver
8440 * @pf: board private structure
8442 static void i40e_reset_subtask(struct i40e_pf *pf)
8444 u32 reset_flags = 0;
8446 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
8447 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
8448 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
8450 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
8451 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
8452 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8454 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
8455 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
8456 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
8458 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
8459 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
8460 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
8462 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
8463 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
8464 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
8467 /* If there's a recovery already waiting, it takes
8468 * precedence before starting a new reset sequence.
8470 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
8471 i40e_prep_for_reset(pf, false);
8473 i40e_rebuild(pf, false, false);
8476 /* If we're already down or resetting, just bail */
8478 !test_bit(__I40E_DOWN, pf->state) &&
8479 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
8480 i40e_do_reset(pf, reset_flags, false);
8485 * i40e_handle_link_event - Handle link event
8486 * @pf: board private structure
8487 * @e: event info posted on ARQ
8489 static void i40e_handle_link_event(struct i40e_pf *pf,
8490 struct i40e_arq_event_info *e)
8492 struct i40e_aqc_get_link_status *status =
8493 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
8495 /* Do a new status request to re-enable LSE reporting
8496 * and load new status information into the hw struct
8497 * This completely ignores any state information
8498 * in the ARQ event info, instead choosing to always
8499 * issue the AQ update link status command.
8501 i40e_link_event(pf);
8503 /* Check if module meets thermal requirements */
8504 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
8505 dev_err(&pf->pdev->dev,
8506 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
8507 dev_err(&pf->pdev->dev,
8508 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8510 /* check for unqualified module, if link is down, suppress
8511 * the message if link was forced to be down.
8513 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
8514 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
8515 (!(status->link_info & I40E_AQ_LINK_UP)) &&
8516 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
8517 dev_err(&pf->pdev->dev,
8518 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
8519 dev_err(&pf->pdev->dev,
8520 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8526 * i40e_clean_adminq_subtask - Clean the AdminQ rings
8527 * @pf: board private structure
8529 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
8531 struct i40e_arq_event_info event;
8532 struct i40e_hw *hw = &pf->hw;
8539 /* Do not run clean AQ when PF reset fails */
8540 if (test_bit(__I40E_RESET_FAILED, pf->state))
8543 /* check for error indications */
8544 val = rd32(&pf->hw, pf->hw.aq.arq.len);
8546 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
8547 if (hw->debug_mask & I40E_DEBUG_AQ)
8548 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
8549 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
8551 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
8552 if (hw->debug_mask & I40E_DEBUG_AQ)
8553 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
8554 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
8555 pf->arq_overflows++;
8557 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
8558 if (hw->debug_mask & I40E_DEBUG_AQ)
8559 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
8560 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
8563 wr32(&pf->hw, pf->hw.aq.arq.len, val);
8565 val = rd32(&pf->hw, pf->hw.aq.asq.len);
8567 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
8568 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8569 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
8570 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
8572 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
8573 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8574 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
8575 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
8577 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
8578 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8579 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
8580 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
8583 wr32(&pf->hw, pf->hw.aq.asq.len, val);
8585 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
8586 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
8591 ret = i40e_clean_arq_element(hw, &event, &pending);
8592 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
8595 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
8599 opcode = le16_to_cpu(event.desc.opcode);
8602 case i40e_aqc_opc_get_link_status:
8603 i40e_handle_link_event(pf, &event);
8605 case i40e_aqc_opc_send_msg_to_pf:
8606 ret = i40e_vc_process_vf_msg(pf,
8607 le16_to_cpu(event.desc.retval),
8608 le32_to_cpu(event.desc.cookie_high),
8609 le32_to_cpu(event.desc.cookie_low),
8613 case i40e_aqc_opc_lldp_update_mib:
8614 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
8615 #ifdef CONFIG_I40E_DCB
8617 ret = i40e_handle_lldp_event(pf, &event);
8619 #endif /* CONFIG_I40E_DCB */
8621 case i40e_aqc_opc_event_lan_overflow:
8622 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
8623 i40e_handle_lan_overflow_event(pf, &event);
8625 case i40e_aqc_opc_send_msg_to_peer:
8626 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
8628 case i40e_aqc_opc_nvm_erase:
8629 case i40e_aqc_opc_nvm_update:
8630 case i40e_aqc_opc_oem_post_update:
8631 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
8632 "ARQ NVM operation 0x%04x completed\n",
8636 dev_info(&pf->pdev->dev,
8637 "ARQ: Unknown event 0x%04x ignored\n",
8641 } while (i++ < pf->adminq_work_limit);
8643 if (i < pf->adminq_work_limit)
8644 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
8646 /* re-enable Admin queue interrupt cause */
8647 val = rd32(hw, I40E_PFINT_ICR0_ENA);
8648 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
8649 wr32(hw, I40E_PFINT_ICR0_ENA, val);
8652 kfree(event.msg_buf);
8656 * i40e_verify_eeprom - make sure eeprom is good to use
8657 * @pf: board private structure
8659 static void i40e_verify_eeprom(struct i40e_pf *pf)
8663 err = i40e_diag_eeprom_test(&pf->hw);
8665 /* retry in case of garbage read */
8666 err = i40e_diag_eeprom_test(&pf->hw);
8668 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
8670 set_bit(__I40E_BAD_EEPROM, pf->state);
8674 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
8675 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
8676 clear_bit(__I40E_BAD_EEPROM, pf->state);
8681 * i40e_enable_pf_switch_lb
8682 * @pf: pointer to the PF structure
8684 * enable switch loop back or die - no point in a return value
8686 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
8688 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8689 struct i40e_vsi_context ctxt;
8692 ctxt.seid = pf->main_vsi_seid;
8693 ctxt.pf_num = pf->hw.pf_id;
8695 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8697 dev_info(&pf->pdev->dev,
8698 "couldn't get PF vsi config, err %s aq_err %s\n",
8699 i40e_stat_str(&pf->hw, ret),
8700 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8703 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8704 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8705 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8707 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8709 dev_info(&pf->pdev->dev,
8710 "update vsi switch failed, err %s aq_err %s\n",
8711 i40e_stat_str(&pf->hw, ret),
8712 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8717 * i40e_disable_pf_switch_lb
8718 * @pf: pointer to the PF structure
8720 * disable switch loop back or die - no point in a return value
8722 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
8724 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8725 struct i40e_vsi_context ctxt;
8728 ctxt.seid = pf->main_vsi_seid;
8729 ctxt.pf_num = pf->hw.pf_id;
8731 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8733 dev_info(&pf->pdev->dev,
8734 "couldn't get PF vsi config, err %s aq_err %s\n",
8735 i40e_stat_str(&pf->hw, ret),
8736 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8739 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8740 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8741 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8743 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8745 dev_info(&pf->pdev->dev,
8746 "update vsi switch failed, err %s aq_err %s\n",
8747 i40e_stat_str(&pf->hw, ret),
8748 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8753 * i40e_config_bridge_mode - Configure the HW bridge mode
8754 * @veb: pointer to the bridge instance
8756 * Configure the loop back mode for the LAN VSI that is downlink to the
8757 * specified HW bridge instance. It is expected this function is called
8758 * when a new HW bridge is instantiated.
8760 static void i40e_config_bridge_mode(struct i40e_veb *veb)
8762 struct i40e_pf *pf = veb->pf;
8764 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
8765 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
8766 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
8767 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
8768 i40e_disable_pf_switch_lb(pf);
8770 i40e_enable_pf_switch_lb(pf);
8774 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
8775 * @veb: pointer to the VEB instance
8777 * This is a recursive function that first builds the attached VSIs then
8778 * recurses in to build the next layer of VEB. We track the connections
8779 * through our own index numbers because the seid's from the HW could
8780 * change across the reset.
8782 static int i40e_reconstitute_veb(struct i40e_veb *veb)
8784 struct i40e_vsi *ctl_vsi = NULL;
8785 struct i40e_pf *pf = veb->pf;
8789 /* build VSI that owns this VEB, temporarily attached to base VEB */
8790 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
8792 pf->vsi[v]->veb_idx == veb->idx &&
8793 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
8794 ctl_vsi = pf->vsi[v];
8799 dev_info(&pf->pdev->dev,
8800 "missing owner VSI for veb_idx %d\n", veb->idx);
8802 goto end_reconstitute;
8804 if (ctl_vsi != pf->vsi[pf->lan_vsi])
8805 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
8806 ret = i40e_add_vsi(ctl_vsi);
8808 dev_info(&pf->pdev->dev,
8809 "rebuild of veb_idx %d owner VSI failed: %d\n",
8811 goto end_reconstitute;
8813 i40e_vsi_reset_stats(ctl_vsi);
8815 /* create the VEB in the switch and move the VSI onto the VEB */
8816 ret = i40e_add_veb(veb, ctl_vsi);
8818 goto end_reconstitute;
8820 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
8821 veb->bridge_mode = BRIDGE_MODE_VEB;
8823 veb->bridge_mode = BRIDGE_MODE_VEPA;
8824 i40e_config_bridge_mode(veb);
8826 /* create the remaining VSIs attached to this VEB */
8827 for (v = 0; v < pf->num_alloc_vsi; v++) {
8828 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
8831 if (pf->vsi[v]->veb_idx == veb->idx) {
8832 struct i40e_vsi *vsi = pf->vsi[v];
8834 vsi->uplink_seid = veb->seid;
8835 ret = i40e_add_vsi(vsi);
8837 dev_info(&pf->pdev->dev,
8838 "rebuild of vsi_idx %d failed: %d\n",
8840 goto end_reconstitute;
8842 i40e_vsi_reset_stats(vsi);
8846 /* create any VEBs attached to this VEB - RECURSION */
8847 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
8848 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
8849 pf->veb[veb_idx]->uplink_seid = veb->seid;
8850 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
8861 * i40e_get_capabilities - get info about the HW
8862 * @pf: the PF struct
8864 static int i40e_get_capabilities(struct i40e_pf *pf,
8865 enum i40e_admin_queue_opc list_type)
8867 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
8872 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
8874 cap_buf = kzalloc(buf_len, GFP_KERNEL);
8878 /* this loads the data into the hw struct for us */
8879 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
8880 &data_size, list_type,
8882 /* data loaded, buffer no longer needed */
8885 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
8886 /* retry with a larger buffer */
8887 buf_len = data_size;
8888 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
8889 dev_info(&pf->pdev->dev,
8890 "capability discovery failed, err %s aq_err %s\n",
8891 i40e_stat_str(&pf->hw, err),
8892 i40e_aq_str(&pf->hw,
8893 pf->hw.aq.asq_last_status));
8898 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
8899 if (list_type == i40e_aqc_opc_list_func_capabilities) {
8900 dev_info(&pf->pdev->dev,
8901 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
8902 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
8903 pf->hw.func_caps.num_msix_vectors,
8904 pf->hw.func_caps.num_msix_vectors_vf,
8905 pf->hw.func_caps.fd_filters_guaranteed,
8906 pf->hw.func_caps.fd_filters_best_effort,
8907 pf->hw.func_caps.num_tx_qp,
8908 pf->hw.func_caps.num_vsis);
8909 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
8910 dev_info(&pf->pdev->dev,
8911 "switch_mode=0x%04x, function_valid=0x%08x\n",
8912 pf->hw.dev_caps.switch_mode,
8913 pf->hw.dev_caps.valid_functions);
8914 dev_info(&pf->pdev->dev,
8915 "SR-IOV=%d, num_vfs for all function=%u\n",
8916 pf->hw.dev_caps.sr_iov_1_1,
8917 pf->hw.dev_caps.num_vfs);
8918 dev_info(&pf->pdev->dev,
8919 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
8920 pf->hw.dev_caps.num_vsis,
8921 pf->hw.dev_caps.num_rx_qp,
8922 pf->hw.dev_caps.num_tx_qp);
8925 if (list_type == i40e_aqc_opc_list_func_capabilities) {
8926 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
8927 + pf->hw.func_caps.num_vfs)
8928 if (pf->hw.revision_id == 0 &&
8929 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
8930 dev_info(&pf->pdev->dev,
8931 "got num_vsis %d, setting num_vsis to %d\n",
8932 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
8933 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
8939 static int i40e_vsi_clear(struct i40e_vsi *vsi);
8942 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
8943 * @pf: board private structure
8945 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
8947 struct i40e_vsi *vsi;
8949 /* quick workaround for an NVM issue that leaves a critical register
8952 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
8953 static const u32 hkey[] = {
8954 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
8955 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
8956 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
8960 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
8961 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
8964 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8967 /* find existing VSI and see if it needs configuring */
8968 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
8970 /* create a new VSI if none exists */
8972 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
8973 pf->vsi[pf->lan_vsi]->seid, 0);
8975 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
8976 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8977 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
8982 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
8986 * i40e_fdir_teardown - release the Flow Director resources
8987 * @pf: board private structure
8989 static void i40e_fdir_teardown(struct i40e_pf *pf)
8991 struct i40e_vsi *vsi;
8993 i40e_fdir_filter_exit(pf);
8994 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
8996 i40e_vsi_release(vsi);
9000 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
9002 * @seid: seid of main or channel VSIs
9004 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
9005 * existed before reset
9007 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9009 struct i40e_cloud_filter *cfilter;
9010 struct i40e_pf *pf = vsi->back;
9011 struct hlist_node *node;
9014 /* Add cloud filters back if they exist */
9015 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9017 if (cfilter->seid != seid)
9020 if (cfilter->dst_port)
9021 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9024 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9027 dev_dbg(&pf->pdev->dev,
9028 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9029 i40e_stat_str(&pf->hw, ret),
9030 i40e_aq_str(&pf->hw,
9031 pf->hw.aq.asq_last_status));
9039 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
9042 * Rebuilds channel VSIs if they existed before reset
9044 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9046 struct i40e_channel *ch, *ch_tmp;
9049 if (list_empty(&vsi->ch_list))
9052 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9053 if (!ch->initialized)
9055 /* Proceed with creation of channel (VMDq2) VSI */
9056 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9058 dev_info(&vsi->back->pdev->dev,
9059 "failed to rebuild channels using uplink_seid %u\n",
9063 if (ch->max_tx_rate) {
9064 u64 credits = ch->max_tx_rate;
9066 if (i40e_set_bw_limit(vsi, ch->seid,
9070 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9071 dev_dbg(&vsi->back->pdev->dev,
9072 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9077 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9079 dev_dbg(&vsi->back->pdev->dev,
9080 "Failed to rebuild cloud filters for channel VSI %u\n",
9089 * i40e_prep_for_reset - prep for the core to reset
9090 * @pf: board private structure
9091 * @lock_acquired: indicates whether or not the lock has been acquired
9092 * before this function was called.
9094 * Close up the VFs and other things in prep for PF Reset.
9096 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9098 struct i40e_hw *hw = &pf->hw;
9099 i40e_status ret = 0;
9102 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9103 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9105 if (i40e_check_asq_alive(&pf->hw))
9106 i40e_vc_notify_reset(pf);
9108 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9110 /* quiesce the VSIs and their queues that are not already DOWN */
9111 /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
9114 i40e_pf_quiesce_all_vsi(pf);
9118 for (v = 0; v < pf->num_alloc_vsi; v++) {
9120 pf->vsi[v]->seid = 0;
9123 i40e_shutdown_adminq(&pf->hw);
9125 /* call shutdown HMC */
9126 if (hw->hmc.hmc_obj) {
9127 ret = i40e_shutdown_lan_hmc(hw);
9129 dev_warn(&pf->pdev->dev,
9130 "shutdown_lan_hmc failed: %d\n", ret);
9135 * i40e_send_version - update firmware with driver version
9138 static void i40e_send_version(struct i40e_pf *pf)
9140 struct i40e_driver_version dv;
9142 dv.major_version = DRV_VERSION_MAJOR;
9143 dv.minor_version = DRV_VERSION_MINOR;
9144 dv.build_version = DRV_VERSION_BUILD;
9145 dv.subbuild_version = 0;
9146 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
9147 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
9151 * i40e_get_oem_version - get OEM specific version information
9152 * @hw: pointer to the hardware structure
9154 static void i40e_get_oem_version(struct i40e_hw *hw)
9156 u16 block_offset = 0xffff;
9157 u16 block_length = 0;
9158 u16 capabilities = 0;
9162 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9163 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9164 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9165 #define I40E_NVM_OEM_GEN_OFFSET 0x02
9166 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9167 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9168 #define I40E_NVM_OEM_LENGTH 3
9170 /* Check if pointer to OEM version block is valid. */
9171 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9172 if (block_offset == 0xffff)
9175 /* Check if OEM version block has correct length. */
9176 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9178 if (block_length < I40E_NVM_OEM_LENGTH)
9181 /* Check if OEM version format is as expected. */
9182 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9184 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9187 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9189 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9191 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9192 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
9196 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
9197 * @pf: board private structure
9199 static int i40e_reset(struct i40e_pf *pf)
9201 struct i40e_hw *hw = &pf->hw;
9204 ret = i40e_pf_reset(hw);
9206 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9207 set_bit(__I40E_RESET_FAILED, pf->state);
9208 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9216 * i40e_rebuild - rebuild using a saved config
9217 * @pf: board private structure
9218 * @reinit: if the Main VSI needs to re-initialized.
9219 * @lock_acquired: indicates whether or not the lock has been acquired
9220 * before this function was called.
9222 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9224 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9225 struct i40e_hw *hw = &pf->hw;
9226 u8 set_fc_aq_fail = 0;
9231 if (test_bit(__I40E_DOWN, pf->state))
9232 goto clear_recovery;
9233 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9235 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
9236 ret = i40e_init_adminq(&pf->hw);
9238 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9239 i40e_stat_str(&pf->hw, ret),
9240 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9241 goto clear_recovery;
9243 i40e_get_oem_version(&pf->hw);
9245 /* re-verify the eeprom if we just had an EMP reset */
9246 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
9247 i40e_verify_eeprom(pf);
9249 i40e_clear_pxe_mode(hw);
9250 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
9252 goto end_core_reset;
9254 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9255 hw->func_caps.num_rx_qp, 0, 0);
9257 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
9258 goto end_core_reset;
9260 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9262 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
9263 goto end_core_reset;
9266 #ifdef CONFIG_I40E_DCB
9267 ret = i40e_init_pf_dcb(pf);
9269 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
9270 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9271 /* Continue without DCB enabled */
9273 #endif /* CONFIG_I40E_DCB */
9274 /* do basic switch setup */
9277 ret = i40e_setup_pf_switch(pf, reinit);
9281 /* The driver only wants link up/down and module qualification
9282 * reports from firmware. Note the negative logic.
9284 ret = i40e_aq_set_phy_int_mask(&pf->hw,
9285 ~(I40E_AQ_EVENT_LINK_UPDOWN |
9286 I40E_AQ_EVENT_MEDIA_NA |
9287 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
9289 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
9290 i40e_stat_str(&pf->hw, ret),
9291 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9293 /* make sure our flow control settings are restored */
9294 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
9296 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
9297 i40e_stat_str(&pf->hw, ret),
9298 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9300 /* Rebuild the VSIs and VEBs that existed before reset.
9301 * They are still in our local switch element arrays, so only
9302 * need to rebuild the switch model in the HW.
9304 * If there were VEBs but the reconstitution failed, we'll try
9305 * try to recover minimal use by getting the basic PF VSI working.
9307 if (vsi->uplink_seid != pf->mac_seid) {
9308 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
9309 /* find the one VEB connected to the MAC, and find orphans */
9310 for (v = 0; v < I40E_MAX_VEB; v++) {
9314 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
9315 pf->veb[v]->uplink_seid == 0) {
9316 ret = i40e_reconstitute_veb(pf->veb[v]);
9321 /* If Main VEB failed, we're in deep doodoo,
9322 * so give up rebuilding the switch and set up
9323 * for minimal rebuild of PF VSI.
9324 * If orphan failed, we'll report the error
9325 * but try to keep going.
9327 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
9328 dev_info(&pf->pdev->dev,
9329 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
9331 vsi->uplink_seid = pf->mac_seid;
9333 } else if (pf->veb[v]->uplink_seid == 0) {
9334 dev_info(&pf->pdev->dev,
9335 "rebuild of orphan VEB failed: %d\n",
9342 if (vsi->uplink_seid == pf->mac_seid) {
9343 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
9344 /* no VEB, so rebuild only the Main VSI */
9345 ret = i40e_add_vsi(vsi);
9347 dev_info(&pf->pdev->dev,
9348 "rebuild of Main VSI failed: %d\n", ret);
9353 if (vsi->mqprio_qopt.max_rate[0]) {
9354 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9357 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
9358 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
9362 credits = max_tx_rate;
9363 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9364 dev_dbg(&vsi->back->pdev->dev,
9365 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9371 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
9375 /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
9376 * for this main VSI if they exist
9378 ret = i40e_rebuild_channels(vsi);
9382 /* Reconfigure hardware for allowing smaller MSS in the case
9383 * of TSO, so that we avoid the MDD being fired and causing
9384 * a reset in the case of small MSS+TSO.
9386 #define I40E_REG_MSS 0x000E64DC
9387 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
9388 #define I40E_64BYTE_MSS 0x400000
9389 val = rd32(hw, I40E_REG_MSS);
9390 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
9391 val &= ~I40E_REG_MSS_MIN_MASK;
9392 val |= I40E_64BYTE_MSS;
9393 wr32(hw, I40E_REG_MSS, val);
9396 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
9398 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9400 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
9401 i40e_stat_str(&pf->hw, ret),
9402 i40e_aq_str(&pf->hw,
9403 pf->hw.aq.asq_last_status));
9405 /* reinit the misc interrupt */
9406 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
9407 ret = i40e_setup_misc_vector(pf);
9409 /* Add a filter to drop all Flow control frames from any VSI from being
9410 * transmitted. By doing so we stop a malicious VF from sending out
9411 * PAUSE or PFC frames and potentially controlling traffic for other
9413 * The FW can still send Flow control frames if enabled.
9415 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
9418 /* restart the VSIs that were rebuilt and running before the reset */
9419 i40e_pf_unquiesce_all_vsi(pf);
9421 /* Release the RTNL lock before we start resetting VFs */
9425 /* Restore promiscuous settings */
9426 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
9428 dev_warn(&pf->pdev->dev,
9429 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
9430 pf->cur_promisc ? "on" : "off",
9431 i40e_stat_str(&pf->hw, ret),
9432 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9434 i40e_reset_all_vfs(pf, true);
9436 /* tell the firmware that we're starting */
9437 i40e_send_version(pf);
9439 /* We've already released the lock, so don't do it again */
9440 goto end_core_reset;
9446 clear_bit(__I40E_RESET_FAILED, pf->state);
9448 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9452 * i40e_reset_and_rebuild - reset and rebuild using a saved config
9453 * @pf: board private structure
9454 * @reinit: if the Main VSI needs to re-initialized.
9455 * @lock_acquired: indicates whether or not the lock has been acquired
9456 * before this function was called.
9458 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
9462 /* Now we wait for GRST to settle out.
9463 * We don't have to delete the VEBs or VSIs from the hw switch
9464 * because the reset will make them disappear.
9466 ret = i40e_reset(pf);
9468 i40e_rebuild(pf, reinit, lock_acquired);
9472 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
9473 * @pf: board private structure
9475 * Close up the VFs and other things in prep for a Core Reset,
9476 * then get ready to rebuild the world.
9477 * @lock_acquired: indicates whether or not the lock has been acquired
9478 * before this function was called.
9480 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
9482 i40e_prep_for_reset(pf, lock_acquired);
9483 i40e_reset_and_rebuild(pf, false, lock_acquired);
9487 * i40e_handle_mdd_event
9488 * @pf: pointer to the PF structure
9490 * Called from the MDD irq handler to identify possibly malicious vfs
9492 static void i40e_handle_mdd_event(struct i40e_pf *pf)
9494 struct i40e_hw *hw = &pf->hw;
9495 bool mdd_detected = false;
9496 bool pf_mdd_detected = false;
9501 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
9504 /* find what triggered the MDD event */
9505 reg = rd32(hw, I40E_GL_MDET_TX);
9506 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
9507 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
9508 I40E_GL_MDET_TX_PF_NUM_SHIFT;
9509 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
9510 I40E_GL_MDET_TX_VF_NUM_SHIFT;
9511 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
9512 I40E_GL_MDET_TX_EVENT_SHIFT;
9513 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
9514 I40E_GL_MDET_TX_QUEUE_SHIFT) -
9515 pf->hw.func_caps.base_queue;
9516 if (netif_msg_tx_err(pf))
9517 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
9518 event, queue, pf_num, vf_num);
9519 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
9520 mdd_detected = true;
9522 reg = rd32(hw, I40E_GL_MDET_RX);
9523 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
9524 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
9525 I40E_GL_MDET_RX_FUNCTION_SHIFT;
9526 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
9527 I40E_GL_MDET_RX_EVENT_SHIFT;
9528 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
9529 I40E_GL_MDET_RX_QUEUE_SHIFT) -
9530 pf->hw.func_caps.base_queue;
9531 if (netif_msg_rx_err(pf))
9532 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
9533 event, queue, func);
9534 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
9535 mdd_detected = true;
9539 reg = rd32(hw, I40E_PF_MDET_TX);
9540 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
9541 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
9542 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
9543 pf_mdd_detected = true;
9545 reg = rd32(hw, I40E_PF_MDET_RX);
9546 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
9547 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
9548 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
9549 pf_mdd_detected = true;
9551 /* Queue belongs to the PF, initiate a reset */
9552 if (pf_mdd_detected) {
9553 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9554 i40e_service_event_schedule(pf);
9558 /* see if one of the VFs needs its hand slapped */
9559 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
9561 reg = rd32(hw, I40E_VP_MDET_TX(i));
9562 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
9563 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
9564 vf->num_mdd_events++;
9565 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
9569 reg = rd32(hw, I40E_VP_MDET_RX(i));
9570 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
9571 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
9572 vf->num_mdd_events++;
9573 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
9577 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
9578 dev_info(&pf->pdev->dev,
9579 "Too many MDD events on VF %d, disabled\n", i);
9580 dev_info(&pf->pdev->dev,
9581 "Use PF Control I/F to re-enable the VF\n");
9582 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
9586 /* re-enable mdd interrupt cause */
9587 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
9588 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
9589 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
9590 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
9594 static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
9596 switch (port->type) {
9597 case UDP_TUNNEL_TYPE_VXLAN:
9599 case UDP_TUNNEL_TYPE_GENEVE:
9607 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
9608 * @pf: board private structure
9610 static void i40e_sync_udp_filters(struct i40e_pf *pf)
9614 /* loop through and set pending bit for all active UDP filters */
9615 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9616 if (pf->udp_ports[i].port)
9617 pf->pending_udp_bitmap |= BIT_ULL(i);
9620 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
9624 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
9625 * @pf: board private structure
9627 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
9629 struct i40e_hw *hw = &pf->hw;
9634 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
9637 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
9639 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9640 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
9641 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9642 port = pf->udp_ports[i].port;
9644 ret = i40e_aq_add_udp_tunnel(hw, port,
9645 pf->udp_ports[i].type,
9648 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
9651 dev_info(&pf->pdev->dev,
9652 "%s %s port %d, index %d failed, err %s aq_err %s\n",
9653 i40e_tunnel_name(&pf->udp_ports[i]),
9654 port ? "add" : "delete",
9656 i40e_stat_str(&pf->hw, ret),
9657 i40e_aq_str(&pf->hw,
9658 pf->hw.aq.asq_last_status));
9659 pf->udp_ports[i].port = 0;
9666 * i40e_service_task - Run the driver's async subtasks
9667 * @work: pointer to work_struct containing our data
9669 static void i40e_service_task(struct work_struct *work)
9671 struct i40e_pf *pf = container_of(work,
9674 unsigned long start_time = jiffies;
9676 /* don't bother with service tasks if a reset is in progress */
9677 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9680 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
9683 i40e_detect_recover_hung(pf);
9684 i40e_sync_filters_subtask(pf);
9685 i40e_reset_subtask(pf);
9686 i40e_handle_mdd_event(pf);
9687 i40e_vc_process_vflr_event(pf);
9688 i40e_watchdog_subtask(pf);
9689 i40e_fdir_reinit_subtask(pf);
9690 if (pf->flags & I40E_FLAG_CLIENT_RESET) {
9691 /* Client subtask will reopen next time through. */
9692 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
9693 pf->flags &= ~I40E_FLAG_CLIENT_RESET;
9695 i40e_client_subtask(pf);
9696 if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
9697 i40e_notify_client_of_l2_param_changes(
9698 pf->vsi[pf->lan_vsi]);
9699 pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
9702 i40e_sync_filters_subtask(pf);
9703 i40e_sync_udp_filters_subtask(pf);
9704 i40e_clean_adminq_subtask(pf);
9706 /* flush memory to make sure state is correct before next watchdog */
9707 smp_mb__before_atomic();
9708 clear_bit(__I40E_SERVICE_SCHED, pf->state);
9710 /* If the tasks have taken longer than one timer cycle or there
9711 * is more work to be done, reschedule the service task now
9712 * rather than wait for the timer to tick again.
9714 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
9715 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
9716 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
9717 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
9718 i40e_service_event_schedule(pf);
9722 * i40e_service_timer - timer callback
9723 * @data: pointer to PF struct
9725 static void i40e_service_timer(struct timer_list *t)
9727 struct i40e_pf *pf = from_timer(pf, t, service_timer);
9729 mod_timer(&pf->service_timer,
9730 round_jiffies(jiffies + pf->service_timer_period));
9731 i40e_service_event_schedule(pf);
9735 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
9736 * @vsi: the VSI being configured
9738 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
9740 struct i40e_pf *pf = vsi->back;
9742 switch (vsi->type) {
9744 vsi->alloc_queue_pairs = pf->num_lan_qps;
9745 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9746 I40E_REQ_DESCRIPTOR_MULTIPLE);
9747 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
9748 vsi->num_q_vectors = pf->num_lan_msix;
9750 vsi->num_q_vectors = 1;
9755 vsi->alloc_queue_pairs = 1;
9756 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
9757 I40E_REQ_DESCRIPTOR_MULTIPLE);
9758 vsi->num_q_vectors = pf->num_fdsb_msix;
9761 case I40E_VSI_VMDQ2:
9762 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
9763 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9764 I40E_REQ_DESCRIPTOR_MULTIPLE);
9765 vsi->num_q_vectors = pf->num_vmdq_msix;
9768 case I40E_VSI_SRIOV:
9769 vsi->alloc_queue_pairs = pf->num_vf_qps;
9770 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9771 I40E_REQ_DESCRIPTOR_MULTIPLE);
9783 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
9785 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
9787 * On error: returns error code (negative)
9788 * On success: returns 0
9790 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
9792 struct i40e_ring **next_rings;
9796 /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
9797 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
9798 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
9799 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
9802 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
9803 if (i40e_enabled_xdp_vsi(vsi)) {
9804 vsi->xdp_rings = next_rings;
9805 next_rings += vsi->alloc_queue_pairs;
9807 vsi->rx_rings = next_rings;
9809 if (alloc_qvectors) {
9810 /* allocate memory for q_vector pointers */
9811 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
9812 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
9813 if (!vsi->q_vectors) {
9821 kfree(vsi->tx_rings);
9826 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
9827 * @pf: board private structure
9828 * @type: type of VSI
9830 * On error: returns error code (negative)
9831 * On success: returns vsi index in PF (positive)
9833 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
9836 struct i40e_vsi *vsi;
9840 /* Need to protect the allocation of the VSIs at the PF level */
9841 mutex_lock(&pf->switch_mutex);
9843 /* VSI list may be fragmented if VSI creation/destruction has
9844 * been happening. We can afford to do a quick scan to look
9845 * for any free VSIs in the list.
9847 * find next empty vsi slot, looping back around if necessary
9850 while (i < pf->num_alloc_vsi && pf->vsi[i])
9852 if (i >= pf->num_alloc_vsi) {
9854 while (i < pf->next_vsi && pf->vsi[i])
9858 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
9859 vsi_idx = i; /* Found one! */
9862 goto unlock_pf; /* out of VSI slots! */
9866 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
9873 set_bit(__I40E_VSI_DOWN, vsi->state);
9876 vsi->int_rate_limit = 0;
9877 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
9878 pf->rss_table_size : 64;
9879 vsi->netdev_registered = false;
9880 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
9881 hash_init(vsi->mac_filter_hash);
9882 vsi->irqs_ready = false;
9884 ret = i40e_set_num_rings_in_vsi(vsi);
9888 ret = i40e_vsi_alloc_arrays(vsi, true);
9892 /* Setup default MSIX irq handler for VSI */
9893 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
9895 /* Initialize VSI lock */
9896 spin_lock_init(&vsi->mac_filter_hash_lock);
9897 pf->vsi[vsi_idx] = vsi;
9902 pf->next_vsi = i - 1;
9905 mutex_unlock(&pf->switch_mutex);
9910 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
9911 * @type: VSI pointer
9912 * @free_qvectors: a bool to specify if q_vectors need to be freed.
9914 * On error: returns error code (negative)
9915 * On success: returns 0
9917 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
9919 /* free the ring and vector containers */
9920 if (free_qvectors) {
9921 kfree(vsi->q_vectors);
9922 vsi->q_vectors = NULL;
9924 kfree(vsi->tx_rings);
9925 vsi->tx_rings = NULL;
9926 vsi->rx_rings = NULL;
9927 vsi->xdp_rings = NULL;
9931 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
9933 * @vsi: Pointer to VSI structure
9935 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
9940 kfree(vsi->rss_hkey_user);
9941 vsi->rss_hkey_user = NULL;
9943 kfree(vsi->rss_lut_user);
9944 vsi->rss_lut_user = NULL;
9948 * i40e_vsi_clear - Deallocate the VSI provided
9949 * @vsi: the VSI being un-configured
9951 static int i40e_vsi_clear(struct i40e_vsi *vsi)
9962 mutex_lock(&pf->switch_mutex);
9963 if (!pf->vsi[vsi->idx]) {
9964 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
9965 vsi->idx, vsi->idx, vsi, vsi->type);
9969 if (pf->vsi[vsi->idx] != vsi) {
9970 dev_err(&pf->pdev->dev,
9971 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
9972 pf->vsi[vsi->idx]->idx,
9974 pf->vsi[vsi->idx]->type,
9975 vsi->idx, vsi, vsi->type);
9979 /* updates the PF for this cleared vsi */
9980 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
9981 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
9983 i40e_vsi_free_arrays(vsi, true);
9984 i40e_clear_rss_config_user(vsi);
9986 pf->vsi[vsi->idx] = NULL;
9987 if (vsi->idx < pf->next_vsi)
9988 pf->next_vsi = vsi->idx;
9991 mutex_unlock(&pf->switch_mutex);
9999 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
10000 * @vsi: the VSI being cleaned
10002 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10006 if (vsi->tx_rings && vsi->tx_rings[0]) {
10007 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10008 kfree_rcu(vsi->tx_rings[i], rcu);
10009 vsi->tx_rings[i] = NULL;
10010 vsi->rx_rings[i] = NULL;
10011 if (vsi->xdp_rings)
10012 vsi->xdp_rings[i] = NULL;
10018 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
10019 * @vsi: the VSI being configured
10021 static int i40e_alloc_rings(struct i40e_vsi *vsi)
10023 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10024 struct i40e_pf *pf = vsi->back;
10025 struct i40e_ring *ring;
10027 /* Set basic values in the rings to be used later during open() */
10028 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10029 /* allocate space for both Tx and Rx in one shot */
10030 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10034 ring->queue_index = i;
10035 ring->reg_idx = vsi->base_queue + i;
10036 ring->ring_active = false;
10038 ring->netdev = vsi->netdev;
10039 ring->dev = &pf->pdev->dev;
10040 ring->count = vsi->num_desc;
10043 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10044 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10045 ring->tx_itr_setting = pf->tx_itr_default;
10046 vsi->tx_rings[i] = ring++;
10048 if (!i40e_enabled_xdp_vsi(vsi))
10051 ring->queue_index = vsi->alloc_queue_pairs + i;
10052 ring->reg_idx = vsi->base_queue + ring->queue_index;
10053 ring->ring_active = false;
10055 ring->netdev = NULL;
10056 ring->dev = &pf->pdev->dev;
10057 ring->count = vsi->num_desc;
10060 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10061 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10062 set_ring_xdp(ring);
10063 ring->tx_itr_setting = pf->tx_itr_default;
10064 vsi->xdp_rings[i] = ring++;
10067 ring->queue_index = i;
10068 ring->reg_idx = vsi->base_queue + i;
10069 ring->ring_active = false;
10071 ring->netdev = vsi->netdev;
10072 ring->dev = &pf->pdev->dev;
10073 ring->count = vsi->num_desc;
10076 ring->rx_itr_setting = pf->rx_itr_default;
10077 vsi->rx_rings[i] = ring;
10083 i40e_vsi_clear_rings(vsi);
10088 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
10089 * @pf: board private structure
10090 * @vectors: the number of MSI-X vectors to request
10092 * Returns the number of vectors reserved, or error
10094 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10096 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10097 I40E_MIN_MSIX, vectors);
10099 dev_info(&pf->pdev->dev,
10100 "MSI-X vector reservation failed: %d\n", vectors);
10108 * i40e_init_msix - Setup the MSIX capability
10109 * @pf: board private structure
10111 * Work with the OS to set up the MSIX vectors needed.
10113 * Returns the number of vectors reserved or negative on failure
10115 static int i40e_init_msix(struct i40e_pf *pf)
10117 struct i40e_hw *hw = &pf->hw;
10118 int cpus, extra_vectors;
10122 int iwarp_requested = 0;
10124 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10127 /* The number of vectors we'll request will be comprised of:
10128 * - Add 1 for "other" cause for Admin Queue events, etc.
10129 * - The number of LAN queue pairs
10130 * - Queues being used for RSS.
10131 * We don't need as many as max_rss_size vectors.
10132 * use rss_size instead in the calculation since that
10133 * is governed by number of cpus in the system.
10134 * - assumes symmetric Tx/Rx pairing
10135 * - The number of VMDq pairs
10136 * - The CPU count within the NUMA node if iWARP is enabled
10137 * Once we count this up, try the request.
10139 * If we can't get what we want, we'll simplify to nearly nothing
10140 * and try again. If that still fails, we punt.
10142 vectors_left = hw->func_caps.num_msix_vectors;
10145 /* reserve one vector for miscellaneous handler */
10146 if (vectors_left) {
10151 /* reserve some vectors for the main PF traffic queues. Initially we
10152 * only reserve at most 50% of the available vectors, in the case that
10153 * the number of online CPUs is large. This ensures that we can enable
10154 * extra features as well. Once we've enabled the other features, we
10155 * will use any remaining vectors to reach as close as we can to the
10156 * number of online CPUs.
10158 cpus = num_online_cpus();
10159 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10160 vectors_left -= pf->num_lan_msix;
10162 /* reserve one vector for sideband flow director */
10163 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10164 if (vectors_left) {
10165 pf->num_fdsb_msix = 1;
10169 pf->num_fdsb_msix = 0;
10173 /* can we reserve enough for iWARP? */
10174 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10175 iwarp_requested = pf->num_iwarp_msix;
10178 pf->num_iwarp_msix = 0;
10179 else if (vectors_left < pf->num_iwarp_msix)
10180 pf->num_iwarp_msix = 1;
10181 v_budget += pf->num_iwarp_msix;
10182 vectors_left -= pf->num_iwarp_msix;
10185 /* any vectors left over go for VMDq support */
10186 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
10187 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
10188 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
10190 if (!vectors_left) {
10191 pf->num_vmdq_msix = 0;
10192 pf->num_vmdq_qps = 0;
10194 /* if we're short on vectors for what's desired, we limit
10195 * the queues per vmdq. If this is still more than are
10196 * available, the user will need to change the number of
10197 * queues/vectors used by the PF later with the ethtool
10200 if (vmdq_vecs < vmdq_vecs_wanted)
10201 pf->num_vmdq_qps = 1;
10202 pf->num_vmdq_msix = pf->num_vmdq_qps;
10204 v_budget += vmdq_vecs;
10205 vectors_left -= vmdq_vecs;
10209 /* On systems with a large number of SMP cores, we previously limited
10210 * the number of vectors for num_lan_msix to be at most 50% of the
10211 * available vectors, to allow for other features. Now, we add back
10212 * the remaining vectors. However, we ensure that the total
10213 * num_lan_msix will not exceed num_online_cpus(). To do this, we
10214 * calculate the number of vectors we can add without going over the
10215 * cap of CPUs. For systems with a small number of CPUs this will be
10218 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
10219 pf->num_lan_msix += extra_vectors;
10220 vectors_left -= extra_vectors;
10222 WARN(vectors_left < 0,
10223 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
10225 v_budget += pf->num_lan_msix;
10226 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
10228 if (!pf->msix_entries)
10231 for (i = 0; i < v_budget; i++)
10232 pf->msix_entries[i].entry = i;
10233 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
10235 if (v_actual < I40E_MIN_MSIX) {
10236 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
10237 kfree(pf->msix_entries);
10238 pf->msix_entries = NULL;
10239 pci_disable_msix(pf->pdev);
10242 } else if (v_actual == I40E_MIN_MSIX) {
10243 /* Adjust for minimal MSIX use */
10244 pf->num_vmdq_vsis = 0;
10245 pf->num_vmdq_qps = 0;
10246 pf->num_lan_qps = 1;
10247 pf->num_lan_msix = 1;
10249 } else if (v_actual != v_budget) {
10250 /* If we have limited resources, we will start with no vectors
10251 * for the special features and then allocate vectors to some
10252 * of these features based on the policy and at the end disable
10253 * the features that did not get any vectors.
10257 dev_info(&pf->pdev->dev,
10258 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
10259 v_actual, v_budget);
10260 /* reserve the misc vector */
10261 vec = v_actual - 1;
10263 /* Scale vector usage down */
10264 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
10265 pf->num_vmdq_vsis = 1;
10266 pf->num_vmdq_qps = 1;
10268 /* partition out the remaining vectors */
10271 pf->num_lan_msix = 1;
10274 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10275 pf->num_lan_msix = 1;
10276 pf->num_iwarp_msix = 1;
10278 pf->num_lan_msix = 2;
10282 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10283 pf->num_iwarp_msix = min_t(int, (vec / 3),
10285 pf->num_vmdq_vsis = min_t(int, (vec / 3),
10286 I40E_DEFAULT_NUM_VMDQ_VSI);
10288 pf->num_vmdq_vsis = min_t(int, (vec / 2),
10289 I40E_DEFAULT_NUM_VMDQ_VSI);
10291 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10292 pf->num_fdsb_msix = 1;
10295 pf->num_lan_msix = min_t(int,
10296 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
10298 pf->num_lan_qps = pf->num_lan_msix;
10303 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
10304 (pf->num_fdsb_msix == 0)) {
10305 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
10306 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10307 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10309 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10310 (pf->num_vmdq_msix == 0)) {
10311 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
10312 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
10315 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
10316 (pf->num_iwarp_msix == 0)) {
10317 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
10318 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
10320 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
10321 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
10323 pf->num_vmdq_msix * pf->num_vmdq_vsis,
10325 pf->num_iwarp_msix);
10331 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
10332 * @vsi: the VSI being configured
10333 * @v_idx: index of the vector in the vsi struct
10334 * @cpu: cpu to be used on affinity_mask
10336 * We allocate one q_vector. If allocation fails we return -ENOMEM.
10338 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
10340 struct i40e_q_vector *q_vector;
10342 /* allocate q_vector */
10343 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
10347 q_vector->vsi = vsi;
10348 q_vector->v_idx = v_idx;
10349 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
10352 netif_napi_add(vsi->netdev, &q_vector->napi,
10353 i40e_napi_poll, NAPI_POLL_WEIGHT);
10355 q_vector->rx.latency_range = I40E_LOW_LATENCY;
10356 q_vector->tx.latency_range = I40E_LOW_LATENCY;
10358 /* tie q_vector and vsi together */
10359 vsi->q_vectors[v_idx] = q_vector;
10365 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
10366 * @vsi: the VSI being configured
10368 * We allocate one q_vector per queue interrupt. If allocation fails we
10371 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
10373 struct i40e_pf *pf = vsi->back;
10374 int err, v_idx, num_q_vectors, current_cpu;
10376 /* if not MSIX, give the one vector only to the LAN VSI */
10377 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10378 num_q_vectors = vsi->num_q_vectors;
10379 else if (vsi == pf->vsi[pf->lan_vsi])
10384 current_cpu = cpumask_first(cpu_online_mask);
10386 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
10387 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
10390 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
10391 if (unlikely(current_cpu >= nr_cpu_ids))
10392 current_cpu = cpumask_first(cpu_online_mask);
10399 i40e_free_q_vector(vsi, v_idx);
10405 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
10406 * @pf: board private structure to initialize
10408 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
10413 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10414 vectors = i40e_init_msix(pf);
10416 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
10417 I40E_FLAG_IWARP_ENABLED |
10418 I40E_FLAG_RSS_ENABLED |
10419 I40E_FLAG_DCB_CAPABLE |
10420 I40E_FLAG_DCB_ENABLED |
10421 I40E_FLAG_SRIOV_ENABLED |
10422 I40E_FLAG_FD_SB_ENABLED |
10423 I40E_FLAG_FD_ATR_ENABLED |
10424 I40E_FLAG_VMDQ_ENABLED);
10425 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10427 /* rework the queue expectations without MSIX */
10428 i40e_determine_queue_usage(pf);
10432 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10433 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
10434 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
10435 vectors = pci_enable_msi(pf->pdev);
10437 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
10439 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
10441 vectors = 1; /* one MSI or Legacy vector */
10444 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
10445 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
10447 /* set up vector assignment tracking */
10448 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
10449 pf->irq_pile = kzalloc(size, GFP_KERNEL);
10450 if (!pf->irq_pile) {
10451 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
10454 pf->irq_pile->num_entries = vectors;
10455 pf->irq_pile->search_hint = 0;
10457 /* track first vector for misc interrupts, ignore return */
10458 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
10464 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
10465 * @pf: private board data structure
10467 * Restore the interrupt scheme that was cleared when we suspended the
10468 * device. This should be called during resume to re-allocate the q_vectors
10469 * and reacquire IRQs.
10471 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
10475 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
10476 * scheme. We need to re-enabled them here in order to attempt to
10477 * re-acquire the MSI or MSI-X vectors
10479 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
10481 err = i40e_init_interrupt_scheme(pf);
10485 /* Now that we've re-acquired IRQs, we need to remap the vectors and
10486 * rings together again.
10488 for (i = 0; i < pf->num_alloc_vsi; i++) {
10490 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
10493 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
10497 err = i40e_setup_misc_vector(pf);
10506 i40e_vsi_free_q_vectors(pf->vsi[i]);
10513 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
10514 * @pf: board private structure
10516 * This sets up the handler for MSIX 0, which is used to manage the
10517 * non-queue interrupts, e.g. AdminQ and errors. This is not used
10518 * when in MSI or Legacy interrupt mode.
10520 static int i40e_setup_misc_vector(struct i40e_pf *pf)
10522 struct i40e_hw *hw = &pf->hw;
10525 /* Only request the IRQ once, the first time through. */
10526 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
10527 err = request_irq(pf->msix_entries[0].vector,
10528 i40e_intr, 0, pf->int_name, pf);
10530 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
10531 dev_info(&pf->pdev->dev,
10532 "request_irq for %s failed: %d\n",
10533 pf->int_name, err);
10538 i40e_enable_misc_int_causes(pf);
10540 /* associate no queues to the misc vector */
10541 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
10542 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
10546 i40e_irq_dynamic_enable_icr0(pf);
10552 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
10553 * @vsi: Pointer to vsi structure
10554 * @seed: Buffter to store the hash keys
10555 * @lut: Buffer to store the lookup table entries
10556 * @lut_size: Size of buffer to store the lookup table entries
10558 * Return 0 on success, negative on failure
10560 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
10561 u8 *lut, u16 lut_size)
10563 struct i40e_pf *pf = vsi->back;
10564 struct i40e_hw *hw = &pf->hw;
10568 ret = i40e_aq_get_rss_key(hw, vsi->id,
10569 (struct i40e_aqc_get_set_rss_key_data *)seed);
10571 dev_info(&pf->pdev->dev,
10572 "Cannot get RSS key, err %s aq_err %s\n",
10573 i40e_stat_str(&pf->hw, ret),
10574 i40e_aq_str(&pf->hw,
10575 pf->hw.aq.asq_last_status));
10581 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
10583 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
10585 dev_info(&pf->pdev->dev,
10586 "Cannot get RSS lut, err %s aq_err %s\n",
10587 i40e_stat_str(&pf->hw, ret),
10588 i40e_aq_str(&pf->hw,
10589 pf->hw.aq.asq_last_status));
10598 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
10599 * @vsi: Pointer to vsi structure
10600 * @seed: RSS hash seed
10601 * @lut: Lookup table
10602 * @lut_size: Lookup table size
10604 * Returns 0 on success, negative on failure
10606 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
10607 const u8 *lut, u16 lut_size)
10609 struct i40e_pf *pf = vsi->back;
10610 struct i40e_hw *hw = &pf->hw;
10611 u16 vf_id = vsi->vf_id;
10614 /* Fill out hash function seed */
10616 u32 *seed_dw = (u32 *)seed;
10618 if (vsi->type == I40E_VSI_MAIN) {
10619 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10620 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
10621 } else if (vsi->type == I40E_VSI_SRIOV) {
10622 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
10623 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
10625 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
10630 u32 *lut_dw = (u32 *)lut;
10632 if (vsi->type == I40E_VSI_MAIN) {
10633 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10635 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10636 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
10637 } else if (vsi->type == I40E_VSI_SRIOV) {
10638 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
10640 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
10641 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
10643 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
10652 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
10653 * @vsi: Pointer to VSI structure
10654 * @seed: Buffer to store the keys
10655 * @lut: Buffer to store the lookup table entries
10656 * @lut_size: Size of buffer to store the lookup table entries
10658 * Returns 0 on success, negative on failure
10660 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
10661 u8 *lut, u16 lut_size)
10663 struct i40e_pf *pf = vsi->back;
10664 struct i40e_hw *hw = &pf->hw;
10668 u32 *seed_dw = (u32 *)seed;
10670 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10671 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
10674 u32 *lut_dw = (u32 *)lut;
10676 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10678 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10679 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
10686 * i40e_config_rss - Configure RSS keys and lut
10687 * @vsi: Pointer to VSI structure
10688 * @seed: RSS hash seed
10689 * @lut: Lookup table
10690 * @lut_size: Lookup table size
10692 * Returns 0 on success, negative on failure
10694 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10696 struct i40e_pf *pf = vsi->back;
10698 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
10699 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
10701 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
10705 * i40e_get_rss - Get RSS keys and lut
10706 * @vsi: Pointer to VSI structure
10707 * @seed: Buffer to store the keys
10708 * @lut: Buffer to store the lookup table entries
10709 * lut_size: Size of buffer to store the lookup table entries
10711 * Returns 0 on success, negative on failure
10713 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10715 struct i40e_pf *pf = vsi->back;
10717 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
10718 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
10720 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
10724 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
10725 * @pf: Pointer to board private structure
10726 * @lut: Lookup table
10727 * @rss_table_size: Lookup table size
10728 * @rss_size: Range of queue number for hashing
10730 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
10731 u16 rss_table_size, u16 rss_size)
10735 for (i = 0; i < rss_table_size; i++)
10736 lut[i] = i % rss_size;
10740 * i40e_pf_config_rss - Prepare for RSS if used
10741 * @pf: board private structure
10743 static int i40e_pf_config_rss(struct i40e_pf *pf)
10745 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10746 u8 seed[I40E_HKEY_ARRAY_SIZE];
10748 struct i40e_hw *hw = &pf->hw;
10753 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
10754 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
10755 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
10756 hena |= i40e_pf_get_default_rss_hena(pf);
10758 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
10759 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
10761 /* Determine the RSS table size based on the hardware capabilities */
10762 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
10763 reg_val = (pf->rss_table_size == 512) ?
10764 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
10765 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
10766 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
10768 /* Determine the RSS size of the VSI */
10769 if (!vsi->rss_size) {
10772 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
10773 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
10775 if (!vsi->rss_size)
10778 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
10782 /* Use user configured lut if there is one, otherwise use default */
10783 if (vsi->rss_lut_user)
10784 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
10786 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
10788 /* Use user configured hash key if there is one, otherwise
10791 if (vsi->rss_hkey_user)
10792 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
10794 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
10795 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
10802 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
10803 * @pf: board private structure
10804 * @queue_count: the requested queue count for rss.
10806 * returns 0 if rss is not enabled, if enabled returns the final rss queue
10807 * count which may be different from the requested queue count.
10808 * Note: expects to be called while under rtnl_lock()
10810 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
10812 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10815 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
10818 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
10820 if (queue_count != vsi->num_queue_pairs) {
10823 vsi->req_queue_pairs = queue_count;
10824 i40e_prep_for_reset(pf, true);
10826 pf->alloc_rss_size = new_rss_size;
10828 i40e_reset_and_rebuild(pf, true, true);
10830 /* Discard the user configured hash keys and lut, if less
10831 * queues are enabled.
10833 if (queue_count < vsi->rss_size) {
10834 i40e_clear_rss_config_user(vsi);
10835 dev_dbg(&pf->pdev->dev,
10836 "discard user configured hash keys and lut\n");
10839 /* Reset vsi->rss_size, as number of enabled queues changed */
10840 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
10841 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
10843 i40e_pf_config_rss(pf);
10845 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
10846 vsi->req_queue_pairs, pf->rss_size_max);
10847 return pf->alloc_rss_size;
10851 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
10852 * @pf: board private structure
10854 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
10856 i40e_status status;
10857 bool min_valid, max_valid;
10858 u32 max_bw, min_bw;
10860 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
10861 &min_valid, &max_valid);
10865 pf->min_bw = min_bw;
10867 pf->max_bw = max_bw;
10874 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
10875 * @pf: board private structure
10877 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
10879 struct i40e_aqc_configure_partition_bw_data bw_data;
10880 i40e_status status;
10882 /* Set the valid bit for this PF */
10883 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
10884 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
10885 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
10887 /* Set the new bandwidths */
10888 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
10894 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
10895 * @pf: board private structure
10897 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
10899 /* Commit temporary BW setting to permanent NVM image */
10900 enum i40e_admin_queue_err last_aq_status;
10904 if (pf->hw.partition_id != 1) {
10905 dev_info(&pf->pdev->dev,
10906 "Commit BW only works on partition 1! This is partition %d",
10907 pf->hw.partition_id);
10908 ret = I40E_NOT_SUPPORTED;
10909 goto bw_commit_out;
10912 /* Acquire NVM for read access */
10913 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
10914 last_aq_status = pf->hw.aq.asq_last_status;
10916 dev_info(&pf->pdev->dev,
10917 "Cannot acquire NVM for read access, err %s aq_err %s\n",
10918 i40e_stat_str(&pf->hw, ret),
10919 i40e_aq_str(&pf->hw, last_aq_status));
10920 goto bw_commit_out;
10923 /* Read word 0x10 of NVM - SW compatibility word 1 */
10924 ret = i40e_aq_read_nvm(&pf->hw,
10925 I40E_SR_NVM_CONTROL_WORD,
10926 0x10, sizeof(nvm_word), &nvm_word,
10928 /* Save off last admin queue command status before releasing
10931 last_aq_status = pf->hw.aq.asq_last_status;
10932 i40e_release_nvm(&pf->hw);
10934 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
10935 i40e_stat_str(&pf->hw, ret),
10936 i40e_aq_str(&pf->hw, last_aq_status));
10937 goto bw_commit_out;
10940 /* Wait a bit for NVM release to complete */
10943 /* Acquire NVM for write access */
10944 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
10945 last_aq_status = pf->hw.aq.asq_last_status;
10947 dev_info(&pf->pdev->dev,
10948 "Cannot acquire NVM for write access, err %s aq_err %s\n",
10949 i40e_stat_str(&pf->hw, ret),
10950 i40e_aq_str(&pf->hw, last_aq_status));
10951 goto bw_commit_out;
10953 /* Write it back out unchanged to initiate update NVM,
10954 * which will force a write of the shadow (alt) RAM to
10955 * the NVM - thus storing the bandwidth values permanently.
10957 ret = i40e_aq_update_nvm(&pf->hw,
10958 I40E_SR_NVM_CONTROL_WORD,
10959 0x10, sizeof(nvm_word),
10960 &nvm_word, true, NULL);
10961 /* Save off last admin queue command status before releasing
10964 last_aq_status = pf->hw.aq.asq_last_status;
10965 i40e_release_nvm(&pf->hw);
10967 dev_info(&pf->pdev->dev,
10968 "BW settings NOT SAVED, err %s aq_err %s\n",
10969 i40e_stat_str(&pf->hw, ret),
10970 i40e_aq_str(&pf->hw, last_aq_status));
10977 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
10978 * @pf: board private structure to initialize
10980 * i40e_sw_init initializes the Adapter private data structure.
10981 * Fields are initialized based on PCI device information and
10982 * OS network device settings (MTU size).
10984 static int i40e_sw_init(struct i40e_pf *pf)
10989 /* Set default capability flags */
10990 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
10991 I40E_FLAG_MSI_ENABLED |
10992 I40E_FLAG_MSIX_ENABLED;
10994 /* Set default ITR */
10995 pf->rx_itr_default = I40E_ITR_RX_DEF;
10996 pf->tx_itr_default = I40E_ITR_TX_DEF;
10998 /* Depending on PF configurations, it is possible that the RSS
10999 * maximum might end up larger than the available queues
11001 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11002 pf->alloc_rss_size = 1;
11003 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11004 pf->rss_size_max = min_t(int, pf->rss_size_max,
11005 pf->hw.func_caps.num_tx_qp);
11006 if (pf->hw.func_caps.rss) {
11007 pf->flags |= I40E_FLAG_RSS_ENABLED;
11008 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11009 num_online_cpus());
11012 /* MFP mode enabled */
11013 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11014 pf->flags |= I40E_FLAG_MFP_ENABLED;
11015 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11016 if (i40e_get_partition_bw_setting(pf)) {
11017 dev_warn(&pf->pdev->dev,
11018 "Could not get partition bw settings\n");
11020 dev_info(&pf->pdev->dev,
11021 "Partition BW Min = %8.8x, Max = %8.8x\n",
11022 pf->min_bw, pf->max_bw);
11024 /* nudge the Tx scheduler */
11025 i40e_set_partition_bw_setting(pf);
11029 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11030 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11031 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11032 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11033 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11034 pf->hw.num_partitions > 1)
11035 dev_info(&pf->pdev->dev,
11036 "Flow Director Sideband mode Disabled in MFP mode\n");
11038 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11039 pf->fdir_pf_filter_count =
11040 pf->hw.func_caps.fd_filters_guaranteed;
11041 pf->hw.fdir_shared_filter_count =
11042 pf->hw.func_caps.fd_filters_best_effort;
11045 if (pf->hw.mac.type == I40E_MAC_X722) {
11046 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11047 I40E_HW_128_QP_RSS_CAPABLE |
11048 I40E_HW_ATR_EVICT_CAPABLE |
11049 I40E_HW_WB_ON_ITR_CAPABLE |
11050 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11051 I40E_HW_NO_PCI_LINK_CHECK |
11052 I40E_HW_USE_SET_LLDP_MIB |
11053 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11054 I40E_HW_PTP_L4_CAPABLE |
11055 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11056 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11058 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11059 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11060 I40E_FDEVICT_PCTYPE_DEFAULT) {
11061 dev_warn(&pf->pdev->dev,
11062 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11063 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11065 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11066 ((pf->hw.aq.api_maj_ver == 1) &&
11067 (pf->hw.aq.api_min_ver > 4))) {
11068 /* Supported in FW API version higher than 1.4 */
11069 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11072 /* Enable HW ATR eviction if possible */
11073 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11074 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11076 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11077 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11078 (pf->hw.aq.fw_maj_ver < 4))) {
11079 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11080 /* No DCB support for FW < v4.33 */
11081 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11084 /* Disable FW LLDP if FW < v4.3 */
11085 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11086 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11087 (pf->hw.aq.fw_maj_ver < 4)))
11088 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11090 /* Use the FW Set LLDP MIB API if FW > v4.40 */
11091 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11092 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11093 (pf->hw.aq.fw_maj_ver >= 5)))
11094 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11096 /* Enable PTP L4 if FW > v6.0 */
11097 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11098 pf->hw.aq.fw_maj_ver >= 6)
11099 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11101 if (pf->hw.func_caps.vmdq) {
11102 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11103 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11104 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11107 if (pf->hw.func_caps.iwarp) {
11108 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11109 /* IWARP needs one extra vector for CQP just like MISC.*/
11110 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11113 #ifdef CONFIG_PCI_IOV
11114 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
11115 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
11116 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
11117 pf->num_req_vfs = min_t(int,
11118 pf->hw.func_caps.num_vfs,
11119 I40E_MAX_VF_COUNT);
11121 #endif /* CONFIG_PCI_IOV */
11122 pf->eeprom_version = 0xDEAD;
11123 pf->lan_veb = I40E_NO_VEB;
11124 pf->lan_vsi = I40E_NO_VSI;
11126 /* By default FW has this off for performance reasons */
11127 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
11129 /* set up queue assignment tracking */
11130 size = sizeof(struct i40e_lump_tracking)
11131 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
11132 pf->qp_pile = kzalloc(size, GFP_KERNEL);
11133 if (!pf->qp_pile) {
11137 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
11138 pf->qp_pile->search_hint = 0;
11140 pf->tx_timeout_recovery_level = 1;
11142 mutex_init(&pf->switch_mutex);
11149 * i40e_set_ntuple - set the ntuple feature flag and take action
11150 * @pf: board private structure to initialize
11151 * @features: the feature set that the stack is suggesting
11153 * returns a bool to indicate if reset needs to happen
11155 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
11157 bool need_reset = false;
11159 /* Check if Flow Director n-tuple support was enabled or disabled. If
11160 * the state changed, we need to reset.
11162 if (features & NETIF_F_NTUPLE) {
11163 /* Enable filters and mark for reset */
11164 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
11166 /* enable FD_SB only if there is MSI-X vector and no cloud
11169 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
11170 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11171 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
11174 /* turn off filters, mark for reset and clear SW filter list */
11175 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11177 i40e_fdir_filter_exit(pf);
11179 pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
11180 I40E_FLAG_FD_SB_AUTO_DISABLED);
11181 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11183 /* reset fd counters */
11184 pf->fd_add_err = 0;
11185 pf->fd_atr_cnt = 0;
11186 /* if ATR was auto disabled it can be re-enabled. */
11187 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
11188 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
11189 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
11190 (I40E_DEBUG_FD & pf->hw.debug_mask))
11191 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
11198 * i40e_clear_rss_lut - clear the rx hash lookup table
11199 * @vsi: the VSI being configured
11201 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
11203 struct i40e_pf *pf = vsi->back;
11204 struct i40e_hw *hw = &pf->hw;
11205 u16 vf_id = vsi->vf_id;
11208 if (vsi->type == I40E_VSI_MAIN) {
11209 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11210 wr32(hw, I40E_PFQF_HLUT(i), 0);
11211 } else if (vsi->type == I40E_VSI_SRIOV) {
11212 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11213 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
11215 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11220 * i40e_set_features - set the netdev feature flags
11221 * @netdev: ptr to the netdev being adjusted
11222 * @features: the feature set that the stack is suggesting
11223 * Note: expects to be called while under rtnl_lock()
11225 static int i40e_set_features(struct net_device *netdev,
11226 netdev_features_t features)
11228 struct i40e_netdev_priv *np = netdev_priv(netdev);
11229 struct i40e_vsi *vsi = np->vsi;
11230 struct i40e_pf *pf = vsi->back;
11233 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
11234 i40e_pf_config_rss(pf);
11235 else if (!(features & NETIF_F_RXHASH) &&
11236 netdev->features & NETIF_F_RXHASH)
11237 i40e_clear_rss_lut(vsi);
11239 if (features & NETIF_F_HW_VLAN_CTAG_RX)
11240 i40e_vlan_stripping_enable(vsi);
11242 i40e_vlan_stripping_disable(vsi);
11244 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
11245 dev_err(&pf->pdev->dev,
11246 "Offloaded tc filters active, can't turn hw_tc_offload off");
11250 need_reset = i40e_set_ntuple(pf, features);
11253 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11259 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
11260 * @pf: board private structure
11261 * @port: The UDP port to look up
11263 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
11265 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
11269 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
11270 if (pf->udp_ports[i].port == port)
11278 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
11279 * @netdev: This physical port's netdev
11280 * @ti: Tunnel endpoint information
11282 static void i40e_udp_tunnel_add(struct net_device *netdev,
11283 struct udp_tunnel_info *ti)
11285 struct i40e_netdev_priv *np = netdev_priv(netdev);
11286 struct i40e_vsi *vsi = np->vsi;
11287 struct i40e_pf *pf = vsi->back;
11288 u16 port = ntohs(ti->port);
11292 idx = i40e_get_udp_port_idx(pf, port);
11294 /* Check if port already exists */
11295 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11296 netdev_info(netdev, "port %d already offloaded\n", port);
11300 /* Now check if there is space to add the new port */
11301 next_idx = i40e_get_udp_port_idx(pf, 0);
11303 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11304 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
11309 switch (ti->type) {
11310 case UDP_TUNNEL_TYPE_VXLAN:
11311 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
11313 case UDP_TUNNEL_TYPE_GENEVE:
11314 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
11316 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
11322 /* New port: add it and mark its index in the bitmap */
11323 pf->udp_ports[next_idx].port = port;
11324 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
11325 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
11329 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
11330 * @netdev: This physical port's netdev
11331 * @ti: Tunnel endpoint information
11333 static void i40e_udp_tunnel_del(struct net_device *netdev,
11334 struct udp_tunnel_info *ti)
11336 struct i40e_netdev_priv *np = netdev_priv(netdev);
11337 struct i40e_vsi *vsi = np->vsi;
11338 struct i40e_pf *pf = vsi->back;
11339 u16 port = ntohs(ti->port);
11342 idx = i40e_get_udp_port_idx(pf, port);
11344 /* Check if port already exists */
11345 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
11348 switch (ti->type) {
11349 case UDP_TUNNEL_TYPE_VXLAN:
11350 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
11353 case UDP_TUNNEL_TYPE_GENEVE:
11354 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
11361 /* if port exists, set it to 0 (mark for deletion)
11362 * and make it pending
11364 pf->udp_ports[idx].port = 0;
11365 pf->pending_udp_bitmap |= BIT_ULL(idx);
11366 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
11370 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
11374 static int i40e_get_phys_port_id(struct net_device *netdev,
11375 struct netdev_phys_item_id *ppid)
11377 struct i40e_netdev_priv *np = netdev_priv(netdev);
11378 struct i40e_pf *pf = np->vsi->back;
11379 struct i40e_hw *hw = &pf->hw;
11381 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
11382 return -EOPNOTSUPP;
11384 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
11385 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
11391 * i40e_ndo_fdb_add - add an entry to the hardware database
11392 * @ndm: the input from the stack
11393 * @tb: pointer to array of nladdr (unused)
11394 * @dev: the net device pointer
11395 * @addr: the MAC address entry being added
11396 * @flags: instructions from stack about fdb operation
11398 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
11399 struct net_device *dev,
11400 const unsigned char *addr, u16 vid,
11403 struct i40e_netdev_priv *np = netdev_priv(dev);
11404 struct i40e_pf *pf = np->vsi->back;
11407 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
11408 return -EOPNOTSUPP;
11411 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
11415 /* Hardware does not support aging addresses so if a
11416 * ndm_state is given only allow permanent addresses
11418 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
11419 netdev_info(dev, "FDB only supports static addresses\n");
11423 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
11424 err = dev_uc_add_excl(dev, addr);
11425 else if (is_multicast_ether_addr(addr))
11426 err = dev_mc_add_excl(dev, addr);
11430 /* Only return duplicate errors if NLM_F_EXCL is set */
11431 if (err == -EEXIST && !(flags & NLM_F_EXCL))
11438 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
11439 * @dev: the netdev being configured
11440 * @nlh: RTNL message
11442 * Inserts a new hardware bridge if not already created and
11443 * enables the bridging mode requested (VEB or VEPA). If the
11444 * hardware bridge has already been inserted and the request
11445 * is to change the mode then that requires a PF reset to
11446 * allow rebuild of the components with required hardware
11447 * bridge mode enabled.
11449 * Note: expects to be called while under rtnl_lock()
11451 static int i40e_ndo_bridge_setlink(struct net_device *dev,
11452 struct nlmsghdr *nlh,
11455 struct i40e_netdev_priv *np = netdev_priv(dev);
11456 struct i40e_vsi *vsi = np->vsi;
11457 struct i40e_pf *pf = vsi->back;
11458 struct i40e_veb *veb = NULL;
11459 struct nlattr *attr, *br_spec;
11462 /* Only for PF VSI for now */
11463 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11464 return -EOPNOTSUPP;
11466 /* Find the HW bridge for PF VSI */
11467 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11468 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11472 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11474 nla_for_each_nested(attr, br_spec, rem) {
11477 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11480 mode = nla_get_u16(attr);
11481 if ((mode != BRIDGE_MODE_VEPA) &&
11482 (mode != BRIDGE_MODE_VEB))
11485 /* Insert a new HW bridge */
11487 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
11488 vsi->tc_config.enabled_tc);
11490 veb->bridge_mode = mode;
11491 i40e_config_bridge_mode(veb);
11493 /* No Bridge HW offload available */
11497 } else if (mode != veb->bridge_mode) {
11498 /* Existing HW bridge but different mode needs reset */
11499 veb->bridge_mode = mode;
11500 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
11501 if (mode == BRIDGE_MODE_VEB)
11502 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11504 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
11505 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11514 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
11517 * @seq: RTNL message seq #
11518 * @dev: the netdev being configured
11519 * @filter_mask: unused
11520 * @nlflags: netlink flags passed in
11522 * Return the mode in which the hardware bridge is operating in
11525 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11526 struct net_device *dev,
11527 u32 __always_unused filter_mask,
11530 struct i40e_netdev_priv *np = netdev_priv(dev);
11531 struct i40e_vsi *vsi = np->vsi;
11532 struct i40e_pf *pf = vsi->back;
11533 struct i40e_veb *veb = NULL;
11536 /* Only for PF VSI for now */
11537 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11538 return -EOPNOTSUPP;
11540 /* Find the HW bridge for the PF VSI */
11541 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11542 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11549 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
11550 0, 0, nlflags, filter_mask, NULL);
11554 * i40e_features_check - Validate encapsulated packet conforms to limits
11556 * @dev: This physical port's netdev
11557 * @features: Offload features that the stack believes apply
11559 static netdev_features_t i40e_features_check(struct sk_buff *skb,
11560 struct net_device *dev,
11561 netdev_features_t features)
11565 /* No point in doing any of this if neither checksum nor GSO are
11566 * being requested for this frame. We can rule out both by just
11567 * checking for CHECKSUM_PARTIAL
11569 if (skb->ip_summed != CHECKSUM_PARTIAL)
11572 /* We cannot support GSO if the MSS is going to be less than
11573 * 64 bytes. If it is then we need to drop support for GSO.
11575 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
11576 features &= ~NETIF_F_GSO_MASK;
11578 /* MACLEN can support at most 63 words */
11579 len = skb_network_header(skb) - skb->data;
11580 if (len & ~(63 * 2))
11583 /* IPLEN and EIPLEN can support at most 127 dwords */
11584 len = skb_transport_header(skb) - skb_network_header(skb);
11585 if (len & ~(127 * 4))
11588 if (skb->encapsulation) {
11589 /* L4TUNLEN can support 127 words */
11590 len = skb_inner_network_header(skb) - skb_transport_header(skb);
11591 if (len & ~(127 * 2))
11594 /* IPLEN can support at most 127 dwords */
11595 len = skb_inner_transport_header(skb) -
11596 skb_inner_network_header(skb);
11597 if (len & ~(127 * 4))
11601 /* No need to validate L4LEN as TCP is the only protocol with a
11602 * a flexible value and we support all possible values supported
11603 * by TCP, which is at most 15 dwords
11608 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11612 * i40e_xdp_setup - add/remove an XDP program
11613 * @vsi: VSI to changed
11614 * @prog: XDP program
11616 static int i40e_xdp_setup(struct i40e_vsi *vsi,
11617 struct bpf_prog *prog)
11619 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
11620 struct i40e_pf *pf = vsi->back;
11621 struct bpf_prog *old_prog;
11625 /* Don't allow frames that span over multiple buffers */
11626 if (frame_size > vsi->rx_buf_len)
11629 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
11632 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
11633 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
11636 i40e_prep_for_reset(pf, true);
11638 old_prog = xchg(&vsi->xdp_prog, prog);
11641 i40e_reset_and_rebuild(pf, true, true);
11643 for (i = 0; i < vsi->num_queue_pairs; i++)
11644 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
11647 bpf_prog_put(old_prog);
11653 * i40e_xdp - implements ndo_bpf for i40e
11655 * @xdp: XDP command
11657 static int i40e_xdp(struct net_device *dev,
11658 struct netdev_bpf *xdp)
11660 struct i40e_netdev_priv *np = netdev_priv(dev);
11661 struct i40e_vsi *vsi = np->vsi;
11663 if (vsi->type != I40E_VSI_MAIN)
11666 switch (xdp->command) {
11667 case XDP_SETUP_PROG:
11668 return i40e_xdp_setup(vsi, xdp->prog);
11669 case XDP_QUERY_PROG:
11670 xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
11671 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
11678 static const struct net_device_ops i40e_netdev_ops = {
11679 .ndo_open = i40e_open,
11680 .ndo_stop = i40e_close,
11681 .ndo_start_xmit = i40e_lan_xmit_frame,
11682 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
11683 .ndo_set_rx_mode = i40e_set_rx_mode,
11684 .ndo_validate_addr = eth_validate_addr,
11685 .ndo_set_mac_address = i40e_set_mac,
11686 .ndo_change_mtu = i40e_change_mtu,
11687 .ndo_do_ioctl = i40e_ioctl,
11688 .ndo_tx_timeout = i40e_tx_timeout,
11689 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
11690 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
11691 #ifdef CONFIG_NET_POLL_CONTROLLER
11692 .ndo_poll_controller = i40e_netpoll,
11694 .ndo_setup_tc = __i40e_setup_tc,
11695 .ndo_set_features = i40e_set_features,
11696 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
11697 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
11698 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
11699 .ndo_get_vf_config = i40e_ndo_get_vf_config,
11700 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
11701 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
11702 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
11703 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
11704 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
11705 .ndo_get_phys_port_id = i40e_get_phys_port_id,
11706 .ndo_fdb_add = i40e_ndo_fdb_add,
11707 .ndo_features_check = i40e_features_check,
11708 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
11709 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
11710 .ndo_bpf = i40e_xdp,
11714 * i40e_config_netdev - Setup the netdev flags
11715 * @vsi: the VSI being configured
11717 * Returns 0 on success, negative value on failure
11719 static int i40e_config_netdev(struct i40e_vsi *vsi)
11721 struct i40e_pf *pf = vsi->back;
11722 struct i40e_hw *hw = &pf->hw;
11723 struct i40e_netdev_priv *np;
11724 struct net_device *netdev;
11725 u8 broadcast[ETH_ALEN];
11726 u8 mac_addr[ETH_ALEN];
11728 netdev_features_t hw_enc_features;
11729 netdev_features_t hw_features;
11731 etherdev_size = sizeof(struct i40e_netdev_priv);
11732 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
11736 vsi->netdev = netdev;
11737 np = netdev_priv(netdev);
11740 hw_enc_features = NETIF_F_SG |
11742 NETIF_F_IPV6_CSUM |
11744 NETIF_F_SOFT_FEATURES |
11749 NETIF_F_GSO_GRE_CSUM |
11750 NETIF_F_GSO_PARTIAL |
11751 NETIF_F_GSO_UDP_TUNNEL |
11752 NETIF_F_GSO_UDP_TUNNEL_CSUM |
11758 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
11759 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
11761 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
11763 netdev->hw_enc_features |= hw_enc_features;
11765 /* record features VLANs can make use of */
11766 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
11768 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
11769 netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
11771 hw_features = hw_enc_features |
11772 NETIF_F_HW_VLAN_CTAG_TX |
11773 NETIF_F_HW_VLAN_CTAG_RX;
11775 netdev->hw_features |= hw_features;
11777 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
11778 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
11780 if (vsi->type == I40E_VSI_MAIN) {
11781 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
11782 ether_addr_copy(mac_addr, hw->mac.perm_addr);
11783 /* The following steps are necessary for two reasons. First,
11784 * some older NVM configurations load a default MAC-VLAN
11785 * filter that will accept any tagged packet, and we want to
11786 * replace this with a normal filter. Additionally, it is
11787 * possible our MAC address was provided by the platform using
11788 * Open Firmware or similar.
11790 * Thus, we need to remove the default filter and install one
11791 * specific to the MAC address.
11793 i40e_rm_default_mac_filter(vsi, mac_addr);
11794 spin_lock_bh(&vsi->mac_filter_hash_lock);
11795 i40e_add_mac_filter(vsi, mac_addr);
11796 spin_unlock_bh(&vsi->mac_filter_hash_lock);
11798 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
11799 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
11800 * the end, which is 4 bytes long, so force truncation of the
11801 * original name by IFNAMSIZ - 4
11803 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
11805 pf->vsi[pf->lan_vsi]->netdev->name);
11806 random_ether_addr(mac_addr);
11808 spin_lock_bh(&vsi->mac_filter_hash_lock);
11809 i40e_add_mac_filter(vsi, mac_addr);
11810 spin_unlock_bh(&vsi->mac_filter_hash_lock);
11813 /* Add the broadcast filter so that we initially will receive
11814 * broadcast packets. Note that when a new VLAN is first added the
11815 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
11816 * specific filters as part of transitioning into "vlan" operation.
11817 * When more VLANs are added, the driver will copy each existing MAC
11818 * filter and add it for the new VLAN.
11820 * Broadcast filters are handled specially by
11821 * i40e_sync_filters_subtask, as the driver must to set the broadcast
11822 * promiscuous bit instead of adding this directly as a MAC/VLAN
11823 * filter. The subtask will update the correct broadcast promiscuous
11824 * bits as VLANs become active or inactive.
11826 eth_broadcast_addr(broadcast);
11827 spin_lock_bh(&vsi->mac_filter_hash_lock);
11828 i40e_add_mac_filter(vsi, broadcast);
11829 spin_unlock_bh(&vsi->mac_filter_hash_lock);
11831 ether_addr_copy(netdev->dev_addr, mac_addr);
11832 ether_addr_copy(netdev->perm_addr, mac_addr);
11834 netdev->priv_flags |= IFF_UNICAST_FLT;
11835 netdev->priv_flags |= IFF_SUPP_NOFCS;
11836 /* Setup netdev TC information */
11837 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
11839 netdev->netdev_ops = &i40e_netdev_ops;
11840 netdev->watchdog_timeo = 5 * HZ;
11841 i40e_set_ethtool_ops(netdev);
11843 /* MTU range: 68 - 9706 */
11844 netdev->min_mtu = ETH_MIN_MTU;
11845 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
11851 * i40e_vsi_delete - Delete a VSI from the switch
11852 * @vsi: the VSI being removed
11854 * Returns 0 on success, negative value on failure
11856 static void i40e_vsi_delete(struct i40e_vsi *vsi)
11858 /* remove default VSI is not allowed */
11859 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
11862 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
11866 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
11867 * @vsi: the VSI being queried
11869 * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
11871 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
11873 struct i40e_veb *veb;
11874 struct i40e_pf *pf = vsi->back;
11876 /* Uplink is not a bridge so default to VEB */
11877 if (vsi->veb_idx == I40E_NO_VEB)
11880 veb = pf->veb[vsi->veb_idx];
11882 dev_info(&pf->pdev->dev,
11883 "There is no veb associated with the bridge\n");
11887 /* Uplink is a bridge in VEPA mode */
11888 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
11891 /* Uplink is a bridge in VEB mode */
11895 /* VEPA is now default bridge, so return 0 */
11900 * i40e_add_vsi - Add a VSI to the switch
11901 * @vsi: the VSI being configured
11903 * This initializes a VSI context depending on the VSI type to be added and
11904 * passes it down to the add_vsi aq command.
11906 static int i40e_add_vsi(struct i40e_vsi *vsi)
11909 struct i40e_pf *pf = vsi->back;
11910 struct i40e_hw *hw = &pf->hw;
11911 struct i40e_vsi_context ctxt;
11912 struct i40e_mac_filter *f;
11913 struct hlist_node *h;
11916 u8 enabled_tc = 0x1; /* TC0 enabled */
11919 memset(&ctxt, 0, sizeof(ctxt));
11920 switch (vsi->type) {
11921 case I40E_VSI_MAIN:
11922 /* The PF's main VSI is already setup as part of the
11923 * device initialization, so we'll not bother with
11924 * the add_vsi call, but we will retrieve the current
11927 ctxt.seid = pf->main_vsi_seid;
11928 ctxt.pf_num = pf->hw.pf_id;
11930 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
11931 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
11933 dev_info(&pf->pdev->dev,
11934 "couldn't get PF vsi config, err %s aq_err %s\n",
11935 i40e_stat_str(&pf->hw, ret),
11936 i40e_aq_str(&pf->hw,
11937 pf->hw.aq.asq_last_status));
11940 vsi->info = ctxt.info;
11941 vsi->info.valid_sections = 0;
11943 vsi->seid = ctxt.seid;
11944 vsi->id = ctxt.vsi_number;
11946 enabled_tc = i40e_pf_get_tc_map(pf);
11948 /* Source pruning is enabled by default, so the flag is
11949 * negative logic - if it's set, we need to fiddle with
11950 * the VSI to disable source pruning.
11952 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
11953 memset(&ctxt, 0, sizeof(ctxt));
11954 ctxt.seid = pf->main_vsi_seid;
11955 ctxt.pf_num = pf->hw.pf_id;
11957 ctxt.info.valid_sections |=
11958 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
11959 ctxt.info.switch_id =
11960 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
11961 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11963 dev_info(&pf->pdev->dev,
11964 "update vsi failed, err %s aq_err %s\n",
11965 i40e_stat_str(&pf->hw, ret),
11966 i40e_aq_str(&pf->hw,
11967 pf->hw.aq.asq_last_status));
11973 /* MFP mode setup queue map and update VSI */
11974 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
11975 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
11976 memset(&ctxt, 0, sizeof(ctxt));
11977 ctxt.seid = pf->main_vsi_seid;
11978 ctxt.pf_num = pf->hw.pf_id;
11980 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
11981 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11983 dev_info(&pf->pdev->dev,
11984 "update vsi failed, err %s aq_err %s\n",
11985 i40e_stat_str(&pf->hw, ret),
11986 i40e_aq_str(&pf->hw,
11987 pf->hw.aq.asq_last_status));
11991 /* update the local VSI info queue map */
11992 i40e_vsi_update_queue_map(vsi, &ctxt);
11993 vsi->info.valid_sections = 0;
11995 /* Default/Main VSI is only enabled for TC0
11996 * reconfigure it to enable all TCs that are
11997 * available on the port in SFP mode.
11998 * For MFP case the iSCSI PF would use this
11999 * flow to enable LAN+iSCSI TC.
12001 ret = i40e_vsi_config_tc(vsi, enabled_tc);
12003 /* Single TC condition is not fatal,
12004 * message and continue
12006 dev_info(&pf->pdev->dev,
12007 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
12009 i40e_stat_str(&pf->hw, ret),
12010 i40e_aq_str(&pf->hw,
12011 pf->hw.aq.asq_last_status));
12016 case I40E_VSI_FDIR:
12017 ctxt.pf_num = hw->pf_id;
12019 ctxt.uplink_seid = vsi->uplink_seid;
12020 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12021 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12022 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
12023 (i40e_is_vsi_uplink_mode_veb(vsi))) {
12024 ctxt.info.valid_sections |=
12025 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12026 ctxt.info.switch_id =
12027 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12029 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12032 case I40E_VSI_VMDQ2:
12033 ctxt.pf_num = hw->pf_id;
12035 ctxt.uplink_seid = vsi->uplink_seid;
12036 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12037 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
12039 /* This VSI is connected to VEB so the switch_id
12040 * should be set to zero by default.
12042 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12043 ctxt.info.valid_sections |=
12044 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12045 ctxt.info.switch_id =
12046 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12049 /* Setup the VSI tx/rx queue map for TC0 only for now */
12050 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12053 case I40E_VSI_SRIOV:
12054 ctxt.pf_num = hw->pf_id;
12055 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
12056 ctxt.uplink_seid = vsi->uplink_seid;
12057 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12058 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
12060 /* This VSI is connected to VEB so the switch_id
12061 * should be set to zero by default.
12063 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12064 ctxt.info.valid_sections |=
12065 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12066 ctxt.info.switch_id =
12067 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12070 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
12071 ctxt.info.valid_sections |=
12072 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
12073 ctxt.info.queueing_opt_flags |=
12074 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
12075 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
12078 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
12079 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
12080 if (pf->vf[vsi->vf_id].spoofchk) {
12081 ctxt.info.valid_sections |=
12082 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
12083 ctxt.info.sec_flags |=
12084 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
12085 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
12087 /* Setup the VSI tx/rx queue map for TC0 only for now */
12088 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12091 case I40E_VSI_IWARP:
12092 /* send down message to iWARP */
12099 if (vsi->type != I40E_VSI_MAIN) {
12100 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
12102 dev_info(&vsi->back->pdev->dev,
12103 "add vsi failed, err %s aq_err %s\n",
12104 i40e_stat_str(&pf->hw, ret),
12105 i40e_aq_str(&pf->hw,
12106 pf->hw.aq.asq_last_status));
12110 vsi->info = ctxt.info;
12111 vsi->info.valid_sections = 0;
12112 vsi->seid = ctxt.seid;
12113 vsi->id = ctxt.vsi_number;
12116 vsi->active_filters = 0;
12117 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
12118 spin_lock_bh(&vsi->mac_filter_hash_lock);
12119 /* If macvlan filters already exist, force them to get loaded */
12120 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
12121 f->state = I40E_FILTER_NEW;
12124 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12127 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
12128 pf->flags |= I40E_FLAG_FILTER_SYNC;
12131 /* Update VSI BW information */
12132 ret = i40e_vsi_get_bw_info(vsi);
12134 dev_info(&pf->pdev->dev,
12135 "couldn't get vsi bw info, err %s aq_err %s\n",
12136 i40e_stat_str(&pf->hw, ret),
12137 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12138 /* VSI is already added so not tearing that up */
12147 * i40e_vsi_release - Delete a VSI and free its resources
12148 * @vsi: the VSI being removed
12150 * Returns 0 on success or < 0 on error
12152 int i40e_vsi_release(struct i40e_vsi *vsi)
12154 struct i40e_mac_filter *f;
12155 struct hlist_node *h;
12156 struct i40e_veb *veb = NULL;
12157 struct i40e_pf *pf;
12163 /* release of a VEB-owner or last VSI is not allowed */
12164 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
12165 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
12166 vsi->seid, vsi->uplink_seid);
12169 if (vsi == pf->vsi[pf->lan_vsi] &&
12170 !test_bit(__I40E_DOWN, pf->state)) {
12171 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
12175 uplink_seid = vsi->uplink_seid;
12176 if (vsi->type != I40E_VSI_SRIOV) {
12177 if (vsi->netdev_registered) {
12178 vsi->netdev_registered = false;
12180 /* results in a call to i40e_close() */
12181 unregister_netdev(vsi->netdev);
12184 i40e_vsi_close(vsi);
12186 i40e_vsi_disable_irq(vsi);
12189 spin_lock_bh(&vsi->mac_filter_hash_lock);
12191 /* clear the sync flag on all filters */
12193 __dev_uc_unsync(vsi->netdev, NULL);
12194 __dev_mc_unsync(vsi->netdev, NULL);
12197 /* make sure any remaining filters are marked for deletion */
12198 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
12199 __i40e_del_filter(vsi, f);
12201 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12203 i40e_sync_vsi_filters(vsi);
12205 i40e_vsi_delete(vsi);
12206 i40e_vsi_free_q_vectors(vsi);
12208 free_netdev(vsi->netdev);
12209 vsi->netdev = NULL;
12211 i40e_vsi_clear_rings(vsi);
12212 i40e_vsi_clear(vsi);
12214 /* If this was the last thing on the VEB, except for the
12215 * controlling VSI, remove the VEB, which puts the controlling
12216 * VSI onto the next level down in the switch.
12218 * Well, okay, there's one more exception here: don't remove
12219 * the orphan VEBs yet. We'll wait for an explicit remove request
12220 * from up the network stack.
12222 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
12224 pf->vsi[i]->uplink_seid == uplink_seid &&
12225 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12226 n++; /* count the VSIs */
12229 for (i = 0; i < I40E_MAX_VEB; i++) {
12232 if (pf->veb[i]->uplink_seid == uplink_seid)
12233 n++; /* count the VEBs */
12234 if (pf->veb[i]->seid == uplink_seid)
12237 if (n == 0 && veb && veb->uplink_seid != 0)
12238 i40e_veb_release(veb);
12244 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
12245 * @vsi: ptr to the VSI
12247 * This should only be called after i40e_vsi_mem_alloc() which allocates the
12248 * corresponding SW VSI structure and initializes num_queue_pairs for the
12249 * newly allocated VSI.
12251 * Returns 0 on success or negative on failure
12253 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
12256 struct i40e_pf *pf = vsi->back;
12258 if (vsi->q_vectors[0]) {
12259 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
12264 if (vsi->base_vector) {
12265 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
12266 vsi->seid, vsi->base_vector);
12270 ret = i40e_vsi_alloc_q_vectors(vsi);
12272 dev_info(&pf->pdev->dev,
12273 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
12274 vsi->num_q_vectors, vsi->seid, ret);
12275 vsi->num_q_vectors = 0;
12276 goto vector_setup_out;
12279 /* In Legacy mode, we do not have to get any other vector since we
12280 * piggyback on the misc/ICR0 for queue interrupts.
12282 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
12284 if (vsi->num_q_vectors)
12285 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
12286 vsi->num_q_vectors, vsi->idx);
12287 if (vsi->base_vector < 0) {
12288 dev_info(&pf->pdev->dev,
12289 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
12290 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
12291 i40e_vsi_free_q_vectors(vsi);
12293 goto vector_setup_out;
12301 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
12302 * @vsi: pointer to the vsi.
12304 * This re-allocates a vsi's queue resources.
12306 * Returns pointer to the successfully allocated and configured VSI sw struct
12307 * on success, otherwise returns NULL on failure.
12309 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
12311 u16 alloc_queue_pairs;
12312 struct i40e_pf *pf;
12321 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
12322 i40e_vsi_clear_rings(vsi);
12324 i40e_vsi_free_arrays(vsi, false);
12325 i40e_set_num_rings_in_vsi(vsi);
12326 ret = i40e_vsi_alloc_arrays(vsi, false);
12330 alloc_queue_pairs = vsi->alloc_queue_pairs *
12331 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
12333 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
12335 dev_info(&pf->pdev->dev,
12336 "failed to get tracking for %d queues for VSI %d err %d\n",
12337 alloc_queue_pairs, vsi->seid, ret);
12340 vsi->base_queue = ret;
12342 /* Update the FW view of the VSI. Force a reset of TC and queue
12343 * layout configurations.
12345 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
12346 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
12347 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
12348 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
12349 if (vsi->type == I40E_VSI_MAIN)
12350 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
12352 /* assign it some queues */
12353 ret = i40e_alloc_rings(vsi);
12357 /* map all of the rings to the q_vectors */
12358 i40e_vsi_map_rings_to_vectors(vsi);
12362 i40e_vsi_free_q_vectors(vsi);
12363 if (vsi->netdev_registered) {
12364 vsi->netdev_registered = false;
12365 unregister_netdev(vsi->netdev);
12366 free_netdev(vsi->netdev);
12367 vsi->netdev = NULL;
12369 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
12371 i40e_vsi_clear(vsi);
12376 * i40e_vsi_setup - Set up a VSI by a given type
12377 * @pf: board private structure
12379 * @uplink_seid: the switch element to link to
12380 * @param1: usage depends upon VSI type. For VF types, indicates VF id
12382 * This allocates the sw VSI structure and its queue resources, then add a VSI
12383 * to the identified VEB.
12385 * Returns pointer to the successfully allocated and configure VSI sw struct on
12386 * success, otherwise returns NULL on failure.
12388 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
12389 u16 uplink_seid, u32 param1)
12391 struct i40e_vsi *vsi = NULL;
12392 struct i40e_veb *veb = NULL;
12393 u16 alloc_queue_pairs;
12397 /* The requested uplink_seid must be either
12398 * - the PF's port seid
12399 * no VEB is needed because this is the PF
12400 * or this is a Flow Director special case VSI
12401 * - seid of an existing VEB
12402 * - seid of a VSI that owns an existing VEB
12403 * - seid of a VSI that doesn't own a VEB
12404 * a new VEB is created and the VSI becomes the owner
12405 * - seid of the PF VSI, which is what creates the first VEB
12406 * this is a special case of the previous
12408 * Find which uplink_seid we were given and create a new VEB if needed
12410 for (i = 0; i < I40E_MAX_VEB; i++) {
12411 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
12417 if (!veb && uplink_seid != pf->mac_seid) {
12419 for (i = 0; i < pf->num_alloc_vsi; i++) {
12420 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
12426 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
12431 if (vsi->uplink_seid == pf->mac_seid)
12432 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
12433 vsi->tc_config.enabled_tc);
12434 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
12435 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12436 vsi->tc_config.enabled_tc);
12438 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
12439 dev_info(&vsi->back->pdev->dev,
12440 "New VSI creation error, uplink seid of LAN VSI expected.\n");
12443 /* We come up by default in VEPA mode if SRIOV is not
12444 * already enabled, in which case we can't force VEPA
12447 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
12448 veb->bridge_mode = BRIDGE_MODE_VEPA;
12449 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12451 i40e_config_bridge_mode(veb);
12453 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12454 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12458 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
12462 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
12463 uplink_seid = veb->seid;
12466 /* get vsi sw struct */
12467 v_idx = i40e_vsi_mem_alloc(pf, type);
12470 vsi = pf->vsi[v_idx];
12474 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
12476 if (type == I40E_VSI_MAIN)
12477 pf->lan_vsi = v_idx;
12478 else if (type == I40E_VSI_SRIOV)
12479 vsi->vf_id = param1;
12480 /* assign it some queues */
12481 alloc_queue_pairs = vsi->alloc_queue_pairs *
12482 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
12484 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
12486 dev_info(&pf->pdev->dev,
12487 "failed to get tracking for %d queues for VSI %d err=%d\n",
12488 alloc_queue_pairs, vsi->seid, ret);
12491 vsi->base_queue = ret;
12493 /* get a VSI from the hardware */
12494 vsi->uplink_seid = uplink_seid;
12495 ret = i40e_add_vsi(vsi);
12499 switch (vsi->type) {
12500 /* setup the netdev if needed */
12501 case I40E_VSI_MAIN:
12502 case I40E_VSI_VMDQ2:
12503 ret = i40e_config_netdev(vsi);
12506 ret = register_netdev(vsi->netdev);
12509 vsi->netdev_registered = true;
12510 netif_carrier_off(vsi->netdev);
12511 #ifdef CONFIG_I40E_DCB
12512 /* Setup DCB netlink interface */
12513 i40e_dcbnl_setup(vsi);
12514 #endif /* CONFIG_I40E_DCB */
12517 case I40E_VSI_FDIR:
12518 /* set up vectors and rings if needed */
12519 ret = i40e_vsi_setup_vectors(vsi);
12523 ret = i40e_alloc_rings(vsi);
12527 /* map all of the rings to the q_vectors */
12528 i40e_vsi_map_rings_to_vectors(vsi);
12530 i40e_vsi_reset_stats(vsi);
12534 /* no netdev or rings for the other VSI types */
12538 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
12539 (vsi->type == I40E_VSI_VMDQ2)) {
12540 ret = i40e_vsi_config_rss(vsi);
12545 i40e_vsi_free_q_vectors(vsi);
12547 if (vsi->netdev_registered) {
12548 vsi->netdev_registered = false;
12549 unregister_netdev(vsi->netdev);
12550 free_netdev(vsi->netdev);
12551 vsi->netdev = NULL;
12554 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
12556 i40e_vsi_clear(vsi);
12562 * i40e_veb_get_bw_info - Query VEB BW information
12563 * @veb: the veb to query
12565 * Query the Tx scheduler BW configuration data for given VEB
12567 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
12569 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
12570 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
12571 struct i40e_pf *pf = veb->pf;
12572 struct i40e_hw *hw = &pf->hw;
12577 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
12580 dev_info(&pf->pdev->dev,
12581 "query veb bw config failed, err %s aq_err %s\n",
12582 i40e_stat_str(&pf->hw, ret),
12583 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
12587 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
12590 dev_info(&pf->pdev->dev,
12591 "query veb bw ets config failed, err %s aq_err %s\n",
12592 i40e_stat_str(&pf->hw, ret),
12593 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
12597 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
12598 veb->bw_max_quanta = ets_data.tc_bw_max;
12599 veb->is_abs_credits = bw_data.absolute_credits_enable;
12600 veb->enabled_tc = ets_data.tc_valid_bits;
12601 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
12602 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
12603 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
12604 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
12605 veb->bw_tc_limit_credits[i] =
12606 le16_to_cpu(bw_data.tc_bw_limits[i]);
12607 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
12615 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
12616 * @pf: board private structure
12618 * On error: returns error code (negative)
12619 * On success: returns vsi index in PF (positive)
12621 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
12624 struct i40e_veb *veb;
12627 /* Need to protect the allocation of switch elements at the PF level */
12628 mutex_lock(&pf->switch_mutex);
12630 /* VEB list may be fragmented if VEB creation/destruction has
12631 * been happening. We can afford to do a quick scan to look
12632 * for any free slots in the list.
12634 * find next empty veb slot, looping back around if necessary
12637 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
12639 if (i >= I40E_MAX_VEB) {
12641 goto err_alloc_veb; /* out of VEB slots! */
12644 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
12647 goto err_alloc_veb;
12651 veb->enabled_tc = 1;
12656 mutex_unlock(&pf->switch_mutex);
12661 * i40e_switch_branch_release - Delete a branch of the switch tree
12662 * @branch: where to start deleting
12664 * This uses recursion to find the tips of the branch to be
12665 * removed, deleting until we get back to and can delete this VEB.
12667 static void i40e_switch_branch_release(struct i40e_veb *branch)
12669 struct i40e_pf *pf = branch->pf;
12670 u16 branch_seid = branch->seid;
12671 u16 veb_idx = branch->idx;
12674 /* release any VEBs on this VEB - RECURSION */
12675 for (i = 0; i < I40E_MAX_VEB; i++) {
12678 if (pf->veb[i]->uplink_seid == branch->seid)
12679 i40e_switch_branch_release(pf->veb[i]);
12682 /* Release the VSIs on this VEB, but not the owner VSI.
12684 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
12685 * the VEB itself, so don't use (*branch) after this loop.
12687 for (i = 0; i < pf->num_alloc_vsi; i++) {
12690 if (pf->vsi[i]->uplink_seid == branch_seid &&
12691 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12692 i40e_vsi_release(pf->vsi[i]);
12696 /* There's one corner case where the VEB might not have been
12697 * removed, so double check it here and remove it if needed.
12698 * This case happens if the veb was created from the debugfs
12699 * commands and no VSIs were added to it.
12701 if (pf->veb[veb_idx])
12702 i40e_veb_release(pf->veb[veb_idx]);
12706 * i40e_veb_clear - remove veb struct
12707 * @veb: the veb to remove
12709 static void i40e_veb_clear(struct i40e_veb *veb)
12715 struct i40e_pf *pf = veb->pf;
12717 mutex_lock(&pf->switch_mutex);
12718 if (pf->veb[veb->idx] == veb)
12719 pf->veb[veb->idx] = NULL;
12720 mutex_unlock(&pf->switch_mutex);
12727 * i40e_veb_release - Delete a VEB and free its resources
12728 * @veb: the VEB being removed
12730 void i40e_veb_release(struct i40e_veb *veb)
12732 struct i40e_vsi *vsi = NULL;
12733 struct i40e_pf *pf;
12738 /* find the remaining VSI and check for extras */
12739 for (i = 0; i < pf->num_alloc_vsi; i++) {
12740 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
12746 dev_info(&pf->pdev->dev,
12747 "can't remove VEB %d with %d VSIs left\n",
12752 /* move the remaining VSI to uplink veb */
12753 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
12754 if (veb->uplink_seid) {
12755 vsi->uplink_seid = veb->uplink_seid;
12756 if (veb->uplink_seid == pf->mac_seid)
12757 vsi->veb_idx = I40E_NO_VEB;
12759 vsi->veb_idx = veb->veb_idx;
12762 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
12763 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
12766 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
12767 i40e_veb_clear(veb);
12771 * i40e_add_veb - create the VEB in the switch
12772 * @veb: the VEB to be instantiated
12773 * @vsi: the controlling VSI
12775 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
12777 struct i40e_pf *pf = veb->pf;
12778 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
12781 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
12782 veb->enabled_tc, false,
12783 &veb->seid, enable_stats, NULL);
12785 /* get a VEB from the hardware */
12787 dev_info(&pf->pdev->dev,
12788 "couldn't add VEB, err %s aq_err %s\n",
12789 i40e_stat_str(&pf->hw, ret),
12790 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12794 /* get statistics counter */
12795 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
12796 &veb->stats_idx, NULL, NULL, NULL);
12798 dev_info(&pf->pdev->dev,
12799 "couldn't get VEB statistics idx, err %s aq_err %s\n",
12800 i40e_stat_str(&pf->hw, ret),
12801 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12804 ret = i40e_veb_get_bw_info(veb);
12806 dev_info(&pf->pdev->dev,
12807 "couldn't get VEB bw info, err %s aq_err %s\n",
12808 i40e_stat_str(&pf->hw, ret),
12809 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12810 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
12814 vsi->uplink_seid = veb->seid;
12815 vsi->veb_idx = veb->idx;
12816 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
12822 * i40e_veb_setup - Set up a VEB
12823 * @pf: board private structure
12824 * @flags: VEB setup flags
12825 * @uplink_seid: the switch element to link to
12826 * @vsi_seid: the initial VSI seid
12827 * @enabled_tc: Enabled TC bit-map
12829 * This allocates the sw VEB structure and links it into the switch
12830 * It is possible and legal for this to be a duplicate of an already
12831 * existing VEB. It is also possible for both uplink and vsi seids
12832 * to be zero, in order to create a floating VEB.
12834 * Returns pointer to the successfully allocated VEB sw struct on
12835 * success, otherwise returns NULL on failure.
12837 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
12838 u16 uplink_seid, u16 vsi_seid,
12841 struct i40e_veb *veb, *uplink_veb = NULL;
12842 int vsi_idx, veb_idx;
12845 /* if one seid is 0, the other must be 0 to create a floating relay */
12846 if ((uplink_seid == 0 || vsi_seid == 0) &&
12847 (uplink_seid + vsi_seid != 0)) {
12848 dev_info(&pf->pdev->dev,
12849 "one, not both seid's are 0: uplink=%d vsi=%d\n",
12850 uplink_seid, vsi_seid);
12854 /* make sure there is such a vsi and uplink */
12855 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
12856 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
12858 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
12859 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
12864 if (uplink_seid && uplink_seid != pf->mac_seid) {
12865 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
12866 if (pf->veb[veb_idx] &&
12867 pf->veb[veb_idx]->seid == uplink_seid) {
12868 uplink_veb = pf->veb[veb_idx];
12873 dev_info(&pf->pdev->dev,
12874 "uplink seid %d not found\n", uplink_seid);
12879 /* get veb sw struct */
12880 veb_idx = i40e_veb_mem_alloc(pf);
12883 veb = pf->veb[veb_idx];
12884 veb->flags = flags;
12885 veb->uplink_seid = uplink_seid;
12886 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
12887 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
12889 /* create the VEB in the switch */
12890 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
12893 if (vsi_idx == pf->lan_vsi)
12894 pf->lan_veb = veb->idx;
12899 i40e_veb_clear(veb);
12905 * i40e_setup_pf_switch_element - set PF vars based on switch type
12906 * @pf: board private structure
12907 * @ele: element we are building info from
12908 * @num_reported: total number of elements
12909 * @printconfig: should we print the contents
12911 * helper function to assist in extracting a few useful SEID values.
12913 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
12914 struct i40e_aqc_switch_config_element_resp *ele,
12915 u16 num_reported, bool printconfig)
12917 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
12918 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
12919 u8 element_type = ele->element_type;
12920 u16 seid = le16_to_cpu(ele->seid);
12923 dev_info(&pf->pdev->dev,
12924 "type=%d seid=%d uplink=%d downlink=%d\n",
12925 element_type, seid, uplink_seid, downlink_seid);
12927 switch (element_type) {
12928 case I40E_SWITCH_ELEMENT_TYPE_MAC:
12929 pf->mac_seid = seid;
12931 case I40E_SWITCH_ELEMENT_TYPE_VEB:
12933 if (uplink_seid != pf->mac_seid)
12935 if (pf->lan_veb == I40E_NO_VEB) {
12938 /* find existing or else empty VEB */
12939 for (v = 0; v < I40E_MAX_VEB; v++) {
12940 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
12945 if (pf->lan_veb == I40E_NO_VEB) {
12946 v = i40e_veb_mem_alloc(pf);
12953 pf->veb[pf->lan_veb]->seid = seid;
12954 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
12955 pf->veb[pf->lan_veb]->pf = pf;
12956 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
12958 case I40E_SWITCH_ELEMENT_TYPE_VSI:
12959 if (num_reported != 1)
12961 /* This is immediately after a reset so we can assume this is
12964 pf->mac_seid = uplink_seid;
12965 pf->pf_seid = downlink_seid;
12966 pf->main_vsi_seid = seid;
12968 dev_info(&pf->pdev->dev,
12969 "pf_seid=%d main_vsi_seid=%d\n",
12970 pf->pf_seid, pf->main_vsi_seid);
12972 case I40E_SWITCH_ELEMENT_TYPE_PF:
12973 case I40E_SWITCH_ELEMENT_TYPE_VF:
12974 case I40E_SWITCH_ELEMENT_TYPE_EMP:
12975 case I40E_SWITCH_ELEMENT_TYPE_BMC:
12976 case I40E_SWITCH_ELEMENT_TYPE_PE:
12977 case I40E_SWITCH_ELEMENT_TYPE_PA:
12978 /* ignore these for now */
12981 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
12982 element_type, seid);
12988 * i40e_fetch_switch_configuration - Get switch config from firmware
12989 * @pf: board private structure
12990 * @printconfig: should we print the contents
12992 * Get the current switch configuration from the device and
12993 * extract a few useful SEID values.
12995 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
12997 struct i40e_aqc_get_switch_config_resp *sw_config;
13003 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
13007 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
13009 u16 num_reported, num_total;
13011 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
13015 dev_info(&pf->pdev->dev,
13016 "get switch config failed err %s aq_err %s\n",
13017 i40e_stat_str(&pf->hw, ret),
13018 i40e_aq_str(&pf->hw,
13019 pf->hw.aq.asq_last_status));
13024 num_reported = le16_to_cpu(sw_config->header.num_reported);
13025 num_total = le16_to_cpu(sw_config->header.num_total);
13028 dev_info(&pf->pdev->dev,
13029 "header: %d reported %d total\n",
13030 num_reported, num_total);
13032 for (i = 0; i < num_reported; i++) {
13033 struct i40e_aqc_switch_config_element_resp *ele =
13034 &sw_config->element[i];
13036 i40e_setup_pf_switch_element(pf, ele, num_reported,
13039 } while (next_seid != 0);
13046 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
13047 * @pf: board private structure
13048 * @reinit: if the Main VSI needs to re-initialized.
13050 * Returns 0 on success, negative value on failure
13052 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
13057 /* find out what's out there already */
13058 ret = i40e_fetch_switch_configuration(pf, false);
13060 dev_info(&pf->pdev->dev,
13061 "couldn't fetch switch config, err %s aq_err %s\n",
13062 i40e_stat_str(&pf->hw, ret),
13063 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13066 i40e_pf_reset_stats(pf);
13068 /* set the switch config bit for the whole device to
13069 * support limited promisc or true promisc
13070 * when user requests promisc. The default is limited
13074 if ((pf->hw.pf_id == 0) &&
13075 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
13076 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13077 pf->last_sw_conf_flags = flags;
13080 if (pf->hw.pf_id == 0) {
13083 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13084 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
13086 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
13087 dev_info(&pf->pdev->dev,
13088 "couldn't set switch config bits, err %s aq_err %s\n",
13089 i40e_stat_str(&pf->hw, ret),
13090 i40e_aq_str(&pf->hw,
13091 pf->hw.aq.asq_last_status));
13092 /* not a fatal problem, just keep going */
13094 pf->last_sw_conf_valid_flags = valid_flags;
13097 /* first time setup */
13098 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
13099 struct i40e_vsi *vsi = NULL;
13102 /* Set up the PF VSI associated with the PF's main VSI
13103 * that is already in the HW switch
13105 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
13106 uplink_seid = pf->veb[pf->lan_veb]->seid;
13108 uplink_seid = pf->mac_seid;
13109 if (pf->lan_vsi == I40E_NO_VSI)
13110 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
13112 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
13114 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
13115 i40e_cloud_filter_exit(pf);
13116 i40e_fdir_teardown(pf);
13120 /* force a reset of TC and queue layout configurations */
13121 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13123 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13124 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13125 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13127 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
13129 i40e_fdir_sb_setup(pf);
13131 /* Setup static PF queue filter control settings */
13132 ret = i40e_setup_pf_filter_control(pf);
13134 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
13136 /* Failure here should not stop continuing other steps */
13139 /* enable RSS in the HW, even for only one queue, as the stack can use
13142 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
13143 i40e_pf_config_rss(pf);
13145 /* fill in link information and enable LSE reporting */
13146 i40e_link_event(pf);
13148 /* Initialize user-specific link properties */
13149 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
13150 I40E_AQ_AN_COMPLETED) ? true : false);
13154 /* repopulate tunnel port filters */
13155 i40e_sync_udp_filters(pf);
13161 * i40e_determine_queue_usage - Work out queue distribution
13162 * @pf: board private structure
13164 static void i40e_determine_queue_usage(struct i40e_pf *pf)
13169 pf->num_lan_qps = 0;
13171 /* Find the max queues to be put into basic use. We'll always be
13172 * using TC0, whether or not DCB is running, and TC0 will get the
13175 queues_left = pf->hw.func_caps.num_tx_qp;
13177 if ((queues_left == 1) ||
13178 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
13179 /* one qp for PF, no queues for anything else */
13181 pf->alloc_rss_size = pf->num_lan_qps = 1;
13183 /* make sure all the fancies are disabled */
13184 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13185 I40E_FLAG_IWARP_ENABLED |
13186 I40E_FLAG_FD_SB_ENABLED |
13187 I40E_FLAG_FD_ATR_ENABLED |
13188 I40E_FLAG_DCB_CAPABLE |
13189 I40E_FLAG_DCB_ENABLED |
13190 I40E_FLAG_SRIOV_ENABLED |
13191 I40E_FLAG_VMDQ_ENABLED);
13192 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13193 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
13194 I40E_FLAG_FD_SB_ENABLED |
13195 I40E_FLAG_FD_ATR_ENABLED |
13196 I40E_FLAG_DCB_CAPABLE))) {
13197 /* one qp for PF */
13198 pf->alloc_rss_size = pf->num_lan_qps = 1;
13199 queues_left -= pf->num_lan_qps;
13201 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13202 I40E_FLAG_IWARP_ENABLED |
13203 I40E_FLAG_FD_SB_ENABLED |
13204 I40E_FLAG_FD_ATR_ENABLED |
13205 I40E_FLAG_DCB_ENABLED |
13206 I40E_FLAG_VMDQ_ENABLED);
13207 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13209 /* Not enough queues for all TCs */
13210 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
13211 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
13212 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
13213 I40E_FLAG_DCB_ENABLED);
13214 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
13217 /* limit lan qps to the smaller of qps, cpus or msix */
13218 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
13219 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
13220 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
13221 pf->num_lan_qps = q_max;
13223 queues_left -= pf->num_lan_qps;
13226 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13227 if (queues_left > 1) {
13228 queues_left -= 1; /* save 1 queue for FD */
13230 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
13231 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13232 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
13236 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13237 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
13238 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
13239 (queues_left / pf->num_vf_qps));
13240 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
13243 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
13244 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
13245 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
13246 (queues_left / pf->num_vmdq_qps));
13247 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
13250 pf->queues_left = queues_left;
13251 dev_dbg(&pf->pdev->dev,
13252 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
13253 pf->hw.func_caps.num_tx_qp,
13254 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
13255 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
13256 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
13261 * i40e_setup_pf_filter_control - Setup PF static filter control
13262 * @pf: PF to be setup
13264 * i40e_setup_pf_filter_control sets up a PF's initial filter control
13265 * settings. If PE/FCoE are enabled then it will also set the per PF
13266 * based filter sizes required for them. It also enables Flow director,
13267 * ethertype and macvlan type filter settings for the pf.
13269 * Returns 0 on success, negative on failure
13271 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
13273 struct i40e_filter_control_settings *settings = &pf->filter_settings;
13275 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
13277 /* Flow Director is enabled */
13278 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
13279 settings->enable_fdir = true;
13281 /* Ethtype and MACVLAN filters enabled for PF */
13282 settings->enable_ethtype = true;
13283 settings->enable_macvlan = true;
13285 if (i40e_set_filter_control(&pf->hw, settings))
13291 #define INFO_STRING_LEN 255
13292 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
13293 static void i40e_print_features(struct i40e_pf *pf)
13295 struct i40e_hw *hw = &pf->hw;
13299 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
13303 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
13304 #ifdef CONFIG_PCI_IOV
13305 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
13307 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
13308 pf->hw.func_caps.num_vsis,
13309 pf->vsi[pf->lan_vsi]->num_queue_pairs);
13310 if (pf->flags & I40E_FLAG_RSS_ENABLED)
13311 i += snprintf(&buf[i], REMAIN(i), " RSS");
13312 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
13313 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
13314 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13315 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
13316 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
13318 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
13319 i += snprintf(&buf[i], REMAIN(i), " DCB");
13320 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
13321 i += snprintf(&buf[i], REMAIN(i), " Geneve");
13322 if (pf->flags & I40E_FLAG_PTP)
13323 i += snprintf(&buf[i], REMAIN(i), " PTP");
13324 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
13325 i += snprintf(&buf[i], REMAIN(i), " VEB");
13327 i += snprintf(&buf[i], REMAIN(i), " VEPA");
13329 dev_info(&pf->pdev->dev, "%s\n", buf);
13331 WARN_ON(i > INFO_STRING_LEN);
13335 * i40e_get_platform_mac_addr - get platform-specific MAC address
13336 * @pdev: PCI device information struct
13337 * @pf: board private structure
13339 * Look up the MAC address for the device. First we'll try
13340 * eth_platform_get_mac_address, which will check Open Firmware, or arch
13341 * specific fallback. Otherwise, we'll default to the stored value in
13344 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
13346 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
13347 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
13351 * i40e_probe - Device initialization routine
13352 * @pdev: PCI device information struct
13353 * @ent: entry in i40e_pci_tbl
13355 * i40e_probe initializes a PF identified by a pci_dev structure.
13356 * The OS initialization, configuring of the PF private structure,
13357 * and a hardware reset occur.
13359 * Returns 0 on success, negative on failure
13361 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
13363 struct i40e_aq_get_phy_abilities_resp abilities;
13364 struct i40e_pf *pf;
13365 struct i40e_hw *hw;
13366 static u16 pfs_found;
13374 err = pci_enable_device_mem(pdev);
13378 /* set up for high or low dma */
13379 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
13381 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
13383 dev_err(&pdev->dev,
13384 "DMA configuration failed: 0x%x\n", err);
13389 /* set up pci connections */
13390 err = pci_request_mem_regions(pdev, i40e_driver_name);
13392 dev_info(&pdev->dev,
13393 "pci_request_selected_regions failed %d\n", err);
13397 pci_enable_pcie_error_reporting(pdev);
13398 pci_set_master(pdev);
13400 /* Now that we have a PCI connection, we need to do the
13401 * low level device setup. This is primarily setting up
13402 * the Admin Queue structures and then querying for the
13403 * device's current profile information.
13405 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
13412 set_bit(__I40E_DOWN, pf->state);
13417 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
13418 I40E_MAX_CSR_SPACE);
13420 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
13421 if (!hw->hw_addr) {
13423 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
13424 (unsigned int)pci_resource_start(pdev, 0),
13425 pf->ioremap_len, err);
13428 hw->vendor_id = pdev->vendor;
13429 hw->device_id = pdev->device;
13430 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
13431 hw->subsystem_vendor_id = pdev->subsystem_vendor;
13432 hw->subsystem_device_id = pdev->subsystem_device;
13433 hw->bus.device = PCI_SLOT(pdev->devfn);
13434 hw->bus.func = PCI_FUNC(pdev->devfn);
13435 hw->bus.bus_id = pdev->bus->number;
13436 pf->instance = pfs_found;
13438 /* Select something other than the 802.1ad ethertype for the
13439 * switch to use internally and drop on ingress.
13441 hw->switch_tag = 0xffff;
13442 hw->first_tag = ETH_P_8021AD;
13443 hw->second_tag = ETH_P_8021Q;
13445 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
13446 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
13448 /* set up the locks for the AQ, do this only once in probe
13449 * and destroy them only once in remove
13451 mutex_init(&hw->aq.asq_mutex);
13452 mutex_init(&hw->aq.arq_mutex);
13454 pf->msg_enable = netif_msg_init(debug,
13459 pf->hw.debug_mask = debug;
13461 /* do a special CORER for clearing PXE mode once at init */
13462 if (hw->revision_id == 0 &&
13463 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
13464 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
13469 i40e_clear_pxe_mode(hw);
13472 /* Reset here to make sure all is clean and to define PF 'n' */
13474 err = i40e_pf_reset(hw);
13476 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
13481 hw->aq.num_arq_entries = I40E_AQ_LEN;
13482 hw->aq.num_asq_entries = I40E_AQ_LEN;
13483 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
13484 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
13485 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
13487 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
13489 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
13491 err = i40e_init_shared_code(hw);
13493 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
13498 /* set up a default setting for link flow control */
13499 pf->hw.fc.requested_mode = I40E_FC_NONE;
13501 err = i40e_init_adminq(hw);
13503 if (err == I40E_ERR_FIRMWARE_API_VERSION)
13504 dev_info(&pdev->dev,
13505 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
13507 dev_info(&pdev->dev,
13508 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
13512 i40e_get_oem_version(hw);
13514 /* provide nvm, fw, api versions */
13515 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
13516 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
13517 hw->aq.api_maj_ver, hw->aq.api_min_ver,
13518 i40e_nvm_version_str(hw));
13520 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
13521 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
13522 dev_info(&pdev->dev,
13523 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
13524 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
13525 dev_info(&pdev->dev,
13526 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
13528 i40e_verify_eeprom(pf);
13530 /* Rev 0 hardware was never productized */
13531 if (hw->revision_id < 1)
13532 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
13534 i40e_clear_pxe_mode(hw);
13535 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
13537 goto err_adminq_setup;
13539 err = i40e_sw_init(pf);
13541 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
13545 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
13546 hw->func_caps.num_rx_qp, 0, 0);
13548 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
13549 goto err_init_lan_hmc;
13552 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
13554 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
13556 goto err_configure_lan_hmc;
13559 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
13560 * Ignore error return codes because if it was already disabled via
13561 * hardware settings this will fail
13563 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
13564 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
13565 i40e_aq_stop_lldp(hw, true, NULL);
13568 /* allow a platform config to override the HW addr */
13569 i40e_get_platform_mac_addr(pdev, pf);
13571 if (!is_valid_ether_addr(hw->mac.addr)) {
13572 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
13576 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
13577 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
13578 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
13579 if (is_valid_ether_addr(hw->mac.port_addr))
13580 pf->hw_features |= I40E_HW_PORT_ID_VALID;
13582 pci_set_drvdata(pdev, pf);
13583 pci_save_state(pdev);
13584 #ifdef CONFIG_I40E_DCB
13585 err = i40e_init_pf_dcb(pf);
13587 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
13588 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
13589 /* Continue without DCB enabled */
13591 #endif /* CONFIG_I40E_DCB */
13593 /* set up periodic task facility */
13594 timer_setup(&pf->service_timer, i40e_service_timer, 0);
13595 pf->service_timer_period = HZ;
13597 INIT_WORK(&pf->service_task, i40e_service_task);
13598 clear_bit(__I40E_SERVICE_SCHED, pf->state);
13600 /* NVM bit on means WoL disabled for the port */
13601 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
13602 if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
13603 pf->wol_en = false;
13606 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
13608 /* set up the main switch operations */
13609 i40e_determine_queue_usage(pf);
13610 err = i40e_init_interrupt_scheme(pf);
13612 goto err_switch_setup;
13614 /* The number of VSIs reported by the FW is the minimum guaranteed
13615 * to us; HW supports far more and we share the remaining pool with
13616 * the other PFs. We allocate space for more than the guarantee with
13617 * the understanding that we might not get them all later.
13619 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
13620 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
13622 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
13624 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
13625 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
13629 goto err_switch_setup;
13632 #ifdef CONFIG_PCI_IOV
13633 /* prep for VF support */
13634 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13635 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
13636 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
13637 if (pci_num_vf(pdev))
13638 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
13641 err = i40e_setup_pf_switch(pf, false);
13643 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
13646 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
13648 /* Make sure flow control is set according to current settings */
13649 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
13650 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
13651 dev_dbg(&pf->pdev->dev,
13652 "Set fc with err %s aq_err %s on get_phy_cap\n",
13653 i40e_stat_str(hw, err),
13654 i40e_aq_str(hw, hw->aq.asq_last_status));
13655 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
13656 dev_dbg(&pf->pdev->dev,
13657 "Set fc with err %s aq_err %s on set_phy_config\n",
13658 i40e_stat_str(hw, err),
13659 i40e_aq_str(hw, hw->aq.asq_last_status));
13660 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
13661 dev_dbg(&pf->pdev->dev,
13662 "Set fc with err %s aq_err %s on get_link_info\n",
13663 i40e_stat_str(hw, err),
13664 i40e_aq_str(hw, hw->aq.asq_last_status));
13666 /* if FDIR VSI was set up, start it now */
13667 for (i = 0; i < pf->num_alloc_vsi; i++) {
13668 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
13669 i40e_vsi_open(pf->vsi[i]);
13674 /* The driver only wants link up/down and module qualification
13675 * reports from firmware. Note the negative logic.
13677 err = i40e_aq_set_phy_int_mask(&pf->hw,
13678 ~(I40E_AQ_EVENT_LINK_UPDOWN |
13679 I40E_AQ_EVENT_MEDIA_NA |
13680 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
13682 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
13683 i40e_stat_str(&pf->hw, err),
13684 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13686 /* Reconfigure hardware for allowing smaller MSS in the case
13687 * of TSO, so that we avoid the MDD being fired and causing
13688 * a reset in the case of small MSS+TSO.
13690 val = rd32(hw, I40E_REG_MSS);
13691 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
13692 val &= ~I40E_REG_MSS_MIN_MASK;
13693 val |= I40E_64BYTE_MSS;
13694 wr32(hw, I40E_REG_MSS, val);
13697 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
13699 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
13701 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
13702 i40e_stat_str(&pf->hw, err),
13703 i40e_aq_str(&pf->hw,
13704 pf->hw.aq.asq_last_status));
13706 /* The main driver is (mostly) up and happy. We need to set this state
13707 * before setting up the misc vector or we get a race and the vector
13708 * ends up disabled forever.
13710 clear_bit(__I40E_DOWN, pf->state);
13712 /* In case of MSIX we are going to setup the misc vector right here
13713 * to handle admin queue events etc. In case of legacy and MSI
13714 * the misc functionality and queue processing is combined in
13715 * the same vector and that gets setup at open.
13717 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13718 err = i40e_setup_misc_vector(pf);
13720 dev_info(&pdev->dev,
13721 "setup of misc vector failed: %d\n", err);
13726 #ifdef CONFIG_PCI_IOV
13727 /* prep for VF support */
13728 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13729 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
13730 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
13731 /* disable link interrupts for VFs */
13732 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
13733 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
13734 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
13737 if (pci_num_vf(pdev)) {
13738 dev_info(&pdev->dev,
13739 "Active VFs found, allocating resources.\n");
13740 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
13742 dev_info(&pdev->dev,
13743 "Error %d allocating resources for existing VFs\n",
13747 #endif /* CONFIG_PCI_IOV */
13749 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
13750 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
13751 pf->num_iwarp_msix,
13752 I40E_IWARP_IRQ_PILE_ID);
13753 if (pf->iwarp_base_vector < 0) {
13754 dev_info(&pdev->dev,
13755 "failed to get tracking for %d vectors for IWARP err=%d\n",
13756 pf->num_iwarp_msix, pf->iwarp_base_vector);
13757 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
13761 i40e_dbg_pf_init(pf);
13763 /* tell the firmware that we're starting */
13764 i40e_send_version(pf);
13766 /* since everything's happy, start the service_task timer */
13767 mod_timer(&pf->service_timer,
13768 round_jiffies(jiffies + pf->service_timer_period));
13770 /* add this PF to client device list and launch a client service task */
13771 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
13772 err = i40e_lan_add_device(pf);
13774 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
13778 #define PCI_SPEED_SIZE 8
13779 #define PCI_WIDTH_SIZE 8
13780 /* Devices on the IOSF bus do not have this information
13781 * and will report PCI Gen 1 x 1 by default so don't bother
13784 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
13785 char speed[PCI_SPEED_SIZE] = "Unknown";
13786 char width[PCI_WIDTH_SIZE] = "Unknown";
13788 /* Get the negotiated link width and speed from PCI config
13791 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
13794 i40e_set_pci_config_data(hw, link_status);
13796 switch (hw->bus.speed) {
13797 case i40e_bus_speed_8000:
13798 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
13799 case i40e_bus_speed_5000:
13800 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
13801 case i40e_bus_speed_2500:
13802 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
13806 switch (hw->bus.width) {
13807 case i40e_bus_width_pcie_x8:
13808 strncpy(width, "8", PCI_WIDTH_SIZE); break;
13809 case i40e_bus_width_pcie_x4:
13810 strncpy(width, "4", PCI_WIDTH_SIZE); break;
13811 case i40e_bus_width_pcie_x2:
13812 strncpy(width, "2", PCI_WIDTH_SIZE); break;
13813 case i40e_bus_width_pcie_x1:
13814 strncpy(width, "1", PCI_WIDTH_SIZE); break;
13819 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
13822 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
13823 hw->bus.speed < i40e_bus_speed_8000) {
13824 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
13825 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
13829 /* get the requested speeds from the fw */
13830 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
13832 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
13833 i40e_stat_str(&pf->hw, err),
13834 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13835 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
13837 /* get the supported phy types from the fw */
13838 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
13840 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
13841 i40e_stat_str(&pf->hw, err),
13842 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13844 /* Add a filter to drop all Flow control frames from any VSI from being
13845 * transmitted. By doing so we stop a malicious VF from sending out
13846 * PAUSE or PFC frames and potentially controlling traffic for other
13848 * The FW can still send Flow control frames if enabled.
13850 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
13851 pf->main_vsi_seid);
13853 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
13854 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
13855 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
13856 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
13857 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
13858 /* print a string summarizing features */
13859 i40e_print_features(pf);
13863 /* Unwind what we've done if something failed in the setup */
13865 set_bit(__I40E_DOWN, pf->state);
13866 i40e_clear_interrupt_scheme(pf);
13869 i40e_reset_interrupt_capability(pf);
13870 del_timer_sync(&pf->service_timer);
13872 err_configure_lan_hmc:
13873 (void)i40e_shutdown_lan_hmc(hw);
13875 kfree(pf->qp_pile);
13879 iounmap(hw->hw_addr);
13883 pci_disable_pcie_error_reporting(pdev);
13884 pci_release_mem_regions(pdev);
13887 pci_disable_device(pdev);
13892 * i40e_remove - Device removal routine
13893 * @pdev: PCI device information struct
13895 * i40e_remove is called by the PCI subsystem to alert the driver
13896 * that is should release a PCI device. This could be caused by a
13897 * Hot-Plug event, or because the driver is going to be removed from
13900 static void i40e_remove(struct pci_dev *pdev)
13902 struct i40e_pf *pf = pci_get_drvdata(pdev);
13903 struct i40e_hw *hw = &pf->hw;
13904 i40e_status ret_code;
13907 i40e_dbg_pf_exit(pf);
13911 /* Disable RSS in hw */
13912 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
13913 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
13915 /* no more scheduling of any task */
13916 set_bit(__I40E_SUSPENDED, pf->state);
13917 set_bit(__I40E_DOWN, pf->state);
13918 if (pf->service_timer.function)
13919 del_timer_sync(&pf->service_timer);
13920 if (pf->service_task.func)
13921 cancel_work_sync(&pf->service_task);
13923 /* Client close must be called explicitly here because the timer
13924 * has been stopped.
13926 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
13928 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
13930 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
13933 i40e_fdir_teardown(pf);
13935 /* If there is a switch structure or any orphans, remove them.
13936 * This will leave only the PF's VSI remaining.
13938 for (i = 0; i < I40E_MAX_VEB; i++) {
13942 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
13943 pf->veb[i]->uplink_seid == 0)
13944 i40e_switch_branch_release(pf->veb[i]);
13947 /* Now we can shutdown the PF's VSI, just before we kill
13950 if (pf->vsi[pf->lan_vsi])
13951 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
13953 i40e_cloud_filter_exit(pf);
13955 /* remove attached clients */
13956 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
13957 ret_code = i40e_lan_del_device(pf);
13959 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
13963 /* shutdown and destroy the HMC */
13964 if (hw->hmc.hmc_obj) {
13965 ret_code = i40e_shutdown_lan_hmc(hw);
13967 dev_warn(&pdev->dev,
13968 "Failed to destroy the HMC resources: %d\n",
13972 /* shutdown the adminq */
13973 i40e_shutdown_adminq(hw);
13975 /* destroy the locks only once, here */
13976 mutex_destroy(&hw->aq.arq_mutex);
13977 mutex_destroy(&hw->aq.asq_mutex);
13979 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
13980 i40e_clear_interrupt_scheme(pf);
13981 for (i = 0; i < pf->num_alloc_vsi; i++) {
13983 i40e_vsi_clear_rings(pf->vsi[i]);
13984 i40e_vsi_clear(pf->vsi[i]);
13989 for (i = 0; i < I40E_MAX_VEB; i++) {
13994 kfree(pf->qp_pile);
13997 iounmap(hw->hw_addr);
13999 pci_release_mem_regions(pdev);
14001 pci_disable_pcie_error_reporting(pdev);
14002 pci_disable_device(pdev);
14006 * i40e_pci_error_detected - warning that something funky happened in PCI land
14007 * @pdev: PCI device information struct
14009 * Called to warn that something happened and the error handling steps
14010 * are in progress. Allows the driver to quiesce things, be ready for
14013 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
14014 enum pci_channel_state error)
14016 struct i40e_pf *pf = pci_get_drvdata(pdev);
14018 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
14021 dev_info(&pdev->dev,
14022 "Cannot recover - error happened during device probe\n");
14023 return PCI_ERS_RESULT_DISCONNECT;
14026 /* shutdown all operations */
14027 if (!test_bit(__I40E_SUSPENDED, pf->state))
14028 i40e_prep_for_reset(pf, false);
14030 /* Request a slot reset */
14031 return PCI_ERS_RESULT_NEED_RESET;
14035 * i40e_pci_error_slot_reset - a PCI slot reset just happened
14036 * @pdev: PCI device information struct
14038 * Called to find if the driver can work with the device now that
14039 * the pci slot has been reset. If a basic connection seems good
14040 * (registers are readable and have sane content) then return a
14041 * happy little PCI_ERS_RESULT_xxx.
14043 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
14045 struct i40e_pf *pf = pci_get_drvdata(pdev);
14046 pci_ers_result_t result;
14050 dev_dbg(&pdev->dev, "%s\n", __func__);
14051 if (pci_enable_device_mem(pdev)) {
14052 dev_info(&pdev->dev,
14053 "Cannot re-enable PCI device after reset.\n");
14054 result = PCI_ERS_RESULT_DISCONNECT;
14056 pci_set_master(pdev);
14057 pci_restore_state(pdev);
14058 pci_save_state(pdev);
14059 pci_wake_from_d3(pdev, false);
14061 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
14063 result = PCI_ERS_RESULT_RECOVERED;
14065 result = PCI_ERS_RESULT_DISCONNECT;
14068 err = pci_cleanup_aer_uncorrect_error_status(pdev);
14070 dev_info(&pdev->dev,
14071 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
14073 /* non-fatal, continue */
14080 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
14081 * @pdev: PCI device information struct
14083 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
14085 struct i40e_pf *pf = pci_get_drvdata(pdev);
14087 i40e_prep_for_reset(pf, false);
14091 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
14092 * @pdev: PCI device information struct
14094 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
14096 struct i40e_pf *pf = pci_get_drvdata(pdev);
14098 i40e_reset_and_rebuild(pf, false, false);
14102 * i40e_pci_error_resume - restart operations after PCI error recovery
14103 * @pdev: PCI device information struct
14105 * Called to allow the driver to bring things back up after PCI error
14106 * and/or reset recovery has finished.
14108 static void i40e_pci_error_resume(struct pci_dev *pdev)
14110 struct i40e_pf *pf = pci_get_drvdata(pdev);
14112 dev_dbg(&pdev->dev, "%s\n", __func__);
14113 if (test_bit(__I40E_SUSPENDED, pf->state))
14116 i40e_handle_reset_warning(pf, false);
14120 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
14121 * using the mac_address_write admin q function
14122 * @pf: pointer to i40e_pf struct
14124 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
14126 struct i40e_hw *hw = &pf->hw;
14131 /* Get current MAC address in case it's an LAA */
14132 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
14133 ether_addr_copy(mac_addr,
14134 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
14136 dev_err(&pf->pdev->dev,
14137 "Failed to retrieve MAC address; using default\n");
14138 ether_addr_copy(mac_addr, hw->mac.addr);
14141 /* The FW expects the mac address write cmd to first be called with
14142 * one of these flags before calling it again with the multicast
14145 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
14147 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
14148 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
14150 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
14152 dev_err(&pf->pdev->dev,
14153 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
14157 flags = I40E_AQC_MC_MAG_EN
14158 | I40E_AQC_WOL_PRESERVE_ON_PFR
14159 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
14160 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
14162 dev_err(&pf->pdev->dev,
14163 "Failed to enable Multicast Magic Packet wake up\n");
14167 * i40e_shutdown - PCI callback for shutting down
14168 * @pdev: PCI device information struct
14170 static void i40e_shutdown(struct pci_dev *pdev)
14172 struct i40e_pf *pf = pci_get_drvdata(pdev);
14173 struct i40e_hw *hw = &pf->hw;
14175 set_bit(__I40E_SUSPENDED, pf->state);
14176 set_bit(__I40E_DOWN, pf->state);
14178 i40e_prep_for_reset(pf, true);
14181 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14182 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
14184 del_timer_sync(&pf->service_timer);
14185 cancel_work_sync(&pf->service_task);
14186 i40e_cloud_filter_exit(pf);
14187 i40e_fdir_teardown(pf);
14189 /* Client close must be called explicitly here because the timer
14190 * has been stopped.
14192 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14194 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
14195 i40e_enable_mc_magic_wake(pf);
14197 i40e_prep_for_reset(pf, false);
14199 wr32(hw, I40E_PFPM_APM,
14200 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14201 wr32(hw, I40E_PFPM_WUFC,
14202 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
14204 i40e_clear_interrupt_scheme(pf);
14206 if (system_state == SYSTEM_POWER_OFF) {
14207 pci_wake_from_d3(pdev, pf->wol_en);
14208 pci_set_power_state(pdev, PCI_D3hot);
14213 * i40e_suspend - PM callback for moving to D3
14214 * @dev: generic device information structure
14216 static int __maybe_unused i40e_suspend(struct device *dev)
14218 struct pci_dev *pdev = to_pci_dev(dev);
14219 struct i40e_pf *pf = pci_get_drvdata(pdev);
14220 struct i40e_hw *hw = &pf->hw;
14222 /* If we're already suspended, then there is nothing to do */
14223 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
14226 set_bit(__I40E_DOWN, pf->state);
14228 /* Ensure service task will not be running */
14229 del_timer_sync(&pf->service_timer);
14230 cancel_work_sync(&pf->service_task);
14232 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
14233 i40e_enable_mc_magic_wake(pf);
14235 i40e_prep_for_reset(pf, false);
14237 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14238 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
14240 /* Clear the interrupt scheme and release our IRQs so that the system
14241 * can safely hibernate even when there are a large number of CPUs.
14242 * Otherwise hibernation might fail when mapping all the vectors back
14245 i40e_clear_interrupt_scheme(pf);
14251 * i40e_resume - PM callback for waking up from D3
14252 * @dev: generic device information structure
14254 static int __maybe_unused i40e_resume(struct device *dev)
14256 struct pci_dev *pdev = to_pci_dev(dev);
14257 struct i40e_pf *pf = pci_get_drvdata(pdev);
14260 /* If we're not suspended, then there is nothing to do */
14261 if (!test_bit(__I40E_SUSPENDED, pf->state))
14264 /* We cleared the interrupt scheme when we suspended, so we need to
14265 * restore it now to resume device functionality.
14267 err = i40e_restore_interrupt_scheme(pf);
14269 dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
14273 clear_bit(__I40E_DOWN, pf->state);
14274 i40e_reset_and_rebuild(pf, false, false);
14276 /* Clear suspended state last after everything is recovered */
14277 clear_bit(__I40E_SUSPENDED, pf->state);
14279 /* Restart the service task */
14280 mod_timer(&pf->service_timer,
14281 round_jiffies(jiffies + pf->service_timer_period));
14286 static const struct pci_error_handlers i40e_err_handler = {
14287 .error_detected = i40e_pci_error_detected,
14288 .slot_reset = i40e_pci_error_slot_reset,
14289 .reset_prepare = i40e_pci_error_reset_prepare,
14290 .reset_done = i40e_pci_error_reset_done,
14291 .resume = i40e_pci_error_resume,
14294 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
14296 static struct pci_driver i40e_driver = {
14297 .name = i40e_driver_name,
14298 .id_table = i40e_pci_tbl,
14299 .probe = i40e_probe,
14300 .remove = i40e_remove,
14302 .pm = &i40e_pm_ops,
14304 .shutdown = i40e_shutdown,
14305 .err_handler = &i40e_err_handler,
14306 .sriov_configure = i40e_pci_sriov_configure,
14310 * i40e_init_module - Driver registration routine
14312 * i40e_init_module is the first routine called when the driver is
14313 * loaded. All it does is register with the PCI subsystem.
14315 static int __init i40e_init_module(void)
14317 pr_info("%s: %s - version %s\n", i40e_driver_name,
14318 i40e_driver_string, i40e_driver_version_str);
14319 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
14321 /* There is no need to throttle the number of active tasks because
14322 * each device limits its own task using a state bit for scheduling
14323 * the service task, and the device tasks do not interfere with each
14324 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
14325 * since we need to be able to guarantee forward progress even under
14328 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
14330 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
14335 return pci_register_driver(&i40e_driver);
14337 module_init(i40e_init_module);
14340 * i40e_exit_module - Driver exit cleanup routine
14342 * i40e_exit_module is called just before the driver is removed
14345 static void __exit i40e_exit_module(void)
14347 pci_unregister_driver(&i40e_driver);
14348 destroy_workqueue(i40e_wq);
14351 module_exit(i40e_exit_module);