// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>
#include "qede.h"
#include "qede_ptp.h"

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
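/* The "debug" value is decoded by qede_config_debug() further below into a
 * verbosity level and a per-module bitmap.
 */
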
static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664
#define CHIP_NUM_AH			0x8070
#define CHIP_NUM_AH_IOV			0x8090
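
/* The 0x16xx device IDs above are 57980S (BB family) adapters; the 0x80xx
 * IDs are the AH family.
 */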

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV

#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);

#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI			11

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
				      struct qed_generic_tlvs *data);
static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;
	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif

static const struct pci_error_handlers qede_err_handler = {
	.error_detected = qede_io_error_detected,
};

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
	.err_handler = &qede_err_handler,
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
		.schedule_recovery_handler = qede_schedule_recovery_handler,
		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
		.get_generic_tlv_data = qede_get_generic_tlv_data,
		.get_protocol_tlv_data = qede_get_eth_tlv_data,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static
int __init qede_init(void)
{
	int ret;

	pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");

	qede_forced_speed_maps_init();

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);

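	/* Mirror the chip-independent counters into the driver's cache */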
	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;
	p_common->gft_filter_drop = stats.common.gft_filter_drop;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
	p_common->link_change_count = stats.common.link_change_count;
	p_common->ptp_skip_txts = edev->ptp_skip_txts;

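	/* BB and AH adapters expose different extended-range frame counters */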
	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}
}

static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	qede_fill_by_demand_stats(edev);
	p_common = &edev->stats.common;

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return qede_ptp_hw_ts(edev, ifr);
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "default IOCTL cmd 0x%x\n", cmd);
		return -EOPNOTSUPP;
	}

	return 0;
}

static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
	char *p_sb = (char *)fp->sb_info->sb_virt;
	u32 sb_size, i;

	sb_size = sizeof(struct status_block);

	for (i = 0; i < sb_size; i += 8)
		DP_NOTICE(edev,
			  "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
			  p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
			  p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
}

static void
qede_txq_fp_log_metadata(struct qede_dev *edev,
			 struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_chain *p_chain = &txq->tx_pbl;

	/* Dump txq/fp/sb ids etc. other metadata */
	DP_NOTICE(edev,
		  "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
		  fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
		  p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);

	/* Dump all the relevant prod/cons indexes */
	DP_NOTICE(edev,
		  "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
		  le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
		  qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
}

static void
qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_sb_info_dbg sb_dbg;
	int rc;

	/* sb info */
	qede_fp_sb_dump(edev, fp);

	memset(&sb_dbg, 0, sizeof(sb_dbg));
	rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);

	DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
		  sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);

	/* report to mfw */
	edev->ops->common->mfw_report(edev->cdev,
				      "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
				      txq->index, le16_to_cpu(*txq->hw_cons_ptr),
				      qed_chain_get_cons_idx(&txq->tx_pbl),
				      qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
	if (!rc)
		edev->ops->common->mfw_report(edev->cdev,
					      "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
					      txq->index, fp->sb_info->igu_sb_id,
					      sb_dbg.igu_prod, sb_dbg.igu_cons,
					      sb_dbg.pi[TX_PI(txq->cos)]);
}

static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	netif_carrier_off(dev);
	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);

	for_each_queue(i) {
		struct qede_tx_queue *txq;
		struct qede_fastpath *fp;
		int cos;

		fp = &edev->fp_array[i];
		if (!(fp->type & QEDE_FASTPATH_TX))
			continue;

		for_each_cos_in_txq(edev, cos) {
			txq = &fp->txq[cos];

			/* Dump basic metadata for all queues */
			qede_txq_fp_log_metadata(edev, fp, txq);

			if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
			    qed_chain_get_prod_idx(&txq->tx_pbl))
				qede_tx_log_print(edev, fp, txq);
		}
	}

	if (IS_VF(edev))
		return;
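
	/* Treat the timeout like any other HW error: flag it and let the
	 * slow-path task collect debug data and run the recovery flow.
	 */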
	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	    edev->state == QEDE_STATE_RECOVERY) {
		DP_INFO(edev,
			"Avoid handling a Tx timeout while another HW error is being handled\n");
		return;
	}

	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int cos, count, offset;

	if (num_tc > edev->dev_info.num_tc)
		return -EINVAL;

	netdev_reset_tc(ndev);
	netdev_set_num_tc(ndev, num_tc);

	for_each_cos_in_txq(edev, cos) {
		count = QEDE_TSS_COUNT(edev);
		offset = cos * QEDE_TSS_COUNT(edev);
		netdev_set_tc_queue(ndev, cos, count, offset);
	}

	return 0;
}

static int
qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
		__be16 proto)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return qede_add_tc_flower_fltr(edev, proto, f);
	case FLOW_CLS_DESTROY:
		return qede_delete_flow_filter(edev, f->cookie);
	default:
		return -EOPNOTSUPP;
	}
}

static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct flow_cls_offload *f;
	struct qede_dev *edev = cb_priv;

	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		f = type_data;
		return qede_set_flower(edev, f, f->common.protocol);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(qede_block_cb_list);

static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
		      void *type_data)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct tc_mqprio_qopt *mqprio;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &qede_block_cb_list,
						  qede_setup_tc_block_cb,
						  edev, edev, true);
	case TC_SETUP_QDISC_MQPRIO:
		mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return qede_setup_tc(dev, mqprio->num_tc);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_eth_ioctl		= qede_ioctl,
	.ndo_tx_timeout		= qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac		= qede_set_vf_mac,
	.ndo_set_vf_vlan	= qede_set_vf_vlan,
	.ndo_set_vf_trust	= qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state	= qede_set_vf_link_state,
	.ndo_set_vf_spoofchk	= qede_set_vf_spoofchk,
	.ndo_get_vf_config	= qede_get_vf_config,
	.ndo_set_vf_rate	= qede_set_vf_rate,
#endif
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= qede_rx_flow_steer,
#endif
	.ndo_xdp_xmit		= qede_xdp_transmit,
	.ndo_setup_tc		= qede_setup_tc_offload,
};

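/* VF variants of the ops below lack the PF-only hooks (SR-IOV
 * administration, Tx timeout handling, flow steering and TC offload).
 */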
static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_features_check	= qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
	.ndo_xdp_xmit		= qede_xdp_transmit,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues * info->num_tc,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;

	if (is_kdump_kernel()) {
		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
	} else {
		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
	}

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', if the device supports it, declare it enabled.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;

	if (edev->dev_info.common.b_arfs_capable)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);

		qede_set_udp_tunnels(edev);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 *       'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 *       and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 *          module. VERBOSE prints are for tracking the specific flow in low
 *          level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}

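/* Example (illustrative values): debug=0x3 yields QED_LEVEL_VERBOSE with
 * VERBOSE prints enabled only for the modules at bits 0 and 1, while
 * debug=0x40000000 yields QED_LEVEL_INFO.
 */
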
static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle mem alloc failure case where qede_init_fp
			 * didn't register xdp_rxq_info yet.
			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	void *mem;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
		       sizeof(*edev->coal_entry), GFP_KERNEL);
	if (!mem) {
		DP_ERR(edev, "coalesce entry allocation failed\n");
		kfree(edev->coal_entry);
		goto err;
	}
	edev->coal_entry = mem;

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kcalloc(edev->dev_info.num_tc,
					  sizeof(*fp->txq), GFP_KERNEL);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
						     GFP_KERNEL);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}

/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}

/* This version of the lock should be used when acquiring the RTNL lock is also
 * needed in addition to the internal qede lock.
 */
static void qede_lock(struct qede_dev *edev)
{
	rtnl_lock();
	__qede_lock(edev);
}

static void qede_unlock(struct qede_dev *edev)
{
	__qede_unlock(edev);
	rtnl_unlock();
}

static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	/* Disable execution of this deferred work once
	 * qede removal is in progress; this stops any future
	 * scheduling of sp_task.
	 */
	if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
		return;

	/* The locking scheme depends on the specific flag:
	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
	 * ensure that ongoing flows are ended and new ones are not started.
	 * In other cases - only the internal qede lock should be acquired.
	 */

	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		qede_lock(edev);
		qede_recovery_handler(edev);
		qede_unlock(edev);
	}

	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
		if (edev->state == QEDE_STATE_OPEN)
			qede_process_arfs_filters(edev, false);
	}
#endif
	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
		qede_generic_hw_err_handler(edev);
	__qede_unlock(edev);

	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		edev->ops->common->recovery_process(edev->cdev);
	}
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;
	u16 num_cons;

	/* 64 rx + 64 tx + 64 XDP */
	memset(&pf_params, 0, sizeof(struct qed_pf_params));

	/* 1 rx + 1 xdp + max tx cos */
	num_cons = QED_MIN_L2_CONS;

	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
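	/* One status block is reserved (it serves the slowpath), hence the
	 * "- 1"; every remaining SB gets a full rx + xdp + tx-cos set of
	 * connections.
	 */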

	/* Same for VFs - make sure they'll have sufficient connections
	 * to support XDP Tx queues.
	 */
	pf_params.eth_pf_params.num_vf_cons = 48;

	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}

#define QEDE_FW_VER_STR_SIZE	80

static void qede_log_probe(struct qede_dev *edev)
{
	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
	u8 buf[QEDE_FW_VER_STR_SIZE];
	size_t left_size;

	snprintf(buf, QEDE_FW_VER_STR_SIZE,
		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
		 p_dev_info->fw_eng,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
		 QED_MFW_VERSION_3_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
		 QED_MFW_VERSION_2_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
		 QED_MFW_VERSION_1_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
		 QED_MFW_VERSION_0_OFFSET);

	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
	if (p_dev_info->mbi_version && left_size)
		snprintf(buf + strlen(buf), left_size,
			 " [MBI %d.%d.%d]",
			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
			 QED_MBI_VERSION_2_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
			 QED_MBI_VERSION_1_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
			 QED_MBI_VERSION_0_OFFSET);

	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
		buf, edev->ndev->name);
}

enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
	QEDE_PROBE_RECOVERY,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	if (mode != QEDE_PROBE_RECOVERY) {
		edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
					   dp_level);
		if (!edev) {
			rc = -ENOMEM;
			goto err2;
		}

		edev->devlink = qed_ops->common->devlink_register(cdev);
		if (IS_ERR(edev->devlink)) {
			DP_NOTICE(edev, "Cannot register devlink\n");
			rc = PTR_ERR(edev->devlink);
			edev->devlink = NULL;
			goto err3;
		}
	} else {
		struct net_device *ndev = pci_get_drvdata(pdev);
		struct qed_devlink *qdl;

		edev = netdev_priv(ndev);
		qdl = devlink_priv(edev->devlink);
		qdl->cdev = cdev;
		edev->cdev = cdev;
		memset(&edev->stats, 0, sizeof(edev->stats));
		memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
	}

	if (is_vf)
		set_bit(QEDE_FLAGS_IS_VF, &edev->flags);

	qede_init_ndev(edev);

	rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
	if (rc)
		goto err3;

	if (mode != QEDE_PROBE_RECOVERY) {
		/* Prepare the lock prior to the registration of the netdev,
		 * as once it's registered we might reach flows requiring it
		 * [it's even possible to reach a flow needing it directly
		 * from there, although it's unlikely].
		 */
		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
		mutex_init(&edev->qede_lock);

		rc = register_netdev(edev->ndev);
		if (rc) {
			DP_NOTICE(edev, "Cannot register net-device\n");
			goto err4;
		}
	}

	edev->ops->common->set_name(cdev, edev->ndev->name);

	/* PTP not supported on VFs */
	if (!is_vf)
		qede_ptp_enable(edev);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);
	return 0;

err4:
	qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
	if (mode != QEDE_PROBE_RECOVERY)
		free_netdev(edev->ndev);
	else
		edev->cdev = NULL;
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
	QEDE_REMOVE_RECOVERY,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev;
	struct qed_dev *cdev;

	if (!ndev) {
		dev_info(&pdev->dev, "Device has already been removed\n");
		return;
	}

	edev = netdev_priv(ndev);
	cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));

	if (mode != QEDE_REMOVE_RECOVERY) {
		set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
		unregister_netdev(ndev);

		cancel_delayed_work_sync(&edev->sp_task);

		edev->ops->common->set_power_state(cdev, PCI_D0);

		pci_set_drvdata(pdev, NULL);
	}

	qede_ptp_disable(edev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	if (system_state == SYSTEM_POWER_OFF)
		return;

	if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
		qed_ops->common->devlink_unregister(edev->devlink);
		edev->devlink = NULL;
	}
	qed_ops->common->remove(cdev);
	edev->cdev = NULL;

	/* Since this can happen out-of-sync with other flows,
	 * don't release the netdevice until after slowpath stop
	 * has been called to guarantee various other contexts
	 * [e.g., QED register callbacks] won't break anything when
	 * accessing the netdevice.
	 */
	if (mode != QEDE_REMOVE_RECOVERY) {
		kfree(edev->coal_entry);
		free_netdev(ndev);
	}

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}

static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
	u16 rss_num;
	int rc;

	/* Setup queues according to possible resources */
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	if (sb_info->sb_virt) {
		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
					      QED_SB_TYPE_L2_QUEUE);
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
		memset(sb_info, 0, sizeof(*sb_info));
	}
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

		rx_buf->data = NULL;
		__free_page(data);
	}
}

static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
	int i;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];

		tpa_info->state = QEDE_AGG_STATE_NONE;
	}
}

/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	struct qed_chain_init_params params = {
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= RX_RING_SIZE,
	};
	struct qed_dev *cdev = edev->cdev;
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
	size = rxq->rx_headroom +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Make sure that the headroom and payload fit in a single page */
	if (rxq->rx_buf_size + size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE - size;

	/* Segment size to split a page in multiple equal parts,
	 * unless XDP is used in which case we'd use the entire page.
	 */
	if (!edev->xdp_prog) {
		size = size + rxq->rx_buf_size;
		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
	} else {
		rxq->rx_buf_seg_size = PAGE_SIZE;
		edev->ndev->features &= ~NETIF_F_GRO_HW;
	}

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		rc = -ENOMEM;
		goto err;
	}

	/* Allocate FW Rx ring */
	params.mode = QED_CHAIN_MODE_NEXT_PTR;
	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
	params.elem_size = sizeof(struct eth_rx_bd);

	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	params.mode = QED_CHAIN_MODE_PBL;
	params.intended_use = QED_CHAIN_USE_TO_CONSUME;
	params.elem_size = sizeof(union eth_rx_cqe);

	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	rxq->filled_buffers = 0;
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(rxq, false);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
	if (!edev->gro_disable)
		qede_set_tpa_param(rxq);
err:
	return rc;
}

static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	if (txq->is_xdp)
		kfree(txq->sw_tx_ring.xdp);
	else
		kfree(txq->sw_tx_ring.skbs);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= edev->q_num_tx_buffers,
		.elem_size	= sizeof(union eth_tx_bd_types),
	};
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	if (txq->is_xdp) {
		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.xdp)
			goto err;
	} else {
		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.skbs)
			goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	qede_free_mem_sb(edev, fp->sb_info, fp->id);

	if (fp->type & QEDE_FASTPATH_RX)
		qede_free_mem_rxq(edev, fp->rxq);

	if (fp->type & QEDE_FASTPATH_XDP)
		qede_free_mem_txq(edev, fp->xdp_tx);

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos)
			qede_free_mem_txq(edev, &fp->txq[cos]);
	}
}

/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains status block, one rx queue and/or multiple per-TC tx queues.
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int rc = 0;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
	if (rc)
		goto out;

	if (fp->type & QEDE_FASTPATH_RX) {
		rc = qede_alloc_mem_rxq(edev, fp->rxq);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_XDP) {
		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos) {
			rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
			if (rc)
				goto out;
		}
	}

out:
	return rc;
}

static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}

static void qede_empty_tx_queue(struct qede_dev *edev,
				struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	int rc, len = 0;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
			   qed_chain_get_prod_idx(&txq->tx_pbl));

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev,
				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
				  txq->index,
				  qed_chain_get_cons_idx(&txq->tx_pbl),
				  qed_chain_get_prod_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
}

static void qede_empty_tx_queues(struct qede_dev *edev)
{
	int i;

	for_each_queue(i)
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_fastpath *fp;

				fp = &edev->fp_array[i];
				qede_empty_tx_queue(edev,
						    &fp->txq[cos]);
			}
		}
}

/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0;
	struct qede_fastpath *fp;
	bool init_xdp = false;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		if (fp->type & QEDE_FASTPATH_XDP) {
			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
								rxq_index);
			fp->xdp_tx->is_xdp = 1;

			spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
			init_xdp = true;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq->rxq_id = rxq_index++;

			/* Determine how to map buffers for this queue */
			if (fp->type & QEDE_FASTPATH_XDP)
				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
			else
				fp->rxq->data_direction = DMA_FROM_DEVICE;
			fp->rxq->dev = &edev->pdev->dev;

			/* Driver has no error path from here on */
			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
						 fp->rxq->rxq_id, 0) < 0);

			if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
						       MEM_TYPE_PAGE_ORDER0,
						       NULL))
				DP_NOTICE(edev,
					  "Failed to register XDP memory model\n");
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_tx_queue *txq = &fp->txq[cos];
				u16 ndev_tx_id;

				txq->cos = cos;
				txq->index = txq_index;
				ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
				txq->ndev_txq_id = ndev_tx_id;

				if (edev->dev_info.is_legacy)
					txq->is_legacy = true;
				txq->dev = &edev->pdev->dev;
			}

			txq_index++;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}

	if (init_xdp) {
		edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
		DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
	}
}

static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc;

	rc = netif_set_real_num_tx_queues(edev->ndev,
					  QEDE_TSS_COUNT(edev) *
					  edev->dev_info.num_tc);
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}

static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
			       qede_poll, NAPI_POLL_WEIGHT);
		napi_enable(&edev->fp_array[i].napi);
	}
}

static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
	edev->int_info.msix_cnt = 0;
}

static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanitize number of interrupts == number of prepared RSS queues */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
#ifdef CONFIG_RFS_ACCEL
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
					      edev->int_info.msix[i].vector);
			if (rc) {
				DP_ERR(edev, "Failed to add CPU rmap\n");
				qede_free_arfs(edev);
			}
		}
#endif
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
#ifdef CONFIG_RFS_ACCEL
			if (edev->ndev->rx_cpu_rmap)
				free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

			edev->ndev->rx_cpu_rmap = NULL;
#endif
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}

static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}

static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should receive the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}

	return 0;
}

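/* Wait for a Tx queue to empty; if it appears stuck, optionally ask the
 * MCP (management firmware) to drain it once before giving up.
 */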
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
		barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}

static int qede_stop_txq(struct qede_dev *edev,
			 struct qede_tx_queue *txq, int rss_id)
{
	/* delete doorbell from doorbell recovery mechanism */
	edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
					   &txq->tx_db);

	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}

static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params *vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	struct qede_fastpath *fp;
	int rc, i;

	/* Disable the vport */
	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	vport_update_params->vport_id = 0;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 0;
	vport_update_params->update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, vport_update_params);
	vfree(vport_update_params);

	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_drain_txq(edev, &fp->txq[cos], true);
				if (rc)
					return rc;
			}
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_drain_txq(edev, fp->xdp_tx, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		fp = &edev->fp_array[i];

		/* Stop the Tx Queue(s) */
		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_stop_txq(edev, &fp->txq[cos], i);
				if (rc)
					return rc;
			}
		}

		/* Stop the Rx Queue */
		if (fp->type & QEDE_FASTPATH_RX) {
			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}

		/* Stop the XDP forwarding queue */
		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_stop_txq(edev, fp->xdp_tx, i);
			if (rc)
				return rc;

			bpf_prog_put(fp->rxq->xdp_prog);
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}

static int qede_start_txq(struct qede_dev *edev,
			  struct qede_fastpath *fp,
			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
	struct qed_queue_start_common_params params;
	struct qed_txq_start_ret_params ret_params;
	int rc;

	memset(&params, 0, sizeof(params));
	memset(&ret_params, 0, sizeof(ret_params));

	/* Let the XDP queue share the queue-zone with one of the regular txq.
	 * We don't really care about its coalescing.
	 */
	if (txq->is_xdp)
		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
	else
		params.queue_id = txq->index;

	params.p_sb = fp->sb_info;
	params.sb_idx = sb_idx;
	params.tc = txq->cos;

	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
				   page_cnt, &ret_params);
	if (rc) {
		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
		return rc;
	}

	txq->doorbell_addr = ret_params.p_doorbell;
	txq->handle = ret_params.p_handle;

	/* Determine the FW consumer address associated */
	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

	/* Prepare the doorbell parameters */
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_ETH_TX_BD_PROD_CMD);
	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	/* register doorbell with doorbell recovery mechanism */
	rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
						&txq->tx_db, DB_REC_WIDTH_32B,
						DB_REC_KERNEL);

	return rc;
}

static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};
	int rc = 0, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-VPORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	start.handle_ptp_pkts = !!(edev->ptp);
	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);

	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		goto out;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
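	/* (the 0xe added to the MTU above is the 14-byte Ethernet header) */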
2238 struct qede_fastpath *fp = &edev->fp_array[i];
2239 dma_addr_t p_phys_table;
2242 if (fp->type & QEDE_FASTPATH_RX) {
2243 struct qed_rxq_start_ret_params ret_params;
2244 struct qede_rx_queue *rxq = fp->rxq;
2247 memset(&ret_params, 0, sizeof(ret_params));
2248 memset(&q_params, 0, sizeof(q_params));
2249 q_params.queue_id = rxq->rxq_id;
2250 q_params.vport_id = 0;
2251 q_params.p_sb = fp->sb_info;
2252 q_params.sb_idx = RX_PI;
2255 qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
2256 page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
2258 rc = edev->ops->q_rx_start(cdev, i, &q_params,
2260 rxq->rx_bd_ring.p_phys_addr,
2262 page_cnt, &ret_params);
2264 DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
2269 /* Use the return parameters */
2270 rxq->hw_rxq_prod_addr = ret_params.p_prod;
2271 rxq->handle = ret_params.p_handle;
2273 val = &fp->sb_info->sb_virt->pi_array[RX_PI];
2274 rxq->hw_cons_ptr = val;
2276 qede_update_rx_prod(edev, rxq);
2279 if (fp->type & QEDE_FASTPATH_XDP) {
2280 rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
2284 bpf_prog_add(edev->xdp_prog, 1);
2285 fp->rxq->xdp_prog = edev->xdp_prog;
2288 if (fp->type & QEDE_FASTPATH_TX) {
2291 for_each_cos_in_txq(edev, cos) {
2292 rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
2300 /* Prepare and send the vport enable */
2301 vport_update_params->vport_id = start.vport_id;
2302 vport_update_params->update_vport_active_flg = 1;
2303 vport_update_params->vport_active_flg = 1;
2305 if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
2306 qed_info->tx_switching) {
2307 vport_update_params->update_tx_switching_flg = 1;
2308 vport_update_params->tx_switching_flg = 1;
2311 qede_fill_rss_params(edev, &vport_update_params->rss_params,
2312 &vport_update_params->update_rss_flg);
2314 rc = edev->ops->vport_update(cdev, vport_update_params);
2316 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
2319 vfree(vport_update_params);
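/* Editor's note: fp->type is a bitmask, which is why the loop above tests
 * QEDE_FASTPATH_RX, QEDE_FASTPATH_XDP and QEDE_FASTPATH_TX independently;
 * one fastpath may host any combination of ring types. A small illustrative
 * helper, assuming only the flag semantics shown in this file
 * (example_fp_has() is not a driver function):
 */
static inline bool example_fp_has(const struct qede_fastpath *fp, u8 mask)
{
        /* e.g. example_fp_has(fp, QEDE_FASTPATH_RX | QEDE_FASTPATH_XDP)
         * is true only when the fastpath carries both an Rx ring and an
         * XDP forwarding queue.
         */
        return (fp->type & mask) == mask;
}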
2323 enum qede_unload_mode {
2325 QEDE_UNLOAD_RECOVERY,
2328 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
2331 struct qed_link_params link_params;
2334 DP_INFO(edev, "Starting qede unload\n");
2339 clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2341 if (mode != QEDE_UNLOAD_RECOVERY)
2342 edev->state = QEDE_STATE_CLOSED;
2344 qede_rdma_dev_event_close(edev);
2347 netif_tx_disable(edev->ndev);
2348 netif_carrier_off(edev->ndev);
2350 if (mode != QEDE_UNLOAD_RECOVERY) {
2351 /* Reset the link */
2352 memset(&link_params, 0, sizeof(link_params));
2353 link_params.link_up = false;
2354 edev->ops->common->set_link(edev->cdev, &link_params);
2356 rc = qede_stop_queues(edev);
2358 #ifdef CONFIG_RFS_ACCEL
2359 if (edev->dev_info.common.b_arfs_capable) {
2360 qede_poll_for_freeing_arfs_filters(edev);
2361 if (edev->ndev->rx_cpu_rmap)
2362 free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
2364 edev->ndev->rx_cpu_rmap = NULL;
2367 qede_sync_free_irqs(edev);
2371 DP_INFO(edev, "Stopped Queues\n");
2374 qede_vlan_mark_nonconfigured(edev);
2375 edev->ops->fastpath_stop(edev->cdev);
2377 if (edev->dev_info.common.b_arfs_capable) {
2378 qede_poll_for_freeing_arfs_filters(edev);
2379 qede_free_arfs(edev);
2382 /* Release the interrupts */
2383 qede_sync_free_irqs(edev);
2384 edev->ops->common->set_fp_int(edev->cdev, 0);
2386 qede_napi_disable_remove(edev);
2388 if (mode == QEDE_UNLOAD_RECOVERY)
2389 qede_empty_tx_queues(edev);
2391 qede_free_mem_load(edev);
2392 qede_free_fp_array(edev);
2396 __qede_unlock(edev);
2398 if (mode != QEDE_UNLOAD_RECOVERY)
2399 DP_NOTICE(edev, "Link is down\n");
2401 edev->ptp_skip_txts = 0;
2403 DP_INFO(edev, "Ending qede unload\n");
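/* Editor's note: a condensed, illustrative view of how the two unload modes
 * are chosen (example_teardown() is hypothetical): NORMAL still talks to the
 * device (link reset, queue-stop ramrods), while RECOVERY assumes the HW/FW
 * state is lost and tears down host-side resources only.
 */
static void example_teardown(struct qede_dev *edev, bool hw_is_gone)
{
        qede_unload(edev,
                    hw_is_gone ? QEDE_UNLOAD_RECOVERY : QEDE_UNLOAD_NORMAL,
                    true);      /* caller already holds the qede lock */
}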
2406 enum qede_load_mode {
2412 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
2415 struct qed_link_params link_params;
2416 struct ethtool_coalesce coal = {};
2420 DP_INFO(edev, "Starting qede load\n");
2425 rc = qede_set_num_queues(edev);
2429 rc = qede_alloc_fp_array(edev);
2435 rc = qede_alloc_mem_load(edev);
2438 DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
2439 QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
2441 rc = qede_set_real_num_queues(edev);
2445 if (qede_alloc_arfs(edev)) {
2446 edev->ndev->features &= ~NETIF_F_NTUPLE;
2447 edev->dev_info.common.b_arfs_capable = false;
2450 qede_napi_add_enable(edev);
2451 DP_INFO(edev, "Napi added and enabled\n");
2453 rc = qede_setup_irqs(edev);
2456 DP_INFO(edev, "Setup IRQs succeeded\n");
2458 rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
2461 DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
2463 num_tc = netdev_get_num_tc(edev->ndev);
2464 num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
2465 qede_setup_tc(edev->ndev, num_tc);
2467 /* Program un-configured VLANs */
2468 qede_configure_vlan_filters(edev);
2470 set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2472 /* Ask for link-up using current configuration */
2473 memset(&link_params, 0, sizeof(link_params));
2474 link_params.link_up = true;
2475 edev->ops->common->set_link(edev->cdev, &link_params);
2477 edev->state = QEDE_STATE_OPEN;
2479 coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
2480 coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
2483 if (edev->coal_entry[i].isvalid) {
2484 coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
2485 coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
2487 __qede_unlock(edev);
2488 qede_set_per_coalesce(edev->ndev, i, &coal);
2491 DP_INFO(edev, "Ending qede load successfully\n");
2495 qede_sync_free_irqs(edev);
2497 qede_napi_disable_remove(edev);
2499 qede_free_mem_load(edev);
2501 edev->ops->common->set_fp_int(edev->cdev, 0);
2502 qede_free_fp_array(edev);
2503 edev->num_queues = 0;
2504 edev->fp_num_tx = 0;
2505 edev->fp_num_rx = 0;
2508 __qede_unlock(edev);
2513 /* 'func' should be able to run between unload and reload assuming the
2514 * interface is actually running, or afterwards if it's currently DOWN; see the usage sketch after this function.
2516 void qede_reload(struct qede_dev *edev,
2517 struct qede_reload_args *args, bool is_locked)
2522 /* Since qede_lock is held, the internal state won't change even
2523 * if the netdev state starts transitioning. Check whether the current
2524 * internal configuration indicates the device is up, then reload.
2526 if (edev->state == QEDE_STATE_OPEN) {
2527 qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
2529 args->func(edev, args);
2530 qede_load(edev, QEDE_LOAD_RELOAD, true);
2532 /* Since no one is going to do it for us, re-configure */
2533 qede_config_rx_mode(edev->ndev);
2535 args->func(edev, args);
2539 __qede_unlock(edev);
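/* Editor's note: a usage sketch for qede_reload(), modeled on the driver's
 * MTU-change flow; the example_* names are illustrative, not the verbatim
 * driver code. The callback runs between unload and reload when the device
 * is up, or directly when it is down.
 */
static void example_update_mtu(struct qede_dev *edev,
                               struct qede_reload_args *args)
{
        edev->ndev->mtu = args->u.mtu;
}

static void example_change_mtu(struct qede_dev *edev, u16 new_mtu)
{
        struct qede_reload_args args;

        args.u.mtu = new_mtu;
        args.func = example_update_mtu;
        qede_reload(edev, &args, false);
}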
2542 /* called with rtnl_lock */
2543 static int qede_open(struct net_device *ndev)
2545 struct qede_dev *edev = netdev_priv(ndev);
2548 netif_carrier_off(ndev);
2550 edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2552 rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2556 udp_tunnel_nic_reset_ntf(ndev);
2558 edev->ops->common->update_drv_state(edev->cdev, true);
2563 static int qede_close(struct net_device *ndev)
2565 struct qede_dev *edev = netdev_priv(ndev);
2567 qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
2570 edev->ops->common->update_drv_state(edev->cdev, false);
2575 static void qede_link_update(void *dev, struct qed_link_output *link)
2577 struct qede_dev *edev = dev;
2579 if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
2580 DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
2584 if (link->link_up) {
2585 if (!netif_carrier_ok(edev->ndev)) {
2586 DP_NOTICE(edev, "Link is up\n");
2587 netif_tx_start_all_queues(edev->ndev);
2588 netif_carrier_on(edev->ndev);
2589 qede_rdma_dev_event_open(edev);
2592 if (netif_carrier_ok(edev->ndev)) {
2593 DP_NOTICE(edev, "Link is down\n");
2594 netif_tx_disable(edev->ndev);
2595 netif_carrier_off(edev->ndev);
2596 qede_rdma_dev_event_close(edev);
2601 static void qede_schedule_recovery_handler(void *dev)
2603 struct qede_dev *edev = dev;
2605 if (edev->state == QEDE_STATE_RECOVERY) {
2607 "Avoid scheduling a recovery handling since already in recovery state\n");
2611 set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
2612 schedule_delayed_work(&edev->sp_task, 0);
2614 DP_INFO(edev, "Scheduled a recovery handler\n");
2617 static void qede_recovery_failed(struct qede_dev *edev)
2619 netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
2621 netif_device_detach(edev->ndev);
2624 edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
2627 static void qede_recovery_handler(struct qede_dev *edev)
2629 u32 curr_state = edev->state;
2632 DP_NOTICE(edev, "Starting a recovery process\n");
2634 /* No need to acquire the qede_lock here first, since that is done by
2635 * qede_sp_task before calling this function.
2637 edev->state = QEDE_STATE_RECOVERY;
2639 edev->ops->common->recovery_prolog(edev->cdev);
2641 if (curr_state == QEDE_STATE_OPEN)
2642 qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
2644 __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
2646 rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
2647 IS_VF(edev), QEDE_PROBE_RECOVERY);
2653 if (curr_state == QEDE_STATE_OPEN) {
2654 rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
2658 qede_config_rx_mode(edev->ndev);
2659 udp_tunnel_nic_reset_ntf(edev->ndev);
2662 edev->state = curr_state;
2664 DP_NOTICE(edev, "Recovery handling is done\n");
2669 qede_recovery_failed(edev);
2672 static void qede_atomic_hw_err_handler(struct qede_dev *edev)
2674 struct qed_dev *cdev = edev->cdev;
2677 "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
2680 /* Get a call trace of the flow that led to the error */
2681 WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
2683 /* Prevent HW attentions from being reasserted */
2684 if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
2685 edev->ops->common->attn_clr_enable(cdev, true);
2687 DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
2690 static void qede_generic_hw_err_handler(struct qede_dev *edev)
2693 "Generic sleepable HW error handling started - err_flags 0x%lx\n",
2696 if (edev->devlink) {
2697 DP_NOTICE(edev, "Reporting fatal error to devlink\n");
2698 edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
2701 clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2703 DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
2706 static void qede_set_hw_err_flags(struct qede_dev *edev,
2707 enum qed_hw_err_type err_type)
2709 unsigned long err_flags = 0;
2712 case QED_HW_ERR_DMAE_FAIL:
2713 set_bit(QEDE_ERR_WARN, &err_flags);
2715 case QED_HW_ERR_MFW_RESP_FAIL:
2716 case QED_HW_ERR_HW_ATTN:
2717 case QED_HW_ERR_RAMROD_FAIL:
2718 case QED_HW_ERR_FW_ASSERT:
2719 set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
2720 set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
2721 /* Mark this error as recoverable and start recovery */
2722 set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
2726 DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
2730 edev->err_flags |= err_flags;
2733 static void qede_schedule_hw_err_handler(void *dev,
2734 enum qed_hw_err_type err_type)
2736 struct qede_dev *edev = dev;
2738 /* Fan failure cannot be masked by handling of another HW error or by a
2739 * concurrent recovery process.
2741 if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
2742 edev->state == QEDE_STATE_RECOVERY) &&
2743 err_type != QED_HW_ERR_FAN_FAIL) {
2745 "Avoid scheduling an error handling while another HW error is being handled\n");
2749 if (err_type >= QED_HW_ERR_LAST) {
2750 DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
2751 clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2755 edev->last_err_type = err_type;
2756 qede_set_hw_err_flags(edev, err_type);
2757 qede_atomic_hw_err_handler(edev);
2758 set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
2759 schedule_delayed_work(&edev->sp_task, 0);
2761 DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
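/* Editor's note: the sleepable half of the handling runs from the slowpath
 * worker scheduled above. A minimal sketch of how such a flag is typically
 * consumed there; illustrative, not the verbatim qede_sp_task() body.
 */
static void example_sp_task(struct work_struct *work)
{
        struct qede_dev *edev = container_of(work, struct qede_dev,
                                             sp_task.work);

        if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
                qede_generic_hw_err_handler(edev);
}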
2764 static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
2766 struct netdev_queue *netdev_txq;
2768 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
2769 if (netif_xmit_stopped(netdev_txq))
2775 static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
2777 struct qede_dev *edev = dev;
2778 struct netdev_hw_addr *ha;
2781 if (edev->ndev->features & NETIF_F_IP_CSUM)
2782 data->feat_flags |= QED_TLV_IP_CSUM;
2783 if (edev->ndev->features & NETIF_F_TSO)
2784 data->feat_flags |= QED_TLV_LSO;
2786 ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
2787 eth_zero_addr(data->mac[1]);
2788 eth_zero_addr(data->mac[2]);
2789 /* Copy the first two UC macs */
2790 netif_addr_lock_bh(edev->ndev);
2792 netdev_for_each_uc_addr(ha, edev->ndev) {
2793 ether_addr_copy(data->mac[i++], ha->addr);
2794 if (i == QED_TLV_MAC_COUNT)
2798 netif_addr_unlock_bh(edev->ndev);
2801 static void qede_get_eth_tlv_data(void *dev, void *data)
2803 struct qed_mfw_tlv_eth *etlv = data;
2804 struct qede_dev *edev = dev;
2805 struct qede_fastpath *fp;
2808 etlv->lso_maxoff_size = 0xFFFF;
2809 etlv->lso_maxoff_size_set = true;
2810 etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
2811 etlv->lso_minseg_size_set = true;
2812 etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
2813 etlv->prom_mode_set = true;
2814 etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
2815 etlv->tx_descr_size_set = true;
2816 etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
2817 etlv->rx_descr_size_set = true;
2818 etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
2819 etlv->iov_offload_set = true;
2821 /* Fill information regarding queues; should be done under the qede
2822 * lock to guarantee those don't change beneath our feet.
2824 etlv->txqs_empty = true;
2825 etlv->rxqs_empty = true;
2826 etlv->num_txqs_full = 0;
2827 etlv->num_rxqs_full = 0;
2831 fp = &edev->fp_array[i];
2832 if (fp->type & QEDE_FASTPATH_TX) {
2833 struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
2835 if (txq->sw_tx_cons != txq->sw_tx_prod)
2836 etlv->txqs_empty = false;
2837 if (qede_is_txq_full(edev, txq))
2838 etlv->num_txqs_full++;
2840 if (fp->type & QEDE_FASTPATH_RX) {
2841 if (qede_has_rx_work(fp->rxq))
2842 etlv->rxqs_empty = false;
2844 /* This one is a bit tricky; firmware might stop
2845 * placing packets if the ring is not yet full, so
2846 * give an approximation.
2848 if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
2849 qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
2851 etlv->num_rxqs_full++;
2854 __qede_unlock(edev);
2856 etlv->txqs_empty_set = true;
2857 etlv->rxqs_empty_set = true;
2858 etlv->num_txqs_full_set = true;
2859 etlv->num_rxqs_full_set = true;
2863 * qede_io_error_detected(): Called when a PCI error is detected
2865 * @pdev: Pointer to PCI device
2866 * @state: The current pci connection state
2868 * Return: pci_ers_result_t.
2870 * This function is called after a PCI bus error affecting
2871 * this device has been detected.
2873 static pci_ers_result_t
2874 qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2876 struct net_device *dev = pci_get_drvdata(pdev);
2877 struct qede_dev *edev = netdev_priv(dev);
2880 return PCI_ERS_RESULT_NONE;
2882 DP_NOTICE(edev, "IO error detected [%d]\n", state);
2885 if (edev->state == QEDE_STATE_RECOVERY) {
2886 DP_NOTICE(edev, "Device already in the recovery state\n");
2887 __qede_unlock(edev);
2888 return PCI_ERS_RESULT_NONE;
2891 /* PF handles the recovery of its VFs */
2893 DP_VERBOSE(edev, QED_MSG_IOV,
2894 "VF recovery is handled by its PF\n");
2895 __qede_unlock(edev);
2896 return PCI_ERS_RESULT_RECOVERED;
2900 netif_tx_disable(edev->ndev);
2901 netif_carrier_off(edev->ndev);
2903 set_bit(QEDE_SP_AER, &edev->sp_flags);
2904 schedule_delayed_work(&edev->sp_task, 0);
2906 __qede_unlock(edev);
2908 return PCI_ERS_RESULT_CAN_RECOVER;
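/* Editor's note: this callback only quiesces the netdev and defers the real
 * work to the slowpath task; it takes effect once wired into the PCI core's
 * error-recovery machinery. A sketch of that registration, using the
 * kernel's struct pci_error_handlers (example_* names are illustrative):
 */
static const struct pci_error_handlers example_err_handler = {
        .error_detected = qede_io_error_detected,
};

static struct pci_driver example_pci_driver = {
        .name = "qede",
        .id_table = qede_pci_tbl,
        .probe = qede_probe,
        .remove = qede_remove,
        .shutdown = qede_shutdown,
        .err_handler = &example_err_handler,
};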