// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/aer.h>
#include <linux/phylink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#include "qed_devlink.h"

#define QED_ROCE_QPS                    (8192)
#define QED_ROCE_DPIS                   (8)
#define QED_RDMA_SRQS                   QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS           0xA
#define QED_NVM_CFG_GET_PF_FLAGS        0x1A
#define QED_NVM_CFG_MAX_ATTRS           50

static char version[] =
        "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION                         \
        __stringify(FW_MAJOR_VERSION) "."       \
        __stringify(FW_MINOR_VERSION) "."       \
        __stringify(FW_REVISION_VERSION) "."    \
        __stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME        \
        "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

/* MFW speed capabilities maps */

struct qed_mfw_speed_map {
        u32             mfw_val;
        __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);

        const u32       *cap_arr;
        u32             arr_size;
};

#define QED_MFW_SPEED_MAP(type, arr)            \
{                                               \
        .mfw_val        = (type),               \
        .cap_arr        = (arr),                \
        .arr_size       = ARRAY_SIZE(arr),      \
}

static const u32 qed_mfw_ext_1g[] __initconst = {
        ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
        ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
        ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_20g[] __initconst = {
        ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
        ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
        ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
        ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
        ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
        ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
        ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
                          qed_mfw_ext_50g_base_r),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
                          qed_mfw_ext_50g_base_r2),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
                          qed_mfw_ext_100g_base_r2),
        QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
                          qed_mfw_ext_100g_base_r4),
};

static const u32 qed_mfw_legacy_1g[] __initconst = {
        ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
        ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
        ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
        ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
        ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
        ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
        ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
        ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
        ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
        ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
        ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
        ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
        ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
                          qed_mfw_legacy_1g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
                          qed_mfw_legacy_10g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
                          qed_mfw_legacy_20g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
                          qed_mfw_legacy_25g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
                          qed_mfw_legacy_40g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
                          qed_mfw_legacy_50g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
                          qed_mfw_legacy_bb_100g),
};

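/* Convert a speed map's capability bit array into an ethtool link-mode
 * mask. Runs once at module init; the source arrays are __initconst, so
 * the reference is dropped afterwards to avoid dangling pointers once
 * init memory is freed.
 */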
static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
        linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);

        map->cap_arr = NULL;
        map->arr_size = 0;
}

static void __init qed_mfw_speed_maps_init(void)
{
        u32 i;

        for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
                qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

        for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
                qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}

static int __init qed_init(void)
{
        pr_info("%s", version);

        qed_mfw_speed_maps_init();

        return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
        /* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
        struct device *dev = &cdev->pdev->dev;

        if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
                if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
                        DP_NOTICE(cdev,
                                  "Can't request 64-bit consistent allocations\n");
                        return -EIO;
                }
        } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
                DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
                return -EIO;
        }

        return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
        struct pci_dev *pdev = cdev->pdev;

        pci_disable_pcie_error_reporting(pdev);

        if (cdev->doorbells && cdev->db_size)
                iounmap(cdev->doorbells);
        if (cdev->regview)
                iounmap(cdev->regview);
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL       0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
        u8 rev_id;
        int rc;

        cdev->pdev = pdev;

        rc = pci_enable_device(pdev);
        if (rc) {
                DP_NOTICE(cdev, "Cannot enable PCI device\n");
                goto err0;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #0\n");
                rc = -EIO;
                goto err1;
        }

        if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #2\n");
                rc = -EIO;
                goto err1;
        }

        if (atomic_read(&pdev->enable_cnt) == 1) {
                rc = pci_request_regions(pdev, "qed");
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to request PCI memory resources\n");
                        goto err1;
                }
                pci_set_master(pdev);
                pci_save_state(pdev);
        }

        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
        if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
                DP_NOTICE(cdev,
                          "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
                          rev_id);
                rc = -ENODEV;
                goto err2;
        }
        if (!pci_is_pcie(pdev)) {
                DP_NOTICE(cdev, "The bus is not PCI Express\n");
                rc = -EIO;
                goto err2;
        }

        cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
                DP_NOTICE(cdev, "Cannot find power management capability\n");

        rc = qed_set_coherency_mask(cdev);
        if (rc)
                goto err2;

        cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
        cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
        cdev->pci_params.irq = pdev->irq;

        cdev->regview = pci_ioremap_bar(pdev, 0);
        if (!cdev->regview) {
                DP_NOTICE(cdev, "Cannot map register space, aborting\n");
                rc = -ENOMEM;
                goto err2;
        }

        cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
        cdev->db_size = pci_resource_len(cdev->pdev, 2);
        if (!cdev->db_size) {
                if (IS_PF(cdev)) {
                        DP_NOTICE(cdev, "No Doorbell bar available\n");
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

        if (!cdev->doorbells) {
                DP_NOTICE(cdev, "Cannot map doorbell space\n");
                return -ENOMEM;
        }

        /* AER (Advanced Error reporting) configuration */
        rc = pci_enable_pcie_error_reporting(pdev);
        if (rc)
                DP_VERBOSE(cdev, NETIF_MSG_DRV,
                           "Failed to configure PCIe AER [%d]\n", rc);

        return 0;

err2:
        pci_release_regions(pdev);
err1:
        pci_disable_device(pdev);
err0:
        return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
                      struct qed_dev_info *dev_info)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_hw_info *hw_info = &p_hwfn->hw_info;
        struct qed_tunnel_info *tun = &cdev->tunnel;
        struct qed_ptt  *ptt;

        memset(dev_info, 0, sizeof(struct qed_dev_info));

        if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->vxlan.b_mode_enabled)
                dev_info->vxlan_enable = true;

        if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
            tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->gre_enable = true;

        if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
            tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->geneve_enable = true;

        dev_info->num_hwfns = cdev->num_hwfns;
        dev_info->pci_mem_start = cdev->pci_params.mem_start;
        dev_info->pci_mem_end = cdev->pci_params.mem_end;
        dev_info->pci_irq = cdev->pci_params.irq;
        dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
        dev_info->dev_type = cdev->type;
        ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

        if (IS_PF(cdev)) {
                dev_info->fw_major = FW_MAJOR_VERSION;
                dev_info->fw_minor = FW_MINOR_VERSION;
                dev_info->fw_rev = FW_REVISION_VERSION;
                dev_info->fw_eng = FW_ENGINEERING_VERSION;
                dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
                                                       &cdev->mf_bits);
                if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
                        dev_info->b_arfs_capable = true;
                dev_info->tx_switching = true;

                if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
                        dev_info->wol_support = true;

                dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

                dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
        } else {
                qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
                                      &dev_info->fw_minor, &dev_info->fw_rev,
                                      &dev_info->fw_eng);
        }

        if (IS_PF(cdev)) {
                ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                if (ptt) {
                        qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mfw_rev, NULL);

                        qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mbi_version);

                        qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
                                               &dev_info->flash_size);

                        qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
                }
        } else {
                qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
                                    &dev_info->mfw_rev, NULL);
        }

        dev_info->mtu = hw_info->mtu;
        cdev->common_dev_info = *dev_info;

        return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
        kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
        struct qed_dev *cdev;

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return cdev;

        qed_init_struct(cdev);

        return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
        if (!cdev)
                return -ENODEV;

        DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
        return 0;
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
                                 struct qed_probe_params *params)
{
        struct qed_dev *cdev;
        int rc;

        cdev = qed_alloc_cdev(pdev);
        if (!cdev)
                goto err0;

        cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
        cdev->protocol = params->protocol;

        if (params->is_vf)
                cdev->b_is_vf = true;

        qed_init_dp(cdev, params->dp_module, params->dp_level);

        cdev->recov_in_prog = params->recov_in_prog;

        rc = qed_init_pci(cdev, pdev);
        if (rc) {
                DP_ERR(cdev, "init pci failed\n");
                goto err1;
        }
        DP_INFO(cdev, "PCI init completed successfully\n");

        rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
        if (rc) {
                DP_ERR(cdev, "hw prepare failed\n");
                goto err2;
        }

        DP_INFO(cdev, "qed_probe completed successfully\n");

        return cdev;

err2:
        qed_free_pci(cdev);
err1:
        qed_free_cdev(cdev);
err0:
        return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
        if (!cdev)
                return;

        qed_hw_remove(cdev);

        qed_free_pci(cdev);

        qed_set_power_state(cdev, PCI_D3hot);

        qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                pci_disable_msix(cdev->pdev);
                kfree(cdev->int_params.msix_table);
        } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
                pci_disable_msi(cdev->pdev);
        }

        memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

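/* Try to enable the requested number of MSI-X vectors. If fewer are
 * available, retry with the largest count that is still a multiple of
 * the number of hwfns, so each hwfn keeps an equal share of fastpath
 * vectors. On success, out.num_vectors holds the enabled count.
 */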
static int qed_enable_msix(struct qed_dev *cdev,
                           struct qed_int_params *int_params)
{
        int i, rc, cnt;

        cnt = int_params->in.num_vectors;

        for (i = 0; i < cnt; i++)
                int_params->msix_table[i].entry = i;

        rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
                                   int_params->in.min_msix_cnt, cnt);
        if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
            (rc % cdev->num_hwfns)) {
                pci_disable_msix(cdev->pdev);

                /* If fastpath is initialized, we need at least one interrupt
                 * per hwfn [and the slow path interrupts]. New requested number
                 * should be a multiple of the number of hwfns.
                 */
                cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
                DP_NOTICE(cdev,
                          "Trying to enable MSI-X with less vectors (%d out of %d)\n",
                          cnt, int_params->in.num_vectors);
                rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
                                           cnt);
                if (!rc)
                        rc = cnt;
        }

        if (rc > 0) {
                /* MSI-x configuration was achieved */
                int_params->out.int_mode = QED_INT_MODE_MSIX;
                int_params->out.num_vectors = rc;
                rc = 0;
        } else {
                DP_NOTICE(cdev,
                          "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
                          cnt, rc);
        }

        return rc;
}

/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
        struct qed_int_params *int_params = &cdev->int_params;
        struct msix_entry *tbl;
        int rc = 0, cnt;

        switch (int_params->in.int_mode) {
        case QED_INT_MODE_MSIX:
                /* Allocate MSIX table */
                cnt = int_params->in.num_vectors;
                int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
                if (!int_params->msix_table) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* Enable MSIX */
                rc = qed_enable_msix(cdev, int_params);
                if (!rc)
                        goto out;

                DP_NOTICE(cdev, "Failed to enable MSI-X\n");
                kfree(int_params->msix_table);
                if (force_mode)
                        goto out;
                fallthrough;

        case QED_INT_MODE_MSI:
                if (cdev->num_hwfns == 1) {
                        rc = pci_enable_msi(cdev->pdev);
                        if (!rc) {
                                int_params->out.int_mode = QED_INT_MODE_MSI;
                                goto out;
                        }

                        DP_NOTICE(cdev, "Failed to enable MSI\n");
                        if (force_mode)
                                goto out;
                }
                fallthrough;

        case QED_INT_MODE_INTA:
                int_params->out.int_mode = QED_INT_MODE_INTA;
                rc = 0;
                goto out;
        default:
                DP_NOTICE(cdev, "Unknown int_mode value %d\n",
                          int_params->in.int_mode);
                rc = -EINVAL;
        }

out:
        if (!rc)
                DP_INFO(cdev, "Using %s interrupts\n",
                        int_params->out.int_mode == QED_INT_MODE_INTA ?
                        "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
                        "MSI" : "MSIX");
        cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

        return rc;
}

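/* Register a fastpath handler for a status-block index. Indices are
 * striped across the hwfns in CMT mode, so the relative index within a
 * hwfn is index / num_hwfns.
 */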
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
                                    int index, void (*handler)(void *))
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        hwfn->simd_proto_handler[relative_idx].func = handler;
        hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        memset(&hwfn->simd_proto_handler[relative_idx], 0,
               sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
        tasklet_schedule((struct tasklet_struct *)tasklet);
        return IRQ_HANDLED;
}

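/* Single-vector (INTx/MSI) ISR: bit 0 of the SISR register signals the
 * slowpath, deferred to the sp_dpc tasklet, while bits 1..64 map to the
 * fastpath handlers registered via qed_simd_handler_config().
 */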
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
        struct qed_dev *cdev = (struct qed_dev *)dev_instance;
        struct qed_hwfn *hwfn;
        irqreturn_t rc = IRQ_NONE;
        u64 status;
        int i, j;

        for (i = 0; i < cdev->num_hwfns; i++) {
                status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

                if (!status)
                        continue;

                hwfn = &cdev->hwfns[i];

                /* Slowpath interrupt */
                if (unlikely(status & 0x1)) {
                        tasklet_schedule(&hwfn->sp_dpc);
                        status &= ~0x1;
                        rc = IRQ_HANDLED;
                }

                /* Fastpath interrupts */
                for (j = 0; j < 64; j++) {
                        if ((0x2ULL << j) & status) {
                                struct qed_simd_fp_handler *p_handler =
                                        &hwfn->simd_proto_handler[j];

                                if (p_handler->func)
                                        p_handler->func(p_handler->token);
                                else
                                        DP_NOTICE(hwfn,
                                                  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
                                                  j, status);

                                status &= ~(0x2ULL << j);
                                rc = IRQ_HANDLED;
                        }
                }

                if (unlikely(status))
                        DP_VERBOSE(hwfn, NETIF_MSG_INTR,
                                   "got an unknown interrupt status 0x%llx\n",
                                   status);
        }

        return rc;
}

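/* Request the slowpath interrupt. With MSI-X each hwfn gets a dedicated
 * vector wired to its sp_dpc tasklet; otherwise a single (shared for
 * INTA) line is demultiplexed by qed_single_int().
 */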
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
        struct qed_dev *cdev = hwfn->cdev;
        u32 int_mode;
        int rc = 0;
        u8 id;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX) {
                id = hwfn->my_id;
                snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
                         id, cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
                rc = request_irq(cdev->int_params.msix_table[id].vector,
                                 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
        } else {
                unsigned long flags = 0;

                snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
                         cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
                         PCI_FUNC(cdev->pdev->devfn));

                if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
                        flags |= IRQF_SHARED;

                rc = request_irq(cdev->pdev->irq, qed_single_int,
                                 flags, cdev->name, cdev);
        }

        if (rc)
                DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
        else
                DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
                           "Requested slowpath %s\n",
                           (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

        return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
        /* Calling the disable function will make sure that any
         * currently-running function is completed. The following call to the
         * enable function makes this sequence a flush-like operation.
         */
        if (p_hwfn->b_sp_dpc_enabled) {
                tasklet_disable(&p_hwfn->sp_dpc);
                tasklet_enable(&p_hwfn->sp_dpc);
        }
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        u8 id = p_hwfn->my_id;
        u32 int_mode;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX)
                synchronize_irq(cdev->int_params.msix_table[id].vector);
        else
                synchronize_irq(cdev->pdev->irq);

        qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
        int i;

        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                for_each_hwfn(cdev, i) {
                        if (!cdev->hwfns[i].b_int_requested)
                                break;
                        synchronize_irq(cdev->int_params.msix_table[i].vector);
                        free_irq(cdev->int_params.msix_table[i].vector,
                                 &cdev->hwfns[i].sp_dpc);
                }
        } else {
                if (QED_LEADING_HWFN(cdev)->b_int_requested)
                        free_irq(cdev->pdev->irq, cdev);
        }
        qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
        int i, rc;

        rc = qed_hw_stop(cdev);

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (p_hwfn->b_sp_dpc_enabled) {
                        tasklet_disable(&p_hwfn->sp_dpc);
                        p_hwfn->b_sp_dpc_enabled = false;
                        DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
                                   "Disabled sp tasklet [hwfn %d] at %p\n",
                                   i, &p_hwfn->sp_dpc);
                }
        }

        qed_dbg_pf_exit(cdev);

        return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
        int rc, i;

        /* Determine if interface is going to require LL2 */
        if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
                for (i = 0; i < cdev->num_hwfns; i++) {
                        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                        p_hwfn->using_ll2 = true;
                }
        }

        rc = qed_resc_alloc(cdev);
        if (rc)
                return rc;

        DP_INFO(cdev, "Allocated qed resources\n");

        qed_resc_setup(cdev);

        return rc;
}

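/* Clamp a protocol driver's fastpath vector request to what is actually
 * available: the fastpath MSI-X vectors, or 63 SBs per hwfn in INTx/MSI
 * mode. Returns the usable count, or -ENOMEM if there are none.
 */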
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
        int limit = 0;

        /* Mark the fastpath as free/used */
        cdev->int_params.fp_initialized = cnt ? true : false;

        if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
                limit = cdev->num_hwfns * 63;
        else if (cdev->int_params.fp_msix_cnt)
                limit = cdev->int_params.fp_msix_cnt;

        if (!limit)
                return -ENOMEM;

        return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
        memset(info, 0, sizeof(struct qed_int_info));

        if (!cdev->int_params.fp_initialized) {
                DP_INFO(cdev,
                        "Protocol driver requested interrupt information, but its support is not yet configured\n");
                return -EINVAL;
        }

        /* Need to expose only MSI-X information; Single IRQ is handled solely
         * by qed.
         */
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                int msix_base = cdev->int_params.fp_msix_base;

                info->msix_cnt = cdev->int_params.fp_msix_cnt;
                info->msix = &cdev->int_params.msix_table[msix_base];
        }

        return 0;
}

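/* Size and enable PF interrupts: one vector per status block plus one
 * slowpath vector per hwfn. MSI-X vectors left over after serving the
 * L2 queues are assigned to RDMA, divided between the hwfns.
 */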
static int qed_slowpath_setup_int(struct qed_dev *cdev,
                                  enum qed_int_mode int_mode)
{
        struct qed_sb_cnt_info sb_cnt_info;
        int num_l2_queues = 0;
        int rc;
        int i;

        if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
                DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
                return -EINVAL;
        }

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = int_mode;
        for_each_hwfn(cdev, i) {
                memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
                qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
                cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
                cdev->int_params.in.num_vectors++; /* slowpath */
        }

        /* We want a minimum of one slowpath and one fastpath vector per hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

        if (is_kdump_kernel()) {
                DP_INFO(cdev,
                        "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
                        cdev->int_params.in.min_msix_cnt);
                cdev->int_params.in.num_vectors =
                        cdev->int_params.in.min_msix_cnt;
        }

        rc = qed_set_int_mode(cdev, false);
        if (rc) {
                DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
                return rc;
        }

        cdev->int_params.fp_msix_base = cdev->num_hwfns;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
                                       cdev->num_hwfns;

        if (!IS_ENABLED(CONFIG_QED_RDMA) ||
            !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
                return 0;

        for_each_hwfn(cdev, i)
                num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

        DP_VERBOSE(cdev, QED_MSG_RDMA,
                   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
                   cdev->int_params.fp_msix_cnt, num_l2_queues);

        if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
                cdev->int_params.rdma_msix_cnt =
                        (cdev->int_params.fp_msix_cnt - num_l2_queues)
                        / cdev->num_hwfns;
                cdev->int_params.rdma_msix_base =
                        cdev->int_params.fp_msix_base + num_l2_queues;
                cdev->int_params.fp_msix_cnt = num_l2_queues;
        } else {
                cdev->int_params.rdma_msix_cnt = 0;
        }

        DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
                   cdev->int_params.rdma_msix_cnt,
                   cdev->int_params.rdma_msix_base);

        return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
        int rc;

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

        qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
                            &cdev->int_params.in.num_vectors);
        if (cdev->num_hwfns > 1) {
                u8 vectors = 0;

                qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
                cdev->int_params.in.num_vectors += vectors;
        }

        /* We want a minimum of one fastpath vector per vf hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

        rc = qed_set_int_mode(cdev, true);
        if (rc)
                return rc;

        cdev->int_params.fp_msix_base = 0;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

        return 0;
}

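/* Inflate a zlib-compressed firmware buffer into unzip_buf. Returns the
 * decompressed length in dwords, or 0 on any zlib error.
 */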
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
                   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
        int rc;

        p_hwfn->stream->next_in = input_buf;
        p_hwfn->stream->avail_in = input_len;
        p_hwfn->stream->next_out = unzip_buf;
        p_hwfn->stream->avail_out = max_size;

        rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

        if (rc != Z_OK) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
                           rc);
                return 0;
        }

        rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
        zlib_inflateEnd(p_hwfn->stream);

        if (rc != Z_OK && rc != Z_STREAM_END) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
                           p_hwfn->stream->msg, rc);
                return 0;
        }

        return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
        int i;
        void *workspace;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
                if (!p_hwfn->stream)
                        return -ENOMEM;

                workspace = vzalloc(zlib_inflate_workspacesize());
                if (!workspace)
                        return -ENOMEM;
                p_hwfn->stream->workspace = workspace;
        }

        return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (!p_hwfn->stream)
                        return;

                vfree(p_hwfn->stream->workspace);
                kfree(p_hwfn->stream);
        }
}

static void qed_update_pf_params(struct qed_dev *cdev,
                                 struct qed_pf_params *params)
{
        int i;

        if (IS_ENABLED(CONFIG_QED_RDMA)) {
                params->rdma_pf_params.num_qps = QED_ROCE_QPS;
                params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
                params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
                /* Divide the MRs by 3 to avoid MF ILT overflow */
                params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
        }

        if (cdev->num_hwfns > 1 || IS_VF(cdev))
                params->eth_pf_params.num_arfs_filters = 0;

        /* In case we might support RDMA, don't allow qede to be greedy
         * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
         * per hwfn.
         */
        if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
                u16 *num_cons;

                num_cons = &params->eth_pf_params.num_cons;
                *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
        }

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->pf_params = *params;
        }
}

#define QED_PERIODIC_DB_REC_COUNT               10
#define QED_PERIODIC_DB_REC_INTERVAL_MS         100
#define QED_PERIODIC_DB_REC_INTERVAL \
        msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)

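/* Flag and queue a work item on the hwfn's slowpath workqueue, unless
 * the workqueue is being torn down.
 */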
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
                                     enum qed_slowpath_wq_flag wq_flag,
                                     unsigned long delay)
{
        if (!hwfn->slowpath_wq_active)
                return -EINVAL;

        /* Memory barrier for setting atomic bit */
        smp_mb__before_atomic();
        set_bit(wq_flag, &hwfn->slowpath_task_flags);
        smp_mb__after_atomic();
        queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

        return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
        /* Reset periodic Doorbell Recovery counter */
        p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

        /* Don't schedule periodic Doorbell Recovery if already scheduled */
        if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                     &p_hwfn->slowpath_task_flags))
                return;

        qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
                                  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
        int i;

        if (IS_VF(cdev))
                return;

        for_each_hwfn(cdev, i) {
                if (!cdev->hwfns[i].slowpath_wq)
                        continue;

                /* Stop queuing new delayed works */
                cdev->hwfns[i].slowpath_wq_active = false;

                cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
                destroy_workqueue(cdev->hwfns[i].slowpath_wq);
        }
}

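/* Slowpath workqueue handler: services MFW TLV requests and periodic
 * doorbell recovery. If no PTT window is free, the work requeues itself
 * to run again immediately.
 */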
static void qed_slowpath_task(struct work_struct *work)
{
        struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
                                             slowpath_task.work);
        struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

        if (!ptt) {
                if (hwfn->slowpath_wq_active)
                        queue_delayed_work(hwfn->slowpath_wq,
                                           &hwfn->slowpath_task, 0);

                return;
        }

        if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
                               &hwfn->slowpath_task_flags))
                qed_mfw_process_tlv_req(hwfn, ptt);

        if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                               &hwfn->slowpath_task_flags)) {
                qed_db_rec_handler(hwfn, ptt);
                if (hwfn->periodic_db_rec_count--)
                        qed_slowpath_delayed_work(hwfn,
                                                  QED_SLOWPATH_PERIODIC_DB_REC,
                                                  QED_PERIODIC_DB_REC_INTERVAL);
        }

        qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
        struct qed_hwfn *hwfn;
        char name[NAME_SIZE];
        int i;

        if (IS_VF(cdev))
                return 0;

        for_each_hwfn(cdev, i) {
                hwfn = &cdev->hwfns[i];

                snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
                         cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

                hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
                if (!hwfn->slowpath_wq) {
                        DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
                        return -ENOMEM;
                }

                INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
                hwfn->slowpath_wq_active = true;
        }

        return 0;
}

static int qed_slowpath_start(struct qed_dev *cdev,
                              struct qed_slowpath_params *params)
{
        struct qed_drv_load_params drv_load_params;
        struct qed_hw_init_params hw_init_params;
        struct qed_mcp_drv_version drv_version;
        struct qed_tunnel_info tunn_info;
        const u8 *data = NULL;
        struct qed_hwfn *hwfn;
        struct qed_ptt *p_ptt;
        int rc = -EINVAL;

        if (qed_iov_wq_start(cdev))
                goto err;

        if (qed_slowpath_wq_start(cdev))
                goto err;

        if (IS_PF(cdev)) {
                rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
                                      &cdev->pdev->dev);
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to find fw file - /lib/firmware/%s\n",
                                  QED_FW_FILE_NAME);
                        goto err;
                }

                if (cdev->num_hwfns == 1) {
                        p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                        if (p_ptt) {
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
                        } else {
                                DP_NOTICE(cdev,
                                          "Failed to acquire PTT for aRFS\n");
                                goto err;
                        }
                }
        }

        cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
        rc = qed_nic_setup(cdev);
        if (rc)
                goto err;

        if (IS_PF(cdev))
                rc = qed_slowpath_setup_int(cdev, params->int_mode);
        else
                rc = qed_slowpath_vf_setup_int(cdev);
        if (rc)
                goto err1;

        if (IS_PF(cdev)) {
                /* Allocate stream for unzipping */
                rc = qed_alloc_stream_mem(cdev);
                if (rc)
                        goto err2;

                /* First Dword used to differentiate between various sources */
                data = cdev->firmware->data + sizeof(u32);

                qed_dbg_pf_init(cdev);
        }

        /* Start the slowpath */
        memset(&hw_init_params, 0, sizeof(hw_init_params));
        memset(&tunn_info, 0, sizeof(tunn_info));
        tunn_info.vxlan.b_mode_enabled = true;
        tunn_info.l2_gre.b_mode_enabled = true;
        tunn_info.ip_gre.b_mode_enabled = true;
        tunn_info.l2_geneve.b_mode_enabled = true;
        tunn_info.ip_geneve.b_mode_enabled = true;
        tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        hw_init_params.p_tunn = &tunn_info;
        hw_init_params.b_hw_start = true;
        hw_init_params.int_mode = cdev->int_params.out.int_mode;
        hw_init_params.allow_npar_tx_switch = true;
        hw_init_params.bin_fw_data = data;

        memset(&drv_load_params, 0, sizeof(drv_load_params));
        drv_load_params.is_crash_kernel = is_kdump_kernel();
        drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
        drv_load_params.avoid_eng_reset = false;
        drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
        hw_init_params.p_drv_load_params = &drv_load_params;

        rc = qed_hw_init(cdev, &hw_init_params);
        if (rc)
                goto err2;

        DP_INFO(cdev,
                "HW initialization and function start completed successfully\n");

        if (IS_PF(cdev)) {
                cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
                                           BIT(QED_MODE_L2GENEVE_TUNN) |
                                           BIT(QED_MODE_IPGENEVE_TUNN) |
                                           BIT(QED_MODE_L2GRE_TUNN) |
                                           BIT(QED_MODE_IPGRE_TUNN));
        }

        /* Allocate LL2 interface if needed */
        if (QED_LEADING_HWFN(cdev)->using_ll2) {
                rc = qed_ll2_alloc_if(cdev);
                if (rc)
                        goto err3;
        }
        if (IS_PF(cdev)) {
                hwfn = QED_LEADING_HWFN(cdev);
                drv_version.version = (params->drv_major << 24) |
                                      (params->drv_minor << 16) |
                                      (params->drv_rev << 8) |
                                      (params->drv_eng);
                strlcpy(drv_version.name, params->name,
                        MCP_DRV_VER_STR_SIZE - 4);
                rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
                                              &drv_version);
                if (rc) {
                        DP_NOTICE(cdev, "Failed sending drv version command\n");
                        goto err4;
                }
        }

        qed_reset_vport_stats(cdev);

        return 0;

err4:
        qed_ll2_dealloc_if(cdev);
err3:
        qed_hw_stop(cdev);
err2:
        qed_hw_timers_stop_all(cdev);
        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);
        qed_free_stream_mem(cdev);
        qed_disable_msix(cdev);
err1:
        qed_resc_free(cdev);
err:
        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
            QED_LEADING_HWFN(cdev)->p_arfs_ptt)
                qed_ptt_release(QED_LEADING_HWFN(cdev),
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt);

        qed_iov_wq_stop(cdev, false);

        qed_slowpath_wq_stop(cdev);

        return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
        if (!cdev)
                return -ENODEV;

        qed_slowpath_wq_stop(cdev);

        qed_ll2_dealloc_if(cdev);

        if (IS_PF(cdev)) {
                if (cdev->num_hwfns == 1)
                        qed_ptt_release(QED_LEADING_HWFN(cdev),
                                        QED_LEADING_HWFN(cdev)->p_arfs_ptt);
                qed_free_stream_mem(cdev);
                if (IS_QED_ETH_IF(cdev))
                        qed_sriov_disable(cdev, true);
        }

        qed_nic_stop(cdev);

        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);

        qed_disable_msix(cdev);

        qed_resc_free(cdev);

        qed_iov_wq_stop(cdev, true);

        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
        int i;

        memcpy(cdev->name, name, NAME_SIZE);
        for_each_hwfn(cdev, i)
                snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

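/* Initialize a status block. L2 queue SBs are striped across the hwfns
 * in CMT mode, so the absolute sb_id is converted to a hwfn-relative
 * one; other protocols stay on the affined hwfn.
 */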
1461 static u32 qed_sb_init(struct qed_dev *cdev,
1462                        struct qed_sb_info *sb_info,
1463                        void *sb_virt_addr,
1464                        dma_addr_t sb_phy_addr, u16 sb_id,
1465                        enum qed_sb_type type)
1466 {
1467         struct qed_hwfn *p_hwfn;
1468         struct qed_ptt *p_ptt;
1469         u16 rel_sb_id;
1470         u32 rc;
1471
1472         /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1473         if (type == QED_SB_TYPE_L2_QUEUE) {
1474                 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1475                 rel_sb_id = sb_id / cdev->num_hwfns;
1476         } else {
1477                 p_hwfn = QED_AFFIN_HWFN(cdev);
1478                 rel_sb_id = sb_id;
1479         }
1480
1481         DP_VERBOSE(cdev, NETIF_MSG_INTR,
1482                    "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1483                    IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1484
1485         if (IS_PF(p_hwfn->cdev)) {
1486                 p_ptt = qed_ptt_acquire(p_hwfn);
1487                 if (!p_ptt)
1488                         return -EBUSY;
1489
1490                 rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
1491                                      sb_phy_addr, rel_sb_id);
1492                 qed_ptt_release(p_hwfn, p_ptt);
1493         } else {
1494                 rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
1495                                      sb_phy_addr, rel_sb_id);
1496         }
1497
1498         return rc;
1499 }
1500
1501 static u32 qed_sb_release(struct qed_dev *cdev,
1502                           struct qed_sb_info *sb_info,
1503                           u16 sb_id,
1504                           enum qed_sb_type type)
1505 {
1506         struct qed_hwfn *p_hwfn;
1507         u16 rel_sb_id;
1508         u32 rc;
1509
1510         /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1511         if (type == QED_SB_TYPE_L2_QUEUE) {
1512                 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1513                 rel_sb_id = sb_id / cdev->num_hwfns;
1514         } else {
1515                 p_hwfn = QED_AFFIN_HWFN(cdev);
1516                 rel_sb_id = sb_id;
1517         }
1518
1519         DP_VERBOSE(cdev, NETIF_MSG_INTR,
1520                    "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
1521                    IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1522
1523         rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1524
1525         return rc;
1526 }
1527
1528 static bool qed_can_link_change(struct qed_dev *cdev)
1529 {
1530         return true;
1531 }
1532
1533 static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
1534                                      const struct qed_link_params *params)
1535 {
1536         struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
1537         const struct qed_mfw_speed_map *map;
1538         u32 i;
1539
1540         if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1541                 ext_speed->autoneg = !!params->autoneg;
1542
1543         if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1544                 ext_speed->advertised_speeds = 0;
1545
1546                 for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
1547                         map = qed_mfw_ext_maps + i;
1548
1549                         if (linkmode_intersects(params->adv_speeds, map->caps))
1550                                 ext_speed->advertised_speeds |= map->mfw_val;
1551                 }
1552         }
1553
1554         if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
1555                 switch (params->forced_speed) {
1556                 case SPEED_1000:
1557                         ext_speed->forced_speed = QED_EXT_SPEED_1G;
1558                         break;
1559                 case SPEED_10000:
1560                         ext_speed->forced_speed = QED_EXT_SPEED_10G;
1561                         break;
1562                 case SPEED_20000:
1563                         ext_speed->forced_speed = QED_EXT_SPEED_20G;
1564                         break;
1565                 case SPEED_25000:
1566                         ext_speed->forced_speed = QED_EXT_SPEED_25G;
1567                         break;
1568                 case SPEED_40000:
1569                         ext_speed->forced_speed = QED_EXT_SPEED_40G;
1570                         break;
1571                 case SPEED_50000:
1572                         ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
1573                                                   QED_EXT_SPEED_50G_R2;
1574                         break;
1575                 case SPEED_100000:
1576                         ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
1577                                                   QED_EXT_SPEED_100G_R4 |
1578                                                   QED_EXT_SPEED_100G_P4;
1579                         break;
1580                 default:
1581                         break;
1582                 }
1583         }
1584
1585         if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
1586                 return;
1587
1588         switch (params->forced_speed) {
1589         case SPEED_25000:
1590                 switch (params->fec) {
1591                 case FEC_FORCE_MODE_NONE:
1592                         link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
1593                         break;
1594                 case FEC_FORCE_MODE_FIRECODE:
1595                         link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
1596                         break;
1597                 case FEC_FORCE_MODE_RS:
1598                         link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
1599                         break;
1600                 case FEC_FORCE_MODE_AUTO:
1601                         link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
1602                                                     ETH_EXT_FEC_25G_BASE_R |
1603                                                     ETH_EXT_FEC_25G_NONE;
1604                         break;
1605                 default:
1606                         break;
1607                 }
1608
1609                 break;
1610         case SPEED_40000:
1611                 switch (params->fec) {
1612                 case FEC_FORCE_MODE_NONE:
1613                         link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
1614                         break;
1615                 case FEC_FORCE_MODE_FIRECODE:
1616                         link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
1617                         break;
1618                 case FEC_FORCE_MODE_AUTO:
1619                         link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
1620                                                     ETH_EXT_FEC_40G_NONE;
1621                         break;
1622                 default:
1623                         break;
1624                 }
1625
1626                 break;
1627         case SPEED_50000:
1628                 switch (params->fec) {
1629                 case FEC_FORCE_MODE_NONE:
1630                         link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
1631                         break;
1632                 case FEC_FORCE_MODE_FIRECODE:
1633                         link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
1634                         break;
1635                 case FEC_FORCE_MODE_RS:
1636                         link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
1637                         break;
1638                 case FEC_FORCE_MODE_AUTO:
1639                         link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
1640                                                     ETH_EXT_FEC_50G_BASE_R |
1641                                                     ETH_EXT_FEC_50G_NONE;
1642                         break;
1643                 default:
1644                         break;
1645                 }
1646
1647                 break;
1648         case SPEED_100000:
1649                 switch (params->fec) {
1650                 case FEC_FORCE_MODE_NONE:
1651                         link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
1652                         break;
1653                 case FEC_FORCE_MODE_FIRECODE:
1654                         link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
1655                         break;
1656                 case FEC_FORCE_MODE_RS:
1657                         link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
1658                         break;
1659                 case FEC_FORCE_MODE_AUTO:
1660                         link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
1661                                                     ETH_EXT_FEC_100G_BASE_R |
1662                                                     ETH_EXT_FEC_100G_NONE;
1663                         break;
1664                 default:
1665                         break;
1666                 }
1667
1668                 break;
1669         default:
1670                 break;
1671         }
1672 }
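
/* Worked example (illustrative, derived from the switch above): a request
 * with QED_LINK_OVERRIDE_FEC_CONFIG set, forced_speed == SPEED_50000 and
 * fec == FEC_FORCE_MODE_AUTO leaves link_params->ext_fec_mode as
 * ETH_EXT_FEC_50G_RS528 | ETH_EXT_FEC_50G_BASE_R | ETH_EXT_FEC_50G_NONE,
 * i.e. all three FEC modes are offered to the MFW.
 */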
1673
1674 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
1675 {
1676         struct qed_mcp_link_params *link_params;
1677         struct qed_mcp_link_speed_params *speed;
1678         const struct qed_mfw_speed_map *map;
1679         struct qed_hwfn *hwfn;
1680         struct qed_ptt *ptt;
1681         int rc;
1682         u32 i;
1683
1684         if (!cdev)
1685                 return -ENODEV;
1686
1687         /* The link should be set only once per PF */
1688         hwfn = &cdev->hwfns[0];
1689
1690         /* When VF wants to set link, force it to read the bulletin instead.
1691          * This mimics the PF behavior, where a notification [both immediate
1692          * and possibly later] would be generated when changing properties.
1693          */
1694         if (IS_VF(cdev)) {
1695                 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
1696                 return 0;
1697         }
1698
1699         link_params = qed_mcp_get_link_params(hwfn);
1700         if (!link_params)
1701                 return -ENODATA;
1702
1703         ptt = qed_ptt_acquire(hwfn);
1704         if (!ptt)
1705                 return -EBUSY;
1706
1707         speed = &link_params->speed;
1708
1709         if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1710                 speed->autoneg = !!params->autoneg;
1711
1712         if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1713                 speed->advertised_speeds = 0;
1714
1715                 for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
1716                         map = qed_mfw_legacy_maps + i;
1717
1718                         if (linkmode_intersects(params->adv_speeds, map->caps))
1719                                 speed->advertised_speeds |= map->mfw_val;
1720                 }
1721         }
1722
1723         if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1724                 speed->forced_speed = params->forced_speed;
1725
1726         if (qed_mcp_is_ext_speed_supported(hwfn))
1727                 qed_set_ext_speed_params(link_params, params);
1728
1729         if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
1730                 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1731                         link_params->pause.autoneg = true;
1732                 else
1733                         link_params->pause.autoneg = false;
1734                 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
1735                         link_params->pause.forced_rx = true;
1736                 else
1737                         link_params->pause.forced_rx = false;
1738                 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
1739                         link_params->pause.forced_tx = true;
1740                 else
1741                         link_params->pause.forced_tx = false;
1742         }
1743
1744         if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1745                 switch (params->loopback_mode) {
1746                 case QED_LINK_LOOPBACK_INT_PHY:
1747                         link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1748                         break;
1749                 case QED_LINK_LOOPBACK_EXT_PHY:
1750                         link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1751                         break;
1752                 case QED_LINK_LOOPBACK_EXT:
1753                         link_params->loopback_mode = ETH_LOOPBACK_EXT;
1754                         break;
1755                 case QED_LINK_LOOPBACK_MAC:
1756                         link_params->loopback_mode = ETH_LOOPBACK_MAC;
1757                         break;
1758                 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
1759                         link_params->loopback_mode =
1760                                 ETH_LOOPBACK_CNIG_AH_ONLY_0123;
1761                         break;
1762                 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
1763                         link_params->loopback_mode =
1764                                 ETH_LOOPBACK_CNIG_AH_ONLY_2301;
1765                         break;
1766                 case QED_LINK_LOOPBACK_PCS_AH_ONLY:
1767                         link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
1768                         break;
1769                 case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
1770                         link_params->loopback_mode =
1771                                 ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
1772                         break;
1773                 case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
1774                         link_params->loopback_mode =
1775                                 ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
1776                         break;
1777                 default:
1778                         link_params->loopback_mode = ETH_LOOPBACK_NONE;
1779                         break;
1780                 }
1781         }
1782
1783         if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
1784                 memcpy(&link_params->eee, &params->eee,
1785                        sizeof(link_params->eee));
1786
1787         if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
1788                 link_params->fec = params->fec;
1789
1790         rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1791
1792         qed_ptt_release(hwfn, ptt);
1793
1794         return rc;
1795 }
1796
1797 static int qed_get_port_type(u32 media_type)
1798 {
1799         int port_type;
1800
1801         switch (media_type) {
1802         case MEDIA_SFPP_10G_FIBER:
1803         case MEDIA_SFP_1G_FIBER:
1804         case MEDIA_XFP_FIBER:
1805         case MEDIA_MODULE_FIBER:
1806                 port_type = PORT_FIBRE;
1807                 break;
1808         case MEDIA_DA_TWINAX:
1809                 port_type = PORT_DA;
1810                 break;
1811         case MEDIA_BASE_T:
1812                 port_type = PORT_TP;
1813                 break;
1814         case MEDIA_KR:
1815         case MEDIA_NOT_PRESENT:
1816                 port_type = PORT_NONE;
1817                 break;
1818         case MEDIA_UNSPECIFIED:
1819         default:
1820                 port_type = PORT_OTHER;
1821                 break;
1822         }
1823         return port_type;
1824 }
1825
1826 static int qed_get_link_data(struct qed_hwfn *hwfn,
1827                              struct qed_mcp_link_params *params,
1828                              struct qed_mcp_link_state *link,
1829                              struct qed_mcp_link_capabilities *link_caps)
1830 {
1831         void *p;
1832
1833         if (!IS_PF(hwfn->cdev)) {
1834                 qed_vf_get_link_params(hwfn, params);
1835                 qed_vf_get_link_state(hwfn, link);
1836                 qed_vf_get_link_caps(hwfn, link_caps);
1837
1838                 return 0;
1839         }
1840
1841         p = qed_mcp_get_link_params(hwfn);
1842         if (!p)
1843                 return -ENXIO;
1844         memcpy(params, p, sizeof(*params));
1845
1846         p = qed_mcp_get_link_state(hwfn);
1847         if (!p)
1848                 return -ENXIO;
1849         memcpy(link, p, sizeof(*link));
1850
1851         p = qed_mcp_get_link_capabilities(hwfn);
1852         if (!p)
1853                 return -ENXIO;
1854         memcpy(link_caps, p, sizeof(*link_caps));
1855
1856         return 0;
1857 }
1858
1859 static void qed_fill_link_capability(struct qed_hwfn *hwfn,
1860                                      struct qed_ptt *ptt, u32 capability,
1861                                      unsigned long *if_caps)
1862 {
1863         u32 media_type, tcvr_state, tcvr_type;
1864         u32 speed_mask, board_cfg;
1865
1866         if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
1867                 media_type = MEDIA_UNSPECIFIED;
1868
1869         if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
1870                 tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;
1871
1872         if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
1873                 speed_mask = 0xFFFFFFFF;
1874
1875         if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
1876                 board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
1877
1878         DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
1879                    "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
1880                    media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
1881
1882         switch (media_type) {
1883         case MEDIA_DA_TWINAX:
1884                 phylink_set(if_caps, FIBRE);
1885
1886                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1887                         phylink_set(if_caps, 20000baseKR2_Full);
1888
1889                 /* For DAC media multiple speed capabilities are supported */
1890                 capability |= speed_mask;
1891
1892                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1893                         phylink_set(if_caps, 1000baseKX_Full);
1894                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1895                         phylink_set(if_caps, 10000baseCR_Full);
1896
1897                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1898                         switch (tcvr_type) {
1899                         case ETH_TRANSCEIVER_TYPE_40G_CR4:
1900                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
1901                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
1902                                 phylink_set(if_caps, 40000baseCR4_Full);
1903                                 break;
1904                         default:
1905                                 break;
1906                         }
1907
1908                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1909                         phylink_set(if_caps, 25000baseCR_Full);
1910                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1911                         phylink_set(if_caps, 50000baseCR2_Full);
1912
1913                 if (capability &
1914                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1915                         switch (tcvr_type) {
1916                         case ETH_TRANSCEIVER_TYPE_100G_CR4:
1917                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
1918                                 phylink_set(if_caps, 100000baseCR4_Full);
1919                                 break;
1920                         default:
1921                                 break;
1922                         }
1923
1924                 break;
1925         case MEDIA_BASE_T:
1926                 phylink_set(if_caps, TP);
1927
1928                 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
1929                         if (capability &
1930                             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1931                                 phylink_set(if_caps, 1000baseT_Full);
1932                         if (capability &
1933                             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1934                                 phylink_set(if_caps, 10000baseT_Full);
1935                 }
1936
1937                 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
1938                         phylink_set(if_caps, FIBRE);
1939
1940                         switch (tcvr_type) {
1941                         case ETH_TRANSCEIVER_TYPE_1000BASET:
1942                                 phylink_set(if_caps, 1000baseT_Full);
1943                                 break;
1944                         case ETH_TRANSCEIVER_TYPE_10G_BASET:
1945                                 phylink_set(if_caps, 10000baseT_Full);
1946                                 break;
1947                         default:
1948                                 break;
1949                         }
1950                 }
1951
1952                 break;
1953         case MEDIA_SFP_1G_FIBER:
1954         case MEDIA_SFPP_10G_FIBER:
1955         case MEDIA_XFP_FIBER:
1956         case MEDIA_MODULE_FIBER:
1957                 phylink_set(if_caps, FIBRE);
1958                 capability |= speed_mask;
1959
1960                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1961                         switch (tcvr_type) {
1962                         case ETH_TRANSCEIVER_TYPE_1G_LX:
1963                         case ETH_TRANSCEIVER_TYPE_1G_SX:
1964                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
1965                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
1966                                 phylink_set(if_caps, 1000baseKX_Full);
1967                                 break;
1968                         default:
1969                                 break;
1970                         }
1971
1972                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1973                         switch (tcvr_type) {
1974                         case ETH_TRANSCEIVER_TYPE_10G_SR:
1975                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
1976                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
1977                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
1978                                 phylink_set(if_caps, 10000baseSR_Full);
1979                                 break;
1980                         case ETH_TRANSCEIVER_TYPE_10G_LR:
1981                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
1982                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
1983                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
1984                                 phylink_set(if_caps, 10000baseLR_Full);
1985                                 break;
1986                         case ETH_TRANSCEIVER_TYPE_10G_LRM:
1987                                 phylink_set(if_caps, 10000baseLRM_Full);
1988                                 break;
1989                         case ETH_TRANSCEIVER_TYPE_10G_ER:
1990                                 phylink_set(if_caps, 10000baseR_FEC);
1991                                 break;
1992                         default:
1993                                 break;
1994                         }
1995
1996                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1997                         phylink_set(if_caps, 20000baseKR2_Full);
1998
1999                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
2000                         switch (tcvr_type) {
2001                         case ETH_TRANSCEIVER_TYPE_25G_SR:
2002                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
2003                                 phylink_set(if_caps, 25000baseSR_Full);
2004                                 break;
2005                         default:
2006                                 break;
2007                         }
2008
2009                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
2010                         switch (tcvr_type) {
2011                         case ETH_TRANSCEIVER_TYPE_40G_LR4:
2012                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2013                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2014                                 phylink_set(if_caps, 40000baseLR4_Full);
2015                                 break;
2016                         case ETH_TRANSCEIVER_TYPE_40G_SR4:
2017                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2018                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2019                                 phylink_set(if_caps, 40000baseSR4_Full);
2020                                 break;
2021                         default:
2022                                 break;
2023                         }
2024
2025                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
2026                         phylink_set(if_caps, 50000baseKR2_Full);
2027
2028                 if (capability &
2029                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
2030                         switch (tcvr_type) {
2031                         case ETH_TRANSCEIVER_TYPE_100G_SR4:
2032                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2033                                 phylink_set(if_caps, 100000baseSR4_Full);
2034                                 break;
2035                         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2036                                 phylink_set(if_caps, 100000baseLR4_ER4_Full);
2037                                 break;
2038                         default:
2039                                 break;
2040                         }
2041
2042                 break;
2043         case MEDIA_KR:
2044                 phylink_set(if_caps, Backplane);
2045
2046                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
2047                         phylink_set(if_caps, 20000baseKR2_Full);
2048                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
2049                         phylink_set(if_caps, 1000baseKX_Full);
2050                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
2051                         phylink_set(if_caps, 10000baseKR_Full);
2052                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
2053                         phylink_set(if_caps, 25000baseKR_Full);
2054                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
2055                         phylink_set(if_caps, 40000baseKR4_Full);
2056                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
2057                         phylink_set(if_caps, 50000baseKR2_Full);
2058                 if (capability &
2059                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
2060                         phylink_set(if_caps, 100000baseKR4_Full);
2061
2062                 break;
2063         case MEDIA_UNSPECIFIED:
2064         case MEDIA_NOT_PRESENT:
2065         default:
2066                 DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
2067                            "Unknown media and transceiver type\n");
2068                 break;
2069         }
2070 }
2071
2072 static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
2073 {
2074         *speed_mask = 0;
2075
2076         if (caps &
2077             (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
2078                 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2079         if (caps & QED_LINK_PARTNER_SPEED_10G)
2080                 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2081         if (caps & QED_LINK_PARTNER_SPEED_20G)
2082                 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
2083         if (caps & QED_LINK_PARTNER_SPEED_25G)
2084                 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2085         if (caps & QED_LINK_PARTNER_SPEED_40G)
2086                 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2087         if (caps & QED_LINK_PARTNER_SPEED_50G)
2088                 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
2089         if (caps & QED_LINK_PARTNER_SPEED_100G)
2090                 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
2091 }
2092
2093 static void qed_fill_link(struct qed_hwfn *hwfn,
2094                           struct qed_ptt *ptt,
2095                           struct qed_link_output *if_link)
2096 {
2097         struct qed_mcp_link_capabilities link_caps;
2098         struct qed_mcp_link_params params;
2099         struct qed_mcp_link_state link;
2100         u32 media_type, speed_mask;
2101
2102         memset(if_link, 0, sizeof(*if_link));
2103
2104         /* Prepare source inputs */
2105         if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
2106                 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
2107                 return;
2108         }
2109
2110         /* Set the link parameters to pass to protocol driver */
2111         if (link.link_up)
2112                 if_link->link_up = true;
2113
2114         if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
2115                 if (link_caps.default_ext_autoneg)
2116                         phylink_set(if_link->supported_caps, Autoneg);
2117
2118                 linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
2119
2120                 if (params.ext_speed.autoneg)
2121                         phylink_set(if_link->advertised_caps, Autoneg);
2122                 else
2123                         phylink_clear(if_link->advertised_caps, Autoneg);
2124
2125                 qed_fill_link_capability(hwfn, ptt,
2126                                          params.ext_speed.advertised_speeds,
2127                                          if_link->advertised_caps);
2128         } else {
2129                 if (link_caps.default_speed_autoneg)
2130                         phylink_set(if_link->supported_caps, Autoneg);
2131
2132                 linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
2133
2134                 if (params.speed.autoneg)
2135                         phylink_set(if_link->advertised_caps, Autoneg);
2136                 else
2137                         phylink_clear(if_link->advertised_caps, Autoneg);
2138         }
2139
2140         if (params.pause.autoneg ||
2141             (params.pause.forced_rx && params.pause.forced_tx))
2142                 phylink_set(if_link->supported_caps, Asym_Pause);
2143         if (params.pause.autoneg || params.pause.forced_rx ||
2144             params.pause.forced_tx)
2145                 phylink_set(if_link->supported_caps, Pause);
2146
2147         if_link->sup_fec = link_caps.fec_default;
2148         if_link->active_fec = params.fec;
2149
2150         /* Fill link advertised capability */
2151         qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
2152                                  if_link->advertised_caps);
2153
2154         /* Fill link supported capability */
2155         qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
2156                                  if_link->supported_caps);
2157
2158         /* Fill partner advertised capability */
2159         qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
2160         qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);
2161
2162         if (link.link_up)
2163                 if_link->speed = link.speed;
2164
2165         /* TODO - fill duplex properly */
2166         if_link->duplex = DUPLEX_FULL;
2167         qed_mcp_get_media_type(hwfn, ptt, &media_type);
2168         if_link->port = qed_get_port_type(media_type);
2169
2170         if_link->autoneg = params.speed.autoneg;
2171
2172         if (params.pause.autoneg)
2173                 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
2174         if (params.pause.forced_rx)
2175                 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
2176         if (params.pause.forced_tx)
2177                 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
2178
2179         if (link.an_complete)
2180                 phylink_set(if_link->lp_caps, Autoneg);
2181         if (link.partner_adv_pause)
2182                 phylink_set(if_link->lp_caps, Pause);
2183         if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
2184             link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
2185                 phylink_set(if_link->lp_caps, Asym_Pause);
2186
2187         if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
2188                 if_link->eee_supported = false;
2189         } else {
2190                 if_link->eee_supported = true;
2191                 if_link->eee_active = link.eee_active;
2192                 if_link->sup_caps = link_caps.eee_speed_caps;
2193                 /* MFW clears adv_caps on eee disable; use configured value */
2194                 if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
2195                                         params.eee.adv_caps;
2196                 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
2197                 if_link->eee.enable = params.eee.enable;
2198                 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
2199                 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
2200         }
2201 }
2202
2203 static void qed_get_current_link(struct qed_dev *cdev,
2204                                  struct qed_link_output *if_link)
2205 {
2206         struct qed_hwfn *hwfn;
2207         struct qed_ptt *ptt;
2208         int i;
2209
2210         hwfn = &cdev->hwfns[0];
2211         if (IS_PF(cdev)) {
2212                 ptt = qed_ptt_acquire(hwfn);
2213                 if (ptt) {
2214                         qed_fill_link(hwfn, ptt, if_link);
2215                         qed_ptt_release(hwfn, ptt);
2216                 } else {
2217                         DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
2218                 }
2219         } else {
2220                 qed_fill_link(hwfn, NULL, if_link);
2221         }
2222
2223         for_each_hwfn(cdev, i)
2224                 qed_inform_vf_link_state(&cdev->hwfns[i]);
2225 }
2226
2227 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2228 {
2229         void *cookie = hwfn->cdev->ops_cookie;
2230         struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2231         struct qed_link_output if_link;
2232
2233         qed_fill_link(hwfn, ptt, &if_link);
2234         qed_inform_vf_link_state(hwfn);
2235
2236         if (IS_LEAD_HWFN(hwfn) && cookie)
2237                 op->link_update(cookie, &if_link);
2238 }
2239
2240 void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2241 {
2242         void *cookie = hwfn->cdev->ops_cookie;
2243         struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2244
2245         if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
2246                 op->bw_update(cookie);
2247 }
2248
2249 static int qed_drain(struct qed_dev *cdev)
2250 {
2251         struct qed_hwfn *hwfn;
2252         struct qed_ptt *ptt;
2253         int i, rc;
2254
2255         if (IS_VF(cdev))
2256                 return 0;
2257
2258         for_each_hwfn(cdev, i) {
2259                 hwfn = &cdev->hwfns[i];
2260                 ptt = qed_ptt_acquire(hwfn);
2261                 if (!ptt) {
2262                         DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
2263                         return -EBUSY;
2264                 }
2265                 rc = qed_mcp_drain(hwfn, ptt);
2266                 qed_ptt_release(hwfn, ptt);
2267                 if (rc)
2268                         return rc;
2269         }
2270
2271         return 0;
2272 }
2273
2274 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
2275                                           struct qed_nvm_image_att *nvm_image,
2276                                           u32 *crc)
2277 {
2278         u8 *buf = NULL;
2279         int rc;
2280
2281         /* Allocate a buffer for holding the nvram image */
2282         buf = kzalloc(nvm_image->length, GFP_KERNEL);
2283         if (!buf)
2284                 return -ENOMEM;
2285
2286         /* Read image into buffer */
2287         rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
2288                               buf, nvm_image->length);
2289         if (rc) {
2290                 DP_ERR(cdev, "Failed reading image from nvm\n");
2291                 goto out;
2292         }
2293
2294         /* Convert the buffer into big-endian format (excluding the
2295          * closing 4 bytes of CRC).
2296          */
2297         cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
2298                           DIV_ROUND_UP(nvm_image->length - 4, 4));
2299
2300         /* Calc CRC for the "actual" image buffer, i.e. not including
2301          * the last 4 CRC bytes.
2302          */
2303         *crc = ~crc32(~0U, buf, nvm_image->length - 4);
2304         *crc = (__force u32)cpu_to_be32p(crc);
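
        /* The CRC is left big-endian on purpose: it must match the closing
         * 4 bytes of the image, which qed_nvm_flash_image_access() rewrites
         * when asked to update the CRC.
         */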
2305
2306 out:
2307         kfree(buf);
2308
2309         return rc;
2310 }
2311
2312 /* Binary file format -
2313  *     /----------------------------------------------------------------------\
2314  * 0B  |                       0x4 [command index]                            |
2315  * 4B  | image_type     | Options        |  Number of register settings       |
2316  * 8B  |                       Value                                          |
2317  * 12B |                       Mask                                           |
2318  * 16B |                       Offset                                         |
2319  *     \----------------------------------------------------------------------/
2320  * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
2321  * Options - b'0 - Calculate & Update CRC for image
2322  */
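
/* Hypothetical C view of one Value-Mask-Offset record from the layout above.
 * The parser below walks a raw byte cursor instead of using such a struct;
 * the type and field names here are illustrative only (reads are host-endian).
 */
struct qed_nvm_flash_access_rec {
        u32 value;      /* new bits to apply */
        u32 mask;       /* which bits of 'value' are significant */
        u32 offset;     /* byte offset within the NVM image */
};
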
2323 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
2324                                       bool *check_resp)
2325 {
2326         struct qed_nvm_image_att nvm_image;
2327         struct qed_hwfn *p_hwfn;
2328         bool is_crc = false;
2329         u32 image_type;
2330         int rc = 0, i;
2331         u16 len;
2332
2333         *data += 4;
2334         image_type = **data;
2335         p_hwfn = QED_LEADING_HWFN(cdev);
2336         for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
2337                 if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
2338                         break;
2339         if (i == p_hwfn->nvm_info.num_images) {
2340                 DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
2341                        image_type);
2342                 return -ENOENT;
2343         }
2344
2345         nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
2346         nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
2347
2348         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2349                    "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
2350                    **data, image_type, nvm_image.start_addr,
2351                    nvm_image.start_addr + nvm_image.length - 1);
2352         (*data)++;
2353         is_crc = !!(**data & BIT(0));
2354         (*data)++;
2355         len = *((u16 *)*data);
2356         *data += 2;
2357         if (is_crc) {
2358                 u32 crc = 0;
2359
2360                 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
2361                 if (rc) {
2362                         DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
2363                         goto exit;
2364                 }
2365
2366                 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2367                                        (nvm_image.start_addr +
2368                                         nvm_image.length - 4), (u8 *)&crc, 4);
2369                 if (rc)
2370                         DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
2371                                nvm_image.start_addr + nvm_image.length - 4, rc);
2372                 goto exit;
2373         }
2374
2375         /* Iterate over the values for setting */
2376         while (len) {
2377                 u32 offset, mask, value, cur_value;
2378                 u8 buf[4];
2379
2380                 value = *((u32 *)*data);
2381                 *data += 4;
2382                 mask = *((u32 *)*data);
2383                 *data += 4;
2384                 offset = *((u32 *)*data);
2385                 *data += 4;
2386
2387                 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
2388                                       4);
2389                 if (rc) {
2390                         DP_ERR(cdev, "Failed reading from %08x\n",
2391                                nvm_image.start_addr + offset);
2392                         goto exit;
2393                 }
2394
2395                 cur_value = le32_to_cpu(*((__le32 *)buf));
2396                 DP_VERBOSE(cdev, NETIF_MSG_DRV,
2397                            "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
2398                            nvm_image.start_addr + offset, cur_value,
2399                            (cur_value & ~mask) | (value & mask), value, mask);
2400                 value = (value & mask) | (cur_value & ~mask);
2401                 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2402                                        nvm_image.start_addr + offset,
2403                                        (u8 *)&value, 4);
2404                 if (rc) {
2405                         DP_ERR(cdev, "Failed writing to %08x\n",
2406                                nvm_image.start_addr + offset);
2407                         goto exit;
2408                 }
2409
2410                 len--;
2411         }
2412 exit:
2413         return rc;
2414 }
2415
2416 /* Binary file format -
2417  *     /----------------------------------------------------------------------\
2418  * 0B  |                       0x3 [command index]                            |
2419  * 4B  | b'0: check_response?   | b'1-31  reserved                            |
2420  * 8B  | File-type |                   reserved                               |
2421  * 12B |                    Image length in bytes                             |
2422  *     \----------------------------------------------------------------------/
2423  *     Start a new file of the provided type
2424  */
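
/* Hypothetical mirror of the file-start command above (names are
 * illustrative only). Note the parser consumes the size word only for
 * DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI files.
 */
struct qed_nvm_flash_file_start_rec {
        u32 cmd;                /* 0x3 */
        u32 flags;              /* bit 0: check_response? */
        u32 file_type;          /* low byte holds the file type */
        u32 file_size;          /* image length; MBI files only */
};
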
2425 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
2426                                           const u8 **data, bool *check_resp)
2427 {
2428         u32 file_type, file_size = 0;
2429         int rc;
2430
2431         *data += 4;
2432         *check_resp = !!(**data & BIT(0));
2433         *data += 4;
2434         file_type = **data;
2435
2436         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2437                    "About to start a new file of type %02x\n", file_type);
2438         if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
2439                 *data += 4;
2440                 file_size = *((u32 *)(*data));
2441         }
2442
2443         rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
2444                                (u8 *)(&file_size), 4);
2445         *data += 4;
2446
2447         return rc;
2448 }
2449
2450 /* Binary file format -
2451  *     /----------------------------------------------------------------------\
2452  * 0B  |                       0x2 [command index]                            |
2453  * 4B  |                       Length in bytes                                |
2454  * 8B  | b'0: check_response?   | b'1-31  reserved                            |
2455  * 12B |                       Offset in bytes                                |
2456  * 16B |                       Data ...                                       |
2457  *     \----------------------------------------------------------------------/
2458  *     Write data as part of a file that was previously started. Data should be
2459  *     of length equal to that provided in the message
2460  */
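
/* Hypothetical mirror of the file-data command header above (names are
 * illustrative only); 'len' bytes of payload follow it in the image.
 */
struct qed_nvm_flash_file_data_rec {
        u32 cmd;        /* 0x2 */
        u32 len;        /* number of payload bytes that follow */
        u32 flags;      /* bit 0: check_response? */
        u32 offset;     /* destination offset within the file */
};
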
2461 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
2462                                          const u8 **data, bool *check_resp)
2463 {
2464         u32 offset, len;
2465         int rc;
2466
2467         *data += 4;
2468         len = *((u32 *)(*data));
2469         *data += 4;
2470         *check_resp = !!(**data & BIT(0));
2471         *data += 4;
2472         offset = *((u32 *)(*data));
2473         *data += 4;
2474
2475         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2476                    "About to write File-data: %08x bytes to offset %08x\n",
2477                    len, offset);
2478
2479         rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
2480                                (char *)(*data), len);
2481         *data += len;
2482
2483         return rc;
2484 }
2485
2486 /* Binary file format [General header] -
2487  *     /----------------------------------------------------------------------\
2488  * 0B  |                       QED_NVM_SIGNATURE                              |
2489  * 4B  |                       Length in bytes                                |
2490  * 8B  | Highest command in this batchfile |          Reserved                |
2491  *     \----------------------------------------------------------------------/
2492  */
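
/* Hypothetical mirror of the general header above (names are illustrative
 * only); only the low 16 bits of the third word are checked below.
 */
struct qed_nvm_flash_hdr_rec {
        u32 signature;  /* must equal QED_NVM_SIGNATURE */
        u32 len;        /* must equal the firmware image size */
        u32 max_cmd;    /* low 16 bits: highest command index used */
};
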
2493 static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2494                                         const struct firmware *image,
2495                                         const u8 **data)
2496 {
2497         u32 signature, len;
2498
2499         /* Check minimum size */
2500         if (image->size < 12) {
2501                 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2502                 return -EINVAL;
2503         }
2504
2505         /* Check signature */
2506         signature = *((u32 *)(*data));
2507         if (signature != QED_NVM_SIGNATURE) {
2508                 DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2509                 return -EINVAL;
2510         }
2511
2512         *data += 4;
2513         /* Validate internal size equals the image-size */
2514         len = *((u32 *)(*data));
2515         if (len != image->size) {
2516                 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2517                        len, (u32)image->size);
2518                 return -EINVAL;
2519         }
2520
2521         *data += 4;
2522         /* Make sure driver familiar with all commands necessary for this */
2523         if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2524                 DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2525                        *((u16 *)(*data)));
2526                 return -EINVAL;
2527         }
2528
2529         *data += 4;
2530
2531         return 0;
2532 }
2533
2534 /* Binary file format -
2535  *     /----------------------------------------------------------------------\
2536  * 0B  |                       0x5 [command index]                            |
2537  * 4B  | Number of config attributes     |          Reserved                  |
2538  * 4B  | Config ID                       | Entity ID      | Length            |
2539  * 4B  | Value                                                                |
2540  *     |                                                                      |
2541  *     \----------------------------------------------------------------------/
2542  * There can be several cfg_id-entity_id-Length-Value sets as specified by
2543  * 'Number of config attributes'.
2544  *
2545  * The API parses config attributes from the user provided buffer and flashes
2546  * them to the respective NVM path using the Management FW interface.
2547  */
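
/* Hypothetical mirror of one config attribute entry from the layout above
 * (names are illustrative only); 'len' bytes of value follow the header,
 * and the parser below trusts 'len' to fit its 32-byte staging buffer.
 */
struct qed_nvm_cfg_attr_rec {
        u16 cfg_id;     /* configuration attribute id */
        u8 entity_id;   /* non-zero selects a specific entity */
        u8 len;         /* length of the value in bytes */
};
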
2548 static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2549 {
2550         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2551         u8 entity_id, len, buf[32];
2552         bool need_nvm_init = true;
2553         struct qed_ptt *ptt;
2554         u16 cfg_id, count;
2555         int rc = 0, i;
2556         u32 flags;
2557
2558         ptt = qed_ptt_acquire(hwfn);
2559         if (!ptt)
2560                 return -EAGAIN;
2561
2562         /* NVM CFG ID attribute header */
2563         *data += 4;
2564         count = *((u16 *)*data);
2565         *data += 4;
2566
2567         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2568                    "Read config ids: num_attrs = %d\n", count);
2569         /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
2570          * arithmetic operations in the implementation.
2571          */
2572         for (i = 1; i <= count; i++) {
2573                 cfg_id = *((u16 *)*data);
2574                 *data += 2;
2575                 entity_id = **data;
2576                 (*data)++;
2577                 len = **data;
2578                 (*data)++;
2579                 memcpy(buf, *data, len);
2580                 *data += len;
2581
2582                 flags = 0;
2583                 if (need_nvm_init) {
2584                         flags |= QED_NVM_CFG_OPTION_INIT;
2585                         need_nvm_init = false;
2586                 }
2587
2588                 /* Commit to flash and free the resources */
2589                 if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
2590                         flags |= QED_NVM_CFG_OPTION_COMMIT |
2591                                  QED_NVM_CFG_OPTION_FREE;
2592                         need_nvm_init = true;
2593                 }
2594
2595                 if (entity_id)
2596                         flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
2597
2598                 DP_VERBOSE(cdev, NETIF_MSG_DRV,
2599                            "cfg_id = %d entity = %d len = %d\n", cfg_id,
2600                            entity_id, len);
2601                 rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
2602                                          buf, len);
2603                 if (rc) {
2604                         DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
2605                         break;
2606                 }
2607         }
2608
2609         qed_ptt_release(hwfn, ptt);
2610
2611         return rc;
2612 }
2613
2614 #define QED_MAX_NVM_BUF_LEN     32
2615 static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
2616 {
2617         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2618         u8 buf[QED_MAX_NVM_BUF_LEN];
2619         struct qed_ptt *ptt;
2620         u32 len;
2621         int rc;
2622
2623         ptt = qed_ptt_acquire(hwfn);
2624         if (!ptt)
2625                 return QED_MAX_NVM_BUF_LEN;
2626
2627         rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
2628                                  &len);
2629         if (rc || !len) {
2630                 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2631                 len = QED_MAX_NVM_BUF_LEN;
2632         }
2633
2634         qed_ptt_release(hwfn, ptt);
2635
2636         return len;
2637 }
2638
2639 static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
2640                                   u32 cmd, u32 entity_id)
2641 {
2642         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2643         struct qed_ptt *ptt;
2644         u32 flags, len;
2645         int rc = 0;
2646
2647         ptt = qed_ptt_acquire(hwfn);
2648         if (!ptt)
2649                 return -EAGAIN;
2650
2651         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2652                    "Read config cmd = %d entity id %d\n", cmd, entity_id);
2653         flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
2654         rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
2655         if (rc)
2656                 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2657
2658         qed_ptt_release(hwfn, ptt);
2659
2660         return rc;
2661 }
2662
2663 static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
2664 {
2665         const struct firmware *image;
2666         const u8 *data, *data_end;
2667         u32 cmd_type;
2668         int rc;
2669
2670         rc = request_firmware(&image, name, &cdev->pdev->dev);
2671         if (rc) {
2672                 DP_ERR(cdev, "Failed to find '%s'\n", name);
2673                 return rc;
2674         }
2675
2676         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2677                    "Flashing '%s' - firmware's data at %p, size is %08x\n",
2678                    name, image->data, (u32)image->size);
2679         data = image->data;
2680         data_end = data + image->size;
2681
2682         rc = qed_nvm_flash_image_validate(cdev, image, &data);
2683         if (rc)
2684                 goto exit;
2685
2686         while (data < data_end) {
2687                 bool check_resp = false;
2688
2689                 /* Parse the actual command */
2690                 cmd_type = *((u32 *)data);
2691                 switch (cmd_type) {
2692                 case QED_NVM_FLASH_CMD_FILE_DATA:
2693                         rc = qed_nvm_flash_image_file_data(cdev, &data,
2694                                                            &check_resp);
2695                         break;
2696                 case QED_NVM_FLASH_CMD_FILE_START:
2697                         rc = qed_nvm_flash_image_file_start(cdev, &data,
2698                                                             &check_resp);
2699                         break;
2700                 case QED_NVM_FLASH_CMD_NVM_CHANGE:
2701                         rc = qed_nvm_flash_image_access(cdev, &data,
2702                                                         &check_resp);
2703                         break;
2704                 case QED_NVM_FLASH_CMD_NVM_CFG_ID:
2705                         rc = qed_nvm_flash_cfg_write(cdev, &data);
2706                         break;
2707                 default:
2708                         DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
2709                         rc = -EINVAL;
2710                         goto exit;
2711                 }
2712
2713                 if (rc) {
2714                         DP_ERR(cdev, "Command %08x failed\n", cmd_type);
2715                         goto exit;
2716                 }
2717
2718                 /* Check response if needed */
2719                 if (check_resp) {
2720                         u32 mcp_response = 0;
2721
2722                         if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
2723                                 DP_ERR(cdev, "Failed getting MCP response\n");
2724                                 rc = -EINVAL;
2725                                 goto exit;
2726                         }
2727
2728                         switch (mcp_response & FW_MSG_CODE_MASK) {
2729                         case FW_MSG_CODE_OK:
2730                         case FW_MSG_CODE_NVM_OK:
2731                         case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
2732                         case FW_MSG_CODE_PHY_OK:
2733                                 break;
2734                         default:
2735                                 DP_ERR(cdev, "MFW returns error: %08x\n",
2736                                        mcp_response);
2737                                 rc = -EINVAL;
2738                                 goto exit;
2739                         }
2740                 }
2741         }
2742
2743 exit:
2744         release_firmware(image);
2745
2746         return rc;
2747 }
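
/* qed_nvm_flash() is not called directly; it is exposed through the
 * driver's common ops (.nvm_flash), and protocol drivers such as qede
 * invoke it to service ethtool flash-device requests.
 */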
2748
2749 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2750                              u8 *buf, u16 len)
2751 {
2752         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2753
2754         return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2755 }
2756
2757 void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
2758 {
2759         struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2760         void *cookie = p_hwfn->cdev->ops_cookie;
2761
2762         if (ops && ops->schedule_recovery_handler)
2763                 ops->schedule_recovery_handler(cookie);
2764 }
2765
2766 static const char * const qed_hw_err_type_descr[] = {
2767         [QED_HW_ERR_FAN_FAIL]           = "Fan Failure",
2768         [QED_HW_ERR_MFW_RESP_FAIL]      = "MFW Response Failure",
2769         [QED_HW_ERR_HW_ATTN]            = "HW Attention",
2770         [QED_HW_ERR_DMAE_FAIL]          = "DMAE Failure",
2771         [QED_HW_ERR_RAMROD_FAIL]        = "Ramrod Failure",
2772         [QED_HW_ERR_FW_ASSERT]          = "FW Assertion",
2773         [QED_HW_ERR_LAST]               = "Unknown",
2774 };
2775
2776 void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
2777                            enum qed_hw_err_type err_type)
2778 {
2779         struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2780         void *cookie = p_hwfn->cdev->ops_cookie;
2781         const char *err_str;
2782
2783         if (err_type > QED_HW_ERR_LAST)
2784                 err_type = QED_HW_ERR_LAST;
2785         err_str = qed_hw_err_type_descr[err_type];
2786
2787         DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
2788
2789         /* Call the HW error handler of the protocol driver.
2790          * If it is not available - perform a minimal handling of preventing
2791          * HW attentions from being reasserted.
2792          */
2793         if (ops && ops->schedule_hw_err_handler)
2794                 ops->schedule_hw_err_handler(cookie, err_type);
2795         else
2796                 qed_int_attn_clr_enable(p_hwfn->cdev, true);
2797 }
2798
2799 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2800                             void *handle)
2801 {
2802         return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2803 }
2804
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_set_led(hwfn, ptt, mode);

        qed_ptt_release(hwfn, ptt);

        return status;
}

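/* Kick off the device recovery flow on the leading hwfn.  Exposed to the
 * protocol drivers through the .recovery_process callback below.
 */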
int qed_recovery_process(struct qed_dev *cdev)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt;
        int rc = 0;

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return -EAGAIN;

        rc = qed_start_recovery_process(p_hwfn, p_ptt);

        qed_ptt_release(p_hwfn, p_ptt);

        return rc;
}

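/* The qed_update_*() helpers below share one pattern: skip the update on
 * VFs (only PFs talk to the management firmware), acquire a PTT window,
 * push the new value to the MFW, and - where the MFW tracks driver-owned
 * configuration - commit it with
 * qed_mcp_ov_update_current_config(QED_OV_CLIENT_DRV).
 */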
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int rc = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
                                   : QED_OV_WOL_DISABLED);
        if (rc)
                goto out;
        rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
                                                QED_OV_DRIVER_STATE_ACTIVE :
                                                QED_OV_DRIVER_STATE_DISABLED);

        qed_ptt_release(hwfn, ptt);

        return status;
}

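/* Report the current primary MAC to the management firmware, then commit
 * the driver-owned configuration.  qed_update_mtu() below mirrors this
 * two-step flow for the MTU.
 */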
static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
        if (status)
                goto out;

        status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
        if (status)
                goto out;

        status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return status;
}

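/* Read 'len' bytes at 'offset' from the SFP module EEPROM page addressed
 * by 'dev_addr' (e.g. 0xA0/0xA2 for SFF-8472 modules) on this PF's port.
 */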
static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
                                  u8 dev_addr, u32 offset, u32 len)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int rc = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
                                  offset, len, buf);

        qed_ptt_release(hwfn, ptt);

        return rc;
}

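/* Debug helper: write a single GRC configuration value by index.  The PTT
 * acquire/release appears to serialize against other MFW users, even
 * though qed_dbg_grc_config() does not take the PTT itself.
 */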
static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int rc = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        rc = qed_dbg_grc_config(hwfn, cfg_id, val);

        qed_ptt_release(hwfn, ptt);

        return rc;
}

static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
        return QED_AFFIN_HWFN_IDX(cdev);
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
        .selftest_memory = &qed_selftest_memory,
        .selftest_interrupt = &qed_selftest_interrupt,
        .selftest_register = &qed_selftest_register,
        .selftest_clock = &qed_selftest_clock,
        .selftest_nvram = &qed_selftest_nvram,
};

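/* Callback table exported to the protocol drivers (qede, qedf, qedi).
 * A protocol driver invokes these through the common-ops pointer it holds
 * after probing.  A minimal, illustrative sketch - the 'edev' wrapper and
 * its fields are hypothetical, not defined in this file:
 *
 *      struct qed_dev *cdev;
 *
 *      cdev = edev->ops->common->probe(pdev, &probe_params);
 *      edev->ops->common->set_name(cdev, edev->name);
 *      edev->ops->common->update_mtu(cdev, new_mtu);
 */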
const struct qed_common_ops qed_common_ops_pass = {
        .selftest = &qed_selftest_ops_pass,
        .probe = &qed_probe,
        .remove = &qed_remove,
        .set_power_state = &qed_set_power_state,
        .set_name = &qed_set_name,
        .update_pf_params = &qed_update_pf_params,
        .slowpath_start = &qed_slowpath_start,
        .slowpath_stop = &qed_slowpath_stop,
        .set_fp_int = &qed_set_int_fp,
        .get_fp_int = &qed_get_int_fp,
        .sb_init = &qed_sb_init,
        .sb_release = &qed_sb_release,
        .simd_handler_config = &qed_simd_handler_config,
        .simd_handler_clean = &qed_simd_handler_clean,
        .dbg_grc = &qed_dbg_grc,
        .dbg_grc_size = &qed_dbg_grc_size,
        .can_link_change = &qed_can_link_change,
        .set_link = &qed_set_link,
        .get_link = &qed_get_current_link,
        .drain = &qed_drain,
        .update_msglvl = &qed_init_dp,
        .devlink_register = qed_devlink_register,
        .devlink_unregister = qed_devlink_unregister,
        .report_fatal_error = qed_report_fatal_error,
        .dbg_all_data = &qed_dbg_all_data,
        .dbg_all_data_size = &qed_dbg_all_data_size,
        .chain_alloc = &qed_chain_alloc,
        .chain_free = &qed_chain_free,
        .nvm_flash = &qed_nvm_flash,
        .nvm_get_image = &qed_nvm_get_image,
        .set_coalesce = &qed_set_coalesce,
        .set_led = &qed_set_led,
        .recovery_process = &qed_recovery_process,
        .recovery_prolog = &qed_recovery_prolog,
        .attn_clr_enable = &qed_int_attn_clr_enable,
        .update_drv_state = &qed_update_drv_state,
        .update_mac = &qed_update_mac,
        .update_mtu = &qed_update_mtu,
        .update_wol = &qed_update_wol,
        .db_recovery_add = &qed_db_recovery_add,
        .db_recovery_del = &qed_db_recovery_del,
        .read_module_eeprom = &qed_read_module_eeprom,
        .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
        .read_nvm_cfg = &qed_nvm_flash_cfg_read,
        .read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
        .set_grc_config = &qed_set_grc_config,
};

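/* Collect per-protocol statistics for the management firmware.  LAN stats
 * are derived from the vport counters; fcs_err is set to -1 since it is
 * not available from those counters.
 */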
void qed_get_protocol_stats(struct qed_dev *cdev,
                            enum qed_mcp_protocol_type type,
                            union qed_mcp_protocol_stats *stats)
{
        struct qed_eth_stats eth_stats;

        memset(stats, 0, sizeof(*stats));

        switch (type) {
        case QED_MCP_LAN_STATS:
                qed_get_vport_stats(cdev, &eth_stats);
                stats->lan_stats.ucast_rx_pkts =
                                        eth_stats.common.rx_ucast_pkts;
                stats->lan_stats.ucast_tx_pkts =
                                        eth_stats.common.tx_ucast_pkts;
                stats->lan_stats.fcs_err = -1;
                break;
        case QED_MCP_FCOE_STATS:
                qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
                break;
        case QED_MCP_ISCSI_STATS:
                qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
                break;
        default:
                DP_VERBOSE(cdev, QED_MSG_SP,
                           "Invalid protocol type = %d\n", type);
                return;
        }
}

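/* The MFW has requested TLV data; defer the collection to the slowpath
 * workqueue.  The barrier pair makes the otherwise unordered set_bit()
 * fully ordered with respect to the surrounding accesses before the work
 * is queued.
 */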
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
        DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
                   "Scheduling slowpath task [Flag: %d]\n",
                   QED_SLOWPATH_MFW_TLV_REQ);
        smp_mb__before_atomic();
        set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
        smp_mb__after_atomic();
        queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

        return 0;
}

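/* Populate the generic TLV block: feature flags and MAC addresses come
 * from the protocol driver's get_generic_tlv_data() callback, and the
 * frame/byte counters are aggregated from the vport statistics.
 */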
static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
        struct qed_common_cb_ops *op = cdev->protocol_ops.common;
        struct qed_eth_stats_common *p_common;
        struct qed_generic_tlvs gen_tlvs;
        struct qed_eth_stats stats;
        int i;

        memset(&gen_tlvs, 0, sizeof(gen_tlvs));
        op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

        if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
                tlv->flags.ipv4_csum_offload = true;
        if (gen_tlvs.feat_flags & QED_TLV_LSO)
                tlv->flags.lso_supported = true;
        tlv->flags.b_set = true;

        for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
                if (is_valid_ether_addr(gen_tlvs.mac[i])) {
                        ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
                        tlv->mac_set[i] = true;
                }
        }

        qed_get_vport_stats(cdev, &stats);
        p_common = &stats.common;
        tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
                         p_common->rx_bcast_pkts;
        tlv->rx_frames_set = true;
        tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
                        p_common->rx_bcast_bytes;
        tlv->rx_bytes_set = true;
        tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
                         p_common->tx_bcast_pkts;
        tlv->tx_frames_set = true;
        tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
                        p_common->tx_bcast_bytes;
        tlv->tx_bytes_set = true;
}

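/* Dispatch an MFW TLV request to the matching fill routine.  Generic TLVs
 * are built locally; protocol-specific ones are delegated to the protocol
 * driver's get_protocol_tlv_data() callback.
 */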
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
                          union qed_mfw_tlv_data *tlv_buf)
{
        struct qed_dev *cdev = hwfn->cdev;
        struct qed_common_cb_ops *ops;

        ops = cdev->protocol_ops.common;
        if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
                DP_NOTICE(hwfn, "Can't collect TLV management info\n");
                return -EINVAL;
        }

        switch (type) {
        case QED_MFW_TLV_GENERIC:
                qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
                break;
        case QED_MFW_TLV_ETH:
                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
                break;
        case QED_MFW_TLV_FCOE:
                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
                break;
        case QED_MFW_TLV_ISCSI:
                ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
                break;
        default:
                break;
        }

        return 0;
}