1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
33 #include <linux/stddef.h>
34 #include <linux/pci.h>
35 #include <linux/kernel.h>
36 #include <linux/slab.h>
37 #include <linux/delay.h>
38 #include <asm/byteorder.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/string.h>
41 #include <linux/module.h>
42 #include <linux/interrupt.h>
43 #include <linux/workqueue.h>
44 #include <linux/ethtool.h>
45 #include <linux/etherdevice.h>
46 #include <linux/vmalloc.h>
47 #include <linux/crash_dump.h>
48 #include <linux/crc32.h>
49 #include <linux/qed/qed_if.h>
50 #include <linux/qed/qed_ll2_if.h>
53 #include "qed_sriov.h"
55 #include "qed_dev_api.h"
58 #include "qed_iscsi.h"
61 #include "qed_reg_addr.h"
63 #include "qed_selftest.h"
64 #include "qed_debug.h"
66 #define QED_ROCE_QPS (8192)
67 #define QED_ROCE_DPIS (8)
68 #define QED_RDMA_SRQS QED_ROCE_QPS
70 static char version[] =
71 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
73 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
74 MODULE_LICENSE("GPL");
75 MODULE_VERSION(DRV_MODULE_VERSION);
77 #define FW_FILE_VERSION \
78 __stringify(FW_MAJOR_VERSION) "." \
79 __stringify(FW_MINOR_VERSION) "." \
80 __stringify(FW_REVISION_VERSION) "." \
81 __stringify(FW_ENGINEERING_VERSION)
83 #define QED_FW_FILE_NAME \
84 "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
86 MODULE_FIRMWARE(QED_FW_FILE_NAME);
88 static int __init qed_init(void)
89 {
90 pr_info("%s", version);
92 return 0;
93 }
95 static void __exit qed_cleanup(void)
96 {
97 pr_notice("qed_cleanup called\n");
98 }
100 module_init(qed_init);
101 module_exit(qed_cleanup);
103 /* Check if the DMA controller on the machine can properly handle the DMA
104 * addressing required by the device.
105 */
106 static int qed_set_coherency_mask(struct qed_dev *cdev)
107 {
108 struct device *dev = &cdev->pdev->dev;
110 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
111 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
112 DP_NOTICE(cdev,
113 "Can't request 64-bit consistent allocations\n");
114 return -EIO;
115 }
116 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
117 DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
118 return -EIO;
119 }
121 return 0;
122 }
124 static void qed_free_pci(struct qed_dev *cdev)
125 {
126 struct pci_dev *pdev = cdev->pdev;
128 if (cdev->doorbells && cdev->db_size)
129 iounmap(cdev->doorbells);
130 if (cdev->regview)
131 iounmap(cdev->regview);
132 if (atomic_read(&pdev->enable_cnt) == 1)
133 pci_release_regions(pdev);
135 pci_disable_device(pdev);
136 }
138 #define PCI_REVISION_ID_ERROR_VAL 0xff
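/* A revision ID of all-ones means configuration-space reads are failing,
 * i.e. the device is effectively inaccessible (typically after an earlier
 * error indication), so probing is aborted.
 */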
140 /* Performs PCI initializations as well as initializing PCI-related parameters
141 * in the device structure. Returns 0 in case of success.
142 */
143 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
144 {
145 u8 rev_id;
146 int rc;
148 cdev->pdev = pdev;
150 rc = pci_enable_device(pdev);
152 DP_NOTICE(cdev, "Cannot enable PCI device\n");
156 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
157 DP_NOTICE(cdev, "No memory region found in bar #0\n");
162 if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
163 DP_NOTICE(cdev, "No memory region found in bar #2\n");
168 if (atomic_read(&pdev->enable_cnt) == 1) {
169 rc = pci_request_regions(pdev, "qed");
172 "Failed to request PCI memory resources\n");
175 pci_set_master(pdev);
176 pci_save_state(pdev);
179 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
180 if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
182 "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
187 if (!pci_is_pcie(pdev)) {
188 DP_NOTICE(cdev, "The bus is not PCI Express\n");
193 cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
194 if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
195 DP_NOTICE(cdev, "Cannot find power management capability\n");
197 rc = qed_set_coherency_mask(cdev);
201 cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
202 cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
203 cdev->pci_params.irq = pdev->irq;
205 cdev->regview = pci_ioremap_bar(pdev, 0);
206 if (!cdev->regview) {
207 DP_NOTICE(cdev, "Cannot map register space, aborting\n");
212 cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
213 cdev->db_size = pci_resource_len(cdev->pdev, 2);
214 if (!cdev->db_size) {
216 DP_NOTICE(cdev, "No Doorbell bar available\n");
223 cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
225 if (!cdev->doorbells) {
226 DP_NOTICE(cdev, "Cannot map doorbell space\n");
233 pci_release_regions(pdev);
235 pci_disable_device(pdev);
240 int qed_fill_dev_info(struct qed_dev *cdev,
241 struct qed_dev_info *dev_info)
243 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
244 struct qed_hw_info *hw_info = &p_hwfn->hw_info;
245 struct qed_tunnel_info *tun = &cdev->tunnel;
248 memset(dev_info, 0, sizeof(struct qed_dev_info));
250 if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
251 tun->vxlan.b_mode_enabled)
252 dev_info->vxlan_enable = true;
254 if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
255 tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
256 tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
257 dev_info->gre_enable = true;
259 if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
260 tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
261 tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
262 dev_info->geneve_enable = true;
264 dev_info->num_hwfns = cdev->num_hwfns;
265 dev_info->pci_mem_start = cdev->pci_params.mem_start;
266 dev_info->pci_mem_end = cdev->pci_params.mem_end;
267 dev_info->pci_irq = cdev->pci_params.irq;
268 dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
269 dev_info->dev_type = cdev->type;
270 ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
273 dev_info->fw_major = FW_MAJOR_VERSION;
274 dev_info->fw_minor = FW_MINOR_VERSION;
275 dev_info->fw_rev = FW_REVISION_VERSION;
276 dev_info->fw_eng = FW_ENGINEERING_VERSION;
277 dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
279 dev_info->tx_switching = true;
281 if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
282 dev_info->wol_support = true;
284 dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
286 qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
287 &dev_info->fw_minor, &dev_info->fw_rev,
292 ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
294 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
295 &dev_info->mfw_rev, NULL);
297 qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
298 &dev_info->mbi_version);
300 qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
301 &dev_info->flash_size);
303 qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
306 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
307 &dev_info->mfw_rev, NULL);
310 dev_info->mtu = hw_info->mtu;
315 static void qed_free_cdev(struct qed_dev *cdev)
316 {
317 kfree((void *)cdev);
318 }
320 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
321 {
322 struct qed_dev *cdev;
324 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
325 if (!cdev)
326 return cdev;
328 qed_init_struct(cdev);
330 return cdev;
331 }
333 /* Sets the requested power state */
334 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
335 {
336 if (!cdev)
337 return -ENODEV;
339 DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
340 return 0;
341 }
344 static struct qed_dev *qed_probe(struct pci_dev *pdev,
345 struct qed_probe_params *params)
347 struct qed_dev *cdev;
350 cdev = qed_alloc_cdev(pdev);
354 cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
355 cdev->protocol = params->protocol;
358 cdev->b_is_vf = true;
360 qed_init_dp(cdev, params->dp_module, params->dp_level);
362 rc = qed_init_pci(cdev, pdev);
364 DP_ERR(cdev, "init pci failed\n");
367 DP_INFO(cdev, "PCI init completed successfully\n");
369 rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
371 DP_ERR(cdev, "hw prepare failed\n");
375 DP_INFO(cdev, "qed_probe completed successfully\n");
387 static void qed_remove(struct qed_dev *cdev)
396 qed_set_power_state(cdev, PCI_D3hot);
401 static void qed_disable_msix(struct qed_dev *cdev)
403 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
404 pci_disable_msix(cdev->pdev);
405 kfree(cdev->int_params.msix_table);
406 } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
407 pci_disable_msi(cdev->pdev);
410 memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
413 static int qed_enable_msix(struct qed_dev *cdev,
414 struct qed_int_params *int_params)
418 cnt = int_params->in.num_vectors;
420 for (i = 0; i < cnt; i++)
421 int_params->msix_table[i].entry = i;
423 rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
424 int_params->in.min_msix_cnt, cnt);
425 if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
426 (rc % cdev->num_hwfns)) {
427 pci_disable_msix(cdev->pdev);
429 /* If fastpath is initialized, we need at least one interrupt
430 * per hwfn [and the slow path interrupts]. New requested number
431 * should be a multiple of the number of hwfns.
433 cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
435 "Trying to enable MSI-X with less vectors (%d out of %d)\n",
436 cnt, int_params->in.num_vectors);
437 rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
444 /* MSI-X configuration was achieved */
445 int_params->out.int_mode = QED_INT_MODE_MSIX;
446 int_params->out.num_vectors = rc;
450 "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
457 /* This function outputs the int mode and the number of enabled MSI-X vectors */
458 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
460 struct qed_int_params *int_params = &cdev->int_params;
461 struct msix_entry *tbl;
464 switch (int_params->in.int_mode) {
465 case QED_INT_MODE_MSIX:
466 /* Allocate MSIX table */
467 cnt = int_params->in.num_vectors;
468 int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
469 if (!int_params->msix_table) {
475 rc = qed_enable_msix(cdev, int_params);
479 DP_NOTICE(cdev, "Failed to enable MSI-X\n");
480 kfree(int_params->msix_table);
485 case QED_INT_MODE_MSI:
486 if (cdev->num_hwfns == 1) {
487 rc = pci_enable_msi(cdev->pdev);
489 int_params->out.int_mode = QED_INT_MODE_MSI;
493 DP_NOTICE(cdev, "Failed to enable MSI\n");
499 case QED_INT_MODE_INTA:
500 int_params->out.int_mode = QED_INT_MODE_INTA;
504 DP_NOTICE(cdev, "Unknown int_mode value %d\n",
505 int_params->in.int_mode);
511 DP_INFO(cdev, "Using %s interrupts\n",
512 int_params->out.int_mode == QED_INT_MODE_INTA ?
513 "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
515 cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
520 static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
521 int index, void(*handler)(void *))
523 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
524 int relative_idx = index / cdev->num_hwfns;
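/* Fastpath vector indices are interleaved across engines: the remainder
 * picks the hwfn and the quotient picks the per-hwfn handler slot.
 */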
526 hwfn->simd_proto_handler[relative_idx].func = handler;
527 hwfn->simd_proto_handler[relative_idx].token = token;
530 static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
532 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
533 int relative_idx = index / cdev->num_hwfns;
535 memset(&hwfn->simd_proto_handler[relative_idx], 0,
536 sizeof(struct qed_simd_fp_handler));
539 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
540 {
541 tasklet_schedule((struct tasklet_struct *)tasklet);
542 return IRQ_HANDLED;
543 }
545 static irqreturn_t qed_single_int(int irq, void *dev_instance)
547 struct qed_dev *cdev = (struct qed_dev *)dev_instance;
548 struct qed_hwfn *hwfn;
549 irqreturn_t rc = IRQ_NONE;
553 for (i = 0; i < cdev->num_hwfns; i++) {
554 status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
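/* Bit 0 of the SISR status indicates a slowpath event; the remaining bits
 * map to the per-protocol SIMD fastpath handlers checked below.
 */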
559 hwfn = &cdev->hwfns[i];
561 /* Slowpath interrupt */
562 if (unlikely(status & 0x1)) {
563 tasklet_schedule(hwfn->sp_dpc);
568 /* Fastpath interrupts */
569 for (j = 0; j < 64; j++) {
570 if ((0x2ULL << j) & status) {
571 struct qed_simd_fp_handler *p_handler =
572 &hwfn->simd_proto_handler[j];
575 p_handler->func(p_handler->token);
578 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
581 status &= ~(0x2ULL << j);
586 if (unlikely(status))
587 DP_VERBOSE(hwfn, NETIF_MSG_INTR,
588 "got an unknown interrupt status 0x%llx\n",
595 int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
597 struct qed_dev *cdev = hwfn->cdev;
602 int_mode = cdev->int_params.out.int_mode;
603 if (int_mode == QED_INT_MODE_MSIX) {
605 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
606 id, cdev->pdev->bus->number,
607 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
608 rc = request_irq(cdev->int_params.msix_table[id].vector,
609 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
611 unsigned long flags = 0;
613 snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
614 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
615 PCI_FUNC(cdev->pdev->devfn));
617 if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
618 flags |= IRQF_SHARED;
620 rc = request_irq(cdev->pdev->irq, qed_single_int,
621 flags, cdev->name, cdev);
625 DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
627 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
628 "Requested slowpath %s\n",
629 (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
634 static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
635 {
636 /* Calling the disable function will make sure that any
637 * currently-running function is completed. The following call to the
638 * enable function makes this sequence a flush-like operation.
639 */
640 if (p_hwfn->b_sp_dpc_enabled) {
641 tasklet_disable(p_hwfn->sp_dpc);
642 tasklet_enable(p_hwfn->sp_dpc);
643 }
644 }
646 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
648 struct qed_dev *cdev = p_hwfn->cdev;
649 u8 id = p_hwfn->my_id;
652 int_mode = cdev->int_params.out.int_mode;
653 if (int_mode == QED_INT_MODE_MSIX)
654 synchronize_irq(cdev->int_params.msix_table[id].vector);
656 synchronize_irq(cdev->pdev->irq);
658 qed_slowpath_tasklet_flush(p_hwfn);
661 static void qed_slowpath_irq_free(struct qed_dev *cdev)
665 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
666 for_each_hwfn(cdev, i) {
667 if (!cdev->hwfns[i].b_int_requested)
669 synchronize_irq(cdev->int_params.msix_table[i].vector);
670 free_irq(cdev->int_params.msix_table[i].vector,
671 cdev->hwfns[i].sp_dpc);
674 if (QED_LEADING_HWFN(cdev)->b_int_requested)
675 free_irq(cdev->pdev->irq, cdev);
677 qed_int_disable_post_isr_release(cdev);
680 static int qed_nic_stop(struct qed_dev *cdev)
684 rc = qed_hw_stop(cdev);
686 for (i = 0; i < cdev->num_hwfns; i++) {
687 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
689 if (p_hwfn->b_sp_dpc_enabled) {
690 tasklet_disable(p_hwfn->sp_dpc);
691 p_hwfn->b_sp_dpc_enabled = false;
692 DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
693 "Disabled sp tasklet [hwfn %d] at %p\n",
698 qed_dbg_pf_exit(cdev);
703 static int qed_nic_setup(struct qed_dev *cdev)
707 /* Determine if interface is going to require LL2 */
708 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
709 for (i = 0; i < cdev->num_hwfns; i++) {
710 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
712 p_hwfn->using_ll2 = true;
716 rc = qed_resc_alloc(cdev);
720 DP_INFO(cdev, "Allocated qed resources\n");
722 qed_resc_setup(cdev);
727 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
731 /* Mark the fastpath as free/used */
732 cdev->int_params.fp_initialized = cnt ? true : false;
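/* Without MSI-X all hwfns share a single IRQ; each hwfn's SIMD status word
 * carries one slowpath bit and 63 fastpath bits, hence the per-hwfn limit
 * used below.
 */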
734 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
735 limit = cdev->num_hwfns * 63;
736 else if (cdev->int_params.fp_msix_cnt)
737 limit = cdev->int_params.fp_msix_cnt;
742 return min_t(int, cnt, limit);
745 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
747 memset(info, 0, sizeof(struct qed_int_info));
749 if (!cdev->int_params.fp_initialized) {
751 "Protocol driver requested interrupt information, but its support is not yet configured\n");
755 /* Need to expose only MSI-X information; Single IRQ is handled solely
758 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
759 int msix_base = cdev->int_params.fp_msix_base;
761 info->msix_cnt = cdev->int_params.fp_msix_cnt;
762 info->msix = &cdev->int_params.msix_table[msix_base];
768 static int qed_slowpath_setup_int(struct qed_dev *cdev,
769 enum qed_int_mode int_mode)
771 struct qed_sb_cnt_info sb_cnt_info;
772 int num_l2_queues = 0;
776 if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
777 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
781 memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
782 cdev->int_params.in.int_mode = int_mode;
783 for_each_hwfn(cdev, i) {
784 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
785 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
786 cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
787 cdev->int_params.in.num_vectors++; /* slowpath */
790 /* We want a minimum of one slowpath and one fastpath vector per hwfn */
791 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
793 if (is_kdump_kernel()) {
795 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
796 cdev->int_params.in.min_msix_cnt);
797 cdev->int_params.in.num_vectors =
798 cdev->int_params.in.min_msix_cnt;
801 rc = qed_set_int_mode(cdev, false);
803 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
807 cdev->int_params.fp_msix_base = cdev->num_hwfns;
808 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
811 if (!IS_ENABLED(CONFIG_QED_RDMA) ||
812 !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
815 for_each_hwfn(cdev, i)
816 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
818 DP_VERBOSE(cdev, QED_MSG_RDMA,
819 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
820 cdev->int_params.fp_msix_cnt, num_l2_queues);
822 if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
823 cdev->int_params.rdma_msix_cnt =
824 (cdev->int_params.fp_msix_cnt - num_l2_queues)
826 cdev->int_params.rdma_msix_base =
827 cdev->int_params.fp_msix_base + num_l2_queues;
828 cdev->int_params.fp_msix_cnt = num_l2_queues;
830 cdev->int_params.rdma_msix_cnt = 0;
833 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
834 cdev->int_params.rdma_msix_cnt,
835 cdev->int_params.rdma_msix_base);
840 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
844 memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
845 cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
847 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
848 &cdev->int_params.in.num_vectors);
849 if (cdev->num_hwfns > 1) {
852 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
853 cdev->int_params.in.num_vectors += vectors;
856 /* We want a minimum of one fastpath vector per vf hwfn */
857 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
859 rc = qed_set_int_mode(cdev, true);
863 cdev->int_params.fp_msix_base = 0;
864 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
869 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
870 u8 *input_buf, u32 max_size, u8 *unzip_buf)
874 p_hwfn->stream->next_in = input_buf;
875 p_hwfn->stream->avail_in = input_len;
876 p_hwfn->stream->next_out = unzip_buf;
877 p_hwfn->stream->avail_out = max_size;
879 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
882 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
887 rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
888 zlib_inflateEnd(p_hwfn->stream);
890 if (rc != Z_OK && rc != Z_STREAM_END) {
891 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
892 p_hwfn->stream->msg, rc);
896 return p_hwfn->stream->total_out / 4;
899 static int qed_alloc_stream_mem(struct qed_dev *cdev)
904 for_each_hwfn(cdev, i) {
905 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
907 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
911 workspace = vzalloc(zlib_inflate_workspacesize());
914 p_hwfn->stream->workspace = workspace;
920 static void qed_free_stream_mem(struct qed_dev *cdev)
924 for_each_hwfn(cdev, i) {
925 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
930 vfree(p_hwfn->stream->workspace);
931 kfree(p_hwfn->stream);
935 static void qed_update_pf_params(struct qed_dev *cdev,
936 struct qed_pf_params *params)
940 if (IS_ENABLED(CONFIG_QED_RDMA)) {
941 params->rdma_pf_params.num_qps = QED_ROCE_QPS;
942 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
943 params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
944 /* divide by 3 the MRs to avoid MF ILT overflow */
945 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
948 if (cdev->num_hwfns > 1 || IS_VF(cdev))
949 params->eth_pf_params.num_arfs_filters = 0;
951 /* In case we might support RDMA, don't allow qede to be greedy
952 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
955 if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
958 num_cons = ¶ms->eth_pf_params.num_cons;
959 *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
962 for (i = 0; i < cdev->num_hwfns; i++) {
963 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
965 p_hwfn->pf_params = *params;
969 static void qed_slowpath_wq_stop(struct qed_dev *cdev)
976 for_each_hwfn(cdev, i) {
977 if (!cdev->hwfns[i].slowpath_wq)
980 flush_workqueue(cdev->hwfns[i].slowpath_wq);
981 destroy_workqueue(cdev->hwfns[i].slowpath_wq);
985 static void qed_slowpath_task(struct work_struct *work)
987 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
989 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
992 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
996 if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
997 &hwfn->slowpath_task_flags))
998 qed_mfw_process_tlv_req(hwfn, ptt);
1000 qed_ptt_release(hwfn, ptt);
1003 static int qed_slowpath_wq_start(struct qed_dev *cdev)
1005 struct qed_hwfn *hwfn;
1006 char name[NAME_SIZE];
1012 for_each_hwfn(cdev, i) {
1013 hwfn = &cdev->hwfns[i];
1015 snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
1016 cdev->pdev->bus->number,
1017 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
1019 hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
1020 if (!hwfn->slowpath_wq) {
1021 DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
1025 INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
1031 static int qed_slowpath_start(struct qed_dev *cdev,
1032 struct qed_slowpath_params *params)
1034 struct qed_drv_load_params drv_load_params;
1035 struct qed_hw_init_params hw_init_params;
1036 struct qed_mcp_drv_version drv_version;
1037 struct qed_tunnel_info tunn_info;
1038 const u8 *data = NULL;
1039 struct qed_hwfn *hwfn;
1040 struct qed_ptt *p_ptt;
1043 if (qed_iov_wq_start(cdev))
1046 if (qed_slowpath_wq_start(cdev))
1050 rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
1054 "Failed to find fw file - /lib/firmware/%s\n",
1059 if (cdev->num_hwfns == 1) {
1060 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1062 QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
1065 "Failed to acquire PTT for aRFS\n");
1071 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
1072 rc = qed_nic_setup(cdev);
1077 rc = qed_slowpath_setup_int(cdev, params->int_mode);
1079 rc = qed_slowpath_vf_setup_int(cdev);
1084 /* Allocate stream for unzipping */
1085 rc = qed_alloc_stream_mem(cdev);
1089 /* First Dword used to differentiate between various sources */
1090 data = cdev->firmware->data + sizeof(u32);
1092 qed_dbg_pf_init(cdev);
1095 /* Start the slowpath */
1096 memset(&hw_init_params, 0, sizeof(hw_init_params));
1097 memset(&tunn_info, 0, sizeof(tunn_info));
1098 tunn_info.vxlan.b_mode_enabled = true;
1099 tunn_info.l2_gre.b_mode_enabled = true;
1100 tunn_info.ip_gre.b_mode_enabled = true;
1101 tunn_info.l2_geneve.b_mode_enabled = true;
1102 tunn_info.ip_geneve.b_mode_enabled = true;
1103 tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1104 tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1105 tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1106 tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1107 tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1108 hw_init_params.p_tunn = &tunn_info;
1109 hw_init_params.b_hw_start = true;
1110 hw_init_params.int_mode = cdev->int_params.out.int_mode;
1111 hw_init_params.allow_npar_tx_switch = true;
1112 hw_init_params.bin_fw_data = data;
1114 memset(&drv_load_params, 0, sizeof(drv_load_params));
1115 drv_load_params.is_crash_kernel = is_kdump_kernel();
1116 drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
1117 drv_load_params.avoid_eng_reset = false;
1118 drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
1119 hw_init_params.p_drv_load_params = &drv_load_params;
1121 rc = qed_hw_init(cdev, &hw_init_params);
1126 "HW initialization and function start completed successfully\n");
1129 cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
1130 BIT(QED_MODE_L2GENEVE_TUNN) |
1131 BIT(QED_MODE_IPGENEVE_TUNN) |
1132 BIT(QED_MODE_L2GRE_TUNN) |
1133 BIT(QED_MODE_IPGRE_TUNN));
1136 /* Allocate LL2 interface if needed */
1137 if (QED_LEADING_HWFN(cdev)->using_ll2) {
1138 rc = qed_ll2_alloc_if(cdev);
1143 hwfn = QED_LEADING_HWFN(cdev);
1144 drv_version.version = (params->drv_major << 24) |
1145 (params->drv_minor << 16) |
1146 (params->drv_rev << 8) |
1147 (params->drv_eng);
1148 strlcpy(drv_version.name, params->name,
1149 MCP_DRV_VER_STR_SIZE - 4);
1150 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
1153 DP_NOTICE(cdev, "Failed sending drv version command\n");
1158 qed_reset_vport_stats(cdev);
1165 qed_hw_timers_stop_all(cdev);
1167 qed_slowpath_irq_free(cdev);
1168 qed_free_stream_mem(cdev);
1169 qed_disable_msix(cdev);
1171 qed_resc_free(cdev);
1174 release_firmware(cdev->firmware);
1176 if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
1177 QED_LEADING_HWFN(cdev)->p_arfs_ptt)
1178 qed_ptt_release(QED_LEADING_HWFN(cdev),
1179 QED_LEADING_HWFN(cdev)->p_arfs_ptt);
1181 qed_iov_wq_stop(cdev, false);
1183 qed_slowpath_wq_stop(cdev);
1188 static int qed_slowpath_stop(struct qed_dev *cdev)
1193 qed_slowpath_wq_stop(cdev);
1195 qed_ll2_dealloc_if(cdev);
1198 if (cdev->num_hwfns == 1)
1199 qed_ptt_release(QED_LEADING_HWFN(cdev),
1200 QED_LEADING_HWFN(cdev)->p_arfs_ptt);
1201 qed_free_stream_mem(cdev);
1202 if (IS_QED_ETH_IF(cdev))
1203 qed_sriov_disable(cdev, true);
1209 qed_slowpath_irq_free(cdev);
1211 qed_disable_msix(cdev);
1213 qed_resc_free(cdev);
1215 qed_iov_wq_stop(cdev, true);
1218 release_firmware(cdev->firmware);
1223 static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
1224 {
1225 int i;
1227 memcpy(cdev->name, name, NAME_SIZE);
1228 for_each_hwfn(cdev, i)
1229 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
1230 }
1232 static u32 qed_sb_init(struct qed_dev *cdev,
1233 struct qed_sb_info *sb_info,
1235 dma_addr_t sb_phy_addr, u16 sb_id,
1236 enum qed_sb_type type)
1238 struct qed_hwfn *p_hwfn;
1239 struct qed_ptt *p_ptt;
1245 /* RoCE uses single engine and CMT uses two engines. When using both
1246 * we force only a single engine. Storage uses only engine 0 too.
1247 */
1248 if (type == QED_SB_TYPE_L2_QUEUE)
1249 n_hwfns = cdev->num_hwfns;
1253 hwfn_index = sb_id % n_hwfns;
1254 p_hwfn = &cdev->hwfns[hwfn_index];
1255 rel_sb_id = sb_id / n_hwfns;
1257 DP_VERBOSE(cdev, NETIF_MSG_INTR,
1258 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1259 hwfn_index, rel_sb_id, sb_id);
1261 if (IS_PF(p_hwfn->cdev)) {
1262 p_ptt = qed_ptt_acquire(p_hwfn);
1266 rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
1267 sb_phy_addr, rel_sb_id);
1268 qed_ptt_release(p_hwfn, p_ptt);
1270 rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
1271 sb_phy_addr, rel_sb_id);
1277 static u32 qed_sb_release(struct qed_dev *cdev,
1278 struct qed_sb_info *sb_info, u16 sb_id)
1280 struct qed_hwfn *p_hwfn;
1285 hwfn_index = sb_id % cdev->num_hwfns;
1286 p_hwfn = &cdev->hwfns[hwfn_index];
1287 rel_sb_id = sb_id / cdev->num_hwfns;
1289 DP_VERBOSE(cdev, NETIF_MSG_INTR,
1290 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1291 hwfn_index, rel_sb_id, sb_id);
1293 rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1298 static bool qed_can_link_change(struct qed_dev *cdev)
1299 {
1300 return true;
1301 }
1303 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
1305 struct qed_hwfn *hwfn;
1306 struct qed_mcp_link_params *link_params;
1307 struct qed_ptt *ptt;
1314 /* The link should be set only once per PF */
1315 hwfn = &cdev->hwfns[0];
1317 /* When VF wants to set link, force it to read the bulletin instead.
1318 * This mimics the PF behavior, where a notification [both immediate
1319 * and possible later] would be generated when changing properties.
1320 */
1321 if (IS_VF(cdev)) {
1322 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
1323 return 0;
1324 }
1326 ptt = qed_ptt_acquire(hwfn);
1330 link_params = qed_mcp_get_link_params(hwfn);
1331 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1332 link_params->speed.autoneg = params->autoneg;
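/* Collapse the driver's per-mode advertised-speed bits into the coarser NVM
 * speed-capability mask consumed by the management FW; several link modes
 * map onto each NVM speed.
 */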
1333 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1334 link_params->speed.advertised_speeds = 0;
1335 sup_caps = QED_LM_1000baseT_Full_BIT |
1336 QED_LM_1000baseKX_Full_BIT |
1337 QED_LM_1000baseX_Full_BIT;
1338 if (params->adv_speeds & sup_caps)
1339 link_params->speed.advertised_speeds |=
1340 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
1341 sup_caps = QED_LM_10000baseT_Full_BIT |
1342 QED_LM_10000baseKR_Full_BIT |
1343 QED_LM_10000baseKX4_Full_BIT |
1344 QED_LM_10000baseR_FEC_BIT |
1345 QED_LM_10000baseCR_Full_BIT |
1346 QED_LM_10000baseSR_Full_BIT |
1347 QED_LM_10000baseLR_Full_BIT |
1348 QED_LM_10000baseLRM_Full_BIT;
1349 if (params->adv_speeds & sup_caps)
1350 link_params->speed.advertised_speeds |=
1351 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
1352 if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
1353 link_params->speed.advertised_speeds |=
1354 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
1355 sup_caps = QED_LM_25000baseKR_Full_BIT |
1356 QED_LM_25000baseCR_Full_BIT |
1357 QED_LM_25000baseSR_Full_BIT;
1358 if (params->adv_speeds & sup_caps)
1359 link_params->speed.advertised_speeds |=
1360 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
1361 sup_caps = QED_LM_40000baseLR4_Full_BIT |
1362 QED_LM_40000baseKR4_Full_BIT |
1363 QED_LM_40000baseCR4_Full_BIT |
1364 QED_LM_40000baseSR4_Full_BIT;
1365 if (params->adv_speeds & sup_caps)
1366 link_params->speed.advertised_speeds |=
1367 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
1368 sup_caps = QED_LM_50000baseKR2_Full_BIT |
1369 QED_LM_50000baseCR2_Full_BIT |
1370 QED_LM_50000baseSR2_Full_BIT;
1371 if (params->adv_speeds & sup_caps)
1372 link_params->speed.advertised_speeds |=
1373 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
1374 sup_caps = QED_LM_100000baseKR4_Full_BIT |
1375 QED_LM_100000baseSR4_Full_BIT |
1376 QED_LM_100000baseCR4_Full_BIT |
1377 QED_LM_100000baseLR4_ER4_Full_BIT;
1378 if (params->adv_speeds & sup_caps)
1379 link_params->speed.advertised_speeds |=
1380 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
1382 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1383 link_params->speed.forced_speed = params->forced_speed;
1384 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
1385 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1386 link_params->pause.autoneg = true;
1388 link_params->pause.autoneg = false;
1389 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
1390 link_params->pause.forced_rx = true;
1392 link_params->pause.forced_rx = false;
1393 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
1394 link_params->pause.forced_tx = true;
1396 link_params->pause.forced_tx = false;
1398 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1399 switch (params->loopback_mode) {
1400 case QED_LINK_LOOPBACK_INT_PHY:
1401 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1403 case QED_LINK_LOOPBACK_EXT_PHY:
1404 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1406 case QED_LINK_LOOPBACK_EXT:
1407 link_params->loopback_mode = ETH_LOOPBACK_EXT;
1409 case QED_LINK_LOOPBACK_MAC:
1410 link_params->loopback_mode = ETH_LOOPBACK_MAC;
1413 link_params->loopback_mode = ETH_LOOPBACK_NONE;
1418 if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
1419 memcpy(&link_params->eee, ¶ms->eee,
1420 sizeof(link_params->eee));
1422 rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1424 qed_ptt_release(hwfn, ptt);
1429 static int qed_get_port_type(u32 media_type)
1433 switch (media_type) {
1434 case MEDIA_SFPP_10G_FIBER:
1435 case MEDIA_SFP_1G_FIBER:
1436 case MEDIA_XFP_FIBER:
1437 case MEDIA_MODULE_FIBER:
1439 port_type = PORT_FIBRE;
1441 case MEDIA_DA_TWINAX:
1442 port_type = PORT_DA;
1445 port_type = PORT_TP;
1447 case MEDIA_NOT_PRESENT:
1448 port_type = PORT_NONE;
1450 case MEDIA_UNSPECIFIED:
1452 port_type = PORT_OTHER;
1458 static int qed_get_link_data(struct qed_hwfn *hwfn,
1459 struct qed_mcp_link_params *params,
1460 struct qed_mcp_link_state *link,
1461 struct qed_mcp_link_capabilities *link_caps)
1465 if (!IS_PF(hwfn->cdev)) {
1466 qed_vf_get_link_params(hwfn, params);
1467 qed_vf_get_link_state(hwfn, link);
1468 qed_vf_get_link_caps(hwfn, link_caps);
1473 p = qed_mcp_get_link_params(hwfn);
1476 memcpy(params, p, sizeof(*params));
1478 p = qed_mcp_get_link_state(hwfn);
1481 memcpy(link, p, sizeof(*link));
1483 p = qed_mcp_get_link_capabilities(hwfn);
1486 memcpy(link_caps, p, sizeof(*link_caps));
1491 static void qed_fill_link_capability(struct qed_hwfn *hwfn,
1492 struct qed_ptt *ptt, u32 capability,
1495 u32 media_type, tcvr_state, tcvr_type;
1496 u32 speed_mask, board_cfg;
1498 if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
1499 media_type = MEDIA_UNSPECIFIED;
1501 if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
1502 tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;
1504 if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
1505 speed_mask = 0xFFFFFFFF;
1507 if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
1508 board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
1510 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
1511 "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
1512 media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
1514 switch (media_type) {
1515 case MEDIA_DA_TWINAX:
1516 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1517 *if_capability |= QED_LM_20000baseKR2_Full_BIT;
1518 /* For DAC media multiple speed capabilities are supported*/
1519 capability = capability & speed_mask;
1520 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1521 *if_capability |= QED_LM_1000baseKX_Full_BIT;
1522 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1523 *if_capability |= QED_LM_10000baseCR_Full_BIT;
1524 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1525 *if_capability |= QED_LM_40000baseCR4_Full_BIT;
1526 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1527 *if_capability |= QED_LM_25000baseCR_Full_BIT;
1528 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1529 *if_capability |= QED_LM_50000baseCR2_Full_BIT;
1531 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1532 *if_capability |= QED_LM_100000baseCR4_Full_BIT;
1535 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
1537 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1538 *if_capability |= QED_LM_1000baseT_Full_BIT;
1541 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1542 *if_capability |= QED_LM_10000baseT_Full_BIT;
1545 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
1546 if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
1547 *if_capability |= QED_LM_1000baseT_Full_BIT;
1548 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
1549 *if_capability |= QED_LM_10000baseT_Full_BIT;
1552 case MEDIA_SFP_1G_FIBER:
1553 case MEDIA_SFPP_10G_FIBER:
1554 case MEDIA_XFP_FIBER:
1555 case MEDIA_MODULE_FIBER:
1557 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1558 if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
1559 (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
1560 *if_capability |= QED_LM_1000baseKX_Full_BIT;
1563 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1564 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
1565 *if_capability |= QED_LM_10000baseSR_Full_BIT;
1566 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
1567 *if_capability |= QED_LM_10000baseLR_Full_BIT;
1568 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
1569 *if_capability |= QED_LM_10000baseLRM_Full_BIT;
1570 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
1571 *if_capability |= QED_LM_10000baseR_FEC_BIT;
1573 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1574 *if_capability |= QED_LM_20000baseKR2_Full_BIT;
1576 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
1577 if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
1578 *if_capability |= QED_LM_25000baseSR_Full_BIT;
1581 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
1582 if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
1583 *if_capability |= QED_LM_40000baseLR4_Full_BIT;
1584 if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
1585 *if_capability |= QED_LM_40000baseSR4_Full_BIT;
1588 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1589 *if_capability |= QED_LM_50000baseKR2_Full_BIT;
1591 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
1592 if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
1593 *if_capability |= QED_LM_100000baseSR4_Full_BIT;
1598 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1599 *if_capability |= QED_LM_20000baseKR2_Full_BIT;
1601 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1602 *if_capability |= QED_LM_1000baseKX_Full_BIT;
1604 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1605 *if_capability |= QED_LM_10000baseKR_Full_BIT;
1607 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1608 *if_capability |= QED_LM_25000baseKR_Full_BIT;
1610 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1611 *if_capability |= QED_LM_40000baseKR4_Full_BIT;
1613 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1614 *if_capability |= QED_LM_50000baseKR2_Full_BIT;
1616 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1617 *if_capability |= QED_LM_100000baseKR4_Full_BIT;
1619 case MEDIA_UNSPECIFIED:
1620 case MEDIA_NOT_PRESENT:
1621 DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
1622 "Unknown media and transceiver type;\n");
1627 static void qed_fill_link(struct qed_hwfn *hwfn,
1628 struct qed_ptt *ptt,
1629 struct qed_link_output *if_link)
1631 struct qed_mcp_link_capabilities link_caps;
1632 struct qed_mcp_link_params params;
1633 struct qed_mcp_link_state link;
1636 memset(if_link, 0, sizeof(*if_link));
1638 /* Prepare source inputs */
1639 if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) {
1640 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
1644 /* Set the link parameters to pass to protocol driver */
1646 if_link->link_up = true;
1648 /* TODO - at the moment assume supported and advertised speed equal */
1649 if_link->supported_caps = QED_LM_FIBRE_BIT;
1650 if (link_caps.default_speed_autoneg)
1651 if_link->supported_caps |= QED_LM_Autoneg_BIT;
1652 if (params.pause.autoneg ||
1653 (params.pause.forced_rx && params.pause.forced_tx))
1654 if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
1655 if (params.pause.autoneg || params.pause.forced_rx ||
1656 params.pause.forced_tx)
1657 if_link->supported_caps |= QED_LM_Pause_BIT;
1659 if_link->advertised_caps = if_link->supported_caps;
1660 if (params.speed.autoneg)
1661 if_link->advertised_caps |= QED_LM_Autoneg_BIT;
1663 if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
1665 /* Fill link advertised capability*/
1666 qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
1667 &if_link->advertised_caps);
1668 /* Fill link supported capability*/
1669 qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
1670 &if_link->supported_caps);
1673 if_link->speed = link.speed;
1675 /* TODO - fill duplex properly */
1676 if_link->duplex = DUPLEX_FULL;
1677 qed_mcp_get_media_type(hwfn, ptt, &media_type);
1678 if_link->port = qed_get_port_type(media_type);
1680 if_link->autoneg = params.speed.autoneg;
1682 if (params.pause.autoneg)
1683 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1684 if (params.pause.forced_rx)
1685 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1686 if (params.pause.forced_tx)
1687 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1689 /* Link partner capabilities */
1690 if (link.partner_adv_speed &
1691 QED_LINK_PARTNER_SPEED_1G_FD)
1692 if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
1693 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
1694 if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
1695 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
1696 if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
1697 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
1698 if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
1699 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
1700 if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
1701 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
1702 if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
1703 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
1704 if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;
1706 if (link.an_complete)
1707 if_link->lp_caps |= QED_LM_Autoneg_BIT;
1709 if (link.partner_adv_pause)
1710 if_link->lp_caps |= QED_LM_Pause_BIT;
1711 if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
1712 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
1713 if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
1715 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
1716 if_link->eee_supported = false;
1718 if_link->eee_supported = true;
1719 if_link->eee_active = link.eee_active;
1720 if_link->sup_caps = link_caps.eee_speed_caps;
1721 /* MFW clears adv_caps on eee disable; use configured value */
1722 if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
1723 params.eee.adv_caps;
1724 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
1725 if_link->eee.enable = params.eee.enable;
1726 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
1727 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
1731 static void qed_get_current_link(struct qed_dev *cdev,
1732 struct qed_link_output *if_link)
1734 struct qed_hwfn *hwfn;
1735 struct qed_ptt *ptt;
1738 hwfn = &cdev->hwfns[0];
1740 ptt = qed_ptt_acquire(hwfn);
1742 qed_fill_link(hwfn, ptt, if_link);
1743 qed_ptt_release(hwfn, ptt);
1745 DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
1748 qed_fill_link(hwfn, NULL, if_link);
1751 for_each_hwfn(cdev, i)
1752 qed_inform_vf_link_state(&cdev->hwfns[i]);
1755 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
1757 void *cookie = hwfn->cdev->ops_cookie;
1758 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
1759 struct qed_link_output if_link;
1761 qed_fill_link(hwfn, ptt, &if_link);
1762 qed_inform_vf_link_state(hwfn);
1764 if (IS_LEAD_HWFN(hwfn) && cookie)
1765 op->link_update(cookie, &if_link);
1768 static int qed_drain(struct qed_dev *cdev)
1769 {
1770 struct qed_hwfn *hwfn;
1771 struct qed_ptt *ptt;
1772 int i, rc;
1774 if (IS_VF(cdev))
1775 return 0;
1777 for_each_hwfn(cdev, i) {
1778 hwfn = &cdev->hwfns[i];
1779 ptt = qed_ptt_acquire(hwfn);
1780 if (!ptt) {
1781 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
1782 return -EBUSY;
1783 }
1784 rc = qed_mcp_drain(hwfn, ptt);
1785 if (rc)
1786 return rc;
1787 qed_ptt_release(hwfn, ptt);
1788 }
1790 return 0;
1791 }
1793 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
1794 struct qed_nvm_image_att *nvm_image,
1801 /* Allocate a buffer for holding the nvram image */
1802 buf = kzalloc(nvm_image->length, GFP_KERNEL);
1806 /* Read image into buffer */
1807 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
1808 buf, nvm_image->length);
1810 DP_ERR(cdev, "Failed reading image from nvm\n");
1814 /* Convert the buffer into big-endian format (excluding the
1815 * closing 4 bytes of CRC).
1817 for (j = 0; j < nvm_image->length - 4; j += 4) {
1818 val = cpu_to_be32(*(u32 *)&buf[j]);
1819 *(u32 *)&buf[j] = val;
1822 /* Calc CRC for the "actual" image buffer, i.e. not including
1823 * the last 4 CRC bytes.
1825 *crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));
1833 /* Binary file format -
1834 * /----------------------------------------------------------------------\
1835 * 0B | 0x4 [command index] |
1836 * 4B | image_type | Options | Number of register settings |
1840 * \----------------------------------------------------------------------/
1841 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
1842 * Options - 0'b - Calculate & Update CRC for image
1843 */
1844 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
1847 struct qed_nvm_image_att nvm_image;
1848 struct qed_hwfn *p_hwfn;
1849 bool is_crc = false;
1855 image_type = **data;
1856 p_hwfn = QED_LEADING_HWFN(cdev);
1857 for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
1858 if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
1860 if (i == p_hwfn->nvm_info.num_images) {
1861 DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
1866 nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
1867 nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
1869 DP_VERBOSE(cdev, NETIF_MSG_DRV,
1870 "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
1871 **data, image_type, nvm_image.start_addr,
1872 nvm_image.start_addr + nvm_image.length - 1);
1874 is_crc = !!(**data & BIT(0));
1876 len = *((u16 *)*data);
1881 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
1883 DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
1887 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
1888 (nvm_image.start_addr +
1889 nvm_image.length - 4), (u8 *)&crc, 4);
1891 DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
1892 nvm_image.start_addr + nvm_image.length - 4, rc);
1896 /* Iterate over the values for setting */
1898 u32 offset, mask, value, cur_value;
1901 value = *((u32 *)*data);
1903 mask = *((u32 *)*data);
1905 offset = *((u32 *)*data);
1908 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
1911 DP_ERR(cdev, "Failed reading from %08x\n",
1912 nvm_image.start_addr + offset);
1916 cur_value = le32_to_cpu(*((__le32 *)buf));
1917 DP_VERBOSE(cdev, NETIF_MSG_DRV,
1918 "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
1919 nvm_image.start_addr + offset, cur_value,
1920 (cur_value & ~mask) | (value & mask), value, mask);
1921 value = (value & mask) | (cur_value & ~mask);
1922 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
1923 nvm_image.start_addr + offset,
1926 DP_ERR(cdev, "Failed writing to %08x\n",
1927 nvm_image.start_addr + offset);
1937 /* Binary file format -
1938 * /----------------------------------------------------------------------\
1939 * 0B | 0x3 [command index] |
1940 * 4B | b'0: check_response? | b'1-31 reserved |
1941 * 8B | File-type | reserved |
1942 * \----------------------------------------------------------------------/
1943 * Start a new file of the provided type
1944 */
1945 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
1946 const u8 **data, bool *check_resp)
1951 *check_resp = !!(**data & BIT(0));
1954 DP_VERBOSE(cdev, NETIF_MSG_DRV,
1955 "About to start a new file of type %02x\n", **data);
1956 rc = qed_mcp_nvm_put_file_begin(cdev, **data);
1962 /* Binary file format -
1963 * /----------------------------------------------------------------------\
1964 * 0B | 0x2 [command index] |
1965 * 4B | Length in bytes |
1966 * 8B | b'0: check_response? | b'1-31 reserved |
1967 * 12B | Offset in bytes |
1969 * \----------------------------------------------------------------------/
1970 * Write data as part of a file that was previously started. Data should be
1971 * of length equal to that provided in the message
1972 */
1973 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
1974 const u8 **data, bool *check_resp)
1980 len = *((u32 *)(*data));
1982 *check_resp = !!(**data & BIT(0));
1984 offset = *((u32 *)(*data));
1987 DP_VERBOSE(cdev, NETIF_MSG_DRV,
1988 "About to write File-data: %08x bytes to offset %08x\n",
1991 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
1992 (char *)(*data), len);
1998 /* Binary file format [General header] -
1999 * /----------------------------------------------------------------------\
2000 * 0B | QED_NVM_SIGNATURE |
2001 * 4B | Length in bytes |
2002 * 8B | Highest command in this batchfile | Reserved |
2003 * \----------------------------------------------------------------------/
2005 static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2006 const struct firmware *image,
2011 /* Check minimum size */
2012 if (image->size < 12) {
2013 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2017 /* Check signature */
2018 signature = *((u32 *)(*data));
2019 if (signature != QED_NVM_SIGNATURE) {
2020 DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2025 /* Validate internal size equals the image-size */
2026 len = *((u32 *)(*data));
2027 if (len != image->size) {
2028 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2029 len, (u32)image->size);
2034 /* Make sure driver familiar with all commands necessary for this */
2035 if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2036 DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2046 static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
2048 const struct firmware *image;
2049 const u8 *data, *data_end;
2053 rc = request_firmware(&image, name, &cdev->pdev->dev);
2055 DP_ERR(cdev, "Failed to find '%s'\n", name);
2059 DP_VERBOSE(cdev, NETIF_MSG_DRV,
2060 "Flashing '%s' - firmware's data at %p, size is %08x\n",
2061 name, image->data, (u32)image->size);
2063 data_end = data + image->size;
2065 rc = qed_nvm_flash_image_validate(cdev, image, &data);
2069 while (data < data_end) {
2070 bool check_resp = false;
2072 /* Parse the actual command */
2073 cmd_type = *((u32 *)data);
2075 case QED_NVM_FLASH_CMD_FILE_DATA:
2076 rc = qed_nvm_flash_image_file_data(cdev, &data,
2079 case QED_NVM_FLASH_CMD_FILE_START:
2080 rc = qed_nvm_flash_image_file_start(cdev, &data,
2083 case QED_NVM_FLASH_CMD_NVM_CHANGE:
2084 rc = qed_nvm_flash_image_access(cdev, &data,
2088 DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
2094 DP_ERR(cdev, "Command %08x failed\n", cmd_type);
2098 /* Check response if needed */
2100 u32 mcp_response = 0;
2102 if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
2103 DP_ERR(cdev, "Failed getting MCP response\n");
2108 switch (mcp_response & FW_MSG_CODE_MASK) {
2109 case FW_MSG_CODE_OK:
2110 case FW_MSG_CODE_NVM_OK:
2111 case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
2112 case FW_MSG_CODE_PHY_OK:
2115 DP_ERR(cdev, "MFW returns error: %08x\n",
2124 release_firmware(image);
2129 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2130 u8 *buf, u16 len)
2131 {
2132 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2134 return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2135 }
2137 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2138 void *handle)
2139 {
2140 return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2141 }
2143 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
2144 {
2145 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2146 struct qed_ptt *ptt;
2147 int status = 0;
2149 ptt = qed_ptt_acquire(hwfn);
2150 if (!ptt)
2151 return -EAGAIN;
2153 status = qed_mcp_set_led(hwfn, ptt, mode);
2155 qed_ptt_release(hwfn, ptt);
2157 return status;
2158 }
2160 static int qed_update_wol(struct qed_dev *cdev, bool enabled)
2162 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2163 struct qed_ptt *ptt;
2169 ptt = qed_ptt_acquire(hwfn);
2173 rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
2174 : QED_OV_WOL_DISABLED);
2177 rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2180 qed_ptt_release(hwfn, ptt);
2184 static int qed_update_drv_state(struct qed_dev *cdev, bool active)
2186 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2187 struct qed_ptt *ptt;
2193 ptt = qed_ptt_acquire(hwfn);
2197 status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
2198 QED_OV_DRIVER_STATE_ACTIVE :
2199 QED_OV_DRIVER_STATE_DISABLED);
2201 qed_ptt_release(hwfn, ptt);
2206 static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
2208 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2209 struct qed_ptt *ptt;
2215 ptt = qed_ptt_acquire(hwfn);
2219 status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
2223 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2226 qed_ptt_release(hwfn, ptt);
2230 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
2232 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2233 struct qed_ptt *ptt;
2239 ptt = qed_ptt_acquire(hwfn);
2243 status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
2247 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2250 qed_ptt_release(hwfn, ptt);
2254 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
2255 u8 dev_addr, u32 offset, u32 len)
2257 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2258 struct qed_ptt *ptt;
2264 ptt = qed_ptt_acquire(hwfn);
2268 rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
2271 qed_ptt_release(hwfn, ptt);
2276 static struct qed_selftest_ops qed_selftest_ops_pass = {
2277 .selftest_memory = &qed_selftest_memory,
2278 .selftest_interrupt = &qed_selftest_interrupt,
2279 .selftest_register = &qed_selftest_register,
2280 .selftest_clock = &qed_selftest_clock,
2281 .selftest_nvram = &qed_selftest_nvram,
2284 const struct qed_common_ops qed_common_ops_pass = {
2285 .selftest = &qed_selftest_ops_pass,
2286 .probe = &qed_probe,
2287 .remove = &qed_remove,
2288 .set_power_state = &qed_set_power_state,
2289 .set_name = &qed_set_name,
2290 .update_pf_params = &qed_update_pf_params,
2291 .slowpath_start = &qed_slowpath_start,
2292 .slowpath_stop = &qed_slowpath_stop,
2293 .set_fp_int = &qed_set_int_fp,
2294 .get_fp_int = &qed_get_int_fp,
2295 .sb_init = &qed_sb_init,
2296 .sb_release = &qed_sb_release,
2297 .simd_handler_config = &qed_simd_handler_config,
2298 .simd_handler_clean = &qed_simd_handler_clean,
2299 .dbg_grc = &qed_dbg_grc,
2300 .dbg_grc_size = &qed_dbg_grc_size,
2301 .can_link_change = &qed_can_link_change,
2302 .set_link = &qed_set_link,
2303 .get_link = &qed_get_current_link,
2304 .drain = &qed_drain,
2305 .update_msglvl = &qed_init_dp,
2306 .dbg_all_data = &qed_dbg_all_data,
2307 .dbg_all_data_size = &qed_dbg_all_data_size,
2308 .chain_alloc = &qed_chain_alloc,
2309 .chain_free = &qed_chain_free,
2310 .nvm_flash = &qed_nvm_flash,
2311 .nvm_get_image = &qed_nvm_get_image,
2312 .set_coalesce = &qed_set_coalesce,
2313 .set_led = &qed_set_led,
2314 .update_drv_state = &qed_update_drv_state,
2315 .update_mac = &qed_update_mac,
2316 .update_mtu = &qed_update_mtu,
2317 .update_wol = &qed_update_wol,
2318 .read_module_eeprom = &qed_read_module_eeprom,
2321 void qed_get_protocol_stats(struct qed_dev *cdev,
2322 enum qed_mcp_protocol_type type,
2323 union qed_mcp_protocol_stats *stats)
2325 struct qed_eth_stats eth_stats;
2327 memset(stats, 0, sizeof(*stats));
2330 case QED_MCP_LAN_STATS:
2331 qed_get_vport_stats(cdev, ð_stats);
2332 stats->lan_stats.ucast_rx_pkts =
2333 eth_stats.common.rx_ucast_pkts;
2334 stats->lan_stats.ucast_tx_pkts =
2335 eth_stats.common.tx_ucast_pkts;
2336 stats->lan_stats.fcs_err = -1;
2338 case QED_MCP_FCOE_STATS:
2339 qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
2341 case QED_MCP_ISCSI_STATS:
2342 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
2345 DP_VERBOSE(cdev, QED_MSG_SP,
2346 "Invalid protocol type = %d\n", type);
2351 int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
2352 {
2353 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
2354 "Scheduling slowpath task [Flag: %d]\n",
2355 QED_SLOWPATH_MFW_TLV_REQ);
2356 smp_mb__before_atomic();
2357 set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
2358 smp_mb__after_atomic();
2359 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
2361 return 0;
2362 }
2364 static void
2365 qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
2367 struct qed_common_cb_ops *op = cdev->protocol_ops.common;
2368 struct qed_eth_stats_common *p_common;
2369 struct qed_generic_tlvs gen_tlvs;
2370 struct qed_eth_stats stats;
2373 memset(&gen_tlvs, 0, sizeof(gen_tlvs));
2374 op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
2376 if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
2377 tlv->flags.ipv4_csum_offload = true;
2378 if (gen_tlvs.feat_flags & QED_TLV_LSO)
2379 tlv->flags.lso_supported = true;
2380 tlv->flags.b_set = true;
2382 for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
2383 if (is_valid_ether_addr(gen_tlvs.mac[i])) {
2384 ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
2385 tlv->mac_set[i] = true;
2389 qed_get_vport_stats(cdev, &stats);
2390 p_common = &stats.common;
2391 tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
2392 p_common->rx_bcast_pkts;
2393 tlv->rx_frames_set = true;
2394 tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
2395 p_common->rx_bcast_bytes;
2396 tlv->rx_bytes_set = true;
2397 tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
2398 p_common->tx_bcast_pkts;
2399 tlv->tx_frames_set = true;
2400 tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
2401 p_common->tx_bcast_bytes;
2402 tlv->tx_bytes_set = true;
2405 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
2406 union qed_mfw_tlv_data *tlv_buf)
2408 struct qed_dev *cdev = hwfn->cdev;
2409 struct qed_common_cb_ops *ops;
2411 ops = cdev->protocol_ops.common;
2412 if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
2413 DP_NOTICE(hwfn, "Can't collect TLV management info\n");
2418 case QED_MFW_TLV_GENERIC:
2419 qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
2421 case QED_MFW_TLV_ETH:
2422 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
2424 case QED_MFW_TLV_FCOE:
2425 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
2427 case QED_MFW_TLV_ISCSI:
2428 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);