1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23
24 #include <linux/blkdev.h>
25 #include <linux/delay.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/idr.h>
28 #include <linux/interrupt.h>
29 #include <linux/module.h>
30 #include <linux/kthread.h>
31 #include <linux/pci.h>
32 #include <linux/spinlock.h>
33 #include <linux/sched/clock.h>
34 #include <linux/ctype.h>
35 #include <linux/slab.h>
36 #include <linux/firmware.h>
37 #include <linux/miscdevice.h>
38 #include <linux/percpu.h>
39 #include <linux/irq.h>
40 #include <linux/bitops.h>
41 #include <linux/crash_dump.h>
42 #include <linux/cpu.h>
43 #include <linux/cpuhotplug.h>
44
45 #include <scsi/scsi.h>
46 #include <scsi/scsi_device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_transport_fc.h>
49 #include <scsi/scsi_tcq.h>
50 #include <scsi/fc/fc_fs.h>
51
52 #include "lpfc_hw4.h"
53 #include "lpfc_hw.h"
54 #include "lpfc_sli.h"
55 #include "lpfc_sli4.h"
56 #include "lpfc_nl.h"
57 #include "lpfc_disc.h"
58 #include "lpfc.h"
59 #include "lpfc_scsi.h"
60 #include "lpfc_nvme.h"
61 #include "lpfc_logmsg.h"
62 #include "lpfc_crtn.h"
63 #include "lpfc_vport.h"
64 #include "lpfc_version.h"
65 #include "lpfc_ids.h"
66
67 static enum cpuhp_state lpfc_cpuhp_state;
68 /* Used when mapping IRQ vectors in a driver centric manner */
69 static uint32_t lpfc_present_cpu;
70 static bool lpfc_pldv_detect;
71
72 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
73 static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
74 static void lpfc_cpuhp_add(struct lpfc_hba *phba);
75 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
76 static int lpfc_post_rcv_buf(struct lpfc_hba *);
77 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
78 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
79 static int lpfc_setup_endian_order(struct lpfc_hba *);
80 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
81 static void lpfc_free_els_sgl_list(struct lpfc_hba *);
82 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
83 static void lpfc_init_sgl_list(struct lpfc_hba *);
84 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
85 static void lpfc_free_active_sgl(struct lpfc_hba *);
86 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
87 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
88 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
89 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
90 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
91 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
92 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
93 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
94 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
95 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
96 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
97 static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
98
99 static struct scsi_transport_template *lpfc_transport_template = NULL;
100 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
101 static DEFINE_IDR(lpfc_hba_index);
102 #define LPFC_NVMET_BUF_POST 254
103 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
104 static void lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts);
105
106 /**
107  * lpfc_config_port_prep - Perform lpfc initialization prior to config port
108  * @phba: pointer to lpfc hba data structure.
109  *
110  * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
111  * mailbox command. It retrieves the revision information from the HBA and
112  * collects the Vital Product Data (VPD) about the HBA for preparing the
113  * configuration of the HBA.
114  *
115  * Return codes:
116  *   0 - success.
117  *   -ERESTART - requests the SLI layer to reset the HBA and try again.
118  *   Any other value - indicates an error.
119  **/
120 int
121 lpfc_config_port_prep(struct lpfc_hba *phba)
122 {
123         lpfc_vpd_t *vp = &phba->vpd;
124         int i = 0, rc;
125         LPFC_MBOXQ_t *pmb;
126         MAILBOX_t *mb;
127         char *lpfc_vpd_data = NULL;
128         uint16_t offset = 0;
129         static char licensed[56] =
130                     "key unlock for use with gnu public licensed code only\0";
131         static int init_key = 1;
132
133         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
134         if (!pmb) {
135                 phba->link_state = LPFC_HBA_ERROR;
136                 return -ENOMEM;
137         }
138
139         mb = &pmb->u.mb;
140         phba->link_state = LPFC_INIT_MBX_CMDS;
141
142         if (lpfc_is_LC_HBA(phba->pcidev->device)) {
143                 if (init_key) {
144                         uint32_t *ptext = (uint32_t *) licensed;
145
146                         for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
147                                 *ptext = cpu_to_be32(*ptext);
148                         init_key = 0;
149                 }
150
151                 lpfc_read_nv(phba, pmb);
152                 memset((char *)mb->un.varRDnvp.rsvd3, 0,
153                        sizeof(mb->un.varRDnvp.rsvd3));
154                 memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
155                        sizeof(licensed));
156
157                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
158
159                 if (rc != MBX_SUCCESS) {
160                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
161                                         "0324 Config Port initialization "
162                                         "error, mbxCmd x%x READ_NVPARM, "
163                                         "mbxStatus x%x\n",
164                                         mb->mbxCommand, mb->mbxStatus);
165                         mempool_free(pmb, phba->mbox_mem_pool);
166                         return -ERESTART;
167                 }
168                 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
169                        sizeof(phba->wwnn));
170                 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
171                        sizeof(phba->wwpn));
172         }
173
174         /*
175          * Clear all option bits except LPFC_SLI3_BG_ENABLED,
176          * which was already set in lpfc_get_cfgparam()
177          */
178         phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
179
180         /* Setup and issue mailbox READ REV command */
181         lpfc_read_rev(phba, pmb);
182         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
183         if (rc != MBX_SUCCESS) {
184                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
185                                 "0439 Adapter failed to init, mbxCmd x%x "
186                                 "READ_REV, mbxStatus x%x\n",
187                                 mb->mbxCommand, mb->mbxStatus);
188                 mempool_free(pmb, phba->mbox_mem_pool);
189                 return -ERESTART;
190         }
191
192
193         /*
194          * The value of rr must be 1 since the driver set the cv field to 1.
195          * This setting requires the FW to set all revision fields.
196          */
197         if (mb->un.varRdRev.rr == 0) {
198                 vp->rev.rBit = 0;
199                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
200                                 "0440 Adapter failed to init, READ_REV has "
201                                 "missing revision information.\n");
202                 mempool_free(pmb, phba->mbox_mem_pool);
203                 return -ERESTART;
204         }
205
206         if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
207                 mempool_free(pmb, phba->mbox_mem_pool);
208                 return -EINVAL;
209         }
210
211         /* Save information as VPD data */
212         vp->rev.rBit = 1;
213         memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
214         vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
215         memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
216         vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
217         memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
218         vp->rev.biuRev = mb->un.varRdRev.biuRev;
219         vp->rev.smRev = mb->un.varRdRev.smRev;
220         vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
221         vp->rev.endecRev = mb->un.varRdRev.endecRev;
222         vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
223         vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
224         vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
225         vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
226         vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
227         vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
228
229         /* If the sli feature level is less than 9, we must
230          * tear down all RPIs and VPIs on link down if NPIV
231          * is enabled.
232          */
233         if (vp->rev.feaLevelHigh < 9)
234                 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
235
236         if (lpfc_is_LC_HBA(phba->pcidev->device))
237                 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
238                                                 sizeof (phba->RandomData));
239
240         /* Get adapter VPD information */
241         lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
242         if (!lpfc_vpd_data)
243                 goto out_free_mbox;
244         do {
245                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
246                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
247
248                 if (rc != MBX_SUCCESS) {
249                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
250                                         "0441 VPD not present on adapter, "
251                                         "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
252                                         mb->mbxCommand, mb->mbxStatus);
253                         mb->un.varDmp.word_cnt = 0;
254                 }
255                 /* dump mem may return a zero when finished, or we got a
256                  * mailbox error; either way we are done.
257                  */
258                 if (mb->un.varDmp.word_cnt == 0)
259                         break;
260
261                 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
262                         mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
263                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
264                                       lpfc_vpd_data + offset,
265                                       mb->un.varDmp.word_cnt);
266                 offset += mb->un.varDmp.word_cnt;
267         } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
268
269         lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
270
271         kfree(lpfc_vpd_data);
272 out_free_mbox:
273         mempool_free(pmb, phba->mbox_mem_pool);
274         return 0;
275 }
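
/*
 * Example (compiled out): the -ERESTART return code above asks the caller
 * to reset the HBA and try again.  A minimal retry sketch under that
 * contract; example_prep_with_retry() and the retry bound of 3 are
 * hypothetical, not part of the driver.
 */
#if 0
static int example_prep_with_retry(struct lpfc_hba *phba)
{
	int tries, rc;

	for (tries = 0; tries < 3; tries++) {
		rc = lpfc_config_port_prep(phba);
		if (rc != -ERESTART)
			return rc;	/* 0 on success, or a hard error */
		lpfc_sli_brdrestart(phba);	/* reset the HBA and retry */
	}
	return -EIO;
}
#endif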
276
277 /**
278  * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
279  * @phba: pointer to lpfc hba data structure.
280  * @pmboxq: pointer to the driver internal queue element for mailbox command.
281  *
282  * This is the completion handler for the driver's mailbox command that
283  * configures asynchronous event support on the device. If the mailbox
284  * command returns successfully, the internal async event support flag is
285  * set to 1; otherwise, it is cleared to 0.
286  **/
287 static void
288 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
289 {
290         if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
291                 phba->temp_sensor_support = 1;
292         else
293                 phba->temp_sensor_support = 0;
294         mempool_free(pmboxq, phba->mbox_mem_pool);
295         return;
296 }
297
298 /**
299  * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
300  * @phba: pointer to lpfc hba data structure.
301  * @pmboxq: pointer to the driver internal queue element for mailbox command.
302  *
303  * This is the completion handler for the dump mailbox command used to get
304  * the wake up parameters. When this command completes, the response contains
305  * the option ROM version of the HBA. This function translates the version
306  * number into a human readable string and stores it in OptionROMVersion.
307  **/
308 static void
309 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
310 {
311         struct prog_id *prg;
312         uint32_t prog_id_word;
313         char dist = ' ';
314         /* character array used for decoding dist type. */
315         char dist_char[] = "nabx";
316
317         if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
318                 mempool_free(pmboxq, phba->mbox_mem_pool);
319                 return;
320         }
321
322         prg = (struct prog_id *) &prog_id_word;
323
324         /* word 7 contains the option rom version */
325         prog_id_word = pmboxq->u.mb.un.varWords[7];
326
327         /* Decode the Option rom version word to a readable string */
328         dist = dist_char[prg->dist];
329
330         if ((prg->dist == 3) && (prg->num == 0))
331                 snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
332                         prg->ver, prg->rev, prg->lev);
333         else
334                 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
335                         prg->ver, prg->rev, prg->lev,
336                         dist, prg->num);
337         mempool_free(pmboxq, phba->mbox_mem_pool);
338         return;
339 }
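
/*
 * Worked example (compiled out) of the decode above.  Assuming a
 * hypothetical word 7 whose prog_id fields are ver=10, rev=2, lev=7,
 * dist=1 and num=3, dist_char[1] yields 'a' and the stored string is
 * "10.27a3"; with dist=3 and num=0 the suffix is dropped, giving "10.27".
 */
#if 0
static void example_decode_optrom(char *buf)
{
	/* same format string as the non-suppressed case above */
	snprintf(buf, 32, "%d.%d%d%c%d", 10, 2, 7, 'a', 3); /* "10.27a3" */
}
#endif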
340
341 /**
342  * lpfc_update_vport_wwn - Update the vport's fc_nodename and fc_portname
343  * @vport: pointer to lpfc vport data structure.
344  *
345  * Synchronizes the WWNs with the service parameters in vport->fc_sparam.
346  * Return codes
347  *   None.
348  **/
349 void
350 lpfc_update_vport_wwn(struct lpfc_vport *vport)
351 {
352         struct lpfc_hba *phba = vport->phba;
353
354         /*
355          * If the name is empty, copy the service params name;
356          * otherwise propagate the fc name into the service params
357          */
358         if (vport->fc_nodename.u.wwn[0] == 0)
359                 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
360                         sizeof(struct lpfc_name));
361         else
362                 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
363                         sizeof(struct lpfc_name));
364
365         /*
366          * If the port name has changed, then set the Param changes flag
367          * to unreg the login
368          */
369         if (vport->fc_portname.u.wwn[0] != 0 &&
370                 memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
371                        sizeof(struct lpfc_name))) {
372                 vport->vport_flag |= FAWWPN_PARAM_CHG;
373
374                 if (phba->sli_rev == LPFC_SLI_REV4 &&
375                     vport->port_type == LPFC_PHYSICAL_PORT &&
376                     phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
377                         if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
378                                 phba->sli4_hba.fawwpn_flag &=
379                                                 ~LPFC_FAWWPN_FABRIC;
380                         lpfc_printf_log(phba, KERN_INFO,
381                                         LOG_SLI | LOG_DISCOVERY | LOG_ELS,
382                                         "2701 FA-PWWN change WWPN from %llx to "
383                                         "%llx: vflag x%x fawwpn_flag x%x\n",
384                                         wwn_to_u64(vport->fc_portname.u.wwn),
385                                         wwn_to_u64
386                                            (vport->fc_sparam.portName.u.wwn),
387                                         vport->vport_flag,
388                                         phba->sli4_hba.fawwpn_flag);
389                         memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
390                                sizeof(struct lpfc_name));
391                 }
392         }
393
394         if (vport->fc_portname.u.wwn[0] == 0)
395                 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
396                        sizeof(struct lpfc_name));
397         else
398                 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
399                        sizeof(struct lpfc_name));
400 }
401
402 /**
403  * lpfc_config_port_post - Perform lpfc initialization after config port
404  * @phba: pointer to lpfc hba data structure.
405  *
406  * This routine will do LPFC initialization after the CONFIG_PORT mailbox
407  * command call. It performs all internal resource and state setups on the
408  * port: post IOCB buffers, enable appropriate host interrupt attentions,
409  * ELS ring timers, etc.
410  *
411  * Return codes
412  *   0 - success.
413  *   Any other value - error.
414  **/
415 int
416 lpfc_config_port_post(struct lpfc_hba *phba)
417 {
418         struct lpfc_vport *vport = phba->pport;
419         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
420         LPFC_MBOXQ_t *pmb;
421         MAILBOX_t *mb;
422         struct lpfc_dmabuf *mp;
423         struct lpfc_sli *psli = &phba->sli;
424         uint32_t status, timeout;
425         int i, j;
426         int rc;
427
428         spin_lock_irq(&phba->hbalock);
429         /*
430          * If CONFIG_PORT completed correctly, the HBA is no
431          * longer overheated.
432          */
433         if (phba->over_temp_state == HBA_OVER_TEMP)
434                 phba->over_temp_state = HBA_NORMAL_TEMP;
435         spin_unlock_irq(&phba->hbalock);
436
437         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
438         if (!pmb) {
439                 phba->link_state = LPFC_HBA_ERROR;
440                 return -ENOMEM;
441         }
442         mb = &pmb->u.mb;
443
444         /* Get login parameters for NID.  */
445         rc = lpfc_read_sparam(phba, pmb, 0);
446         if (rc) {
447                 mempool_free(pmb, phba->mbox_mem_pool);
448                 return -ENOMEM;
449         }
450
451         pmb->vport = vport;
452         if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
453                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
454                                 "0448 Adapter failed init, mbxCmd x%x "
455                                 "READ_SPARM mbxStatus x%x\n",
456                                 mb->mbxCommand, mb->mbxStatus);
457                 phba->link_state = LPFC_HBA_ERROR;
458                 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
459                 return -EIO;
460         }
461
462         mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
463
464         /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
465          * longer needed.  Prevent unintended ctx_buf access as the mbox is
466          * reused.
467          */
468         memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
469         lpfc_mbuf_free(phba, mp->virt, mp->phys);
470         kfree(mp);
471         pmb->ctx_buf = NULL;
472         lpfc_update_vport_wwn(vport);
473
474         /* Update the fc_host data structures with new wwn. */
475         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
476         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
477         fc_host_max_npiv_vports(shost) = phba->max_vpi;
478
479         /* If no serial number in VPD data, use low 6 bytes of WWNN */
480         /* This should be consolidated into parse_vpd ? - mr */
481         if (phba->SerialNumber[0] == 0) {
482                 uint8_t *outptr;
483
484                 outptr = &vport->fc_nodename.u.s.IEEE[0];
485                 for (i = 0; i < 12; i++) {
486                         status = *outptr++;
487                         j = ((status & 0xf0) >> 4);
488                         if (j <= 9)
489                                 phba->SerialNumber[i] =
490                                     (char)((uint8_t) 0x30 + (uint8_t) j);
491                         else
492                                 phba->SerialNumber[i] =
493                                     (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
494                         i++;
495                         j = (status & 0xf);
496                         if (j <= 9)
497                                 phba->SerialNumber[i] =
498                                     (char)((uint8_t) 0x30 + (uint8_t) j);
499                         else
500                                 phba->SerialNumber[i] =
501                                     (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
502                 }
503         }
504
505         lpfc_read_config(phba, pmb);
506         pmb->vport = vport;
507         if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
508                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
509                                 "0453 Adapter failed to init, mbxCmd x%x "
510                                 "READ_CONFIG, mbxStatus x%x\n",
511                                 mb->mbxCommand, mb->mbxStatus);
512                 phba->link_state = LPFC_HBA_ERROR;
513                 mempool_free(pmb, phba->mbox_mem_pool);
514                 return -EIO;
515         }
516
517         /* Check if the port is disabled */
518         lpfc_sli_read_link_ste(phba);
519
520         /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
521         if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
522                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
523                                 "3359 HBA queue depth changed from %d to %d\n",
524                                 phba->cfg_hba_queue_depth,
525                                 mb->un.varRdConfig.max_xri);
526                 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
527         }
528
529         phba->lmt = mb->un.varRdConfig.lmt;
530
531         /* Get the default values for Model Name and Description */
532         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
533
534         phba->link_state = LPFC_LINK_DOWN;
535
536         /* Only process IOCBs on ELS ring till hba_state is READY */
537         if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
538                 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
539         if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
540                 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
541
542         /* Post receive buffers for desired rings */
543         if (phba->sli_rev != 3)
544                 lpfc_post_rcv_buf(phba);
545
546         /*
547          * Configure HBA MSI-X attention conditions to messages if in MSI-X mode
548          */
549         if (phba->intr_type == MSIX) {
550                 rc = lpfc_config_msi(phba, pmb);
551                 if (rc) {
552                         mempool_free(pmb, phba->mbox_mem_pool);
553                         return -EIO;
554                 }
555                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
556                 if (rc != MBX_SUCCESS) {
557                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
558                                         "0352 Config MSI mailbox command "
559                                         "failed, mbxCmd x%x, mbxStatus x%x\n",
560                                         pmb->u.mb.mbxCommand,
561                                         pmb->u.mb.mbxStatus);
562                         mempool_free(pmb, phba->mbox_mem_pool);
563                         return -EIO;
564                 }
565         }
566
567         spin_lock_irq(&phba->hbalock);
568         /* Initialize ERATT handling flag */
569         phba->hba_flag &= ~HBA_ERATT_HANDLED;
570
571         /* Enable appropriate host interrupts */
572         if (lpfc_readl(phba->HCregaddr, &status)) {
573                 spin_unlock_irq(&phba->hbalock);
574                 return -EIO;
575         }
576         status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
577         if (psli->num_rings > 0)
578                 status |= HC_R0INT_ENA;
579         if (psli->num_rings > 1)
580                 status |= HC_R1INT_ENA;
581         if (psli->num_rings > 2)
582                 status |= HC_R2INT_ENA;
583         if (psli->num_rings > 3)
584                 status |= HC_R3INT_ENA;
585
586         if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
587             (phba->cfg_poll & DISABLE_FCP_RING_INT))
588                 status &= ~(HC_R0INT_ENA);
589
590         writel(status, phba->HCregaddr);
591         readl(phba->HCregaddr); /* flush */
592         spin_unlock_irq(&phba->hbalock);
593
594         /* Set up ring-0 (ELS) timer */
595         timeout = phba->fc_ratov * 2;
596         mod_timer(&vport->els_tmofunc,
597                   jiffies + msecs_to_jiffies(1000 * timeout));
598         /* Set up heart beat (HB) timer */
599         mod_timer(&phba->hb_tmofunc,
600                   jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
601         phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
602         phba->last_completion_time = jiffies;
603         /* Set up error attention (ERATT) polling timer */
604         mod_timer(&phba->eratt_poll,
605                   jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
606
607         if (phba->hba_flag & LINK_DISABLED) {
608                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
609                                 "2598 Adapter Link is disabled.\n");
610                 lpfc_down_link(phba, pmb);
611                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
612                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
613                 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
614                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
615                                         "2599 Adapter failed to issue DOWN_LINK"
616                                         " mbox command rc 0x%x\n", rc);
617
618                         mempool_free(pmb, phba->mbox_mem_pool);
619                         return -EIO;
620                 }
621         } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
622                 mempool_free(pmb, phba->mbox_mem_pool);
623                 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
624                 if (rc)
625                         return rc;
626         }
627         /* MBOX buffer will be freed in mbox compl */
628         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
629         if (!pmb) {
630                 phba->link_state = LPFC_HBA_ERROR;
631                 return -ENOMEM;
632         }
633
634         lpfc_config_async(phba, pmb, LPFC_ELS_RING);
635         pmb->mbox_cmpl = lpfc_config_async_cmpl;
636         pmb->vport = phba->pport;
637         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
638
639         if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
640                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
641                                 "0456 Adapter failed to issue "
642                                 "ASYNCEVT_ENABLE mbox status x%x\n",
643                                 rc);
644                 mempool_free(pmb, phba->mbox_mem_pool);
645         }
646
647         /* Get Option rom version */
648         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
649         if (!pmb) {
650                 phba->link_state = LPFC_HBA_ERROR;
651                 return -ENOMEM;
652         }
653
654         lpfc_dump_wakeup_param(phba, pmb);
655         pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
656         pmb->vport = phba->pport;
657         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
658
659         if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
660                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
661                                 "0435 Adapter failed "
662                                 "to get Option ROM version status x%x\n", rc);
663                 mempool_free(pmb, phba->mbox_mem_pool);
664         }
665
666         return 0;
667 }
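
/*
 * Example (compiled out): the serial-number fallback above maps each
 * 4-bit nibble of the WWNN to '0'-'9' (0x30 + j) or 'a'-'f'
 * (0x61 + j - 10).  An equivalent lookup-table formulation;
 * example_nibble_to_char() is a hypothetical helper.
 */
#if 0
static char example_nibble_to_char(uint8_t j)
{
	static const char hex[] = "0123456789abcdef";

	return hex[j & 0xf];	/* j <= 9 -> digit, else lowercase letter */
}
#endif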
668
669 /**
670  * lpfc_sli4_refresh_params - update driver copy of params.
671  * @phba: Pointer to HBA context object.
672  *
673  * This is called to refresh driver copy of dynamic fields from the
674  * common_get_sli4_parameters descriptor.
675  **/
676 int
677 lpfc_sli4_refresh_params(struct lpfc_hba *phba)
678 {
679         LPFC_MBOXQ_t *mboxq;
680         struct lpfc_mqe *mqe;
681         struct lpfc_sli4_parameters *mbx_sli4_parameters;
682         int length, rc;
683
684         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
685         if (!mboxq)
686                 return -ENOMEM;
687
688         mqe = &mboxq->u.mqe;
689         /* Read the port's SLI4 Config Parameters */
690         length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
691                   sizeof(struct lpfc_sli4_cfg_mhdr));
692         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
693                          LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
694                          length, LPFC_SLI4_MBX_EMBED);
695
696         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
697         if (unlikely(rc)) {
698                 mempool_free(mboxq, phba->mbox_mem_pool);
699                 return rc;
700         }
701         mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
702         phba->sli4_hba.pc_sli4_params.mi_cap =
703                 bf_get(cfg_mi_ver, mbx_sli4_parameters);
704
705         /* Are we forcing MI off via module parameter? */
706         if (phba->cfg_enable_mi)
707                 phba->sli4_hba.pc_sli4_params.mi_ver =
708                         bf_get(cfg_mi_ver, mbx_sli4_parameters);
709         else
710                 phba->sli4_hba.pc_sli4_params.mi_ver = 0;
711
712         phba->sli4_hba.pc_sli4_params.cmf =
713                         bf_get(cfg_cmf, mbx_sli4_parameters);
714         phba->sli4_hba.pc_sli4_params.pls =
715                         bf_get(cfg_pvl, mbx_sli4_parameters);
716
717         mempool_free(mboxq, phba->mbox_mem_pool);
718         return rc;
719 }
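
/*
 * Usage sketch (compiled out): a caller reacting to a parameter change
 * event would refresh the cached copy and then consult the updated
 * pc_sli4_params fields.  example_on_parm_change() is a hypothetical
 * name, not the driver's actual event path.
 */
#if 0
static int example_on_parm_change(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_sli4_refresh_params(phba);
	if (rc)
		return rc;	/* cached copy left untouched on failure */

	/* the refreshed mi_ver/cmf/pls values are now visible */
	return phba->sli4_hba.pc_sli4_params.cmf ? 1 : 0;
}
#endif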
720
721 /**
722  * lpfc_hba_init_link - Initialize the FC link
723  * @phba: pointer to lpfc hba data structure.
724  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
725  *
726  * This routine will issue the INIT_LINK mailbox command call.
727  * It is available to other drivers through the lpfc_hba data
728  * structure for use as a delayed link up mechanism with the
729  * module parameter lpfc_suppress_link_up.
730  *
731  * Return code
732  *              0 - success
733  *              Any other value - error
734  **/
735 static int
736 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
737 {
738         return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
739 }
740
741 /**
742  * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
743  * @phba: pointer to lpfc hba data structure.
744  * @fc_topology: desired fc topology.
745  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
746  *
747  * This routine will issue the INIT_LINK mailbox command call.
748  * It is available to other drivers through the lpfc_hba data
749  * structure for use as a delayed link up mechanism with the
750  * module parameter lpfc_suppress_link_up.
751  *
752  * Return code
753  *              0 - success
754  *              Any other value - error
755  **/
756 int
757 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
758                                uint32_t flag)
759 {
760         struct lpfc_vport *vport = phba->pport;
761         LPFC_MBOXQ_t *pmb;
762         MAILBOX_t *mb;
763         int rc;
764
765         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
766         if (!pmb) {
767                 phba->link_state = LPFC_HBA_ERROR;
768                 return -ENOMEM;
769         }
770         mb = &pmb->u.mb;
771         pmb->vport = vport;
772
773         if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
774             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
775              !(phba->lmt & LMT_1Gb)) ||
776             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
777              !(phba->lmt & LMT_2Gb)) ||
778             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
779              !(phba->lmt & LMT_4Gb)) ||
780             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
781              !(phba->lmt & LMT_8Gb)) ||
782             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
783              !(phba->lmt & LMT_10Gb)) ||
784             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
785              !(phba->lmt & LMT_16Gb)) ||
786             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
787              !(phba->lmt & LMT_32Gb)) ||
788             ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
789              !(phba->lmt & LMT_64Gb))) {
790                 /* Reset link speed to auto */
791                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
792                                 "1302 Invalid speed for this board:%d "
793                                 "Reset link speed to auto.\n",
794                                 phba->cfg_link_speed);
795                 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
796         }
797         lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
798         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
799         if (phba->sli_rev < LPFC_SLI_REV4)
800                 lpfc_set_loopback_flag(phba);
801         rc = lpfc_sli_issue_mbox(phba, pmb, flag);
802         if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
803                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
804                                 "0498 Adapter failed to init, mbxCmd x%x "
805                                 "INIT_LINK, mbxStatus x%x\n",
806                                 mb->mbxCommand, mb->mbxStatus);
807                 if (phba->sli_rev <= LPFC_SLI_REV3) {
808                         /* Clear all interrupt enable conditions */
809                         writel(0, phba->HCregaddr);
810                         readl(phba->HCregaddr); /* flush */
811                         /* Clear all pending interrupts */
812                         writel(0xffffffff, phba->HAregaddr);
813                         readl(phba->HAregaddr); /* flush */
814                 }
815                 phba->link_state = LPFC_HBA_ERROR;
816                 if (rc != MBX_BUSY || flag == MBX_POLL)
817                         mempool_free(pmb, phba->mbox_mem_pool);
818                 return -EIO;
819         }
820         phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
821         if (flag == MBX_POLL)
822                 mempool_free(pmb, phba->mbox_mem_pool);
823
824         return 0;
825 }
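
/*
 * Example (compiled out): the speed validation above can be read as a
 * table pairing each LPFC_USER_LINK_SPEED_* value with the LMT bit the
 * adapter must advertise.  A sketch of that equivalent table-driven
 * form; example_speed_supported() is a hypothetical helper.
 */
#if 0
static bool example_speed_supported(struct lpfc_hba *phba)
{
	static const struct { uint32_t speed; uint32_t lmt; } map[] = {
		{ LPFC_USER_LINK_SPEED_1G,  LMT_1Gb  },
		{ LPFC_USER_LINK_SPEED_2G,  LMT_2Gb  },
		{ LPFC_USER_LINK_SPEED_4G,  LMT_4Gb  },
		{ LPFC_USER_LINK_SPEED_8G,  LMT_8Gb  },
		{ LPFC_USER_LINK_SPEED_10G, LMT_10Gb },
		{ LPFC_USER_LINK_SPEED_16G, LMT_16Gb },
		{ LPFC_USER_LINK_SPEED_32G, LMT_32Gb },
		{ LPFC_USER_LINK_SPEED_64G, LMT_64Gb },
	};
	int i;

	if (phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX)
		return false;
	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (phba->cfg_link_speed == map[i].speed)
			return !!(phba->lmt & map[i].lmt);
	return true;	/* AUTO and unmapped speeds pass the check */
}
#endif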
826
827 /**
828  * lpfc_hba_down_link - this routine downs the FC link
829  * @phba: pointer to lpfc hba data structure.
830  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
831  *
832  * This routine will issue the DOWN_LINK mailbox command call.
833  * It is available to other drivers through the lpfc_hba data
834  * structure for use to stop the link.
835  *
836  * Return code
837  *              0 - success
838  *              Any other value - error
839  **/
840 static int
841 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
842 {
843         LPFC_MBOXQ_t *pmb;
844         int rc;
845
846         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
847         if (!pmb) {
848                 phba->link_state = LPFC_HBA_ERROR;
849                 return -ENOMEM;
850         }
851
852         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
853                         "0491 Adapter Link is disabled.\n");
854         lpfc_down_link(phba, pmb);
855         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
856         rc = lpfc_sli_issue_mbox(phba, pmb, flag);
857         if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
858                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
859                                 "2522 Adapter failed to issue DOWN_LINK"
860                                 " mbox command rc 0x%x\n", rc);
861
862                 mempool_free(pmb, phba->mbox_mem_pool);
863                 return -EIO;
864         }
865         if (flag == MBX_POLL)
866                 mempool_free(pmb, phba->mbox_mem_pool);
867
868         return 0;
869 }
870
871 /**
872  * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
873  * @phba: pointer to lpfc HBA data structure.
874  *
875  * This routine will do LPFC uninitialization before the HBA is reset when
876  * bringing down the SLI Layer.
877  *
878  * Return codes
879  *   0 - success.
880  *   Any other value - error.
881  **/
882 int
883 lpfc_hba_down_prep(struct lpfc_hba *phba)
884 {
885         struct lpfc_vport **vports;
886         int i;
887
888         if (phba->sli_rev <= LPFC_SLI_REV3) {
889                 /* Disable interrupts */
890                 writel(0, phba->HCregaddr);
891                 readl(phba->HCregaddr); /* flush */
892         }
893
894         if (phba->pport->load_flag & FC_UNLOADING) {
895                 lpfc_cleanup_discovery_resources(phba->pport);
896         } else {
897                 vports = lpfc_create_vport_work_array(phba);
898                 if (vports != NULL)
899                         for (i = 0; i <= phba->max_vports &&
900                                 vports[i] != NULL; i++)
901                                 lpfc_cleanup_discovery_resources(vports[i]);
902                 lpfc_destroy_vport_work_array(phba, vports);
903         }
904         return 0;
905 }
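
/*
 * Sketch (compiled out) of the vport work-array idiom used above: take a
 * snapshot of the active vports, walk it without holding locks, then
 * release the array.  example_for_each_vport() is a hypothetical name.
 */
#if 0
static void example_for_each_vport(struct lpfc_hba *phba,
				   void (*fn)(struct lpfc_vport *))
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			fn(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
#endif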
906
907 /**
908  * lpfc_sli4_free_sp_events - Clean up sp_queue_events and free
909  * deferred rspiocbs
910  *
911  * @phba: pointer to lpfc HBA data structure.
912  *
913  * This routine will clean up completed slow path events after the HBA is reset
914  * when bringing down the SLI Layer.
915  *
916  *
917  * Return codes
918  *   void.
919  **/
920 static void
921 lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
922 {
923         struct lpfc_iocbq *rspiocbq;
924         struct hbq_dmabuf *dmabuf;
925         struct lpfc_cq_event *cq_event;
926
927         spin_lock_irq(&phba->hbalock);
928         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
929         spin_unlock_irq(&phba->hbalock);
930
931         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
932                 /* Get the response iocb from the head of work queue */
933                 spin_lock_irq(&phba->hbalock);
934                 list_remove_head(&phba->sli4_hba.sp_queue_event,
935                                  cq_event, struct lpfc_cq_event, list);
936                 spin_unlock_irq(&phba->hbalock);
937
938                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
939                 case CQE_CODE_COMPL_WQE:
940                         rspiocbq = container_of(cq_event, struct lpfc_iocbq,
941                                                  cq_event);
942                         lpfc_sli_release_iocbq(phba, rspiocbq);
943                         break;
944                 case CQE_CODE_RECEIVE:
945                 case CQE_CODE_RECEIVE_V1:
946                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
947                                               cq_event);
948                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
949                 }
950         }
951 }
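
/*
 * Note (compiled out example): the container_of() recovery above works
 * because lpfc_iocbq and hbq_dmabuf each embed a struct lpfc_cq_event
 * member named cq_event; given a pointer to that member, container_of()
 * computes the address of the enclosing object.  A generic sketch with a
 * hypothetical wrapper struct:
 */
#if 0
struct example_wrapper {
	int payload;
	struct lpfc_cq_event cq_event;	/* embedded member */
};

static struct example_wrapper *example_from_event(struct lpfc_cq_event *ev)
{
	return container_of(ev, struct example_wrapper, cq_event);
}
#endif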
952
953 /**
954  * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
955  * @phba: pointer to lpfc HBA data structure.
956  *
957  * This routine will clean up posted ELS buffers after the HBA is reset
958  * when bringing down the SLI Layer.
959  *
960  *
961  * Return codes
962  *   void.
963  **/
964 static void
965 lpfc_hba_free_post_buf(struct lpfc_hba *phba)
966 {
967         struct lpfc_sli *psli = &phba->sli;
968         struct lpfc_sli_ring *pring;
969         struct lpfc_dmabuf *mp, *next_mp;
970         LIST_HEAD(buflist);
971         int count;
972
973         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
974                 lpfc_sli_hbqbuf_free_all(phba);
975         } else {
976                 /* Cleanup preposted buffers on the ELS ring */
977                 pring = &psli->sli3_ring[LPFC_ELS_RING];
978                 spin_lock_irq(&phba->hbalock);
979                 list_splice_init(&pring->postbufq, &buflist);
980                 spin_unlock_irq(&phba->hbalock);
981
982                 count = 0;
983                 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
984                         list_del(&mp->list);
985                         count++;
986                         lpfc_mbuf_free(phba, mp->virt, mp->phys);
987                         kfree(mp);
988                 }
989
990                 spin_lock_irq(&phba->hbalock);
991                 pring->postbufq_cnt -= count;
992                 spin_unlock_irq(&phba->hbalock);
993         }
994 }
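
/*
 * Sketch (compiled out) of the idiom above: splice the shared list onto a
 * private head under the lock, drop the lock, then free the entries at
 * leisure so lpfc_mbuf_free() and kfree() stay out of the critical
 * section.  example_drain() is a hypothetical name.
 */
#if 0
static void example_drain(struct lpfc_hba *phba, struct list_head *shared)
{
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(local);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(shared, &local);	/* shared is now empty */
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(mp, next_mp, &local, list) {
		list_del(&mp->list);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
}
#endif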
995
996 /**
997  * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
998  * @phba: pointer to lpfc HBA data structure.
999  *
1000  * This routine will clean up the txcmplq after the HBA is reset when bringing
1001  * down the SLI Layer.
1002  *
1003  * Return codes
1004  *   void
1005  **/
1006 static void
1007 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
1008 {
1009         struct lpfc_sli *psli = &phba->sli;
1010         struct lpfc_queue *qp = NULL;
1011         struct lpfc_sli_ring *pring;
1012         LIST_HEAD(completions);
1013         int i;
1014         struct lpfc_iocbq *piocb, *next_iocb;
1015
1016         if (phba->sli_rev != LPFC_SLI_REV4) {
1017                 for (i = 0; i < psli->num_rings; i++) {
1018                         pring = &psli->sli3_ring[i];
1019                         spin_lock_irq(&phba->hbalock);
1020                         /* At this point in time the HBA is either reset or DOA.
1021                          * Nothing should be on txcmplq as it will
1022                          * NEVER complete.
1023                          */
1024                         list_splice_init(&pring->txcmplq, &completions);
1025                         pring->txcmplq_cnt = 0;
1026                         spin_unlock_irq(&phba->hbalock);
1027
1028                         lpfc_sli_abort_iocb_ring(phba, pring);
1029                 }
1030                 /* Cancel all the IOCBs from the completions list */
1031                 lpfc_sli_cancel_iocbs(phba, &completions,
1032                                       IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
1033                 return;
1034         }
1035         list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1036                 pring = qp->pring;
1037                 if (!pring)
1038                         continue;
1039                 spin_lock_irq(&pring->ring_lock);
1040                 list_for_each_entry_safe(piocb, next_iocb,
1041                                          &pring->txcmplq, list)
1042                         piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
1043                 list_splice_init(&pring->txcmplq, &completions);
1044                 pring->txcmplq_cnt = 0;
1045                 spin_unlock_irq(&pring->ring_lock);
1046                 lpfc_sli_abort_iocb_ring(phba, pring);
1047         }
1048         /* Cancel all the IOCBs from the completions list */
1049         lpfc_sli_cancel_iocbs(phba, &completions,
1050                               IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
1051 }
1052
1053 /**
1054  * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
1055  * @phba: pointer to lpfc HBA data structure.
1056  *
1057  * This routine will do uninitialization after the HBA is reset when bringing
1058  * down the SLI Layer.
1059  *
1060  * Return codes
1061  *   0 - success.
1062  *   Any other value - error.
1063  **/
1064 static int
1065 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
1066 {
1067         lpfc_hba_free_post_buf(phba);
1068         lpfc_hba_clean_txcmplq(phba);
1069         return 0;
1070 }
1071
1072 /**
1073  * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
1074  * @phba: pointer to lpfc HBA data structure.
1075  *
1076  * This routine will do uninitialization after the HBA is reset when bringing
1077  * down the SLI Layer.
1078  *
1079  * Return codes
1080  *   0 - success.
1081  *   Any other value - error.
1082  **/
1083 static int
1084 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1085 {
1086         struct lpfc_io_buf *psb, *psb_next;
1087         struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
1088         struct lpfc_sli4_hdw_queue *qp;
1089         LIST_HEAD(aborts);
1090         LIST_HEAD(nvme_aborts);
1091         LIST_HEAD(nvmet_aborts);
1092         struct lpfc_sglq *sglq_entry = NULL;
1093         int cnt, idx;
1094
1095
1096         lpfc_sli_hbqbuf_free_all(phba);
1097         lpfc_hba_clean_txcmplq(phba);
1098
1099         /* At this point in time the HBA is either reset or DOA. Either
1100          * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be
1101          * on the lpfc_els_sgl_list so that it can either be freed if the
1102          * driver is unloading or reposted if the driver is restarting
1103          * the port.
1104          */
1105
1106         /* sgl_list_lock required because worker thread uses this
1107          * list.
1108          */
1109         spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
1110         list_for_each_entry(sglq_entry,
1111                 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1112                 sglq_entry->state = SGL_FREED;
1113
1114         list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
1115                         &phba->sli4_hba.lpfc_els_sgl_list);
1116
1117
1118         spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
1119
1120         /* abts_xxxx_buf_list_lock required because worker thread uses this
1121          * list.
1122          */
1123         spin_lock_irq(&phba->hbalock);
1124         cnt = 0;
1125         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
1126                 qp = &phba->sli4_hba.hdwq[idx];
1127
1128                 spin_lock(&qp->abts_io_buf_list_lock);
1129                 list_splice_init(&qp->lpfc_abts_io_buf_list,
1130                                  &aborts);
1131
1132                 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
1133                         psb->pCmd = NULL;
1134                         psb->status = IOSTAT_SUCCESS;
1135                         cnt++;
1136                 }
1137                 spin_lock(&qp->io_buf_list_put_lock);
1138                 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
1139                 qp->put_io_bufs += qp->abts_scsi_io_bufs;
1140                 qp->put_io_bufs += qp->abts_nvme_io_bufs;
1141                 qp->abts_scsi_io_bufs = 0;
1142                 qp->abts_nvme_io_bufs = 0;
1143                 spin_unlock(&qp->io_buf_list_put_lock);
1144                 spin_unlock(&qp->abts_io_buf_list_lock);
1145         }
1146         spin_unlock_irq(&phba->hbalock);
1147
1148         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1149                 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1150                 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1151                                  &nvmet_aborts);
1152                 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1153                 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
1154                         ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
1155                         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1156                 }
1157         }
1158
1159         lpfc_sli4_free_sp_events(phba);
1160         return cnt;
1161 }
1162
1163 /**
1164  * lpfc_hba_down_post - Wrapper func for hba down post routine
1165  * @phba: pointer to lpfc HBA data structure.
1166  *
1167  * This routine wraps the actual SLI3 or SLI4 routine for performing
1168  * uninitialization after the HBA is reset when bringing down the SLI Layer.
1169  *
1170  * Return codes
1171  *   0 - success.
1172  *   Any other value - error.
1173  **/
1174 int
1175 lpfc_hba_down_post(struct lpfc_hba *phba)
1176 {
1177         return (*phba->lpfc_hba_down_post)(phba);
1178 }
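
/*
 * Sketch (compiled out): the indirect call above is resolved when the
 * driver wires its per-API function tables, installing the SLI3 or SLI4
 * variant by device group.  example_wire_down_post() is a hypothetical
 * condensation of that setup.
 */
#if 0
static void example_wire_down_post(struct lpfc_hba *phba, bool sli4)
{
	phba->lpfc_hba_down_post = sli4 ? lpfc_hba_down_post_s4
					: lpfc_hba_down_post_s3;
}
#endif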
1179
1180 /**
1181  * lpfc_hb_timeout - The HBA-timer timeout handler
1182  * @t: timer context used to obtain the pointer to lpfc hba data structure.
1183  *
1184  * This is the HBA-timer timeout handler registered to the lpfc driver. When
1185  * this timer fires, a HBA timeout event shall be posted to the lpfc driver
1186  * work-port-events bitmap and the worker thread is notified. This timeout
1187  * event will be used by the worker thread to invoke the actual timeout
1188  * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
1189  * be performed in the timeout handler and the HBA timeout event bit shall
1190  * be cleared by the worker thread after it has taken the event bitmap out.
1191  **/
1192 static void
1193 lpfc_hb_timeout(struct timer_list *t)
1194 {
1195         struct lpfc_hba *phba;
1196         uint32_t tmo_posted;
1197         unsigned long iflag;
1198
1199         phba = from_timer(phba, t, hb_tmofunc);
1200
1201         /* Check for heart beat timeout conditions */
1202         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1203         tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1204         if (!tmo_posted)
1205                 phba->pport->work_port_events |= WORKER_HB_TMO;
1206         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1207
1208         /* Tell the worker thread there is work to do */
1209         if (!tmo_posted)
1210                 lpfc_worker_wake_up(phba);
1211         return;
1212 }
1213
1214 /**
1215  * lpfc_rrq_timeout - The RRQ-timer timeout handler
1216  * @t: timer context used to obtain the pointer to lpfc hba data structure.
1217  *
1218  * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1219  * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
1220  * work-port-events bitmap and the worker thread is notified. This timeout
1221  * event will be used by the worker thread to invoke the actual timeout
1222  * handler routine, lpfc_rrq_handler. Any periodic operations will
1223  * be performed in the timeout handler and the RRQ timeout event bit shall
1224  * be cleared by the worker thread after it has taken the event bitmap out.
1225  **/
1226 static void
1227 lpfc_rrq_timeout(struct timer_list *t)
1228 {
1229         struct lpfc_hba *phba;
1230         unsigned long iflag;
1231
1232         phba = from_timer(phba, t, rrq_tmr);
1233         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1234         if (!(phba->pport->load_flag & FC_UNLOADING))
1235                 phba->hba_flag |= HBA_RRQ_ACTIVE;
1236         else
1237                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1238         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1239
1240         if (!(phba->pport->load_flag & FC_UNLOADING))
1241                 lpfc_worker_wake_up(phba);
1242 }
1243
1244 /**
1245  * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
1246  * @phba: pointer to lpfc hba data structure.
1247  * @pmboxq: pointer to the driver internal queue element for mailbox command.
1248  *
1249  * This is the callback function to the lpfc heart-beat mailbox command.
1250  * If configured, the lpfc driver issues the heart-beat mailbox command to
1251  * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
1252  * heart-beat mailbox command is issued, the driver shall set up heart-beat
1253  * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
1254  * heart-beat outstanding state. Once the mailbox command comes back and
1255  * no error conditions detected, the heart-beat mailbox command timer is
1256  * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1257  * state is cleared for the next heart-beat. If the timer expired with the
1258  * heart-beat outstanding state set, the driver will put the HBA offline.
1259  **/
1260 static void
1261 lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1262 {
1263         unsigned long drvr_flag;
1264
1265         spin_lock_irqsave(&phba->hbalock, drvr_flag);
1266         phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
1267         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1268
1269         /* Check and reset heart-beat timer if necessary */
1270         mempool_free(pmboxq, phba->mbox_mem_pool);
1271         if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1272                 !(phba->link_state == LPFC_HBA_ERROR) &&
1273                 !(phba->pport->load_flag & FC_UNLOADING))
1274                 mod_timer(&phba->hb_tmofunc,
1275                           jiffies +
1276                           msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1277         return;
1278 }
1279
1280 /**
1281  * lpfc_idle_stat_delay_work - idle_stat tracking
1282  * @work: pointer to the delayed work structure.
1283  * This routine tracks per-eq idle_stat and determines polling decisions.
1284  *
1285  * Return codes:
1286  *   None
1287  **/
1288 static void
1289 lpfc_idle_stat_delay_work(struct work_struct *work)
1290 {
1291         struct lpfc_hba *phba = container_of(to_delayed_work(work),
1292                                              struct lpfc_hba,
1293                                              idle_stat_delay_work);
1294         struct lpfc_queue *eq;
1295         struct lpfc_sli4_hdw_queue *hdwq;
1296         struct lpfc_idle_stat *idle_stat;
1297         u32 i, idle_percent;
1298         u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
1299
1300         if (phba->pport->load_flag & FC_UNLOADING)
1301                 return;
1302
1303         if (phba->link_state == LPFC_HBA_ERROR ||
1304             phba->pport->fc_flag & FC_OFFLINE_MODE ||
1305             phba->cmf_active_mode != LPFC_CFG_OFF)
1306                 goto requeue;
1307
1308         for_each_present_cpu(i) {
1309                 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
1310                 eq = hdwq->hba_eq;
1311
1312                 /* Skip if we've already handled this eq's primary CPU */
1313                 if (eq->chann != i)
1314                         continue;
1315
1316                 idle_stat = &phba->sli4_hba.idle_stat[i];
1317
1318                 /* get_cpu_idle_time returns running counters, so the
1319                  * prior values must be subtracted from the current ones
1320                  * to get this period's amounts. The busy time is the
1321                  * wall-clock delta minus the idle delta, and the idle
1322                  * percentage is 100 minus the busy percentage.
1323                  */
1324                 wall_idle = get_cpu_idle_time(i, &wall, 1);
1325                 diff_idle = wall_idle - idle_stat->prev_idle;
1326                 diff_wall = wall - idle_stat->prev_wall;
1327
1328                 if (diff_wall <= diff_idle)
1329                         busy_time = 0;
1330                 else
1331                         busy_time = diff_wall - diff_idle;
1332
1333                 idle_percent = div64_u64(100 * busy_time, diff_wall);
1334                 idle_percent = 100 - idle_percent;
1335
1336                 if (idle_percent < 15)
1337                         eq->poll_mode = LPFC_QUEUE_WORK;
1338                 else
1339                         eq->poll_mode = LPFC_THREADED_IRQ;
1340
1341                 idle_stat->prev_idle = wall_idle;
1342                 idle_stat->prev_wall = wall;
1343         }
1344
1345 requeue:
1346         schedule_delayed_work(&phba->idle_stat_delay_work,
1347                               msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
1348 }
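
/*
 * Editor's sketch: a worked example of the idle calculation above, with
 * hypothetical counter deltas. Suppose that since the last pass the
 * wall-clock counter advanced by 1000 units and the idle counter by 870:
 *
 *         diff_wall = 1000, diff_idle = 870
 *         busy_time = 1000 - 870 = 130
 *         idle_percent = 100 - (100 * 130) / 1000 = 87
 *
 * 87 is not below the 15% threshold, so the EQ stays in
 * LPFC_THREADED_IRQ mode; only when the CPU is more than ~85% busy
 * (idle_percent < 15) does the EQ fall back to LPFC_QUEUE_WORK.
 */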
1349
1350 static void
1351 lpfc_hb_eq_delay_work(struct work_struct *work)
1352 {
1353         struct lpfc_hba *phba = container_of(to_delayed_work(work),
1354                                              struct lpfc_hba, eq_delay_work);
1355         struct lpfc_eq_intr_info *eqi, *eqi_new;
1356         struct lpfc_queue *eq, *eq_next;
1357         unsigned char *ena_delay = NULL;
1358         uint32_t usdelay;
1359         int i;
1360
1361         if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1362                 return;
1363
1364         if (phba->link_state == LPFC_HBA_ERROR ||
1365             phba->pport->fc_flag & FC_OFFLINE_MODE)
1366                 goto requeue;
1367
1368         ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1369                             GFP_KERNEL);
1370         if (!ena_delay)
1371                 goto requeue;
1372
1373         for (i = 0; i < phba->cfg_irq_chann; i++) {
1374                 /* Get the EQ corresponding to the IRQ vector */
1375                 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1376                 if (!eq)
1377                         continue;
1378                 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
1379                         eq->q_flag &= ~HBA_EQ_DELAY_CHK;
1380                         ena_delay[eq->last_cpu] = 1;
1381                 }
1382         }
1383
1384         for_each_present_cpu(i) {
1385                 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1386                 if (ena_delay[i]) {
1387                         usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
1388                         if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1389                                 usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1390                 } else {
1391                         usdelay = 0;
1392                 }
1393
1394                 eqi->icnt = 0;
1395
1396                 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1397                         if (unlikely(eq->last_cpu != i)) {
1398                                 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1399                                                       eq->last_cpu);
1400                                 list_move_tail(&eq->cpu_list, &eqi_new->list);
1401                                 continue;
1402                         }
1403                         if (usdelay != eq->q_mode)
1404                                 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1405                                                          usdelay);
1406                 }
1407         }
1408
1409         kfree(ena_delay);
1410
1411 requeue:
1412         queue_delayed_work(phba->wq, &phba->eq_delay_work,
1413                            msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1414 }
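
/*
 * Editor's sketch: a worked example of the automatic EQ delay above,
 * with a hypothetical interrupt count. eqi->icnt counts EQ interrupts
 * seen since the last pass; the shift by 10 scales it down by 1024:
 *
 *         icnt = 150000
 *         usdelay = (150000 >> 10) * LPFC_EQ_DELAY_STEP
 *                 = 146 * LPFC_EQ_DELAY_STEP
 *
 * clamped to LPFC_MAX_AUTO_EQ_DELAY. A busier vector thus gets a longer
 * coalescing delay, trading interrupt rate for completion latency.
 */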
1415
1416 /**
1417  * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1418  * @phba: pointer to lpfc hba data structure.
1419  *
1420  * For each heartbeat, this routine applies heuristics to adjust the
1421  * XRI distribution. The goal is to fully utilize free XRIs.
1422  **/
1423 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1424 {
1425         u32 i;
1426         u32 hwq_count;
1427
1428         hwq_count = phba->cfg_hdw_queue;
1429         for (i = 0; i < hwq_count; i++) {
1430                 /* Adjust XRIs in private pool */
1431                 lpfc_adjust_pvt_pool_count(phba, i);
1432
1433                 /* Adjust high watermark */
1434                 lpfc_adjust_high_watermark(phba, i);
1435
1436 #ifdef LPFC_MXP_STAT
1437                 /* Snapshot pbl, pvt and busy count */
1438                 lpfc_snapshot_mxp(phba, i);
1439 #endif
1440         }
1441 }
1442
1443 /**
1444  * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
1445  * @phba: pointer to lpfc hba data structure.
1446  *
1447  * If a HB mbox is not already in progress, this routine will allocate
1448  * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
1449  * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
1450  **/
1451 int
1452 lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1453 {
1454         LPFC_MBOXQ_t *pmboxq;
1455         int retval;
1456
1457         /* Is a Heartbeat mbox already in progress */
1458         if (phba->hba_flag & HBA_HBEAT_INP)
1459                 return 0;
1460
1461         pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1462         if (!pmboxq)
1463                 return -ENOMEM;
1464
1465         lpfc_heart_beat(phba, pmboxq);
1466         pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1467         pmboxq->vport = phba->pport;
1468         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1469
1470         if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1471                 mempool_free(pmboxq, phba->mbox_mem_pool);
1472                 return -ENXIO;
1473         }
1474         phba->hba_flag |= HBA_HBEAT_INP;
1475
1476         return 0;
1477 }
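
/*
 * Editor's note on the ownership pattern above: on MBX_BUSY or
 * MBX_SUCCESS the mailbox layer owns pmboxq and the completion handler
 * (lpfc_hb_mbox_cmpl) returns it to the pool; on any other return code
 * this routine still owns it and must free it itself:
 *
 *         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *         if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
 *                 mempool_free(pmboxq, phba->mbox_mem_pool); // still ours
 *                 return -ENXIO;
 *         }
 *         // from here pmboxq is freed in lpfc_hb_mbox_cmpl()
 */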
1478
1479 /**
1480  * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
1481  * @phba: pointer to lpfc hba data structure.
1482  *
1483  * The heartbeat timer fires every 5 seconds. If the HBA_HBEAT_TMO flag
1484  * is set, the timeout routine forces a MBX_HEARTBEAT mbox command
1485  * regardless of the value of lpfc_enable_hba_heartbeat. If
1486  * lpfc_enable_hba_heartbeat is set, the timeout routine always tries to
1487  * issue a MBX_HEARTBEAT mbox command, so this routine returns early.
1488  **/
1489 void
1490 lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1491 {
1492         if (phba->cfg_enable_hba_heartbeat)
1493                 return;
1494         phba->hba_flag |= HBA_HBEAT_TMO;
1495 }
1496
1497 /**
1498  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1499  * @phba: pointer to lpfc hba data structure.
1500  *
1501  * This is the actual HBA-timer timeout handler, invoked by the worker
1502  * thread whenever the HBA timer has fired and an HBA-timeout event has
1503  * been posted. This handler performs any periodic operations needed for
1504  * the device. If such a periodic event has already been attended to,
1505  * either in the interrupt handler or by processing slow-ring or fast-ring
1506  * events within the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL),
1507  * this handler simply resets the timer for the next timeout period. If
1508  * the lpfc heart-beat mailbox command is configured and no heart-beat
1509  * mailbox command is outstanding, a heart-beat mailbox is issued and the
1510  * timer set accordingly. Otherwise, if a heart-beat mailbox command has
1511  * remained outstanding, the HBA is taken offline.
1512  **/
1513 void
1514 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1515 {
1516         struct lpfc_vport **vports;
1517         struct lpfc_dmabuf *buf_ptr;
1518         int retval = 0;
1519         int i, tmo;
1520         struct lpfc_sli *psli = &phba->sli;
1521         LIST_HEAD(completions);
1522
1523         if (phba->cfg_xri_rebalancing) {
1524                 /* Multi-XRI pools handler */
1525                 lpfc_hb_mxp_handler(phba);
1526         }
1527
1528         vports = lpfc_create_vport_work_array(phba);
1529         if (vports != NULL)
1530                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1531                         lpfc_rcv_seq_check_edtov(vports[i]);
1532                         lpfc_fdmi_change_check(vports[i]);
1533                 }
1534         lpfc_destroy_vport_work_array(phba, vports);
1535
1536         if ((phba->link_state == LPFC_HBA_ERROR) ||
1537                 (phba->pport->load_flag & FC_UNLOADING) ||
1538                 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1539                 return;
1540
1541         if (phba->elsbuf_cnt &&
1542                 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1543                 spin_lock_irq(&phba->hbalock);
1544                 list_splice_init(&phba->elsbuf, &completions);
1545                 phba->elsbuf_cnt = 0;
1546                 phba->elsbuf_prev_cnt = 0;
1547                 spin_unlock_irq(&phba->hbalock);
1548
1549                 while (!list_empty(&completions)) {
1550                         list_remove_head(&completions, buf_ptr,
1551                                 struct lpfc_dmabuf, list);
1552                         lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1553                         kfree(buf_ptr);
1554                 }
1555         }
1556         phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1557
1558         /* If there is no heart beat outstanding, issue a heartbeat command */
1559         if (phba->cfg_enable_hba_heartbeat) {
1560                 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */
1561                 spin_lock_irq(&phba->pport->work_port_lock);
1562                 if (time_after(phba->last_completion_time +
1563                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1564                                 jiffies)) {
1565                         spin_unlock_irq(&phba->pport->work_port_lock);
1566                         if (phba->hba_flag & HBA_HBEAT_INP)
1567                                 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1568                         else
1569                                 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1570                         goto out;
1571                 }
1572                 spin_unlock_irq(&phba->pport->work_port_lock);
1573
1574                 /* Check if a MBX_HEARTBEAT is already in progress */
1575                 if (phba->hba_flag & HBA_HBEAT_INP) {
1576                         /*
1577                          * If the heart-beat timeout fired with
1578                          * HBA_HBEAT_INP set, we need to give the hb
1579                          * mailbox cmd a chance to complete or time out.
1580                          */
1581                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1582                                 "0459 Adapter heartbeat still outstanding: "
1583                                 "last compl time was %d ms.\n",
1584                                 jiffies_to_msecs(jiffies
1585                                          - phba->last_completion_time));
1586                         tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1587                 } else {
1588                         if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1589                                 (list_empty(&psli->mboxq))) {
1590
1591                                 retval = lpfc_issue_hb_mbox(phba);
1592                                 if (retval) {
1593                                         tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1594                                         goto out;
1595                                 }
1596                                 phba->skipped_hb = 0;
1597                         } else if (time_before_eq(phba->last_completion_time,
1598                                         phba->skipped_hb)) {
1599                                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1600                                         "2857 Last completion time not "
1601                                         "updated in %d ms\n",
1602                                         jiffies_to_msecs(jiffies
1603                                                  - phba->last_completion_time));
1604                         } else
1605                                 phba->skipped_hb = jiffies;
1606
1607                         tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1608                         goto out;
1609                 }
1610         } else {
1611                 /* Check to see if we want to force a MBX_HEARTBEAT */
1612                 if (phba->hba_flag & HBA_HBEAT_TMO) {
1613                         retval = lpfc_issue_hb_mbox(phba);
1614                         if (retval)
1615                                 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1616                         else
1617                                 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1618                         goto out;
1619                 }
1620                 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1621         }
1622 out:
1623         mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
1624 }
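
/*
 * Editor's summary of how the handler above picks the next timeout,
 * derived from the code paths:
 *
 *         I/Os completing recently, HB in flight    -> LPFC_HB_MBOX_TIMEOUT
 *         I/Os completing recently, no HB in flight -> LPFC_HB_MBOX_INTERVAL
 *         HB still outstanding from a prior pass    -> LPFC_HB_MBOX_TIMEOUT
 *         HB issued (or skipped) on this pass       -> LPFC_HB_MBOX_TIMEOUT
 *         HB issue attempt failed                   -> LPFC_HB_MBOX_INTERVAL
 *         heartbeat disabled, no forced HB          -> LPFC_HB_MBOX_INTERVAL
 *
 * In every case the timer is re-armed via mod_timer() at the out: label,
 * so the handler keeps rescheduling itself while the port is online.
 */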
1625
1626 /**
1627  * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1628  * @phba: pointer to lpfc hba data structure.
1629  *
1630  * This routine is called to bring the HBA offline when HBA hardware error
1631  * other than Port Error 6 has been detected.
1632  **/
1633 static void
1634 lpfc_offline_eratt(struct lpfc_hba *phba)
1635 {
1636         struct lpfc_sli   *psli = &phba->sli;
1637
1638         spin_lock_irq(&phba->hbalock);
1639         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1640         spin_unlock_irq(&phba->hbalock);
1641         lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1642
1643         lpfc_offline(phba);
1644         lpfc_reset_barrier(phba);
1645         spin_lock_irq(&phba->hbalock);
1646         lpfc_sli_brdreset(phba);
1647         spin_unlock_irq(&phba->hbalock);
1648         lpfc_hba_down_post(phba);
1649         lpfc_sli_brdready(phba, HS_MBRDY);
1650         lpfc_unblock_mgmt_io(phba);
1651         phba->link_state = LPFC_HBA_ERROR;
1652         return;
1653 }
1654
1655 /**
1656  * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1657  * @phba: pointer to lpfc hba data structure.
1658  *
1659  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1660  * other than Port Error 6 has been detected.
1661  **/
1662 void
1663 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1664 {
1665         spin_lock_irq(&phba->hbalock);
1666         if (phba->link_state == LPFC_HBA_ERROR &&
1667                 test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
1668                 spin_unlock_irq(&phba->hbalock);
1669                 return;
1670         }
1671         phba->link_state = LPFC_HBA_ERROR;
1672         spin_unlock_irq(&phba->hbalock);
1673
1674         lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1675         lpfc_sli_flush_io_rings(phba);
1676         lpfc_offline(phba);
1677         lpfc_hba_down_post(phba);
1678         lpfc_unblock_mgmt_io(phba);
1679 }
1680
1681 /**
1682  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1683  * @phba: pointer to lpfc hba data structure.
1684  *
1685  * This routine is invoked to handle the deferred HBA hardware error
1686  * conditions. This type of error is indicated by HBA by setting ER1
1687  * and another ER bit in the host status register. The driver will
1688  * wait until the ER1 bit clears before handling the error condition.
1689  **/
1690 static void
1691 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1692 {
1693         uint32_t old_host_status = phba->work_hs;
1694         struct lpfc_sli *psli = &phba->sli;
1695
1696         /* If the pci channel is offline, ignore possible errors,
1697          * since we cannot communicate with the pci card anyway.
1698          */
1699         if (pci_channel_offline(phba->pcidev)) {
1700                 spin_lock_irq(&phba->hbalock);
1701                 phba->hba_flag &= ~DEFER_ERATT;
1702                 spin_unlock_irq(&phba->hbalock);
1703                 return;
1704         }
1705
1706         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1707                         "0479 Deferred Adapter Hardware Error "
1708                         "Data: x%x x%x x%x\n",
1709                         phba->work_hs, phba->work_status[0],
1710                         phba->work_status[1]);
1711
1712         spin_lock_irq(&phba->hbalock);
1713         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1714         spin_unlock_irq(&phba->hbalock);
1715
1717         /*
1718          * The firmware stops when it triggers an erratt, which can cause
1719          * I/Os to be dropped. Error out the iocbs (I/Os) on the txcmplq
1720          * and let the SCSI layer retry them once the link is re-established.
1721          */
1722         lpfc_sli_abort_fcp_rings(phba);
1723
1724         /*
1725          * There was a firmware error. Take the hba offline and then
1726          * attempt to restart it.
1727          */
1728         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1729         lpfc_offline(phba);
1730
1731         /* Wait for the ER1 bit to clear. */
1732         while (phba->work_hs & HS_FFER1) {
1733                 msleep(100);
1734                 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1735                         phba->work_hs = UNPLUG_ERR;
1736                         break;
1737                 }
1738                 /* If driver is unloading let the worker thread continue */
1739                 if (phba->pport->load_flag & FC_UNLOADING) {
1740                         phba->work_hs = 0;
1741                         break;
1742                 }
1743         }
1744
1745         /*
1746          * This is to protect against a race condition in which the
1747          * first write to the host attention register clears the
1748          * host status register.
1749          */
1750         if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1751                 phba->work_hs = old_host_status & ~HS_FFER1;
1752
1753         spin_lock_irq(&phba->hbalock);
1754         phba->hba_flag &= ~DEFER_ERATT;
1755         spin_unlock_irq(&phba->hbalock);
1756         phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1757         phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1758 }
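
/*
 * Editor's note: the ER1 wait above is bounded by events rather than by
 * time. It loops in 100 ms steps until ER1 clears, the register read
 * fails (recorded as UNPLUG_ERR), or the driver starts unloading; there
 * is no iteration cap, matching the header comment's "wait until the
 * ER1 bit clears". A reduced sketch of the pattern:
 *
 *         while (status & HS_FFER1) {
 *                 msleep(100);
 *                 if (lpfc_readl(phba->HSregaddr, &status))
 *                         break;  // device unreachable
 *         }
 */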
1759
1760 static void
1761 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1762 {
1763         struct lpfc_board_event_header board_event;
1764         struct Scsi_Host *shost;
1765
1766         board_event.event_type = FC_REG_BOARD_EVENT;
1767         board_event.subcategory = LPFC_EVENT_PORTINTERR;
1768         shost = lpfc_shost_from_vport(phba->pport);
1769         fc_host_post_vendor_event(shost, fc_get_event_number(),
1770                                   sizeof(board_event),
1771                                   (char *) &board_event,
1772                                   LPFC_NL_VENDOR_ID);
1773 }
1774
1775 /**
1776  * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1777  * @phba: pointer to lpfc hba data structure.
1778  *
1779  * This routine is invoked to handle the following HBA hardware error
1780  * conditions:
1781  * 1 - HBA error attention interrupt
1782  * 2 - DMA ring index out of range
1783  * 3 - Mailbox command came back as unknown
1784  **/
1785 static void
1786 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1787 {
1788         struct lpfc_vport *vport = phba->pport;
1789         struct lpfc_sli   *psli = &phba->sli;
1790         uint32_t event_data;
1791         unsigned long temperature;
1792         struct temp_event temp_event_data;
1793         struct Scsi_Host  *shost;
1794
1795         /* If the pci channel is offline, ignore possible errors,
1796          * since we cannot communicate with the pci card anyway.
1797          */
1798         if (pci_channel_offline(phba->pcidev)) {
1799                 spin_lock_irq(&phba->hbalock);
1800                 phba->hba_flag &= ~DEFER_ERATT;
1801                 spin_unlock_irq(&phba->hbalock);
1802                 return;
1803         }
1804
1805         /* If resets are disabled then leave the HBA alone and return */
1806         if (!phba->cfg_enable_hba_reset)
1807                 return;
1808
1809         /* Send an internal error event to mgmt application */
1810         lpfc_board_errevt_to_mgmt(phba);
1811
1812         if (phba->hba_flag & DEFER_ERATT)
1813                 lpfc_handle_deferred_eratt(phba);
1814
1815         if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1816                 if (phba->work_hs & HS_FFER6)
1817                         /* Re-establishing Link */
1818                         lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1819                                         "1301 Re-establishing Link "
1820                                         "Data: x%x x%x x%x\n",
1821                                         phba->work_hs, phba->work_status[0],
1822                                         phba->work_status[1]);
1823                 if (phba->work_hs & HS_FFER8)
1824                         /* Device Zeroization */
1825                         lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1826                                         "2861 Host Authentication device "
1827                                         "zeroization Data:x%x x%x x%x\n",
1828                                         phba->work_hs, phba->work_status[0],
1829                                         phba->work_status[1]);
1830
1831                 spin_lock_irq(&phba->hbalock);
1832                 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1833                 spin_unlock_irq(&phba->hbalock);
1834
1835                 /*
1836                  * The firmware stops when it triggers an erratt with
1837                  * HS_FFER6, which can cause I/Os to be dropped. Error out
1838                  * the iocbs (I/Os) on the txcmplq and let the SCSI layer
1839                  * retry them once the link is re-established.
1840                  */
1841                 lpfc_sli_abort_fcp_rings(phba);
1842
1843                 /*
1844                  * There was a firmware error.  Take the hba offline and then
1845                  * attempt to restart it.
1846                  */
1847                 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1848                 lpfc_offline(phba);
1849                 lpfc_sli_brdrestart(phba);
1850                 if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
1851                         lpfc_unblock_mgmt_io(phba);
1852                         return;
1853                 }
1854                 lpfc_unblock_mgmt_io(phba);
1855         } else if (phba->work_hs & HS_CRIT_TEMP) {
1856                 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1857                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1858                 temp_event_data.event_code = LPFC_CRIT_TEMP;
1859                 temp_event_data.data = (uint32_t)temperature;
1860
1861                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1862                                 "0406 Adapter maximum temperature exceeded "
1863                                 "(%ld), taking this port offline "
1864                                 "Data: x%x x%x x%x\n",
1865                                 temperature, phba->work_hs,
1866                                 phba->work_status[0], phba->work_status[1]);
1867
1868                 shost = lpfc_shost_from_vport(phba->pport);
1869                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1870                                           sizeof(temp_event_data),
1871                                           (char *) &temp_event_data,
1872                                           SCSI_NL_VID_TYPE_PCI
1873                                           | PCI_VENDOR_ID_EMULEX);
1874
1875                 spin_lock_irq(&phba->hbalock);
1876                 phba->over_temp_state = HBA_OVER_TEMP;
1877                 spin_unlock_irq(&phba->hbalock);
1878                 lpfc_offline_eratt(phba);
1879
1880         } else {
1881                 /* The if clause above forces this code path when the status
1882                  * failure is a value other than FFER6. Do not take the port
1883                  * offline twice. This is the adapter hardware error path.
1884                  */
1885                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1886                                 "0457 Adapter Hardware Error "
1887                                 "Data: x%x x%x x%x\n",
1888                                 phba->work_hs,
1889                                 phba->work_status[0], phba->work_status[1]);
1890
1891                 event_data = FC_REG_DUMP_EVENT;
1892                 shost = lpfc_shost_from_vport(vport);
1893                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1894                                 sizeof(event_data), (char *) &event_data,
1895                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1896
1897                 lpfc_offline_eratt(phba);
1898         }
1899         return;
1900 }
1901
1902 /**
1903  * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1904  * @phba: pointer to lpfc hba data structure.
1905  * @mbx_action: flag for mailbox shutdown action.
1906  * @en_rn_msg: send reset/port recovery message.
1907  * This routine is invoked to perform an SLI4 port PCI function reset in
1908  * response to port status register polling attention. It waits for port
1909  * status register (ERR, RDY, RN) bits before proceeding with function reset.
1910  * During this process, interrupt vectors are freed and later requested
1911  * for handling possible port resource change.
1912  **/
1913 static int
1914 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1915                             bool en_rn_msg)
1916 {
1917         int rc;
1918         uint32_t intr_mode;
1919         LPFC_MBOXQ_t *mboxq;
1920
1921         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1922             LPFC_SLI_INTF_IF_TYPE_2) {
1923                 /*
1924                  * On an error status condition, the driver needs to
1925                  * wait for port readiness before performing the reset.
1926                  */
1927                 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1928                 if (rc)
1929                         return rc;
1930         }
1931
1932         /* need reset: attempt for port recovery */
1933         if (en_rn_msg)
1934                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1935                                 "2887 Reset Needed: Attempting Port "
1936                                 "Recovery...\n");
1937
1938         /* If mbx_action is LPFC_MBX_NO_WAIT, the HBA has been reset and
1939          * is not functional, thus we should clear the
1940          * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
1941          */
1942         if (mbx_action == LPFC_MBX_NO_WAIT) {
1943                 spin_lock_irq(&phba->hbalock);
1944                 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
1945                 if (phba->sli.mbox_active) {
1946                         mboxq = phba->sli.mbox_active;
1947                         mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
1948                         __lpfc_mbox_cmpl_put(phba, mboxq);
1949                         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1950                         phba->sli.mbox_active = NULL;
1951                 }
1952                 spin_unlock_irq(&phba->hbalock);
1953         }
1954
1955         lpfc_offline_prep(phba, mbx_action);
1956         lpfc_sli_flush_io_rings(phba);
1957         lpfc_offline(phba);
1958         /* release interrupt for possible resource change */
1959         lpfc_sli4_disable_intr(phba);
1960         rc = lpfc_sli_brdrestart(phba);
1961         if (rc) {
1962                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1963                                 "6309 Failed to restart board\n");
1964                 return rc;
1965         }
1966         /* request and enable interrupt */
1967         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1968         if (intr_mode == LPFC_INTR_ERROR) {
1969                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1970                                 "3175 Failed to enable interrupt\n");
1971                 return -EIO;
1972         }
1973         phba->intr_mode = intr_mode;
1974         rc = lpfc_online(phba);
1975         if (rc == 0)
1976                 lpfc_unblock_mgmt_io(phba);
1977
1978         return rc;
1979 }
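
/*
 * Editor's summary of the recovery sequence above, in order:
 *
 *         1. wait for port ready (if_type >= 2 only)
 *         2. on LPFC_MBX_NO_WAIT, fail back any active mailbox command
 *         3. lpfc_offline_prep() / flush the IO rings / lpfc_offline()
 *         4. free the interrupt vectors and restart the board
 *         5. re-enable interrupts, lpfc_online(), unblock mgmt I/O
 *
 * Releasing and re-requesting the vectors (steps 4-5) is what lets the
 * driver absorb any port resource changes across the reset.
 */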
1980
1981 /**
1982  * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1983  * @phba: pointer to lpfc hba data structure.
1984  *
1985  * This routine is invoked to handle the SLI4 HBA hardware error attention
1986  * conditions.
1987  **/
1988 static void
1989 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1990 {
1991         struct lpfc_vport *vport = phba->pport;
1992         uint32_t event_data;
1993         struct Scsi_Host *shost;
1994         uint32_t if_type;
1995         struct lpfc_register portstat_reg = {0};
1996         uint32_t reg_err1, reg_err2;
1997         uint32_t uerrlo_reg, uemasklo_reg;
1998         uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1999         bool en_rn_msg = true;
2000         struct temp_event temp_event_data;
2001         struct lpfc_register portsmphr_reg;
2002         int rc, i;
2003
2004         /* If the pci channel is offline, ignore possible errors, since
2005          * we cannot communicate with the pci card anyway.
2006          */
2007         if (pci_channel_offline(phba->pcidev)) {
2008                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2009                                 "3166 pci channel is offline\n");
2010                 lpfc_sli_flush_io_rings(phba);
2011                 return;
2012         }
2013
2014         memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2015         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2016         switch (if_type) {
2017         case LPFC_SLI_INTF_IF_TYPE_0:
2018                 pci_rd_rc1 = lpfc_readl(
2019                                 phba->sli4_hba.u.if_type0.UERRLOregaddr,
2020                                 &uerrlo_reg);
2021                 pci_rd_rc2 = lpfc_readl(
2022                                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
2023                                 &uemasklo_reg);
2024                 /* consider PCI bus read error as pci_channel_offline */
2025                 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
2026                         return;
2027                 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
2028                         lpfc_sli4_offline_eratt(phba);
2029                         return;
2030                 }
2031                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2032                                 "7623 Checking UE recoverable");
2033
2034                 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
2035                         if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2036                                        &portsmphr_reg.word0))
2037                                 continue;
2038
2039                         smphr_port_status = bf_get(lpfc_port_smphr_port_status,
2040                                                    &portsmphr_reg);
2041                         if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2042                             LPFC_PORT_SEM_UE_RECOVERABLE)
2043                                 break;
2044                         /* Sleep for 1 second before checking the semaphore */
2045                         msleep(1000);
2046                 }
2047
2048                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2049                                 "4827 smphr_port_status x%x : Waited %dSec",
2050                                 smphr_port_status, i);
2051
2052                 /* Recoverable UE, reset the HBA device */
2053                 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2054                     LPFC_PORT_SEM_UE_RECOVERABLE) {
2055                         for (i = 0; i < 20; i++) {
2056                                 msleep(1000);
2057                                 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2058                                     &portsmphr_reg.word0) &&
2059                                     (LPFC_POST_STAGE_PORT_READY ==
2060                                      bf_get(lpfc_port_smphr_port_status,
2061                                      &portsmphr_reg))) {
2062                                         rc = lpfc_sli4_port_sta_fn_reset(phba,
2063                                                 LPFC_MBX_NO_WAIT, en_rn_msg);
2064                                         if (rc == 0)
2065                                                 return;
2066                                         lpfc_printf_log(phba, KERN_ERR,
2067                                                 LOG_TRACE_EVENT,
2068                                                 "4215 Failed to recover UE");
2069                                         break;
2070                                 }
2071                         }
2072                 }
2073                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2074                                 "7624 Firmware not ready: Failing UE recovery,"
2075                                 " waited %dSec", i);
2076                 phba->link_state = LPFC_HBA_ERROR;
2077                 break;
2078
2079         case LPFC_SLI_INTF_IF_TYPE_2:
2080         case LPFC_SLI_INTF_IF_TYPE_6:
2081                 pci_rd_rc1 = lpfc_readl(
2082                                 phba->sli4_hba.u.if_type2.STATUSregaddr,
2083                                 &portstat_reg.word0);
2084                 /* consider PCI bus read error as pci_channel_offline */
2085                 if (pci_rd_rc1 == -EIO) {
2086                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2087                                 "3151 PCI bus read access failure: x%x\n",
2088                                 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2089                         lpfc_sli4_offline_eratt(phba);
2090                         return;
2091                 }
2092                 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2093                 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2094                 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2095                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2096                                         "2889 Port Overtemperature event, "
2097                                         "taking port offline Data: x%x x%x\n",
2098                                         reg_err1, reg_err2);
2099
2100                         phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2101                         temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2102                         temp_event_data.event_code = LPFC_CRIT_TEMP;
2103                         temp_event_data.data = 0xFFFFFFFF;
2104
2105                         shost = lpfc_shost_from_vport(phba->pport);
2106                         fc_host_post_vendor_event(shost, fc_get_event_number(),
2107                                                   sizeof(temp_event_data),
2108                                                   (char *)&temp_event_data,
2109                                                   SCSI_NL_VID_TYPE_PCI
2110                                                   | PCI_VENDOR_ID_EMULEX);
2111
2112                         spin_lock_irq(&phba->hbalock);
2113                         phba->over_temp_state = HBA_OVER_TEMP;
2114                         spin_unlock_irq(&phba->hbalock);
2115                         lpfc_sli4_offline_eratt(phba);
2116                         return;
2117                 }
2118                 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2119                     reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2120                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2121                                         "3143 Port Down: Firmware Update "
2122                                         "Detected\n");
2123                         en_rn_msg = false;
2124                 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2125                          reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2126                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2127                                         "3144 Port Down: Debug Dump\n");
2128                 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2129                          reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2130                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2131                                         "3145 Port Down: Provisioning\n");
2132
2133                 /* If resets are disabled then leave the HBA alone and return */
2134                 if (!phba->cfg_enable_hba_reset)
2135                         return;
2136
2137                 /* Check port status register for function reset */
2138                 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2139                                 en_rn_msg);
2140                 if (rc == 0) {
2141                         /* don't report event on forced debug dump */
2142                         if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2143                             reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2144                                 return;
2145                         else
2146                                 break;
2147                 }
2148                 /* fall through when unable to recover */
2149                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2150                                 "3152 Unrecoverable error\n");
2151                 lpfc_sli4_offline_eratt(phba);
2152                 break;
2153         case LPFC_SLI_INTF_IF_TYPE_1:
2154         default:
2155                 break;
2156         }
2157         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2158                         "3123 Report dump event to upper layer\n");
2159         /* Send an internal error event to mgmt application */
2160         lpfc_board_errevt_to_mgmt(phba);
2161
2162         event_data = FC_REG_DUMP_EVENT;
2163         shost = lpfc_shost_from_vport(vport);
2164         fc_host_post_vendor_event(shost, fc_get_event_number(),
2165                                   sizeof(event_data), (char *) &event_data,
2166                                   SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2167 }
2168
2169 /**
2170  * lpfc_handle_eratt - Wrapper func for handling hba error attention
2171  * @phba: pointer to lpfc HBA data structure.
2172  *
2173  * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2174  * routine from the API jump table function pointer from the lpfc_hba struct.
2175  *
2176  * Return codes
2177  *   0 - success.
2178  *   Any other value - error.
2179  **/
2180 void
2181 lpfc_handle_eratt(struct lpfc_hba *phba)
2182 {
2183         (*phba->lpfc_handle_eratt)(phba);
2184 }
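
/*
 * Editor's sketch of how this jump-table indirection is typically wired
 * up; the actual assignment happens in the driver's API-table setup,
 * which is outside this excerpt:
 *
 *         if (phba->sli_rev < LPFC_SLI_REV4)
 *                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
 *         else
 *                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
 *
 * Callers simply invoke lpfc_handle_eratt() and never need to know which
 * SLI revision is in use.
 */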
2185
2186 /**
2187  * lpfc_handle_latt - The HBA link event handler
2188  * @phba: pointer to lpfc hba data structure.
2189  *
2190  * This routine is invoked from the worker thread to handle a HBA host
2191  * attention link event. SLI3 only.
2192  **/
2193 void
2194 lpfc_handle_latt(struct lpfc_hba *phba)
2195 {
2196         struct lpfc_vport *vport = phba->pport;
2197         struct lpfc_sli   *psli = &phba->sli;
2198         LPFC_MBOXQ_t *pmb;
2199         volatile uint32_t control;
2200         int rc = 0;
2201
2202         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2203         if (!pmb) {
2204                 rc = 1;
2205                 goto lpfc_handle_latt_err_exit;
2206         }
2207
2208         rc = lpfc_mbox_rsrc_prep(phba, pmb);
2209         if (rc) {
2210                 rc = 2;
2211                 mempool_free(pmb, phba->mbox_mem_pool);
2212                 goto lpfc_handle_latt_err_exit;
2213         }
2214
2215         /* Cleanup any outstanding ELS commands */
2216         lpfc_els_flush_all_cmd(phba);
2217         psli->slistat.link_event++;
2218         lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
2219         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2220         pmb->vport = vport;
2221         /* Block ELS IOCBs until we have processed this mbox command */
2222         phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2223         rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
2224         if (rc == MBX_NOT_FINISHED) {
2225                 rc = 4;
2226                 goto lpfc_handle_latt_free_mbuf;
2227         }
2228
2229         /* Clear Link Attention in HA REG */
2230         spin_lock_irq(&phba->hbalock);
2231         writel(HA_LATT, phba->HAregaddr);
2232         readl(phba->HAregaddr); /* flush */
2233         spin_unlock_irq(&phba->hbalock);
2234
2235         return;
2236
2237 lpfc_handle_latt_free_mbuf:
2238         phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2239         lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2240 lpfc_handle_latt_err_exit:
2241         /* Enable Link attention interrupts */
2242         spin_lock_irq(&phba->hbalock);
2243         psli->sli_flag |= LPFC_PROCESS_LA;
2244         control = readl(phba->HCregaddr);
2245         control |= HC_LAINT_ENA;
2246         writel(control, phba->HCregaddr);
2247         readl(phba->HCregaddr); /* flush */
2248
2249         /* Clear Link Attention in HA REG */
2250         writel(HA_LATT, phba->HAregaddr);
2251         readl(phba->HAregaddr); /* flush */
2252         spin_unlock_irq(&phba->hbalock);
2253         lpfc_linkdown(phba);
2254         phba->link_state = LPFC_HBA_ERROR;
2255
2256         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2257                         "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2258
2259         return;
2260 }
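
/*
 * Editor's note: the "0300 LATT" message logs rc as Data:%d; in the
 * paths above, 1 means the mailbox allocation failed, 2 means mailbox
 * resource preparation failed, and 4 means the READ_TOPOLOGY mailbox
 * command could not be issued.
 */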
2261
2262 static void
2263 lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
2264 {
2265         int i, j;
2266
2267         while (length > 0) {
2268                 /* Look for Serial Number */
2269                 if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
2270                         *pindex += 2;
2271                         i = vpd[*pindex];
2272                         *pindex += 1;
2273                         j = 0;
2274                         length -= (3+i);
2275                         while (i--) {
2276                                 phba->SerialNumber[j++] = vpd[(*pindex)++];
2277                                 if (j == 31)
2278                                         break;
2279                         }
2280                         phba->SerialNumber[j] = 0;
2281                         continue;
2282                 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
2283                         phba->vpd_flag |= VPD_MODEL_DESC;
2284                         *pindex += 2;
2285                         i = vpd[*pindex];
2286                         *pindex += 1;
2287                         j = 0;
2288                         length -= (3+i);
2289                         while (i--) {
2290                                 phba->ModelDesc[j++] = vpd[(*pindex)++];
2291                                 if (j == 255)
2292                                         break;
2293                         }
2294                         phba->ModelDesc[j] = 0;
2295                         continue;
2296                 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
2297                         phba->vpd_flag |= VPD_MODEL_NAME;
2298                         *pindex += 2;
2299                         i = vpd[*pindex];
2300                         *pindex += 1;
2301                         j = 0;
2302                         length -= (3+i);
2303                         while (i--) {
2304                                 phba->ModelName[j++] = vpd[(*pindex)++];
2305                                 if (j == 79)
2306                                         break;
2307                         }
2308                         phba->ModelName[j] = 0;
2309                         continue;
2310                 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
2311                         phba->vpd_flag |= VPD_PROGRAM_TYPE;
2312                         *pindex += 2;
2313                         i = vpd[*pindex];
2314                         *pindex += 1;
2315                         j = 0;
2316                         length -= (3+i);
2317                         while (i--) {
2318                                 phba->ProgramType[j++] = vpd[(*pindex)++];
2319                                 if (j == 255)
2320                                         break;
2321                         }
2322                         phba->ProgramType[j] = 0;
2323                         continue;
2324                 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
2325                         phba->vpd_flag |= VPD_PORT;
2326                         *pindex += 2;
2327                         i = vpd[*pindex];
2328                         *pindex += 1;
2329                         j = 0;
2330                         length -= (3 + i);
2331                         while (i--) {
2332                                 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2333                                     (phba->sli4_hba.pport_name_sta ==
2334                                      LPFC_SLI4_PPNAME_GET)) {
2335                                         j++;
2336                                         (*pindex)++;
2337                                 } else
2338                                         phba->Port[j++] = vpd[(*pindex)++];
2339                                 if (j == 19)
2340                                         break;
2341                         }
2342                         if ((phba->sli_rev != LPFC_SLI_REV4) ||
2343                             (phba->sli4_hba.pport_name_sta ==
2344                              LPFC_SLI4_PPNAME_NON))
2345                                 phba->Port[j] = 0;
2346                         continue;
2347                 } else {
2348                         *pindex += 2;
2349                         i = vpd[*pindex];
2350                         *pindex += 1;
2351                         *pindex += i;
2352                         length -= (3 + i);
2353                 }
2354         }
2355 }
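
/*
 * Editor's note: the parser above walks records laid out as
 * <keyword[2]><len[1]><data[len]>, which is why each match advances
 * *pindex by 2, reads one length byte, then copies the payload. For a
 * hypothetical serial number "ABC123" the raw bytes would be:
 *
 *         'S' 'N' 0x06 'A' 'B' 'C' '1' '2' '3'
 *
 * and 3 + len (9 here) is subtracted from the remaining length. Copies
 * are clamped to the destination sizes (31/255/79/255/19 bytes) so the
 * phba string fields stay NUL-terminated.
 */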
2356
2357 /**
2358  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2359  * @phba: pointer to lpfc hba data structure.
2360  * @vpd: pointer to the vital product data.
2361  * @len: length of the vital product data in bytes.
2362  *
2363  * This routine parses the Vital Product Data (VPD). The VPD is treated as
2364  * an array of characters. In this routine, the ModelName, ProgramType, and
2365  * ModelDesc, etc. fields of the phba data structure will be populated.
2366  *
2367  * Return codes
2368  *   0 - pointer to the VPD passed in is NULL
2369  *   1 - success
2370  **/
2371 int
2372 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2373 {
2374         uint8_t lenlo, lenhi;
2375         int Length;
2376         int i;
2377         int finished = 0;
2378         int index = 0;
2379
2380         if (!vpd)
2381                 return 0;
2382
2383         /* Vital Product */
2384         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2385                         "0455 Vital Product Data: x%x x%x x%x x%x\n",
2386                         (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2387                         (uint32_t) vpd[3]);
2388         while (!finished && (index < (len - 4))) {
2389                 switch (vpd[index]) {
2390                 case 0x82:
2391                 case 0x91:
2392                         index += 1;
2393                         lenlo = vpd[index];
2394                         index += 1;
2395                         lenhi = vpd[index];
2396                         index += 1;
2397                         i = ((((unsigned short)lenhi) << 8) + lenlo);
2398                         index += i;
2399                         break;
2400                 case 0x90:
2401                         index += 1;
2402                         lenlo = vpd[index];
2403                         index += 1;
2404                         lenhi = vpd[index];
2405                         index += 1;
2406                         Length = ((((unsigned short)lenhi) << 8) + lenlo);
2407                         if (Length > len - index)
2408                                 Length = len - index;
2409
2410                         lpfc_fill_vpd(phba, vpd, Length, &index);
2411                         finished = 0;
2412                         break;
2413                 case 0x78:
2414                         finished = 1;
2415                         break;
2416                 default:
2417                         index++;
2418                         break;
2419                 }
2420         }
2421
2422         return 1;
2423 }
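
/*
 * Editor's note: at the outer level the VPD is a sequence of large
 * resource tags, each followed by a little-endian 16-bit length:
 *
 *         0x82  identifier string  (skipped)
 *         0x90  read-only data     (handed to lpfc_fill_vpd())
 *         0x91  read-write data    (skipped)
 *         0x78  end tag            (terminates the walk)
 *
 * For example, the bytes 0x90 0x34 0x00 introduce a 0x0034-byte
 * read-only section. Unrecognized bytes are stepped over one at a time
 * by the default case.
 */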
2424
2425 /**
2426  * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
2427  * @phba: pointer to lpfc hba data structure.
2428  * @mdp: pointer to the data structure to hold the derived model name.
2429  * @descp: pointer to the data structure to hold the derived description.
2430  *
2431  * This routine retrieves the HBA's description based on its registered
2432  * PCI device ID. The @descp passed in points to an array of 256 chars. It
2433  * shall be returned with the model name, maximum speed, and the host bus type.
2434  * The @mdp passed into this function points to an array of 80 chars. When the
2435  * function returns, the @mdp will be filled with the model name.
2436  **/
2437 static void
2438 lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2439 {
2440         uint16_t sub_dev_id = phba->pcidev->subsystem_device;
2441         char *model = "<Unknown>";
2442         int tbolt = 0;
2443
2444         switch (sub_dev_id) {
2445         case PCI_DEVICE_ID_CLRY_161E:
2446                 model = "161E";
2447                 break;
2448         case PCI_DEVICE_ID_CLRY_162E:
2449                 model = "162E";
2450                 break;
2451         case PCI_DEVICE_ID_CLRY_164E:
2452                 model = "164E";
2453                 break;
2454         case PCI_DEVICE_ID_CLRY_161P:
2455                 model = "161P";
2456                 break;
2457         case PCI_DEVICE_ID_CLRY_162P:
2458                 model = "162P";
2459                 break;
2460         case PCI_DEVICE_ID_CLRY_164P:
2461                 model = "164P";
2462                 break;
2463         case PCI_DEVICE_ID_CLRY_321E:
2464                 model = "321E";
2465                 break;
2466         case PCI_DEVICE_ID_CLRY_322E:
2467                 model = "322E";
2468                 break;
2469         case PCI_DEVICE_ID_CLRY_324E:
2470                 model = "324E";
2471                 break;
2472         case PCI_DEVICE_ID_CLRY_321P:
2473                 model = "321P";
2474                 break;
2475         case PCI_DEVICE_ID_CLRY_322P:
2476                 model = "322P";
2477                 break;
2478         case PCI_DEVICE_ID_CLRY_324P:
2479                 model = "324P";
2480                 break;
2481         case PCI_DEVICE_ID_TLFC_2XX2:
2482                 model = "2XX2";
2483                 tbolt = 1;
2484                 break;
2485         case PCI_DEVICE_ID_TLFC_3162:
2486                 model = "3162";
2487                 tbolt = 1;
2488                 break;
2489         case PCI_DEVICE_ID_TLFC_3322:
2490                 model = "3322";
2491                 tbolt = 1;
2492                 break;
2493         default:
2494                 model = "Unknown";
2495                 break;
2496         }
2497
2498         if (mdp && mdp[0] == '\0')
2499                 snprintf(mdp, 79, "%s", model);
2500
2501         if (descp && descp[0] == '\0')
2502                 snprintf(descp, 255,
2503                          "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
2504                          (tbolt) ? "ThunderLink FC " : "Celerity FC-",
2505                          model,
2506                          phba->Port);
2507 }
2508
2509 /**
2510  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2511  * @phba: pointer to lpfc hba data structure.
2512  * @mdp: pointer to the data structure to hold the derived model name.
2513  * @descp: pointer to the data structure to hold the derived description.
2514  *
2515  * This routine retrieves the HBA's description based on its registered
2516  * PCI device ID. The @descp passed in points to an array of 256 chars. It
2517  * shall be returned with the model name, maximum speed, and the host bus type.
2518  * The @mdp passed into this function points to an array of 80 chars. When the
2519  * function returns, the @mdp will be filled with the model name.
2520  **/
2521 static void
2522 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2523 {
2524         lpfc_vpd_t *vp;
2525         uint16_t dev_id = phba->pcidev->device;
2526         int max_speed;
2527         int GE = 0;
2528         int oneConnect = 0; /* default is not a oneConnect */
2529         struct {
2530                 char *name;
2531                 char *bus;
2532                 char *function;
2533         } m = {"<Unknown>", "", ""};
2534
2535         if (mdp && mdp[0] != '\0'
2536                 && descp && descp[0] != '\0')
2537                 return;
2538
2539         if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
2540                 lpfc_get_atto_model_desc(phba, mdp, descp);
2541                 return;
2542         }
2543
2544         if (phba->lmt & LMT_64Gb)
2545                 max_speed = 64;
2546         else if (phba->lmt & LMT_32Gb)
2547                 max_speed = 32;
2548         else if (phba->lmt & LMT_16Gb)
2549                 max_speed = 16;
2550         else if (phba->lmt & LMT_10Gb)
2551                 max_speed = 10;
2552         else if (phba->lmt & LMT_8Gb)
2553                 max_speed = 8;
2554         else if (phba->lmt & LMT_4Gb)
2555                 max_speed = 4;
2556         else if (phba->lmt & LMT_2Gb)
2557                 max_speed = 2;
2558         else if (phba->lmt & LMT_1Gb)
2559                 max_speed = 1;
2560         else
2561                 max_speed = 0;
2562
2563         vp = &phba->vpd;
2564
2565         switch (dev_id) {
2566         case PCI_DEVICE_ID_FIREFLY:
2567                 m = (typeof(m)){"LP6000", "PCI",
2568                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2569                 break;
2570         case PCI_DEVICE_ID_SUPERFLY:
2571                 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2572                         m = (typeof(m)){"LP7000", "PCI", ""};
2573                 else
2574                         m = (typeof(m)){"LP7000E", "PCI", ""};
2575                 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2576                 break;
2577         case PCI_DEVICE_ID_DRAGONFLY:
2578                 m = (typeof(m)){"LP8000", "PCI",
2579                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2580                 break;
2581         case PCI_DEVICE_ID_CENTAUR:
2582                 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2583                         m = (typeof(m)){"LP9002", "PCI", ""};
2584                 else
2585                         m = (typeof(m)){"LP9000", "PCI", ""};
2586                 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2587                 break;
2588         case PCI_DEVICE_ID_RFLY:
2589                 m = (typeof(m)){"LP952", "PCI",
2590                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2591                 break;
2592         case PCI_DEVICE_ID_PEGASUS:
2593                 m = (typeof(m)){"LP9802", "PCI-X",
2594                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2595                 break;
2596         case PCI_DEVICE_ID_THOR:
2597                 m = (typeof(m)){"LP10000", "PCI-X",
2598                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2599                 break;
2600         case PCI_DEVICE_ID_VIPER:
2601                 m = (typeof(m)){"LPX1000",  "PCI-X",
2602                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2603                 break;
2604         case PCI_DEVICE_ID_PFLY:
2605                 m = (typeof(m)){"LP982", "PCI-X",
2606                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2607                 break;
2608         case PCI_DEVICE_ID_TFLY:
2609                 m = (typeof(m)){"LP1050", "PCI-X",
2610                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2611                 break;
2612         case PCI_DEVICE_ID_HELIOS:
2613                 m = (typeof(m)){"LP11000", "PCI-X2",
2614                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2615                 break;
2616         case PCI_DEVICE_ID_HELIOS_SCSP:
2617                 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2618                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2619                 break;
2620         case PCI_DEVICE_ID_HELIOS_DCSP:
2621                 m = (typeof(m)){"LP11002-SP",  "PCI-X2",
2622                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2623                 break;
2624         case PCI_DEVICE_ID_NEPTUNE:
2625                 m = (typeof(m)){"LPe1000", "PCIe",
2626                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2627                 break;
2628         case PCI_DEVICE_ID_NEPTUNE_SCSP:
2629                 m = (typeof(m)){"LPe1000-SP", "PCIe",
2630                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2631                 break;
2632         case PCI_DEVICE_ID_NEPTUNE_DCSP:
2633                 m = (typeof(m)){"LPe1002-SP", "PCIe",
2634                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2635                 break;
2636         case PCI_DEVICE_ID_BMID:
2637                 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2638                 break;
2639         case PCI_DEVICE_ID_BSMB:
2640                 m = (typeof(m)){"LP111", "PCI-X2",
2641                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2642                 break;
2643         case PCI_DEVICE_ID_ZEPHYR:
2644                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2645                 break;
2646         case PCI_DEVICE_ID_ZEPHYR_SCSP:
2647                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2648                 break;
2649         case PCI_DEVICE_ID_ZEPHYR_DCSP:
2650                 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2651                 GE = 1;
2652                 break;
2653         case PCI_DEVICE_ID_ZMID:
2654                 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2655                 break;
2656         case PCI_DEVICE_ID_ZSMB:
2657                 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2658                 break;
2659         case PCI_DEVICE_ID_LP101:
2660                 m = (typeof(m)){"LP101", "PCI-X",
2661                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2662                 break;
2663         case PCI_DEVICE_ID_LP10000S:
2664                 m = (typeof(m)){"LP10000-S", "PCI",
2665                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2666                 break;
2667         case PCI_DEVICE_ID_LP11000S:
2668                 m = (typeof(m)){"LP11000-S", "PCI-X2",
2669                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2670                 break;
2671         case PCI_DEVICE_ID_LPE11000S:
2672                 m = (typeof(m)){"LPe11000-S", "PCIe",
2673                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2674                 break;
2675         case PCI_DEVICE_ID_SAT:
2676                 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2677                 break;
2678         case PCI_DEVICE_ID_SAT_MID:
2679                 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2680                 break;
2681         case PCI_DEVICE_ID_SAT_SMB:
2682                 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2683                 break;
2684         case PCI_DEVICE_ID_SAT_DCSP:
2685                 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2686                 break;
2687         case PCI_DEVICE_ID_SAT_SCSP:
2688                 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2689                 break;
2690         case PCI_DEVICE_ID_SAT_S:
2691                 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2692                 break;
2693         case PCI_DEVICE_ID_PROTEUS_VF:
2694                 m = (typeof(m)){"LPev12000", "PCIe IOV",
2695                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2696                 break;
2697         case PCI_DEVICE_ID_PROTEUS_PF:
2698                 m = (typeof(m)){"LPev12000", "PCIe IOV",
2699                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2700                 break;
2701         case PCI_DEVICE_ID_PROTEUS_S:
2702                 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2703                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2704                 break;
2705         case PCI_DEVICE_ID_TIGERSHARK:
2706                 oneConnect = 1;
2707                 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2708                 break;
2709         case PCI_DEVICE_ID_TOMCAT:
2710                 oneConnect = 1;
2711                 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2712                 break;
2713         case PCI_DEVICE_ID_FALCON:
2714                 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2715                                 "EmulexSecure Fibre"};
2716                 break;
2717         case PCI_DEVICE_ID_BALIUS:
2718                 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2719                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2720                 break;
2721         case PCI_DEVICE_ID_LANCER_FC:
2722                 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2723                 break;
2724         case PCI_DEVICE_ID_LANCER_FC_VF:
2725                 m = (typeof(m)){"LPe16000", "PCIe",
2726                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2727                 break;
2728         case PCI_DEVICE_ID_LANCER_FCOE:
2729                 oneConnect = 1;
2730                 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2731                 break;
2732         case PCI_DEVICE_ID_LANCER_FCOE_VF:
2733                 oneConnect = 1;
2734                 m = (typeof(m)){"OCe15100", "PCIe",
2735                                 "Obsolete, Unsupported FCoE"};
2736                 break;
2737         case PCI_DEVICE_ID_LANCER_G6_FC:
2738                 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2739                 break;
2740         case PCI_DEVICE_ID_LANCER_G7_FC:
2741                 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2742                 break;
2743         case PCI_DEVICE_ID_LANCER_G7P_FC:
2744                 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2745                 break;
2746         case PCI_DEVICE_ID_SKYHAWK:
2747         case PCI_DEVICE_ID_SKYHAWK_VF:
2748                 oneConnect = 1;
2749                 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2750                 break;
2751         default:
2752                 m = (typeof(m)){"Unknown", "", ""};
2753                 break;
2754         }
2755
2756         if (mdp && mdp[0] == '\0')
2757                 snprintf(mdp, 79, "%s", m.name);
2758         /*
2759          * OneConnect HBAs require special processing; they are all
2760          * initiators and the port number is appended to the end.
2761          */
2762         if (descp && descp[0] == '\0') {
2763                 if (oneConnect)
2764                         snprintf(descp, 255,
2765                                 "Emulex OneConnect %s, %s Initiator %s",
2766                                 m.name, m.function,
2767                                 phba->Port);
2768                 else if (max_speed == 0)
2769                         snprintf(descp, 255,
2770                                 "Emulex %s %s %s",
2771                                 m.name, m.bus, m.function);
2772                 else
2773                         snprintf(descp, 255,
2774                                 "Emulex %s %d%s %s %s",
2775                                 m.name, max_speed, (GE) ? "GE" : "Gb",
2776                                 m.bus, m.function);
2777         }
2778 }
2779
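/*
 * Example (illustrative, derived from the format strings above): for a
 * Lancer G6 LPe32000 whose link-mode types include LMT_32Gb, the routine
 * yields
 *
 *   mdp:   "LPe32000"
 *   descp: "Emulex LPe32000 32Gb PCIe Fibre Channel Adapter"
 *
 * while for a OneConnect device such as the OCe11100, the port
 * designation from phba->Port is appended instead:
 *
 *   descp: "Emulex OneConnect OCe11100, FCoE Initiator <phba->Port>"
 */
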
2780 /**
2781  * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2782  * @phba: pointer to lpfc hba data structure.
2783  * @pring: pointer to an IOCB ring.
2784  * @cnt: the number of IOCBs to be posted to the IOCB ring.
2785  *
2786  * This routine posts a given number of IOCBs with the associated DMA buffer
2787  * descriptors specified by the cnt argument to the given IOCB ring.
2788  *
2789  * Return codes
2790  *   The number of IOCBs NOT able to be posted to the IOCB ring.
2791  **/
2792 int
2793 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2794 {
2795         IOCB_t *icmd;
2796         struct lpfc_iocbq *iocb;
2797         struct lpfc_dmabuf *mp1, *mp2;
2798
2799         cnt += pring->missbufcnt;
2800
2801         /* While there are buffers to post */
2802         while (cnt > 0) {
2803                 /* Allocate buffer for  command iocb */
2804                 iocb = lpfc_sli_get_iocbq(phba);
2805                 if (iocb == NULL) {
2806                         pring->missbufcnt = cnt;
2807                         return cnt;
2808                 }
2809                 icmd = &iocb->iocb;
2810
2811                 /* 2 buffers can be posted per command */
2812                 /* Allocate buffer to post */
2813                 mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2814                 if (mp1)
2815                         mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2816                 if (!mp1 || !mp1->virt) {
2817                         kfree(mp1);
2818                         lpfc_sli_release_iocbq(phba, iocb);
2819                         pring->missbufcnt = cnt;
2820                         return cnt;
2821                 }
2822
2823                 INIT_LIST_HEAD(&mp1->list);
2824                 /* Allocate buffer to post */
2825                 if (cnt > 1) {
2826                         mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2827                         if (mp2)
2828                                 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2829                                                             &mp2->phys);
2830                         if (!mp2 || !mp2->virt) {
2831                                 kfree(mp2);
2832                                 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2833                                 kfree(mp1);
2834                                 lpfc_sli_release_iocbq(phba, iocb);
2835                                 pring->missbufcnt = cnt;
2836                                 return cnt;
2837                         }
2838
2839                         INIT_LIST_HEAD(&mp2->list);
2840                 } else {
2841                         mp2 = NULL;
2842                 }
2843
2844                 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2845                 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2846                 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2847                 icmd->ulpBdeCount = 1;
2848                 cnt--;
2849                 if (mp2) {
2850                         icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2851                         icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2852                         icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2853                         cnt--;
2854                         icmd->ulpBdeCount = 2;
2855                 }
2856
2857                 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2858                 icmd->ulpLe = 1;
2859
2860                 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2861                     IOCB_ERROR) {
2862                         lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2863                         kfree(mp1);
2864                         cnt++;
2865                         if (mp2) {
2866                                 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2867                                 kfree(mp2);
2868                                 cnt++;
2869                         }
2870                         lpfc_sli_release_iocbq(phba, iocb);
2871                         pring->missbufcnt = cnt;
2872                         return cnt;
2873                 }
2874                 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2875                 if (mp2)
2876                         lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2877         }
2878         pring->missbufcnt = 0;
2879         return 0;
2880 }
2881
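/*
 * Caller pattern (a sketch, not driver code): the return value is the
 * number of IOCBs that could NOT be posted, and the same shortfall is
 * remembered in pring->missbufcnt. Because missbufcnt is added back to
 * cnt on entry, a later call can repost just the backlog:
 *
 *   missed = lpfc_sli3_post_buffer(phba, pring, 64);
 *   ...
 *   lpfc_sli3_post_buffer(phba, pring, 0);   (reposts only the backlog)
 */
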
2882 /**
2883  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2884  * @phba: pointer to lpfc hba data structure.
2885  *
2886  * This routine posts initial receive IOCB buffers to the ELS ring. The
2887  * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2888  * set to 64 IOCBs. SLI3 only.
2889  *
2890  * Return codes
2891  *   0 - success (currently always success)
2892  **/
2893 static int
2894 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2895 {
2896         struct lpfc_sli *psli = &phba->sli;
2897
2898         /* Ring 0, ELS / CT buffers */
2899         lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2900         /* Ring 2 - FCP no buffers needed */
2901
2902         return 0;
2903 }
2904
2905 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2906
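/*
 * S(N, V) rotates the 32-bit value V left by N bits (the SHA-1 ROTL
 * primitive). For example:
 *
 *   S(1, 0x80000000) == 0x00000001
 *   S(5, 0x00000001) == 0x00000020
 *
 * The macro assumes 0 < N < 32; the V >> (32 - N) half would be
 * undefined for N == 0.
 */
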
2907 /**
2908  * lpfc_sha_init - Set up initial array of hash table entries
2909  * @HashResultPointer: pointer to an array as hash table.
2910  *
2911  * This routine sets up the initial values to the array of hash table entries
2912  * for the LC HBAs.
2913  **/
2914 static void
2915 lpfc_sha_init(uint32_t *HashResultPointer)
2916 {
2917         HashResultPointer[0] = 0x67452301;
2918         HashResultPointer[1] = 0xEFCDAB89;
2919         HashResultPointer[2] = 0x98BADCFE;
2920         HashResultPointer[3] = 0x10325476;
2921         HashResultPointer[4] = 0xC3D2E1F0;
2922 }
2923
2924 /**
2925  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2926  * @HashResultPointer: pointer to an initial/result hash table.
2927  * @HashWorkingPointer: pointer to a working hash table.
2928  *
2929  * This routine iterates an initial hash table pointed to by
2930  * @HashResultPointer with the values from the working hash table pointed
2931  * to by @HashWorkingPointer. The results are put back into the initial
2932  * hash table and returned through @HashResultPointer as the result.
2933  **/
2934 static void
2935 lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
2936 {
2937         int t;
2938         uint32_t TEMP;
2939         uint32_t A, B, C, D, E;
2940         t = 16;
2941         do {
2942                 HashWorkingPointer[t] =
2943                     S(1,
2944                       HashWorkingPointer[t - 3] ^
2945                       HashWorkingPointer[t - 8] ^
2946                       HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2947         } while (++t <= 79);
2948         t = 0;
2949         A = HashResultPointer[0];
2950         B = HashResultPointer[1];
2951         C = HashResultPointer[2];
2952         D = HashResultPointer[3];
2953         E = HashResultPointer[4];
2954
2955         do {
2956                 if (t < 20) {
2957                         TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2958                 } else if (t < 40) {
2959                         TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2960                 } else if (t < 60) {
2961                         TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2962                 } else {
2963                         TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2964                 }
2965                 TEMP += S(5, A) + E + HashWorkingPointer[t];
2966                 E = D;
2967                 D = C;
2968                 C = S(30, B);
2969                 B = A;
2970                 A = TEMP;
2971         } while (++t <= 79);
2972
2973         HashResultPointer[0] += A;
2974         HashResultPointer[1] += B;
2975         HashResultPointer[2] += C;
2976         HashResultPointer[3] += D;
2977         HashResultPointer[4] += E;
2978
2979 }
2980
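/*
 * The loop above is the standard SHA-1 compression function: rounds
 * 0-19 use the "Ch" function with K = 0x5A827999, rounds 40-59 use
 * "Maj" with K = 0x8F1BBCDC, and rounds 20-39/60-79 use "Parity" with
 * K = 0x6ED9EBA1 / 0xCA62C1D6. Each round can be written compactly as:
 *
 *   TEMP = ROTL5(A) + f(t, B, C, D) + E + W[t] + K(t);
 *   E = D; D = C; C = ROTL30(B); B = A; A = TEMP;
 */
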
2981 /**
2982  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2983  * @RandomChallenge: pointer to the entry of host challenge random number array.
2984  * @HashWorking: pointer to the entry of the working hash array.
2985  *
2986  * This routine calculates the working hash array referred by @HashWorking
2987  * from the challenge random numbers associated with the host, referred by
2988  * @RandomChallenge. The result is put into the entry of the working hash
2989  * array and returned by reference through @HashWorking.
2990  **/
2991 static void
2992 lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
2993 {
2994         *HashWorking = (*RandomChallenge ^ *HashWorking);
2995 }
2996
2997 /**
2998  * lpfc_hba_init - Perform special handling for LC HBA initialization
2999  * @phba: pointer to lpfc hba data structure.
3000  * @hbainit: pointer to an array of unsigned 32-bit integers.
3001  *
3002  * This routine performs the special handling for LC HBA initialization.
3003  **/
3004 void
3005 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
3006 {
3007         int t;
3008         uint32_t *HashWorking;
3009         uint32_t *pwwnn = (uint32_t *)phba->wwnn;
3010
3011         HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
3012         if (!HashWorking)
3013                 return;
3014
3015         HashWorking[0] = HashWorking[78] = *pwwnn++;
3016         HashWorking[1] = HashWorking[79] = *pwwnn;
3017
3018         for (t = 0; t < 7; t++)
3019                 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
3020
3021         lpfc_sha_init(hbainit);
3022         lpfc_sha_iterate(hbainit, HashWorking);
3023         kfree(HashWorking);
3024 }
3025
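/*
 * Flow sketch (illustrative): HashWorking[] is treated as an 80-word
 * SHA-1 message schedule. Words 0 and 1 (and initially 78 and 79) are
 * seeded from the HBA's WWNN, words 0-6 are XORed with the per-HBA
 * RandomData challenge words, and a single lpfc_sha_iterate() pass
 * over the schedule leaves the 5-word digest in hbainit.
 */
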
3026 /**
3027  * lpfc_cleanup - Performs vport cleanups before deleting a vport
3028  * @vport: pointer to a virtual N_Port data structure.
3029  *
3030  * This routine performs the necessary cleanups before deleting the @vport.
3031  * It invokes the discovery state machine to perform necessary state
3032  * transitions and to release the ndlps associated with the @vport. Note,
3033  * the physical port is treated as @vport 0.
3034  **/
3035 void
3036 lpfc_cleanup(struct lpfc_vport *vport)
3037 {
3038         struct lpfc_hba   *phba = vport->phba;
3039         struct lpfc_nodelist *ndlp, *next_ndlp;
3040         int i = 0;
3041
3042         if (phba->link_state > LPFC_LINK_DOWN)
3043                 lpfc_port_link_failure(vport);
3044
3045         /* Clean up VMID resources */
3046         if (lpfc_is_vmid_enabled(phba))
3047                 lpfc_vmid_vport_cleanup(vport);
3048
3049         list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3050                 if (vport->port_type != LPFC_PHYSICAL_PORT &&
3051                     ndlp->nlp_DID == Fabric_DID) {
3052                         /* Just free up ndlp with Fabric_DID for vports */
3053                         lpfc_nlp_put(ndlp);
3054                         continue;
3055                 }
3056
3057                 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
3058                     ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3059                         lpfc_nlp_put(ndlp);
3060                         continue;
3061                 }
3062
3063                 /* Fabric Ports not in UNMAPPED state are cleaned up in the
3064                  * DEVICE_RM event.
3065                  */
3066                 if (ndlp->nlp_type & NLP_FABRIC &&
3067                     ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
3068                         lpfc_disc_state_machine(vport, ndlp, NULL,
3069                                         NLP_EVT_DEVICE_RECOVERY);
3070
3071                 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
3072                         lpfc_disc_state_machine(vport, ndlp, NULL,
3073                                         NLP_EVT_DEVICE_RM);
3074         }
3075
3076         /* This is a special case flush to return all
3077          * IOs before entering this loop. There are
3078          * two points in the code where a flush is
3079          * avoided if the FC_UNLOADING flag is set:
3080          * one is in the multipool destroy
3081          * (this prevents a crash) and the other is
3082          * in the nvme abort handler (also prevents
3083          * a crash). Both of these exceptions are
3084          * cases where the slot is still accessible.
3085          * The flush here is done only when the pci
3086          * slot is offline.
3087          */
3088         if (vport->load_flag & FC_UNLOADING &&
3089             pci_channel_offline(phba->pcidev))
3090                 lpfc_sli_flush_io_rings(vport->phba);
3091
3092         /* At this point, ALL ndlps should be gone because of the previous
3093          * NLP_EVT_DEVICE_RM. Let's wait for this to happen, if needed: the
3094          * loop below polls every 10 ms and gives up after ~30 seconds.
3095          */
3096         while (!list_empty(&vport->fc_nodes)) {
3097                 if (i++ > 3000) {
3098                         lpfc_printf_vlog(vport, KERN_ERR,
3099                                          LOG_TRACE_EVENT,
3100                                 "0233 Nodelist not empty\n");
3101                         list_for_each_entry_safe(ndlp, next_ndlp,
3102                                                 &vport->fc_nodes, nlp_listp) {
3103                                 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3104                                                  LOG_DISCOVERY,
3105                                                  "0282 did:x%x ndlp:x%px "
3106                                                  "refcnt:%d xflags x%x nflag x%x\n",
3107                                                  ndlp->nlp_DID, (void *)ndlp,
3108                                                  kref_read(&ndlp->kref),
3109                                                  ndlp->fc4_xpt_flags,
3110                                                  ndlp->nlp_flag);
3111                         }
3112                         break;
3113                 }
3114
3115                 /* Wait for any activity on ndlps to settle */
3116                 msleep(10);
3117         }
3118         lpfc_cleanup_vports_rrqs(vport, NULL);
3119 }
3120
3121 /**
3122  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
3123  * @vport: pointer to a virtual N_Port data structure.
3124  *
3125  * This routine stops all the timers associated with a @vport. This function
3126  * is invoked before disabling or deleting a @vport. Note that the physical
3127  * port is treated as @vport 0.
3128  **/
3129 void
3130 lpfc_stop_vport_timers(struct lpfc_vport *vport)
3131 {
3132         del_timer_sync(&vport->els_tmofunc);
3133         del_timer_sync(&vport->delayed_disc_tmo);
3134         lpfc_can_disctmo(vport);
3135         return;
3136 }
3137
3138 /**
3139  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3140  * @phba: pointer to lpfc hba data structure.
3141  *
3142  * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
3143  * caller of this routine should already hold the host lock.
3144  **/
3145 void
3146 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3147 {
3148         /* Clear pending FCF rediscovery wait flag */
3149         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3150
3151         /* Now, try to stop the timer */
3152         del_timer(&phba->fcf.redisc_wait);
3153 }
3154
3155 /**
3156  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3157  * @phba: pointer to lpfc hba data structure.
3158  *
3159  * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3160  * checks whether the FCF rediscovery wait timer is pending with the host
3161  * lock held before proceeding with disabling the timer and clearing the
3162  * wait timer pending flag.
3163  **/
3164 void
3165 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3166 {
3167         spin_lock_irq(&phba->hbalock);
3168         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3169                 /* FCF rediscovery timer already fired or stopped */
3170                 spin_unlock_irq(&phba->hbalock);
3171                 return;
3172         }
3173         __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3174         /* Clear failover in progress flags */
3175         phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3176         spin_unlock_irq(&phba->hbalock);
3177 }
3178
3179 /**
3180  * lpfc_cmf_stop - Stop CMF processing
3181  * @phba: pointer to lpfc hba data structure.
3182  *
3183  * This is called when the link goes down or if CMF mode is turned OFF.
3184  * It is also called when going offline or unloading, just before the
3185  * congestion info buffer is unregistered.
3186  **/
3187 void
3188 lpfc_cmf_stop(struct lpfc_hba *phba)
3189 {
3190         int cpu;
3191         struct lpfc_cgn_stat *cgs;
3192
3193         /* We only do something if CMF is enabled */
3194         if (!phba->sli4_hba.pc_sli4_params.cmf)
3195                 return;
3196
3197         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3198                         "6221 Stop CMF / Cancel Timer\n");
3199
3200         /* Cancel the CMF timer */
3201         hrtimer_cancel(&phba->cmf_stats_timer);
3202         hrtimer_cancel(&phba->cmf_timer);
3203
3204         /* Zero CMF counters */
3205         atomic_set(&phba->cmf_busy, 0);
3206         for_each_present_cpu(cpu) {
3207                 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3208                 atomic64_set(&cgs->total_bytes, 0);
3209                 atomic64_set(&cgs->rcv_bytes, 0);
3210                 atomic_set(&cgs->rx_io_cnt, 0);
3211                 atomic64_set(&cgs->rx_latency, 0);
3212         }
3213         atomic_set(&phba->cmf_bw_wait, 0);
3214
3215         /* Resume any blocked IO - Queue unblock on workqueue */
3216         queue_work(phba->wq, &phba->unblock_request_work);
3217 }
3218
3219 static inline uint64_t
3220 lpfc_get_max_line_rate(struct lpfc_hba *phba)
3221 {
3222         uint64_t rate = lpfc_sli_port_speed_get(phba);
3223
3224         return ((((unsigned long)rate) * 1024 * 1024) / 10);
3225 }
3226
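/*
 * Worked example (assuming lpfc_sli_port_speed_get() reports the link
 * speed in megabits per second): a 32GFC link reported as 32000 gives
 *
 *   32000 * 1024 * 1024 / 10 = 3,355,443,200
 *
 * i.e. about 3.36 GB/s, treating each byte as 10 bits on the wire.
 */
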
3227 void
3228 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3229 {
3230         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3231                         "6223 Signal CMF init\n");
3232
3233         /* Use the new fc_linkspeed to recalculate */
3234         phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3235         phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3236         phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3237                                             phba->cmf_interval_rate, 1000);
3238         phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3239
3240         /* This is a signal to firmware to sync up CMF BW with link speed */
3241         lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3242 }
3243
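/*
 * Byte-budget example (illustrative): with a maximum line rate of R
 * bytes/sec and a timer interval of LPFC_CMF_INTERVAL milliseconds,
 * the computation above sets
 *
 *   cmf_link_byte_count = R * LPFC_CMF_INTERVAL / 1000
 *
 * and starts cmf_max_bytes_per_interval, the value CMF throttles
 * against, at that same full-rate budget.
 */
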
3244 /**
3245  * lpfc_cmf_start - Start CMF processing
3246  * @phba: pointer to lpfc hba data structure.
3247  *
3248  * This is called when the link comes up or if CMF mode is changed from
3249  * OFF to Monitor or Managed.
3250  **/
3251 void
3252 lpfc_cmf_start(struct lpfc_hba *phba)
3253 {
3254         struct lpfc_cgn_stat *cgs;
3255         int cpu;
3256
3257         /* We only do something if CMF is enabled */
3258         if (!phba->sli4_hba.pc_sli4_params.cmf ||
3259             phba->cmf_active_mode == LPFC_CFG_OFF)
3260                 return;
3261
3262         /* Reinitialize congestion buffer info */
3263         lpfc_init_congestion_buf(phba);
3264
3265         atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3266         atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3267         atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3268         atomic_set(&phba->cgn_sync_warn_cnt, 0);
3269
3270         atomic_set(&phba->cmf_busy, 0);
3271         for_each_present_cpu(cpu) {
3272                 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3273                 atomic64_set(&cgs->total_bytes, 0);
3274                 atomic64_set(&cgs->rcv_bytes, 0);
3275                 atomic_set(&cgs->rx_io_cnt, 0);
3276                 atomic64_set(&cgs->rx_latency, 0);
3277         }
3278         phba->cmf_latency.tv_sec = 0;
3279         phba->cmf_latency.tv_nsec = 0;
3280
3281         lpfc_cmf_signal_init(phba);
3282
3283         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3284                         "6222 Start CMF / Timer\n");
3285
3286         phba->cmf_timer_cnt = 0;
3287         hrtimer_start(&phba->cmf_timer,
3288                       ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC),
3289                       HRTIMER_MODE_REL);
3290         hrtimer_start(&phba->cmf_stats_timer,
3291                       ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC),
3292                       HRTIMER_MODE_REL);
3293         /* Setup for latency check in IO cmpl routines */
3294         ktime_get_real_ts64(&phba->cmf_latency);
3295
3296         atomic_set(&phba->cmf_bw_wait, 0);
3297         atomic_set(&phba->cmf_stop_io, 0);
3298 }
3299
3300 /**
3301  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3302  * @phba: pointer to lpfc hba data structure.
3303  *
3304  * This routine stops all the timers associated with a HBA. This function is
3305  * invoked before either putting a HBA offline or unloading the driver.
3306  **/
3307 void
3308 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3309 {
3310         if (phba->pport)
3311                 lpfc_stop_vport_timers(phba->pport);
3312         cancel_delayed_work_sync(&phba->eq_delay_work);
3313         cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3314         del_timer_sync(&phba->sli.mbox_tmo);
3315         del_timer_sync(&phba->fabric_block_timer);
3316         del_timer_sync(&phba->eratt_poll);
3317         del_timer_sync(&phba->hb_tmofunc);
3318         if (phba->sli_rev == LPFC_SLI_REV4) {
3319                 del_timer_sync(&phba->rrq_tmr);
3320                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3321         }
3322         phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3323
3324         switch (phba->pci_dev_grp) {
3325         case LPFC_PCI_DEV_LP:
3326                 /* Stop any LightPulse device specific driver timers */
3327                 del_timer_sync(&phba->fcp_poll_timer);
3328                 break;
3329         case LPFC_PCI_DEV_OC:
3330                 /* Stop any OneConnect device specific driver timers */
3331                 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3332                 break;
3333         default:
3334                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3335                                 "0297 Invalid device group (x%x)\n",
3336                                 phba->pci_dev_grp);
3337                 break;
3338         }
3339         return;
3340 }
3341
3342 /**
3343  * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3344  * @phba: pointer to lpfc hba data structure.
3345  * @mbx_action: flag for mailbox no wait action.
3346  *
3347  * This routine marks a HBA's management interface as blocked. Once the HBA's
3348  * management interface is marked as blocked, all user space access to the
3349  * HBA, whether from the sysfs interface or the libdfc interface, is
3350  * blocked. The HBA is set to block the management interface when the
3351  * driver prepares the HBA interface for online or offline.
3352  **/
3353 static void
3354 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3355 {
3356         unsigned long iflag;
3357         uint8_t actcmd = MBX_HEARTBEAT;
3358         unsigned long timeout;
3359
3360         spin_lock_irqsave(&phba->hbalock, iflag);
3361         phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3362         spin_unlock_irqrestore(&phba->hbalock, iflag);
3363         if (mbx_action == LPFC_MBX_NO_WAIT)
3364                 return;
3365         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3366         spin_lock_irqsave(&phba->hbalock, iflag);
3367         if (phba->sli.mbox_active) {
3368                 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3369                 /* Determine how long we might wait for the active mailbox
3370                  * command to be gracefully completed by firmware.
3371                  */
3372                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3373                                 phba->sli.mbox_active) * 1000) + jiffies;
3374         }
3375         spin_unlock_irqrestore(&phba->hbalock, iflag);
3376
3377         /* Wait for the outstanding mailbox command to complete */
3378         while (phba->sli.mbox_active) {
3379                 /* Check active mailbox complete status every 2ms */
3380                 msleep(2);
3381                 if (time_after(jiffies, timeout)) {
3382                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3383                                         "2813 Mgmt IO is Blocked %x "
3384                                         "- mbox cmd %x still active\n",
3385                                         phba->sli.sli_flag, actcmd);
3386                         break;
3387                 }
3388         }
3389 }
3390
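/*
 * Typical pairing (sketch): callers bracket an online/offline
 * transition with block/unblock, e.g.
 *
 *   lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
 *   ... bring the HBA interface up or down ...
 *   lpfc_unblock_mgmt_io(phba);
 *
 * as lpfc_online() below does.
 */
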
3391 /**
3392  * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3393  * @phba: pointer to lpfc hba data structure.
3394  *
3395  * Allocate RPIs for all active remote nodes. This is needed whenever
3396  * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3397  * is to fix up the temporary RPI assignments.
3398  **/
3399 void
3400 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3401 {
3402         struct lpfc_nodelist  *ndlp, *next_ndlp;
3403         struct lpfc_vport **vports;
3404         int i, rpi;
3405
3406         if (phba->sli_rev != LPFC_SLI_REV4)
3407                 return;
3408
3409         vports = lpfc_create_vport_work_array(phba);
3410         if (vports == NULL)
3411                 return;
3412
3413         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3414                 if (vports[i]->load_flag & FC_UNLOADING)
3415                         continue;
3416
3417                 list_for_each_entry_safe(ndlp, next_ndlp,
3418                                          &vports[i]->fc_nodes,
3419                                          nlp_listp) {
3420                         rpi = lpfc_sli4_alloc_rpi(phba);
3421                         if (rpi == LPFC_RPI_ALLOC_ERROR) {
3422                                 /* TODO print log? */
3423                                 continue;
3424                         }
3425                         ndlp->nlp_rpi = rpi;
3426                         lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3427                                          LOG_NODE | LOG_DISCOVERY,
3428                                          "0009 Assign RPI x%x to ndlp x%px "
3429                                          "DID:x%06x flg:x%x\n",
3430                                          ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3431                                          ndlp->nlp_flag);
3432                 }
3433         }
3434         lpfc_destroy_vport_work_array(phba, vports);
3435 }
3436
3437 /**
3438  * lpfc_create_expedite_pool - create expedite pool
3439  * @phba: pointer to lpfc hba data structure.
3440  *
3441  * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3442  * to the expedite pool and marks them as expedite.
3443  **/
3444 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3445 {
3446         struct lpfc_sli4_hdw_queue *qp;
3447         struct lpfc_io_buf *lpfc_ncmd;
3448         struct lpfc_io_buf *lpfc_ncmd_next;
3449         struct lpfc_epd_pool *epd_pool;
3450         unsigned long iflag;
3451
3452         epd_pool = &phba->epd_pool;
3453         qp = &phba->sli4_hba.hdwq[0];
3454
3455         spin_lock_init(&epd_pool->lock);
3456         spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3457         spin_lock(&epd_pool->lock);
3458         INIT_LIST_HEAD(&epd_pool->list);
3459         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3460                                  &qp->lpfc_io_buf_list_put, list) {
3461                 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3462                 lpfc_ncmd->expedite = true;
3463                 qp->put_io_bufs--;
3464                 epd_pool->count++;
3465                 if (epd_pool->count >= XRI_BATCH)
3466                         break;
3467         }
3468         spin_unlock(&epd_pool->lock);
3469         spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3470 }
3471
3472 /**
3473  * lpfc_destroy_expedite_pool - destroy expedite pool
3474  * @phba: pointer to lpfc hba data structure.
3475  *
3476  * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3477  * of HWQ 0 and clears the expedite mark.
3478  **/
3479 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3480 {
3481         struct lpfc_sli4_hdw_queue *qp;
3482         struct lpfc_io_buf *lpfc_ncmd;
3483         struct lpfc_io_buf *lpfc_ncmd_next;
3484         struct lpfc_epd_pool *epd_pool;
3485         unsigned long iflag;
3486
3487         epd_pool = &phba->epd_pool;
3488         qp = &phba->sli4_hba.hdwq[0];
3489
3490         spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3491         spin_lock(&epd_pool->lock);
3492         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3493                                  &epd_pool->list, list) {
3494                 list_move_tail(&lpfc_ncmd->list,
3495                                &qp->lpfc_io_buf_list_put);
3496                 lpfc_ncmd->expedite = false;
3497                 qp->put_io_bufs++;
3498                 epd_pool->count--;
3499         }
3500         spin_unlock(&epd_pool->lock);
3501         spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3502 }
3503
3504 /**
3505  * lpfc_create_multixri_pools - create multi-XRI pools
3506  * @phba: pointer to lpfc hba data structure.
3507  *
3508  * This routine initializes the public and private XRI pools per HWQ, then
3509  * moves XRIs from lpfc_io_buf_list_put to the public pool. High and low
3510  * watermarks are also initialized.
3511  **/
3512 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3513 {
3514         u32 i, j;
3515         u32 hwq_count;
3516         u32 count_per_hwq;
3517         struct lpfc_io_buf *lpfc_ncmd;
3518         struct lpfc_io_buf *lpfc_ncmd_next;
3519         unsigned long iflag;
3520         struct lpfc_sli4_hdw_queue *qp;
3521         struct lpfc_multixri_pool *multixri_pool;
3522         struct lpfc_pbl_pool *pbl_pool;
3523         struct lpfc_pvt_pool *pvt_pool;
3524
3525         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3526                         "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3527                         phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3528                         phba->sli4_hba.io_xri_cnt);
3529
3530         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3531                 lpfc_create_expedite_pool(phba);
3532
3533         hwq_count = phba->cfg_hdw_queue;
3534         count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3535
3536         for (i = 0; i < hwq_count; i++) {
3537                 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3538
3539                 if (!multixri_pool) {
3540                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3541                                         "1238 Failed to allocate memory for "
3542                                         "multixri_pool\n");
3543
3544                         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3545                                 lpfc_destroy_expedite_pool(phba);
3546
3547                         j = 0;
3548                         while (j < i) {
3549                                 qp = &phba->sli4_hba.hdwq[j];
3550                                 kfree(qp->p_multixri_pool);
3551                                 j++;
3552                         }
3553                         phba->cfg_xri_rebalancing = 0;
3554                         return;
3555                 }
3556
3557                 qp = &phba->sli4_hba.hdwq[i];
3558                 qp->p_multixri_pool = multixri_pool;
3559
3560                 multixri_pool->xri_limit = count_per_hwq;
3561                 multixri_pool->rrb_next_hwqid = i;
3562
3563                 /* Deal with public free xri pool */
3564                 pbl_pool = &multixri_pool->pbl_pool;
3565                 spin_lock_init(&pbl_pool->lock);
3566                 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3567                 spin_lock(&pbl_pool->lock);
3568                 INIT_LIST_HEAD(&pbl_pool->list);
3569                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3570                                          &qp->lpfc_io_buf_list_put, list) {
3571                         list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3572                         qp->put_io_bufs--;
3573                         pbl_pool->count++;
3574                 }
3575                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3576                                 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3577                                 pbl_pool->count, i);
3578                 spin_unlock(&pbl_pool->lock);
3579                 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3580
3581                 /* Deal with private free xri pool */
3582                 pvt_pool = &multixri_pool->pvt_pool;
3583                 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3584                 pvt_pool->low_watermark = XRI_BATCH;
3585                 spin_lock_init(&pvt_pool->lock);
3586                 spin_lock_irqsave(&pvt_pool->lock, iflag);
3587                 INIT_LIST_HEAD(&pvt_pool->list);
3588                 pvt_pool->count = 0;
3589                 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3590         }
3591 }
3592
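/*
 * Worked example (illustrative numbers): with io_xri_cnt = 2048 and
 * cfg_hdw_queue = 16, each HWQ gets xri_limit = 2048 / 16 = 128, a
 * private-pool high watermark of 128 / 2 = 64, and a low watermark of
 * XRI_BATCH. Every XRI found on a HWQ's put list starts out in that
 * HWQ's public pbl_pool; the private pvt_pool starts empty.
 */
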
3593 /**
3594  * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3595  * @phba: pointer to lpfc hba data structure.
3596  *
3597  * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
3598  **/
3599 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3600 {
3601         u32 i;
3602         u32 hwq_count;
3603         struct lpfc_io_buf *lpfc_ncmd;
3604         struct lpfc_io_buf *lpfc_ncmd_next;
3605         unsigned long iflag;
3606         struct lpfc_sli4_hdw_queue *qp;
3607         struct lpfc_multixri_pool *multixri_pool;
3608         struct lpfc_pbl_pool *pbl_pool;
3609         struct lpfc_pvt_pool *pvt_pool;
3610
3611         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3612                 lpfc_destroy_expedite_pool(phba);
3613
3614         if (!(phba->pport->load_flag & FC_UNLOADING))
3615                 lpfc_sli_flush_io_rings(phba);
3616
3617         hwq_count = phba->cfg_hdw_queue;
3618
3619         for (i = 0; i < hwq_count; i++) {
3620                 qp = &phba->sli4_hba.hdwq[i];
3621                 multixri_pool = qp->p_multixri_pool;
3622                 if (!multixri_pool)
3623                         continue;
3624
3625                 qp->p_multixri_pool = NULL;
3626
3627                 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3628
3629                 /* Deal with public free xri pool */
3630                 pbl_pool = &multixri_pool->pbl_pool;
3631                 spin_lock(&pbl_pool->lock);
3632
3633                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3634                                 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3635                                 pbl_pool->count, i);
3636
3637                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3638                                          &pbl_pool->list, list) {
3639                         list_move_tail(&lpfc_ncmd->list,
3640                                        &qp->lpfc_io_buf_list_put);
3641                         qp->put_io_bufs++;
3642                         pbl_pool->count--;
3643                 }
3644
3645                 INIT_LIST_HEAD(&pbl_pool->list);
3646                 pbl_pool->count = 0;
3647
3648                 spin_unlock(&pbl_pool->lock);
3649
3650                 /* Deal with private free xri pool */
3651                 pvt_pool = &multixri_pool->pvt_pool;
3652                 spin_lock(&pvt_pool->lock);
3653
3654                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3655                                 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3656                                 pvt_pool->count, i);
3657
3658                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3659                                          &pvt_pool->list, list) {
3660                         list_move_tail(&lpfc_ncmd->list,
3661                                        &qp->lpfc_io_buf_list_put);
3662                         qp->put_io_bufs++;
3663                         pvt_pool->count--;
3664                 }
3665
3666                 INIT_LIST_HEAD(&pvt_pool->list);
3667                 pvt_pool->count = 0;
3668
3669                 spin_unlock(&pvt_pool->lock);
3670                 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3671
3672                 kfree(multixri_pool);
3673         }
3674 }
3675
3676 /**
3677  * lpfc_online - Initialize and bring a HBA online
3678  * @phba: pointer to lpfc hba data structure.
3679  *
3680  * This routine initializes the HBA and brings a HBA online. During this
3681  * process, the management interface is blocked to prevent user space access
3682  * to the HBA interfering with the driver initialization.
3683  *
3684  * Return codes
3685  *   0 - successful
3686  *   1 - failed
3687  **/
3688 int
3689 lpfc_online(struct lpfc_hba *phba)
3690 {
3691         struct lpfc_vport *vport;
3692         struct lpfc_vport **vports;
3693         int i, error = 0;
3694         bool vpis_cleared = false;
3695
3696         if (!phba)
3697                 return 0;
3698         vport = phba->pport;
3699
3700         if (!(vport->fc_flag & FC_OFFLINE_MODE))
3701                 return 0;
3702
3703         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3704                         "0458 Bring Adapter online\n");
3705
3706         lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3707
3708         if (phba->sli_rev == LPFC_SLI_REV4) {
3709                 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3710                         lpfc_unblock_mgmt_io(phba);
3711                         return 1;
3712                 }
3713                 spin_lock_irq(&phba->hbalock);
3714                 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3715                         vpis_cleared = true;
3716                 spin_unlock_irq(&phba->hbalock);
3717
3718                 /* Reestablish the local initiator port.
3719                  * The offline process destroyed the previous lport.
3720                  */
3721                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3722                                 !phba->nvmet_support) {
3723                         error = lpfc_nvme_create_localport(phba->pport);
3724                         if (error)
3725                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3726                                         "6132 NVME restore reg failed "
3727                                         "on nvmei error x%x\n", error);
3728                 }
3729         } else {
3730                 lpfc_sli_queue_init(phba);
3731                 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3732                         lpfc_unblock_mgmt_io(phba);
3733                         return 1;
3734                 }
3735         }
3736
3737         vports = lpfc_create_vport_work_array(phba);
3738         if (vports != NULL) {
3739                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3740                         struct Scsi_Host *shost;
3741                         shost = lpfc_shost_from_vport(vports[i]);
3742                         spin_lock_irq(shost->host_lock);
3743                         vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3744                         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3745                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3746                         if (phba->sli_rev == LPFC_SLI_REV4) {
3747                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3748                                 if ((vpis_cleared) &&
3749                                     (vports[i]->port_type !=
3750                                         LPFC_PHYSICAL_PORT))
3751                                         vports[i]->vpi = 0;
3752                         }
3753                         spin_unlock_irq(shost->host_lock);
3754                 }
3755         }
3756         lpfc_destroy_vport_work_array(phba, vports);
3757
3758         if (phba->cfg_xri_rebalancing)
3759                 lpfc_create_multixri_pools(phba);
3760
3761         lpfc_cpuhp_add(phba);
3762
3763         lpfc_unblock_mgmt_io(phba);
3764         return 0;
3765 }
3766
3767 /**
3768  * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3769  * @phba: pointer to lpfc hba data structure.
3770  *
3771  * This routine marks a HBA's management interface as not blocked. Once the
3772  * HBA's management interface is marked as not blocked, all user space
3773  * access to the HBA, whether from the sysfs interface or the libdfc
3774  * interface, is allowed. The HBA is set to block the management interface
3775  * when the driver prepares the HBA interface for online or offline and is
3776  * set to unblock the management interface afterwards.
3777  **/
3778 void
3779 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3780 {
3781         unsigned long iflag;
3782
3783         spin_lock_irqsave(&phba->hbalock, iflag);
3784         phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3785         spin_unlock_irqrestore(&phba->hbalock, iflag);
3786 }
3787
3788 /**
3789  * lpfc_offline_prep - Prepare a HBA to be brought offline
3790  * @phba: pointer to lpfc hba data structure.
3791  * @mbx_action: flag for mailbox shutdown action.
3792  *
3793  * This routine is invoked to prepare a HBA to be brought offline. It performs
3794  * unregistration login to all the nodes on all vports and flushes the mailbox
3795  * queue to make it ready to be brought offline.
3796  **/
3797 void
3798 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3799 {
3800         struct lpfc_vport *vport = phba->pport;
3801         struct lpfc_nodelist  *ndlp, *next_ndlp;
3802         struct lpfc_vport **vports;
3803         struct Scsi_Host *shost;
3804         int i;
3805         int offline;
3806         bool hba_pci_err;
3807
3808         if (vport->fc_flag & FC_OFFLINE_MODE)
3809                 return;
3810
3811         lpfc_block_mgmt_io(phba, mbx_action);
3812
3813         lpfc_linkdown(phba);
3814
3815         offline =  pci_channel_offline(phba->pcidev);
3816         hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3817
3818         /* Issue an unreg_login to all nodes on all vports */
3819         vports = lpfc_create_vport_work_array(phba);
3820         if (vports != NULL) {
3821                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3822                         if (vports[i]->load_flag & FC_UNLOADING)
3823                                 continue;
3824                         shost = lpfc_shost_from_vport(vports[i]);
3825                         spin_lock_irq(shost->host_lock);
3826                         vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3827                         vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3828                         vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3829                         spin_unlock_irq(shost->host_lock);
3830
3831                         shost = lpfc_shost_from_vport(vports[i]);
3832                         list_for_each_entry_safe(ndlp, next_ndlp,
3833                                                  &vports[i]->fc_nodes,
3834                                                  nlp_listp) {
3835
3836                                 spin_lock_irq(&ndlp->lock);
3837                                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3838                                 spin_unlock_irq(&ndlp->lock);
3839
3840                                 if (offline || hba_pci_err) {
3841                                         spin_lock_irq(&ndlp->lock);
3842                                         ndlp->nlp_flag &= ~(NLP_UNREG_INP |
3843                                                             NLP_RPI_REGISTERED);
3844                                         spin_unlock_irq(&ndlp->lock);
3845                                         if (phba->sli_rev == LPFC_SLI_REV4)
3846                                                 lpfc_sli_rpi_release(vports[i],
3847                                                                      ndlp);
3848                                 } else {
3849                                         lpfc_unreg_rpi(vports[i], ndlp);
3850                                 }
3851                                 /*
3852                                  * Whenever an SLI4 port goes offline, free the
3853                                  * RPI. Get a new RPI when the adapter port
3854                                  * comes back online.
3855                                  */
3856                                 if (phba->sli_rev == LPFC_SLI_REV4) {
3857                                         lpfc_printf_vlog(vports[i], KERN_INFO,
3858                                                  LOG_NODE | LOG_DISCOVERY,
3859                                                  "0011 Free RPI x%x on "
3860                                                  "ndlp: x%px did x%x\n",
3861                                                  ndlp->nlp_rpi, ndlp,
3862                                                  ndlp->nlp_DID);
3863                                         lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3864                                         ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3865                                 }
3866
3867                                 if (ndlp->nlp_type & NLP_FABRIC) {
3868                                         lpfc_disc_state_machine(vports[i], ndlp,
3869                                                 NULL, NLP_EVT_DEVICE_RECOVERY);
3870
3871                                         /* Don't remove the node unless the node
3872                                          * has been unregistered with the
3873                                          * transport, and we're not in recovery
3874                                          * before dev_loss_tmo triggered.
3875                                          * Otherwise, let dev_loss take care of
3876                                          * the node.
3877                                          */
3878                                         if (!(ndlp->save_flags &
3879                                               NLP_IN_RECOV_POST_DEV_LOSS) &&
3880                                             !(ndlp->fc4_xpt_flags &
3881                                               (NVME_XPT_REGD | SCSI_XPT_REGD)))
3882                                                 lpfc_disc_state_machine
3883                                                         (vports[i], ndlp,
3884                                                          NULL,
3885                                                          NLP_EVT_DEVICE_RM);
3886                                 }
3887                         }
3888                 }
3889         }
3890         lpfc_destroy_vport_work_array(phba, vports);
3891
3892         lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3893
3894         if (phba->wq)
3895                 flush_workqueue(phba->wq);
3896 }
3897
3898 /**
3899  * lpfc_offline - Bring an HBA offline
3900  * @phba: pointer to lpfc hba data structure.
3901  *
3902  * This routine actually brings an HBA offline. It stops all the timers
3903  * associated with the HBA, brings down the SLI layer, and eventually
3904  * marks the HBA as in offline state for the upper layer protocol.
3905  **/
3906 void
3907 lpfc_offline(struct lpfc_hba *phba)
3908 {
3909         struct Scsi_Host  *shost;
3910         struct lpfc_vport **vports;
3911         int i;
3912
3913         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3914                 return;
3915
3916         /* stop port and all timers associated with this hba */
3917         lpfc_stop_port(phba);
3918
3919         /* Tear down the local and target port registrations.  The
3920          * nvme transports need to clean up.
3921          */
3922         lpfc_nvmet_destroy_targetport(phba);
3923         lpfc_nvme_destroy_localport(phba->pport);
3924
3925         vports = lpfc_create_vport_work_array(phba);
3926         if (vports != NULL)
3927                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3928                         lpfc_stop_vport_timers(vports[i]);
3929         lpfc_destroy_vport_work_array(phba, vports);
3930         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3931                         "0460 Bring Adapter offline\n");
3932         /* Bring down the SLI layer and clean up. The HBA is offline
3933          * now. */
3934         lpfc_sli_hba_down(phba);
3935         spin_lock_irq(&phba->hbalock);
3936         phba->work_ha = 0;
3937         spin_unlock_irq(&phba->hbalock);
3938         vports = lpfc_create_vport_work_array(phba);
3939         if (vports != NULL)
3940                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3941                         shost = lpfc_shost_from_vport(vports[i]);
3942                         spin_lock_irq(shost->host_lock);
3943                         vports[i]->work_port_events = 0;
3944                         vports[i]->fc_flag |= FC_OFFLINE_MODE;
3945                         spin_unlock_irq(shost->host_lock);
3946                 }
3947         lpfc_destroy_vport_work_array(phba, vports);
3948         /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3949          * in hba_unset
3950          */
3951         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3952                 __lpfc_cpuhp_remove(phba);
3953
3954         if (phba->cfg_xri_rebalancing)
3955                 lpfc_destroy_multixri_pools(phba);
3956 }
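/*
 * Usage sketch (illustrative only, not taken verbatim from this file):
 * elsewhere in the driver, lpfc_offline() is paired with lpfc_offline_prep()
 * before a board reset and with lpfc_online() afterwards, e.g.:
 *
 *	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 *	lpfc_offline(phba);
 *	lpfc_sli_brdrestart(phba);
 *	lpfc_online(phba);
 */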
3957
3958 /**
3959  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3960  * @phba: pointer to lpfc hba data structure.
3961  *
3962  * This routine frees all the SCSI buffers and IOCBs held on the driver
3963  * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3964  * the internal resources before the device is removed from the system.
3965  **/
3966 static void
3967 lpfc_scsi_free(struct lpfc_hba *phba)
3968 {
3969         struct lpfc_io_buf *sb, *sb_next;
3970
3971         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3972                 return;
3973
3974         spin_lock_irq(&phba->hbalock);
3975
3976         /* Release all the lpfc_scsi_bufs maintained by this host. */
3977
3978         spin_lock(&phba->scsi_buf_list_put_lock);
3979         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3980                                  list) {
3981                 list_del(&sb->list);
3982                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3983                               sb->dma_handle);
3984                 kfree(sb);
3985                 phba->total_scsi_bufs--;
3986         }
3987         spin_unlock(&phba->scsi_buf_list_put_lock);
3988
3989         spin_lock(&phba->scsi_buf_list_get_lock);
3990         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3991                                  list) {
3992                 list_del(&sb->list);
3993                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3994                               sb->dma_handle);
3995                 kfree(sb);
3996                 phba->total_scsi_bufs--;
3997         }
3998         spin_unlock(&phba->scsi_buf_list_get_lock);
3999         spin_unlock_irq(&phba->hbalock);
4000 }
4001
4002 /**
4003  * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
4004  * @phba: pointer to lpfc hba data structure.
4005  *
4006  * This routine frees all the IO buffers and IOCBs held on the driver
4007  * lists back to the kernel. It is called from lpfc_pci_remove_one to free
4008  * the internal resources before the device is removed from the system.
4009  **/
4010 void
4011 lpfc_io_free(struct lpfc_hba *phba)
4012 {
4013         struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
4014         struct lpfc_sli4_hdw_queue *qp;
4015         int idx;
4016
4017         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4018                 qp = &phba->sli4_hba.hdwq[idx];
4019                 /* Release all the IO buffers maintained by this hdwq. */
4020                 spin_lock(&qp->io_buf_list_put_lock);
4021                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4022                                          &qp->lpfc_io_buf_list_put,
4023                                          list) {
4024                         list_del(&lpfc_ncmd->list);
4025                         qp->put_io_bufs--;
4026                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4027                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4028                         if (phba->cfg_xpsgl && !phba->nvmet_support)
4029                                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4030                         lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4031                         kfree(lpfc_ncmd);
4032                         qp->total_io_bufs--;
4033                 }
4034                 spin_unlock(&qp->io_buf_list_put_lock);
4035
4036                 spin_lock(&qp->io_buf_list_get_lock);
4037                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4038                                          &qp->lpfc_io_buf_list_get,
4039                                          list) {
4040                         list_del(&lpfc_ncmd->list);
4041                         qp->get_io_bufs--;
4042                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4043                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4044                         if (phba->cfg_xpsgl && !phba->nvmet_support)
4045                                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4046                         lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4047                         kfree(lpfc_ncmd);
4048                         qp->total_io_bufs--;
4049                 }
4050                 spin_unlock(&qp->io_buf_list_get_lock);
4051         }
4052 }
4053
4054 /**
4055  * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
4056  * @phba: pointer to lpfc hba data structure.
4057  *
4058  * This routine first calculates the sizes of the current els and allocated
4059  * scsi sgl lists, and then goes through all sgls to update the physical
4060  * XRIs assigned due to port function reset. During port initialization, the
4061  * current els and allocated scsi sgl list counts are 0.
4062  *
4063  * Return codes
4064  *   0 - successful, -ENOMEM on allocation failure
4065  **/
4066 int
4067 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
4068 {
4069         struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4070         uint16_t i, lxri, xri_cnt, els_xri_cnt;
4071         LIST_HEAD(els_sgl_list);
4072         int rc;
4073
4074         /*
4075          * update on pci function's els xri-sgl list
4076          */
4077         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4078
4079         if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
4080                 /* els xri-sgl expanded */
4081                 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
4082                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4083                                 "3157 ELS xri-sgl count increased from "
4084                                 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4085                                 els_xri_cnt);
4086                 /* allocate the additional els sgls */
4087                 for (i = 0; i < xri_cnt; i++) {
4088                         sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4089                                              GFP_KERNEL);
4090                         if (sglq_entry == NULL) {
4091                                 lpfc_printf_log(phba, KERN_ERR,
4092                                                 LOG_TRACE_EVENT,
4093                                                 "2562 Failure to allocate an "
4094                                                 "ELS sgl entry:%d\n", i);
4095                                 rc = -ENOMEM;
4096                                 goto out_free_mem;
4097                         }
4098                         sglq_entry->buff_type = GEN_BUFF_TYPE;
4099                         sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
4100                                                            &sglq_entry->phys);
4101                         if (sglq_entry->virt == NULL) {
4102                                 kfree(sglq_entry);
4103                                 lpfc_printf_log(phba, KERN_ERR,
4104                                                 LOG_TRACE_EVENT,
4105                                                 "2563 Failure to allocate an "
4106                                                 "ELS mbuf:%d\n", i);
4107                                 rc = -ENOMEM;
4108                                 goto out_free_mem;
4109                         }
4110                         sglq_entry->sgl = sglq_entry->virt;
4111                         memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4112                         sglq_entry->state = SGL_FREED;
4113                         list_add_tail(&sglq_entry->list, &els_sgl_list);
4114                 }
4115                 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4116                 list_splice_init(&els_sgl_list,
4117                                  &phba->sli4_hba.lpfc_els_sgl_list);
4118                 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4119         } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4120                 /* els xri-sgl shrunk */
4121                 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4122                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4123                                 "3158 ELS xri-sgl count decreased from "
4124                                 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4125                                 els_xri_cnt);
4126                 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4127                 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4128                                  &els_sgl_list);
4129                 /* release extra els sgls from list */
4130                 for (i = 0; i < xri_cnt; i++) {
4131                         list_remove_head(&els_sgl_list,
4132                                          sglq_entry, struct lpfc_sglq, list);
4133                         if (sglq_entry) {
4134                                 __lpfc_mbuf_free(phba, sglq_entry->virt,
4135                                                  sglq_entry->phys);
4136                                 kfree(sglq_entry);
4137                         }
4138                 }
4139                 list_splice_init(&els_sgl_list,
4140                                  &phba->sli4_hba.lpfc_els_sgl_list);
4141                 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4142         } else
4143                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4144                                 "3163 ELS xri-sgl count unchanged: %d\n",
4145                                 els_xri_cnt);
4146         phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4147
4148         /* update xris to els sgls on the list */
4149         sglq_entry = NULL;
4150         sglq_entry_next = NULL;
4151         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4152                                  &phba->sli4_hba.lpfc_els_sgl_list, list) {
4153                 lxri = lpfc_sli4_next_xritag(phba);
4154                 if (lxri == NO_XRI) {
4155                         lpfc_printf_log(phba, KERN_ERR,
4156                                         LOG_TRACE_EVENT,
4157                                         "2400 Failed to allocate xri for "
4158                                         "ELS sgl\n");
4159                         rc = -ENOMEM;
4160                         goto out_free_mem;
4161                 }
4162                 sglq_entry->sli4_lxritag = lxri;
4163                 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4164         }
4165         return 0;
4166
4167 out_free_mem:
4168         lpfc_free_els_sgl_list(phba);
4169         return rc;
4170 }
4171
4172 /**
4173  * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4174  * @phba: pointer to lpfc hba data structure.
4175  *
4176  * This routine first calculates the size of the current nvmet xri-sgl
4177  * list, and then goes through all sgls to update the physical XRIs
4178  * assigned due to port function reset. During port initialization, the
4179  * current nvmet sgl list count is 0.
4180  *
4181  * Return codes
4182  *   0 - successful, -ENOMEM on allocation failure
4183  **/
4184 int
4185 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4186 {
4187         struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4188         uint16_t i, lxri, xri_cnt, els_xri_cnt;
4189         uint16_t nvmet_xri_cnt;
4190         LIST_HEAD(nvmet_sgl_list);
4191         int rc;
4192
4193         /*
4194          * update on pci function's nvmet xri-sgl list
4195          */
4196         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4197
4198         /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4199         nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4200         if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4201                 /* nvmet xri-sgl expanded */
4202                 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4203                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4204                                 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4205                                 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4206                 /* allocate the additional nvmet sgls */
4207                 for (i = 0; i < xri_cnt; i++) {
4208                         sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4209                                              GFP_KERNEL);
4210                         if (sglq_entry == NULL) {
4211                                 lpfc_printf_log(phba, KERN_ERR,
4212                                                 LOG_TRACE_EVENT,
4213                                                 "6303 Failure to allocate an "
4214                                                 "NVMET sgl entry:%d\n", i);
4215                                 rc = -ENOMEM;
4216                                 goto out_free_mem;
4217                         }
4218                         sglq_entry->buff_type = NVMET_BUFF_TYPE;
4219                         sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4220                                                            &sglq_entry->phys);
4221                         if (sglq_entry->virt == NULL) {
4222                                 kfree(sglq_entry);
4223                                 lpfc_printf_log(phba, KERN_ERR,
4224                                                 LOG_TRACE_EVENT,
4225                                                 "6304 Failure to allocate an "
4226                                                 "NVMET buf:%d\n", i);
4227                                 rc = -ENOMEM;
4228                                 goto out_free_mem;
4229                         }
4230                         sglq_entry->sgl = sglq_entry->virt;
4231                         memset(sglq_entry->sgl, 0,
4232                                phba->cfg_sg_dma_buf_size);
4233                         sglq_entry->state = SGL_FREED;
4234                         list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4235                 }
4236                 spin_lock_irq(&phba->hbalock);
4237                 spin_lock(&phba->sli4_hba.sgl_list_lock);
4238                 list_splice_init(&nvmet_sgl_list,
4239                                  &phba->sli4_hba.lpfc_nvmet_sgl_list);
4240                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4241                 spin_unlock_irq(&phba->hbalock);
4242         } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4243                 /* nvmet xri-sgl shrunk */
4244                 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4245                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4246                                 "6305 NVMET xri-sgl count decreased from "
4247                                 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4248                                 nvmet_xri_cnt);
4249                 spin_lock_irq(&phba->hbalock);
4250                 spin_lock(&phba->sli4_hba.sgl_list_lock);
4251                 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4252                                  &nvmet_sgl_list);
4253                 /* release extra nvmet sgls from list */
4254                 for (i = 0; i < xri_cnt; i++) {
4255                         list_remove_head(&nvmet_sgl_list,
4256                                          sglq_entry, struct lpfc_sglq, list);
4257                         if (sglq_entry) {
4258                                 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4259                                                     sglq_entry->phys);
4260                                 kfree(sglq_entry);
4261                         }
4262                 }
4263                 list_splice_init(&nvmet_sgl_list,
4264                                  &phba->sli4_hba.lpfc_nvmet_sgl_list);
4265                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4266                 spin_unlock_irq(&phba->hbalock);
4267         } else
4268                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4269                                 "6306 NVMET xri-sgl count unchanged: %d\n",
4270                                 nvmet_xri_cnt);
4271         phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4272
4273         /* update xris to nvmet sgls on the list */
4274         sglq_entry = NULL;
4275         sglq_entry_next = NULL;
4276         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4277                                  &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4278                 lxri = lpfc_sli4_next_xritag(phba);
4279                 if (lxri == NO_XRI) {
4280                         lpfc_printf_log(phba, KERN_ERR,
4281                                         LOG_TRACE_EVENT,
4282                                         "6307 Failed to allocate xri for "
4283                                         "NVMET sgl\n");
4284                         rc = -ENOMEM;
4285                         goto out_free_mem;
4286                 }
4287                 sglq_entry->sli4_lxritag = lxri;
4288                 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4289         }
4290         return 0;
4291
4292 out_free_mem:
4293         lpfc_free_nvmet_sgl_list(phba);
4294         return rc;
4295 }
4296
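/**
 * lpfc_io_buf_flush - Gather all IO buffers onto a single XRI-sorted list
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: pointer to the list head that receives the IO buffers.
 *
 * This routine takes all IO buffers off the per-hardware-queue get and put
 * lists and places them on @cbuf sorted by XRI, since POST_SGL posts a
 * sequential range of XRIs to the firmware.
 *
 * Return codes
 *   the number of IO buffers collected on @cbuf.
 **/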
4297 int
4298 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4299 {
4300         LIST_HEAD(blist);
4301         struct lpfc_sli4_hdw_queue *qp;
4302         struct lpfc_io_buf *lpfc_cmd;
4303         struct lpfc_io_buf *iobufp, *prev_iobufp;
4304         int idx, cnt, xri, inserted;
4305
4306         cnt = 0;
4307         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4308                 qp = &phba->sli4_hba.hdwq[idx];
4309                 spin_lock_irq(&qp->io_buf_list_get_lock);
4310                 spin_lock(&qp->io_buf_list_put_lock);
4311
4312                 /* Take everything off the get and put lists */
4313                 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4314                 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4315                 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4316                 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4317                 cnt += qp->get_io_bufs + qp->put_io_bufs;
4318                 qp->get_io_bufs = 0;
4319                 qp->put_io_bufs = 0;
4320                 qp->total_io_bufs = 0;
4321                 spin_unlock(&qp->io_buf_list_put_lock);
4322                 spin_unlock_irq(&qp->io_buf_list_get_lock);
4323         }
4324
4325         /*
4326          * Take IO buffers off blist and put on cbuf sorted by XRI.
4327          * This is because POST_SGL takes a sequential range of XRIs
4328          * to post to the firmware.
4329          */
4330         for (idx = 0; idx < cnt; idx++) {
4331                 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4332                 if (!lpfc_cmd)
4333                         return cnt;
4334                 if (idx == 0) {
4335                         list_add_tail(&lpfc_cmd->list, cbuf);
4336                         continue;
4337                 }
4338                 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4339                 inserted = 0;
4340                 prev_iobufp = NULL;
4341                 list_for_each_entry(iobufp, cbuf, list) {
4342                         if (xri < iobufp->cur_iocbq.sli4_xritag) {
4343                                 if (prev_iobufp)
4344                                         list_add(&lpfc_cmd->list,
4345                                                  &prev_iobufp->list);
4346                                 else
4347                                         list_add(&lpfc_cmd->list, cbuf);
4348                                 inserted = 1;
4349                                 break;
4350                         }
4351                         prev_iobufp = iobufp;
4352                 }
4353                 if (!inserted)
4354                         list_add_tail(&lpfc_cmd->list, cbuf);
4355         }
4356         return cnt;
4357 }
4358
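/**
 * lpfc_io_buf_replenish - Redistribute IO buffers across hardware queues
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: pointer to the list of IO buffers to redistribute.
 *
 * This routine takes the IO buffers off @cbuf and spreads them round-robin
 * across the hardware queues, placing each buffer on its queue's put list
 * and updating the queue counters.
 *
 * Return codes
 *   the number of IO buffers redistributed.
 **/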
4359 int
4360 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4361 {
4362         struct lpfc_sli4_hdw_queue *qp;
4363         struct lpfc_io_buf *lpfc_cmd;
4364         int idx, cnt;
4365         unsigned long iflags;
4366
4367         qp = phba->sli4_hba.hdwq;
4368         cnt = 0;
4369         while (!list_empty(cbuf)) {
4370                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4371                         list_remove_head(cbuf, lpfc_cmd,
4372                                          struct lpfc_io_buf, list);
4373                         if (!lpfc_cmd)
4374                                 return cnt;
4375                         cnt++;
4376                         qp = &phba->sli4_hba.hdwq[idx];
4377                         lpfc_cmd->hdwq_no = idx;
4378                         lpfc_cmd->hdwq = qp;
4379                         lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
4380                         spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
4381                         list_add_tail(&lpfc_cmd->list,
4382                                       &qp->lpfc_io_buf_list_put);
4383                         qp->put_io_bufs++;
4384                         qp->total_io_bufs++;
4385                         spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
4386                                                iflags);
4387                 }
4388         }
4389         return cnt;
4390 }
4391
4392 /**
4393  * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4394  * @phba: pointer to lpfc hba data structure.
4395  *
4396  * This routine first calculates the size of the current IO xri-sgl list,
4397  * and then goes through all IO buffers to update the physical XRIs
4398  * assigned due to port function reset. During port initialization, the
4399  * current IO buffer list count is 0.
4400  *
4401  * Return codes
4402  *   0 - successful, -ENOMEM on allocation failure
4403  **/
4404 int
4405 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4406 {
4407         struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4408         uint16_t i, lxri, els_xri_cnt;
4409         uint16_t io_xri_cnt, io_xri_max;
4410         LIST_HEAD(io_sgl_list);
4411         int rc, cnt;
4412
4413         /*
4414          * update on pci function's allocated nvme xri-sgl list
4415          */
4416
4417         /* maximum number of xris available for nvme buffers */
4418         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4419         io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4420         phba->sli4_hba.io_xri_max = io_xri_max;
4421
4422         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4423                         "6074 Current allocated XRI sgl count:%d, "
4424                         "maximum XRI count:%d els_xri_cnt:%d\n",
4425                         phba->sli4_hba.io_xri_cnt,
4426                         phba->sli4_hba.io_xri_max,
4427                         els_xri_cnt);
4428
4429         cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4430
4431         if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4432                 /* max nvme xri shrunk below the allocated nvme buffers */
4433                 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4434                                         phba->sli4_hba.io_xri_max;
4435                 /* release the extra allocated nvme buffers */
4436                 for (i = 0; i < io_xri_cnt; i++) {
4437                         list_remove_head(&io_sgl_list, lpfc_ncmd,
4438                                          struct lpfc_io_buf, list);
4439                         if (lpfc_ncmd) {
4440                                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4441                                               lpfc_ncmd->data,
4442                                               lpfc_ncmd->dma_handle);
4443                                 kfree(lpfc_ncmd);
4444                         }
4445                 }
4446                 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4447         }
4448
4449         /* update xris associated to remaining allocated nvme buffers */
4450         lpfc_ncmd = NULL;
4451         lpfc_ncmd_next = NULL;
4452         phba->sli4_hba.io_xri_cnt = cnt;
4453         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4454                                  &io_sgl_list, list) {
4455                 lxri = lpfc_sli4_next_xritag(phba);
4456                 if (lxri == NO_XRI) {
4457                         lpfc_printf_log(phba, KERN_ERR,
4458                                         LOG_TRACE_EVENT,
4459                                         "6075 Failed to allocate xri for "
4460                                         "nvme buffer\n");
4461                         rc = -ENOMEM;
4462                         goto out_free_mem;
4463                 }
4464                 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4465                 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4466         }
4467         cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4468         return 0;
4469
4470 out_free_mem:
4471         lpfc_io_free(phba);
4472         return rc;
4473 }
4474
4475 /**
4476  * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4477  * @phba: Pointer to lpfc hba data structure.
4478  * @num_to_alloc: The requested number of buffers to allocate.
4479  *
4480  * This routine allocates nvme buffers for a device with the SLI-4
4481  * interface spec; each nvme buffer contains all the information needed
4482  * to initiate an I/O. After allocating up to @num_to_alloc IO buffers and
4483  * putting them on a list, it posts them to the port using SGL block post.
4484  *
4485  * Return codes:
4486  *   int - number of IO buffers that were allocated and posted.
4487  *   0 = failure; fewer than @num_to_alloc indicates a partial failure.
4488  **/
4489 int
4490 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4491 {
4492         struct lpfc_io_buf *lpfc_ncmd;
4493         struct lpfc_iocbq *pwqeq;
4494         uint16_t iotag, lxri = 0;
4495         int bcnt, num_posted;
4496         LIST_HEAD(prep_nblist);
4497         LIST_HEAD(post_nblist);
4498         LIST_HEAD(nvme_nblist);
4499
4500         phba->sli4_hba.io_xri_cnt = 0;
4501         for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4502                 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4503                 if (!lpfc_ncmd)
4504                         break;
4505                 /*
4506                  * Get memory from the pci pool to map the virt space to
4507                  * pci bus space for an I/O. The DMA buffer includes the
4508                  * number of SGE's necessary to support the sg_tablesize.
4509                  */
4510                 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4511                                                   GFP_KERNEL,
4512                                                   &lpfc_ncmd->dma_handle);
4513                 if (!lpfc_ncmd->data) {
4514                         kfree(lpfc_ncmd);
4515                         break;
4516                 }
4517
4518                 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4519                         INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4520                 } else {
4521                         /*
4522                          * 4K Page alignment is CRITICAL to BlockGuard, double
4523                          * check to be sure.
4524                          */
4525                         if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4526                             (((unsigned long)(lpfc_ncmd->data) &
4527                             (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4528                                 lpfc_printf_log(phba, KERN_ERR,
4529                                                 LOG_TRACE_EVENT,
4530                                                 "3369 Memory alignment err: "
4531                                                 "addr=%lx\n",
4532                                                 (unsigned long)lpfc_ncmd->data);
4533                                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4534                                               lpfc_ncmd->data,
4535                                               lpfc_ncmd->dma_handle);
4536                                 kfree(lpfc_ncmd);
4537                                 break;
4538                         }
4539                 }
4540
4541                 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4542
4543                 lxri = lpfc_sli4_next_xritag(phba);
4544                 if (lxri == NO_XRI) {
4545                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4546                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4547                         kfree(lpfc_ncmd);
4548                         break;
4549                 }
4550                 pwqeq = &lpfc_ncmd->cur_iocbq;
4551
4552                 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4553                 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4554                 if (iotag == 0) {
4555                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4556                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4557                         kfree(lpfc_ncmd);
4558                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4559                                         "6121 Failed to allocate IOTAG for"
4560                                         " XRI:0x%x\n", lxri);
4561                         lpfc_sli4_free_xri(phba, lxri);
4562                         break;
4563                 }
4564                 pwqeq->sli4_lxritag = lxri;
4565                 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4566
4567                 /* Initialize local short-hand pointers. */
4568                 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4569                 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4570                 lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
4571                 spin_lock_init(&lpfc_ncmd->buf_lock);
4572
4573                 /* add the nvme buffer to a post list */
4574                 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4575                 phba->sli4_hba.io_xri_cnt++;
4576         }
4577         lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4578                         "6114 Allocated %d out of %d requested new NVME "
4579                         "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
4580                         sizeof(*lpfc_ncmd));
4581
4582
4583         /* post the list of nvme buffer sgls to port if available */
4584         if (!list_empty(&post_nblist))
4585                 num_posted = lpfc_sli4_post_io_sgl_list(
4586                                 phba, &post_nblist, bcnt);
4587         else
4588                 num_posted = 0;
4589
4590         return num_posted;
4591 }
4592
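/**
 * lpfc_get_wwpn - Read the WWPN of the HBA instance
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a READ_NV mailbox command by polling and extracts
 * the port name from the response. On SLI4 the big-endian value is
 * byte-swapped; on earlier revisions the two 32-bit words are rotated
 * into place.
 *
 * Return codes
 *   the WWPN on success, (uint64_t)-1 on mailbox failure.
 **/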
4593 static uint64_t
4594 lpfc_get_wwpn(struct lpfc_hba *phba)
4595 {
4596         uint64_t wwn;
4597         int rc;
4598         LPFC_MBOXQ_t *mboxq;
4599         MAILBOX_t *mb;
4600
4601         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4602                                                 GFP_KERNEL);
4603         if (!mboxq)
4604                 return (uint64_t)-1;
4605
4606         /* First get WWN of HBA instance */
4607         lpfc_read_nv(phba, mboxq);
4608         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4609         if (rc != MBX_SUCCESS) {
4610                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4611                                 "6019 Mailbox failed, mbxCmd x%x "
4612                                 "READ_NV, mbxStatus x%x\n",
4613                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4614                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4615                 mempool_free(mboxq, phba->mbox_mem_pool);
4616                 return (uint64_t)-1;
4617         }
4618         mb = &mboxq->u.mb;
4619         memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4620         /* wwn is WWPN of HBA instance */
4621         mempool_free(mboxq, phba->mbox_mem_pool);
4622         if (phba->sli_rev == LPFC_SLI_REV4)
4623                 return be64_to_cpu(wwn);
4624         else
4625                 return rol64(wwn, 32);
4626 }
4627
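/**
 * lpfc_get_sg_tablesize - Select the scatter-gather table size for a host
 * @phba: pointer to lpfc hba data structure.
 *
 * Return codes
 *   LPFC_MAX_SG_TABLESIZE - SLI4 with extended SGLs and no NVMET support.
 *   phba->cfg_scsi_seg_cnt - all other SLI4 configurations.
 *   phba->cfg_sg_seg_cnt - SLI3 configurations.
 **/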
4628 static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
4629 {
4630         if (phba->sli_rev == LPFC_SLI_REV4) {
4631                 if (phba->cfg_xpsgl && !phba->nvmet_support)
4632                         return LPFC_MAX_SG_TABLESIZE;
4633                 else
4634                         return phba->cfg_scsi_seg_cnt;
4635         }
4636         return phba->cfg_sg_seg_cnt;
4637 }
4638
4639 /**
4640  * lpfc_vmid_res_alloc - Allocates resources for VMID
4641  * @phba: pointer to lpfc hba data structure.
4642  * @vport: pointer to vport data structure
4643  *
4644  * This routine allocates the resources needed for the VMID.
4645  *
4646  * Return codes
4647  *      0 on Success
4648  *      Non-0 on Failure
4649  */
4650 static int
4651 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4652 {
4653         /* VMID feature is supported only on SLI4 */
4654         if (phba->sli_rev == LPFC_SLI_REV3) {
4655                 phba->cfg_vmid_app_header = 0;
4656                 phba->cfg_vmid_priority_tagging = 0;
4657         }
4658
4659         if (lpfc_is_vmid_enabled(phba)) {
4660                 vport->vmid =
4661                     kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4662                             GFP_KERNEL);
4663                 if (!vport->vmid)
4664                         return -ENOMEM;
4665
4666                 rwlock_init(&vport->vmid_lock);
4667
4668                 /* Set the VMID parameters for the vport */
4669                 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4670                 vport->vmid_inactivity_timeout =
4671                     phba->cfg_vmid_inactivity_timeout;
4672                 vport->max_vmid = phba->cfg_max_vmid;
4673                 vport->cur_vmid_cnt = 0;
4674
4675                 vport->vmid_priority_range = bitmap_zalloc
4676                         (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4677
4678                 if (!vport->vmid_priority_range) {
4679                         kfree(vport->vmid);
4680                         return -ENOMEM;
4681                 }
4682
4683                 hash_init(vport->hash_table);
4684         }
4685         return 0;
4686 }
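/*
 * Note: the matching teardown simply releases the two allocations made
 * above, as on the error path of lpfc_create_port() below:
 *
 *	kfree(vport->vmid);
 *	bitmap_free(vport->vmid_priority_range);
 */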
4687
4688 /**
4689  * lpfc_create_port - Create an FC port
4690  * @phba: pointer to lpfc hba data structure.
4691  * @instance: a unique integer ID to this FC port.
4692  * @dev: pointer to the device data structure.
4693  *
4694  * This routine creates an FC port for the upper layer protocol. The FC port
4695  * can be created on top of either a physical port or a virtual port provided
4696  * by the HBA. This routine also allocates a SCSI host data structure (shost)
4697  * and associates it with the FC port before adding the shost to the SCSI
4698  * layer.
4699  *
4700  * Return codes
4701  *   vport - pointer to the newly created virtual N_Port data structure.
4702  *   NULL - port create failed.
4703  **/
4704 struct lpfc_vport *
4705 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4706 {
4707         struct lpfc_vport *vport;
4708         struct Scsi_Host  *shost = NULL;
4709         struct scsi_host_template *template;
4710         int error = 0;
4711         int i;
4712         uint64_t wwn;
4713         bool use_no_reset_hba = false;
4714         int rc;
4715
4716         if (lpfc_no_hba_reset_cnt) {
4717                 if (phba->sli_rev < LPFC_SLI_REV4 &&
4718                     dev == &phba->pcidev->dev) {
4719                         /* Reset the port first */
4720                         lpfc_sli_brdrestart(phba);
4721                         rc = lpfc_sli_chipset_init(phba);
4722                         if (rc)
4723                                 return NULL;
4724                 }
4725                 wwn = lpfc_get_wwpn(phba);
4726         }
4727
4728         for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4729                 if (wwn == lpfc_no_hba_reset[i]) {
4730                         lpfc_printf_log(phba, KERN_ERR,
4731                                         LOG_TRACE_EVENT,
4732                                         "6020 Setting use_no_reset port=%llx\n",
4733                                         wwn);
4734                         use_no_reset_hba = true;
4735                         break;
4736                 }
4737         }
4738
4739         /* Seed template for SCSI host registration */
4740         if (dev == &phba->pcidev->dev) {
4741                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4742                         /* Seed physical port template */
4743                         template = &lpfc_template;
4744
4745                         if (use_no_reset_hba)
4746                                 /* template is for a no reset SCSI Host */
4747                                 template->eh_host_reset_handler = NULL;
4748
4749                         /* Seed updated value of sg_tablesize */
4750                         template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4751                 } else {
4752                         /* NVMET is for physical port only */
4753                         template = &lpfc_template_nvme;
4754                 }
4755         } else {
4756                 /* Seed vport template */
4757                 template = &lpfc_vport_template;
4758
4759                 /* Seed updated value of sg_tablesize */
4760                 template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4761         }
4762
4763         shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4764         if (!shost)
4765                 goto out;
4766
4767         vport = (struct lpfc_vport *) shost->hostdata;
4768         vport->phba = phba;
4769         vport->load_flag |= FC_LOADING;
4770         vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4771         vport->fc_rscn_flush = 0;
4772         lpfc_get_vport_cfgparam(vport);
4773
4774         /* Adjust value in vport */
4775         vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4776
4777         shost->unique_id = instance;
4778         shost->max_id = LPFC_MAX_TARGET;
4779         shost->max_lun = vport->cfg_max_luns;
4780         shost->this_id = -1;
4781         shost->max_cmd_len = 16;
4782
4783         if (phba->sli_rev == LPFC_SLI_REV4) {
4784                 if (!phba->cfg_fcp_mq_threshold ||
4785                     phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4786                         phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4787
4788                 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4789                                             phba->cfg_fcp_mq_threshold);
4790
4791                 shost->dma_boundary =
4792                         phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4793         } else
4794                 /* SLI-3 has a limited number of hardware queues (3),
4795                  * thus there is only one for FCP processing.
4796                  */
4797                 shost->nr_hw_queues = 1;
4798
4799         /*
4800          * Set initial can_queue value since 0 is no longer supported and
4801          * scsi_add_host will fail. This will be adjusted later based on the
4802          * max xri value determined in hba setup.
4803          */
4804         shost->can_queue = phba->cfg_hba_queue_depth - 10;
4805         if (dev != &phba->pcidev->dev) {
4806                 shost->transportt = lpfc_vport_transport_template;
4807                 vport->port_type = LPFC_NPIV_PORT;
4808         } else {
4809                 shost->transportt = lpfc_transport_template;
4810                 vport->port_type = LPFC_PHYSICAL_PORT;
4811         }
4812
4813         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4814                         "9081 CreatePort TMPLATE type %x TBLsize %d "
4815                         "SEGcnt %d/%d\n",
4816                         vport->port_type, shost->sg_tablesize,
4817                         phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4818
4819         /* Allocate the resources for VMID */
4820         rc = lpfc_vmid_res_alloc(phba, vport);
4821
4822         if (rc)
4823                 goto out_put_shost;
4824
4825         /* Initialize all internally managed lists. */
4826         INIT_LIST_HEAD(&vport->fc_nodes);
4827         INIT_LIST_HEAD(&vport->rcv_buffer_list);
4828         spin_lock_init(&vport->work_port_lock);
4829
4830         timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4831
4832         timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4833
4834         timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4835
4836         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4837                 lpfc_setup_bg(phba, shost);
4838
4839         error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4840         if (error)
4841                 goto out_free_vmid;
4842
4843         spin_lock_irq(&phba->port_list_lock);
4844         list_add_tail(&vport->listentry, &phba->port_list);
4845         spin_unlock_irq(&phba->port_list_lock);
4846         return vport;
4847
4848 out_free_vmid:
4849         kfree(vport->vmid);
4850         bitmap_free(vport->vmid_priority_range);
4851 out_put_shost:
4852         scsi_host_put(shost);
4853 out:
4854         return NULL;
4855 }
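/*
 * Caller sketch (illustrative, mirroring how the physical port is assumed
 * to be created during probe): the physical port passes the PCI device,
 * while NPIV vports pass their own device:
 *
 *	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
 *	if (!vport)
 *		return -ENODEV;
 */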
4856
4857 /**
4858  * destroy_port -  destroy an FC port
4859  * @vport: pointer to an lpfc virtual N_Port data structure.
4860  *
4861  * This routine destroys an FC port from the upper layer protocol. All the
4862  * resources associated with the port are released.
4863  **/
4864 void
4865 destroy_port(struct lpfc_vport *vport)
4866 {
4867         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4868         struct lpfc_hba  *phba = vport->phba;
4869
4870         lpfc_debugfs_terminate(vport);
4871         fc_remove_host(shost);
4872         scsi_remove_host(shost);
4873
4874         spin_lock_irq(&phba->port_list_lock);
4875         list_del_init(&vport->listentry);
4876         spin_unlock_irq(&phba->port_list_lock);
4877
4878         lpfc_cleanup(vport);
4879         return;
4880 }
4881
4882 /**
4883  * lpfc_get_instance - Get a unique integer ID
4884  *
4885  * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4886  * uses the kernel idr facility to perform the task.
4887  *
4888  * Return codes:
4889  *   instance - a unique integer ID allocated as the new instance.
4890  *   -1 - lpfc get instance failed.
4891  **/
4892 int
4893 lpfc_get_instance(void)
4894 {
4895         int ret;
4896
4897         ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4898         return ret < 0 ? -1 : ret;
4899 }
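/*
 * Pairing sketch (illustrative; the exact release point is assumed): an
 * instance obtained here is returned to the pool with idr_remove() when
 * the HBA is freed:
 *
 *	int brd_no = lpfc_get_instance();
 *	...
 *	idr_remove(&lpfc_hba_index, brd_no);
 */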
4900
4901 /**
4902  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4903  * @shost: pointer to SCSI host data structure.
4904  * @time: elapsed time of the scan in jiffies.
4905  *
4906  * This routine is called by the SCSI layer with a SCSI host to determine
4907  * whether the host scan is finished.
4908  *
4909  * Note: there is no scan_start function as adapter initialization will have
4910  * asynchronously kicked off the link initialization.
4911  *
4912  * Return codes
4913  *   0 - SCSI host scan is not over yet.
4914  *   1 - SCSI host scan is over.
4915  **/
4916 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4917 {
4918         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4919         struct lpfc_hba   *phba = vport->phba;
4920         int stat = 0;
4921
4922         spin_lock_irq(shost->host_lock);
4923
4924         if (vport->load_flag & FC_UNLOADING) {
4925                 stat = 1;
4926                 goto finished;
4927         }
4928         if (time >= msecs_to_jiffies(30 * 1000)) {
4929                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4930                                 "0461 Scanning longer than 30 "
4931                                 "seconds.  Continuing initialization\n");
4932                 stat = 1;
4933                 goto finished;
4934         }
4935         if (time >= msecs_to_jiffies(15 * 1000) &&
4936             phba->link_state <= LPFC_LINK_DOWN) {
4937                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4938                                 "0465 Link down longer than 15 "
4939                                 "seconds.  Continuing initialization\n");
4940                 stat = 1;
4941                 goto finished;
4942         }
4943
4944         if (vport->port_state != LPFC_VPORT_READY)
4945                 goto finished;
4946         if (vport->num_disc_nodes || vport->fc_prli_sent)
4947                 goto finished;
4948         if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4949                 goto finished;
4950         if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4951                 goto finished;
4952
4953         stat = 1;
4954
4955 finished:
4956         spin_unlock_irq(shost->host_lock);
4957         return stat;
4958 }
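/*
 * Wiring sketch (illustrative): the SCSI midlayer polls this routine
 * because it is plugged into the driver's host templates, e.g.:
 *
 *	.scan_finished	= lpfc_scan_finished,
 */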
4959
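/**
 * lpfc_host_supported_speeds_set - Report the link speeds the HBA supports
 * @shost: pointer to SCSI host data structure.
 *
 * This routine translates the HBA's link-speed capability mask (phba->lmt)
 * into the FC transport's fc_host_supported_speeds bits. FCoE ports report
 * no speeds, since the link speed cannot be controlled via FCoE.
 **/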
4960 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4961 {
4962         struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4963         struct lpfc_hba   *phba = vport->phba;
4964
4965         fc_host_supported_speeds(shost) = 0;
4966         /*
4967          * Avoid reporting supported link speed for FCoE as it can't be
4968          * controlled via FCoE.
4969          */
4970         if (phba->hba_flag & HBA_FCOE_MODE)
4971                 return;
4972
4973         if (phba->lmt & LMT_256Gb)
4974                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4975         if (phba->lmt & LMT_128Gb)
4976                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4977         if (phba->lmt & LMT_64Gb)
4978                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4979         if (phba->lmt & LMT_32Gb)
4980                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4981         if (phba->lmt & LMT_16Gb)
4982                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4983         if (phba->lmt & LMT_10Gb)
4984                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4985         if (phba->lmt & LMT_8Gb)
4986                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4987         if (phba->lmt & LMT_4Gb)
4988                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4989         if (phba->lmt & LMT_2Gb)
4990                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4991         if (phba->lmt & LMT_1Gb)
4992                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4993 }
4994
4995 /**
4996  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4997  * @shost: pointer to SCSI host data structure.
4998  *
4999  * This routine initializes a given SCSI host attributes on a FC port. The
5000  * SCSI host can be either on top of a physical port or a virtual port.
5001  **/
5002 void lpfc_host_attrib_init(struct Scsi_Host *shost)
5003 {
5004         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5005         struct lpfc_hba   *phba = vport->phba;
5006         /*
5007          * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
5008          */
5009
5010         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
5011         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
5012         fc_host_supported_classes(shost) = FC_COS_CLASS3;
5013
5014         memset(fc_host_supported_fc4s(shost), 0,
5015                sizeof(fc_host_supported_fc4s(shost)));
5016         fc_host_supported_fc4s(shost)[2] = 1;
5017         fc_host_supported_fc4s(shost)[7] = 1;
5018
5019         lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
5020                                  sizeof(fc_host_symbolic_name(shost)));
5021
5022         lpfc_host_supported_speeds_set(shost);
5023
5024         fc_host_maxframe_size(shost) =
5025                 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
5026                 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
5027
5028         fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
5029
5030         /* This value is also unchanging */
5031         memset(fc_host_active_fc4s(shost), 0,
5032                sizeof(fc_host_active_fc4s(shost)));
5033         fc_host_active_fc4s(shost)[2] = 1;
5034         fc_host_active_fc4s(shost)[7] = 1;
5035
5036         fc_host_max_npiv_vports(shost) = phba->max_vpi;
5037         spin_lock_irq(shost->host_lock);
5038         vport->load_flag &= ~FC_LOADING;
5039         spin_unlock_irq(shost->host_lock);
5040 }
5041
5042 /**
5043  * lpfc_stop_port_s3 - Stop SLI3 device port
5044  * @phba: pointer to lpfc hba data structure.
5045  *
5046  * This routine is invoked to stop an SLI3 device port. It stops the device
5047  * from generating interrupts and stops the device driver's timers for the
5048  * device.
5049  **/
5050 static void
5051 lpfc_stop_port_s3(struct lpfc_hba *phba)
5052 {
5053         /* Clear all interrupt enable conditions */
5054         writel(0, phba->HCregaddr);
5055         readl(phba->HCregaddr); /* flush */
5056         /* Clear all pending interrupts */
5057         writel(0xffffffff, phba->HAregaddr);
5058         readl(phba->HAregaddr); /* flush */
5059
5060         /* Reset some HBA SLI setup states */
5061         lpfc_stop_hba_timers(phba);
5062         phba->pport->work_port_events = 0;
5063 }
5064
5065 /**
5066  * lpfc_stop_port_s4 - Stop SLI4 device port
5067  * @phba: pointer to lpfc hba data structure.
5068  *
5069  * This routine is invoked to stop an SLI4 device port. It stops the device
5070  * from generating interrupts and stops the device driver's timers for the
5071  * device.
5072  **/
5073 static void
5074 lpfc_stop_port_s4(struct lpfc_hba *phba)
5075 {
5076         /* Reset some HBA SLI4 setup states */
5077         lpfc_stop_hba_timers(phba);
5078         if (phba->pport)
5079                 phba->pport->work_port_events = 0;
5080         phba->sli4_hba.intr_enable = 0;
5081 }
5082
5083 /**
5084  * lpfc_stop_port - Wrapper function for stopping hba port
5085  * @phba: Pointer to HBA context object.
5086  *
5087  * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
5088  * the API jump table function pointer from the lpfc_hba struct.
5089  **/
5090 void
5091 lpfc_stop_port(struct lpfc_hba *phba)
5092 {
5093         phba->lpfc_stop_port(phba);
5094
5095         if (phba->wq)
5096                 flush_workqueue(phba->wq);
5097 }
5098
5099 /**
5100  * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
5101  * @phba: Pointer to hba for which this call is being executed.
5102  *
5103  * This routine starts the timer waiting for the FCF rediscovery to complete.
5104  **/
5105 void
5106 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5107 {
5108         unsigned long fcf_redisc_wait_tmo =
5109                 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5110         /* Start fcf rediscovery wait period timer */
5111         mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5112         spin_lock_irq(&phba->hbalock);
5113         /* Allow action to new fcf asynchronous event */
5114         phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5115         /* Mark the FCF rediscovery pending state */
5116         phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5117         spin_unlock_irq(&phba->hbalock);
5118 }
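/*
 * Wiring sketch (illustrative; the exact setup location is assumed): the
 * timer armed above is bound to its expiry handler during driver resource
 * setup:
 *
 *	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
 */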
5119
5120 /**
5121  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5122  * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5123  *
5124  * This routine is invoked when the wait for FCF table rediscovery times
5125  * out. If new FCF records have been discovered during the wait period,
5126  * a new FCF event is added to the FCoE async event list, and the
5127  * worker thread is then woken up to process the event from the worker
5128  * thread context.
5129  **/
5130 static void
5131 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5132 {
5133         struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5134
5135         /* Don't send FCF rediscovery event if timer cancelled */
5136         spin_lock_irq(&phba->hbalock);
5137         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5138                 spin_unlock_irq(&phba->hbalock);
5139                 return;
5140         }
5141         /* Clear FCF rediscovery timer pending flag */
5142         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5143         /* FCF rediscovery event to worker thread */
5144         phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5145         spin_unlock_irq(&phba->hbalock);
5146         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5147                         "2776 FCF rediscover quiescent timer expired\n");
5148         /* wake up worker thread */
5149         lpfc_worker_wake_up(phba);
5150 }
5151
5152 /**
5153  * lpfc_vmid_poll - VMID timeout detection
5154  * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5155  *
5156  * This routine is invoked when there is no I/O by a VM for the specified
5157  * amount of time. When this situation is detected, the VMID has to be
5158  * deregistered from the switch and all the local resources freed. The VMID
5159  * will be reassigned to the VM once the I/O begins.
5160  **/
5161 static void
5162 lpfc_vmid_poll(struct timer_list *t)
5163 {
5164         struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5165         u32 wake_up = 0;
5166
5167         /* check if there is a need to issue QFPA */
5168         if (phba->pport->vmid_priority_tagging) {
5169                 wake_up = 1;
5170                 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5171         }
5172
5173         /* Is the vmid inactivity timer enabled */
5174         if (phba->pport->vmid_inactivity_timeout ||
5175             phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5176                 wake_up = 1;
5177                 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5178         }
5179
5180         if (wake_up)
5181                 lpfc_worker_wake_up(phba);
5182
5183         /* restart the timer for the next iteration */
5184         mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5185                                                         LPFC_VMID_TIMER));
5186 }
5187
5188 /**
5189  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5190  * @phba: pointer to lpfc hba data structure.
5191  * @acqe_link: pointer to the async link completion queue entry.
5192  *
5193  * This routine is to parse the SLI4 link-attention link fault code.
5194  **/
5195 static void
5196 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5197                            struct lpfc_acqe_link *acqe_link)
5198 {
5199         switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) {
5200         case LPFC_FC_LA_TYPE_LINK_DOWN:
5201         case LPFC_FC_LA_TYPE_TRUNKING_EVENT:
5202         case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
5203         case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
5204                 break;
5205         default:
5206                 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5207                 case LPFC_ASYNC_LINK_FAULT_NONE:
5208                 case LPFC_ASYNC_LINK_FAULT_LOCAL:
5209                 case LPFC_ASYNC_LINK_FAULT_REMOTE:
5210                 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5211                         break;
5212                 default:
5213                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5214                                         "0398 Unknown link fault code: x%x\n",
5215                                         bf_get(lpfc_acqe_link_fault, acqe_link));
5216                         break;
5217                 }
5218                 break;
5219         }
5220 }
5221
5222 /**
5223  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5224  * @phba: pointer to lpfc hba data structure.
5225  * @acqe_link: pointer to the async link completion queue entry.
5226  *
5227  * This routine is to parse the SLI4 link attention type and translate it
5228  * into the base driver's link attention type coding.
5229  *
5230  * Return: Link attention type in terms of base driver's coding.
5231  **/
5232 static uint8_t
5233 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5234                           struct lpfc_acqe_link *acqe_link)
5235 {
5236         uint8_t att_type;
5237
5238         switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5239         case LPFC_ASYNC_LINK_STATUS_DOWN:
5240         case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5241                 att_type = LPFC_ATT_LINK_DOWN;
5242                 break;
5243         case LPFC_ASYNC_LINK_STATUS_UP:
5244                 /* Ignore physical link up events - wait for logical link up */
5245                 att_type = LPFC_ATT_RESERVED;
5246                 break;
5247         case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5248                 att_type = LPFC_ATT_LINK_UP;
5249                 break;
5250         default:
5251                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5252                                 "0399 Invalid link attention type: x%x\n",
5253                                 bf_get(lpfc_acqe_link_status, acqe_link));
5254                 att_type = LPFC_ATT_RESERVED;
5255                 break;
5256         }
5257         return att_type;
5258 }
5259
5260 /**
5261  * lpfc_sli_port_speed_get - Convert sli3 link speed code to link speed
5262  * @phba: pointer to lpfc hba data structure.
5263  *
5264  * This routine is to get an SLI3 FC port's link speed in Mbps.
5265  *
5266  * Return: link speed in terms of Mbps.
5267  **/
5268 uint32_t
5269 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5270 {
5271         uint32_t link_speed;
5272
5273         if (!lpfc_is_link_up(phba))
5274                 return 0;
5275
5276         if (phba->sli_rev <= LPFC_SLI_REV3) {
5277                 switch (phba->fc_linkspeed) {
5278                 case LPFC_LINK_SPEED_1GHZ:
5279                         link_speed = 1000;
5280                         break;
5281                 case LPFC_LINK_SPEED_2GHZ:
5282                         link_speed = 2000;
5283                         break;
5284                 case LPFC_LINK_SPEED_4GHZ:
5285                         link_speed = 4000;
5286                         break;
5287                 case LPFC_LINK_SPEED_8GHZ:
5288                         link_speed = 8000;
5289                         break;
5290                 case LPFC_LINK_SPEED_10GHZ:
5291                         link_speed = 10000;
5292                         break;
5293                 case LPFC_LINK_SPEED_16GHZ:
5294                         link_speed = 16000;
5295                         break;
5296                 default:
5297                         link_speed = 0;
5298                 }
5299         } else {
5300                 if (phba->sli4_hba.link_state.logical_speed)
5301                         link_speed =
5302                               phba->sli4_hba.link_state.logical_speed;
5303                 else
5304                         link_speed = phba->sli4_hba.link_state.speed;
5305         }
5306         return link_speed;
5307 }
5308
5309 /**
5310  * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5311  * @phba: pointer to lpfc hba data structure.
5312  * @evt_code: asynchronous event code.
5313  * @speed_code: asynchronous event link speed code.
5314  *
5315  * This routine parses the given SLI4 async event link speed code into a
5316  * link speed value in Mbps.
5317  *
5318  * Return: link speed in terms of Mbps.
5319  **/
5320 static uint32_t
5321 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5322                            uint8_t speed_code)
5323 {
5324         uint32_t port_speed;
5325
5326         switch (evt_code) {
5327         case LPFC_TRAILER_CODE_LINK:
5328                 switch (speed_code) {
5329                 case LPFC_ASYNC_LINK_SPEED_ZERO:
5330                         port_speed = 0;
5331                         break;
5332                 case LPFC_ASYNC_LINK_SPEED_10MBPS:
5333                         port_speed = 10;
5334                         break;
5335                 case LPFC_ASYNC_LINK_SPEED_100MBPS:
5336                         port_speed = 100;
5337                         break;
5338                 case LPFC_ASYNC_LINK_SPEED_1GBPS:
5339                         port_speed = 1000;
5340                         break;
5341                 case LPFC_ASYNC_LINK_SPEED_10GBPS:
5342                         port_speed = 10000;
5343                         break;
5344                 case LPFC_ASYNC_LINK_SPEED_20GBPS:
5345                         port_speed = 20000;
5346                         break;
5347                 case LPFC_ASYNC_LINK_SPEED_25GBPS:
5348                         port_speed = 25000;
5349                         break;
5350                 case LPFC_ASYNC_LINK_SPEED_40GBPS:
5351                         port_speed = 40000;
5352                         break;
5353                 case LPFC_ASYNC_LINK_SPEED_100GBPS:
5354                         port_speed = 100000;
5355                         break;
5356                 default:
5357                         port_speed = 0;
5358                 }
5359                 break;
5360         case LPFC_TRAILER_CODE_FC:
5361                 switch (speed_code) {
5362                 case LPFC_FC_LA_SPEED_UNKNOWN:
5363                         port_speed = 0;
5364                         break;
5365                 case LPFC_FC_LA_SPEED_1G:
5366                         port_speed = 1000;
5367                         break;
5368                 case LPFC_FC_LA_SPEED_2G:
5369                         port_speed = 2000;
5370                         break;
5371                 case LPFC_FC_LA_SPEED_4G:
5372                         port_speed = 4000;
5373                         break;
5374                 case LPFC_FC_LA_SPEED_8G:
5375                         port_speed = 8000;
5376                         break;
5377                 case LPFC_FC_LA_SPEED_10G:
5378                         port_speed = 10000;
5379                         break;
5380                 case LPFC_FC_LA_SPEED_16G:
5381                         port_speed = 16000;
5382                         break;
5383                 case LPFC_FC_LA_SPEED_32G:
5384                         port_speed = 32000;
5385                         break;
5386                 case LPFC_FC_LA_SPEED_64G:
5387                         port_speed = 64000;
5388                         break;
5389                 case LPFC_FC_LA_SPEED_128G:
5390                         port_speed = 128000;
5391                         break;
5392                 case LPFC_FC_LA_SPEED_256G:
5393                         port_speed = 256000;
5394                         break;
5395                 default:
5396                         port_speed = 0;
5397                 }
5398                 break;
5399         default:
5400                 port_speed = 0;
5401         }
5402         return port_speed;
5403 }
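/*
 * Usage sketch (illustrative values): a link-type ACQE carrying the
 * LPFC_ASYNC_LINK_SPEED_25GBPS code resolves to 25000 Mbps:
 *
 *	u32 mbps = lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
 *					      LPFC_ASYNC_LINK_SPEED_25GBPS);
 *
 * mbps is 25000 here; any unrecognized event or speed code falls
 * through to 0 rather than guessing a rate.
 */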
5404
5405 /**
5406  * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5407  * @phba: pointer to lpfc hba data structure.
5408  * @acqe_link: pointer to the async link completion queue entry.
5409  *
5410  * This routine is to handle the SLI4 asynchronous FCoE link event.
5411  **/
5412 static void
5413 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5414                          struct lpfc_acqe_link *acqe_link)
5415 {
5416         LPFC_MBOXQ_t *pmb;
5417         MAILBOX_t *mb;
5418         struct lpfc_mbx_read_top *la;
5419         uint8_t att_type;
5420         int rc;
5421
5422         att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5423         if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5424                 return;
5425         phba->fcoe_eventtag = acqe_link->event_tag;
5426         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5427         if (!pmb) {
5428                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5429                                 "0395 The mboxq allocation failed\n");
5430                 return;
5431         }
5432
5433         rc = lpfc_mbox_rsrc_prep(phba, pmb);
5434         if (rc) {
5435                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5436                                 "0396 mailbox allocation failed\n");
5437                 goto out_free_pmb;
5438         }
5439
5440         /* Cleanup any outstanding ELS commands */
5441         lpfc_els_flush_all_cmd(phba);
5442
5443         /* Block ELS IOCBs until we have done process link event */
5444         phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5445
5446         /* Update link event statistics */
5447         phba->sli.slistat.link_event++;
5448
5449         /* Create lpfc_handle_latt mailbox command from link ACQE */
5450         lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
5451         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5452         pmb->vport = phba->pport;
5453
5454         /* Keep the link status for extra SLI4 state machine reference */
5455         phba->sli4_hba.link_state.speed =
5456                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5457                                 bf_get(lpfc_acqe_link_speed, acqe_link));
5458         phba->sli4_hba.link_state.duplex =
5459                                 bf_get(lpfc_acqe_link_duplex, acqe_link);
5460         phba->sli4_hba.link_state.status =
5461                                 bf_get(lpfc_acqe_link_status, acqe_link);
5462         phba->sli4_hba.link_state.type =
5463                                 bf_get(lpfc_acqe_link_type, acqe_link);
5464         phba->sli4_hba.link_state.number =
5465                                 bf_get(lpfc_acqe_link_number, acqe_link);
5466         phba->sli4_hba.link_state.fault =
5467                                 bf_get(lpfc_acqe_link_fault, acqe_link);
5468         phba->sli4_hba.link_state.logical_speed =
5469                         bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5470
5471         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5472                         "2900 Async FC/FCoE Link event - Speed:%dGBit "
5473                         "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5474                         "Logical speed:%dMbps Fault:%d\n",
5475                         phba->sli4_hba.link_state.speed,
5476                         phba->sli4_hba.link_state.duplex,
5477                         phba->sli4_hba.link_state.status,
5478                         phba->sli4_hba.link_state.type,
5479                         phba->sli4_hba.link_state.number,
5480                         phba->sli4_hba.link_state.logical_speed,
5481                         phba->sli4_hba.link_state.fault);
5482         /*
5483          * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5484          * topology info. Note: Optional for non FC-AL ports.
5485          */
5486         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5487                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5488                 if (rc == MBX_NOT_FINISHED)
5489                         goto out_free_pmb;
5490                 return;
5491         }
5492         /*
5493          * For FCoE Mode: fill in all the topology information we need and call
5494          * the READ_TOPOLOGY completion routine to continue without actually
5495          * sending the READ_TOPOLOGY mailbox command to the port.
5496          */
5497         /* Initialize completion status */
5498         mb = &pmb->u.mb;
5499         mb->mbxStatus = MBX_SUCCESS;
5500
5501         /* Parse port fault information field */
5502         lpfc_sli4_parse_latt_fault(phba, acqe_link);
5503
5504         /* Parse and translate link attention fields */
5505         la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5506         la->eventTag = acqe_link->event_tag;
5507         bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5508         bf_set(lpfc_mbx_read_top_link_spd, la,
5509                (bf_get(lpfc_acqe_link_speed, acqe_link)));
5510
5511         /* Fake the following irrelevant fields */
5512         bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5513         bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5514         bf_set(lpfc_mbx_read_top_il, la, 0);
5515         bf_set(lpfc_mbx_read_top_pb, la, 0);
5516         bf_set(lpfc_mbx_read_top_fa, la, 0);
5517         bf_set(lpfc_mbx_read_top_mm, la, 0);
5518
5519         /* Invoke the lpfc_handle_latt mailbox command callback function */
5520         lpfc_mbx_cmpl_read_topology(phba, pmb);
5521
5522         return;
5523
5524 out_free_pmb:
5525         lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5526 }
5527
5528 /**
5529  * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5530  * topology.
5531  * @phba: pointer to lpfc hba data structure.
5532  * @speed_code: asynchronous event link speed code.
5533  *
5534  * This routine parses the given SLI4 async event link speed code into a
5535  * Read topology link speed value.
5536  *
5537  * Return: link speed in terms of Read topology.
5538  **/
5539 static uint8_t
5540 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5541 {
5542         uint8_t port_speed;
5543
5544         switch (speed_code) {
5545         case LPFC_FC_LA_SPEED_1G:
5546                 port_speed = LPFC_LINK_SPEED_1GHZ;
5547                 break;
5548         case LPFC_FC_LA_SPEED_2G:
5549                 port_speed = LPFC_LINK_SPEED_2GHZ;
5550                 break;
5551         case LPFC_FC_LA_SPEED_4G:
5552                 port_speed = LPFC_LINK_SPEED_4GHZ;
5553                 break;
5554         case LPFC_FC_LA_SPEED_8G:
5555                 port_speed = LPFC_LINK_SPEED_8GHZ;
5556                 break;
5557         case LPFC_FC_LA_SPEED_16G:
5558                 port_speed = LPFC_LINK_SPEED_16GHZ;
5559                 break;
5560         case LPFC_FC_LA_SPEED_32G:
5561                 port_speed = LPFC_LINK_SPEED_32GHZ;
5562                 break;
5563         case LPFC_FC_LA_SPEED_64G:
5564                 port_speed = LPFC_LINK_SPEED_64GHZ;
5565                 break;
5566         case LPFC_FC_LA_SPEED_128G:
5567                 port_speed = LPFC_LINK_SPEED_128GHZ;
5568                 break;
5569         case LPFC_FC_LA_SPEED_256G:
5570                 port_speed = LPFC_LINK_SPEED_256GHZ;
5571                 break;
5572         default:
5573                 port_speed = 0;
5574                 break;
5575         }
5576
5577         return port_speed;
5578 }
5579
5580 void
5581 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5582 {
5583         if (!phba->rx_monitor) {
5584                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5585                                 "4411 Rx Monitor Info is empty.\n");
5586         } else {
5587                 lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
5588                                        LPFC_MAX_RXMONITOR_DUMP);
5589         }
5590 }
5591
5592 /**
5593  * lpfc_cgn_update_stat - Save data into congestion stats buffer
5594  * @phba: pointer to lpfc hba data structure.
5595  * @dtag: FPIN descriptor received
5596  *
5597  * Increment the received FPIN counter and record the time when it happens.
5598  */
5599 void
5600 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5601 {
5602         struct lpfc_cgn_info *cp;
5603         u32 value;
5604
5605         /* Make sure we have a congestion info buffer */
5606         if (!phba->cgn_i)
5607                 return;
5608         cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5609
5610         /* Update congestion statistics */
5611         switch (dtag) {
5612         case ELS_DTAG_LNK_INTEGRITY:
5613                 le32_add_cpu(&cp->link_integ_notification, 1);
5614                 lpfc_cgn_update_tstamp(phba, &cp->stat_lnk);
5615                 break;
5616         case ELS_DTAG_DELIVERY:
5617                 le32_add_cpu(&cp->delivery_notification, 1);
5618                 lpfc_cgn_update_tstamp(phba, &cp->stat_delivery);
5619                 break;
5620         case ELS_DTAG_PEER_CONGEST:
5621                 le32_add_cpu(&cp->cgn_peer_notification, 1);
5622                 lpfc_cgn_update_tstamp(phba, &cp->stat_peer);
5623                 break;
5624         case ELS_DTAG_CONGESTION:
5625                 le32_add_cpu(&cp->cgn_notification, 1);
5626                 lpfc_cgn_update_tstamp(phba, &cp->stat_fpin);
5627         }
5628         if (phba->cgn_fpin_frequency &&
5629             phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5630                 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5631                 cp->cgn_stat_npm = value;
5632         }
5633
5634         value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5635                                     LPFC_CGN_CRC32_SEED);
5636         cp->cgn_info_crc = cpu_to_le32(value);
5637 }
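/*
 * Usage sketch: a received FPIN link-integrity descriptor would be
 * accounted with (hypothetical call site for illustration):
 *
 *	lpfc_cgn_update_stat(phba, ELS_DTAG_LNK_INTEGRITY);
 *
 * Note the pattern above: every mutation of the shared congestion info
 * buffer finishes by recomputing cgn_info_crc, so a consumer can
 * validate any snapshot it copies out.
 */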
5638
5639 /**
5640  * lpfc_cgn_update_tstamp - Update cmf timestamp
5641  * @phba: pointer to lpfc hba data structure.
5642  * @ts: structure to write the timestamp to.
5643  */
5644 void
5645 lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts)
5646 {
5647         struct timespec64 cur_time;
5648         struct tm tm_val;
5649
5650         ktime_get_real_ts64(&cur_time);
5651         time64_to_tm(cur_time.tv_sec, 0, &tm_val);
5652
5653         ts->month = tm_val.tm_mon + 1;
5654         ts->day = tm_val.tm_mday;
5655         ts->year = tm_val.tm_year - 100;
5656         ts->hour = tm_val.tm_hour;
5657         ts->minute = tm_val.tm_min;
5658         ts->second = tm_val.tm_sec;
5659
5660         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5661                         "2646 Updated CMF timestamp : "
5662                         "%u/%u/%u %u:%u:%u\n",
5663                         ts->day, ts->month,
5664                         ts->year, ts->hour,
5665                         ts->minute, ts->second);
5666 }
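/*
 * Encoding note with a worked example (values illustrative): the stored
 * fields are compact, not struct tm. tm_mon runs 0-11 so the month is
 * stored as 1-12, and tm_year counts from 1900 so (tm_year - 100)
 * stores years since 2000. Thus 2024-07-04 14:05:09 is recorded as:
 *
 *	ts->month  = 7;		(July)
 *	ts->day    = 4;
 *	ts->year   = 24;	(2000 + 24)
 *	ts->hour   = 14;
 *	ts->minute = 5;
 *	ts->second = 9;
 */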
5667
5668 /**
5669  * lpfc_cmf_stats_timer - Save data into registered congestion buffer
5670  * @timer: Timer cookie to access lpfc private data
5671  *
5672  * Save the congestion event data every minute.
5673  * On the hour, collapse all the minute data into hour data. Every day,
5674  * collapse all the hour data into daily data. Separate driver
5675  * and fabric congestion event counters are saved out
5676  * to the registered congestion buffer every minute.
5677  */
5678 static enum hrtimer_restart
5679 lpfc_cmf_stats_timer(struct hrtimer *timer)
5680 {
5681         struct lpfc_hba *phba;
5682         struct lpfc_cgn_info *cp;
5683         uint32_t i, index;
5684         uint16_t value, mvalue;
5685         uint64_t bps;
5686         uint32_t mbps;
5687         uint32_t dvalue, wvalue, lvalue, avalue;
5688         uint64_t latsum;
5689         __le16 *ptr;
5690         __le32 *lptr;
5691         __le16 *mptr;
5692
5693         phba = container_of(timer, struct lpfc_hba, cmf_stats_timer);
5694         /* Make sure we have a congestion info buffer */
5695         if (!phba->cgn_i)
5696                 return HRTIMER_NORESTART;
5697         cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5698
5699         phba->cgn_evt_timestamp = jiffies +
5700                         msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5701         phba->cgn_evt_minute++;
5702
5703         /* We should get to this point in the routine on 1 minute intervals */
5704         lpfc_cgn_update_tstamp(phba, &cp->base_time);
5705
5706         if (phba->cgn_fpin_frequency &&
5707             phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5708                 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5709                 cp->cgn_stat_npm = value;
5710         }
5711
5712         /* Read and clear the latency counters for this minute */
5713         lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5714         latsum = atomic64_read(&phba->cgn_latency_evt);
5715         atomic_set(&phba->cgn_latency_evt_cnt, 0);
5716         atomic64_set(&phba->cgn_latency_evt, 0);
5717
5718         /* We need to store MB/sec bandwidth in the congestion information.
5719          * block_cnt is count of 512 byte blocks for the entire minute,
5720          * bps will get bytes per sec before finally converting to MB/sec.
5721          */
5722         bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5723         phba->rx_block_cnt = 0;
5724         mvalue = bps / (1024 * 1024); /* convert to MB/sec */
5725
5726         /* Every minute */
5727         /* cgn parameters */
5728         cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5729         cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5730         cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5731         cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5732
5733         /* Fill in default LUN qdepth */
5734         value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5735         cp->cgn_lunq = cpu_to_le16(value);
5736
5737         /* Record congestion buffer info - every minute
5738          * cgn_driver_evt_cnt (Driver events)
5739          * cgn_fabric_warn_cnt (Congestion Warnings)
5740          * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5741          * cgn_fabric_alarm_cnt (Congestion Alarms)
5742          */
5743         index = ++cp->cgn_index_minute;
5744         if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5745                 cp->cgn_index_minute = 0;
5746                 index = 0;
5747         }
5748
5749         /* Get the number of driver events in this sample and reset counter */
5750         dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5751         atomic_set(&phba->cgn_driver_evt_cnt, 0);
5752
5753         /* Get the number of warning events - FPIN and Signal for this minute */
5754         wvalue = 0;
5755         if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5756             phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5757             phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5758                 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5759         atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5760
5761         /* Get the number of alarm events - FPIN and Signal for this minute */
5762         avalue = 0;
5763         if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5764             phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5765                 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5766         atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5767
5768         /* Collect the driver, warning, alarm and latency counts for this
5769          * minute into the driver congestion buffer.
5770          */
5771         ptr = &cp->cgn_drvr_min[index];
5772         value = (uint16_t)dvalue;
5773         *ptr = cpu_to_le16(value);
5774
5775         ptr = &cp->cgn_warn_min[index];
5776         value = (uint16_t)wvalue;
5777         *ptr = cpu_to_le16(value);
5778
5779         ptr = &cp->cgn_alarm_min[index];
5780         value = (uint16_t)avalue;
5781         *ptr = cpu_to_le16(value);
5782
5783         lptr = &cp->cgn_latency_min[index];
5784         if (lvalue) {
5785                 lvalue = (uint32_t)div_u64(latsum, lvalue);
5786                 *lptr = cpu_to_le32(lvalue);
5787         } else {
5788                 *lptr = 0;
5789         }
5790
5791         /* Collect the bandwidth value into the driver's congestion buffer. */
5792         mptr = &cp->cgn_bw_min[index];
5793         *mptr = cpu_to_le16(mvalue);
5794
5795         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5796                         "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5797                         index, dvalue, wvalue, *lptr, mvalue, avalue);
5798
5799         /* Every hour */
5800         if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5801                 /* Record congestion buffer info - every hour
5802                  * Collapse all minutes into an hour
5803                  */
5804                 index = ++cp->cgn_index_hour;
5805                 if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5806                         cp->cgn_index_hour = 0;
5807                         index = 0;
5808                 }
5809
5810                 dvalue = 0;
5811                 wvalue = 0;
5812                 lvalue = 0;
5813                 avalue = 0;
5814                 mvalue = 0;
5815                 mbps = 0;
5816                 for (i = 0; i < LPFC_MIN_HOUR; i++) {
5817                         dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5818                         wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5819                         lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5820                         mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5821                         avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5822                 }
5823                 if (lvalue)             /* Avg of latency averages */
5824                         lvalue /= LPFC_MIN_HOUR;
5825                 if (mbps)               /* Avg of Bandwidth averages */
5826                         mvalue = mbps / LPFC_MIN_HOUR;
5827
5828                 lptr = &cp->cgn_drvr_hr[index];
5829                 *lptr = cpu_to_le32(dvalue);
5830                 lptr = &cp->cgn_warn_hr[index];
5831                 *lptr = cpu_to_le32(wvalue);
5832                 lptr = &cp->cgn_latency_hr[index];
5833                 *lptr = cpu_to_le32(lvalue);
5834                 mptr = &cp->cgn_bw_hr[index];
5835                 *mptr = cpu_to_le16(mvalue);
5836                 lptr = &cp->cgn_alarm_hr[index];
5837                 *lptr = cpu_to_le32(avalue);
5838
5839                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5840                                 "2419 Congestion Info - hour "
5841                                 "(%d): %d %d %d %d %d\n",
5842                                 index, dvalue, wvalue, lvalue, mvalue, avalue);
5843         }
5844
5845         /* Every day */
5846         if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5847                 /* Record congestion buffer info - every day
5848                  * Collapse all hours into a day. Rotate days
5849                  * after LPFC_MAX_CGN_DAYS.
5850                  */
5851                 index = ++cp->cgn_index_day;
5852                 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5853                         cp->cgn_index_day = 0;
5854                         index = 0;
5855                 }
5856
5857                 dvalue = 0;
5858                 wvalue = 0;
5859                 lvalue = 0;
5860                 mvalue = 0;
5861                 mbps = 0;
5862                 avalue = 0;
5863                 for (i = 0; i < LPFC_HOUR_DAY; i++) {
5864                         dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5865                         wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5866                         lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5867                         mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5868                         avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5869                 }
5870                 if (lvalue)             /* Avg of latency averages */
5871                         lvalue /= LPFC_HOUR_DAY;
5872                 if (mbps)               /* Avg of Bandwidth averages */
5873                         mvalue = mbps / LPFC_HOUR_DAY;
5874
5875                 lptr = &cp->cgn_drvr_day[index];
5876                 *lptr = cpu_to_le32(dvalue);
5877                 lptr = &cp->cgn_warn_day[index];
5878                 *lptr = cpu_to_le32(wvalue);
5879                 lptr = &cp->cgn_latency_day[index];
5880                 *lptr = cpu_to_le32(lvalue);
5881                 mptr = &cp->cgn_bw_day[index];
5882                 *mptr = cpu_to_le16(mvalue);
5883                 lptr = &cp->cgn_alarm_day[index];
5884                 *lptr = cpu_to_le32(avalue);
5885
5886                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5887                                 "2420 Congestion Info - daily (%d): "
5888                                 "%d %d %d %d %d\n",
5889                                 index, dvalue, wvalue, lvalue, mvalue, avalue);
5890         }
5891
5892         /* Use the frequency found in the last rcv'ed FPIN */
5893         value = phba->cgn_fpin_frequency;
5894         cp->cgn_warn_freq = cpu_to_le16(value);
5895         cp->cgn_alarm_freq = cpu_to_le16(value);
5896
5897         lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5898                                      LPFC_CGN_CRC32_SEED);
5899         cp->cgn_info_crc = cpu_to_le32(lvalue);
5900
5901         hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC));
5902
5903         return HRTIMER_RESTART;
5904 }
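/*
 * Index arithmetic sketch: each per-minute/hour/day array is a ring.
 * The minute writer above is equivalent to the modulo form
 * (illustrative rewrite, same behavior):
 *
 *	cp->cgn_index_minute = (cp->cgn_index_minute + 1) % LPFC_MIN_HOUR;
 *	index = cp->cgn_index_minute;
 *
 * The hour ring wraps the same way at LPFC_HOUR_DAY slots and the day
 * ring at LPFC_MAX_CGN_DAYS slots.
 */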
5905
5906 /**
5907  * lpfc_calc_cmf_latency - latency from start of rx rate timer interval
5908  * @phba: The Hba for which this call is being executed.
5909  *
5910  * The routine calculates the latency from the beginning of the CMF timer
5911  * interval to the current point in time. It is called from IO completion
5912  * when we exceed our Bandwidth limitation for the time interval.
5913  */
5914 uint32_t
5915 lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5916 {
5917         struct timespec64 cmpl_time;
5918         uint32_t msec = 0;
5919
5920         ktime_get_real_ts64(&cmpl_time);
5921
5922         /* This routine works on a ms granularity so sec and usec are
5923          * converted accordingly.
5924          */
5925         if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5926                 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5927                         NSEC_PER_MSEC;
5928         } else {
5929                 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5930                         msec = (cmpl_time.tv_sec -
5931                                 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5932                         msec += ((cmpl_time.tv_nsec -
5933                                   phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5934                 } else {
5935                         msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5936                                 1) * MSEC_PER_SEC;
5937                         msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5938                                  cmpl_time.tv_nsec) / NSEC_PER_MSEC);
5939                 }
5940         }
5941         return msec;
5942 }
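/*
 * Worked example (illustrative numbers): with an interval start of
 * { tv_sec = 100, tv_nsec = 900000000 } and a completion time of
 * { tv_sec = 101, tv_nsec = 100000000 }, the seconds differ and the
 * completion nanoseconds are smaller, so the final branch applies:
 *
 *	msec  = (101 - 100 - 1) * MSEC_PER_SEC;			= 0
 *	msec += ((NSEC_PER_SEC - 900000000) + 100000000)
 *		/ NSEC_PER_MSEC;				= 200
 *
 * giving 200 ms, the true elapsed time across the second boundary.
 */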
5943
5944 /**
5945  * lpfc_cmf_timer -  This is the timer function for one congestion
5946  * rate interval.
5947  * @timer: Pointer to the high resolution timer that expired
5948  */
5949 static enum hrtimer_restart
5950 lpfc_cmf_timer(struct hrtimer *timer)
5951 {
5952         struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
5953                                              cmf_timer);
5954         struct rx_info_entry entry;
5955         uint32_t io_cnt;
5956         uint32_t busy, max_read;
5957         uint64_t total, rcv, lat, mbpi, extra, cnt;
5958         int timer_interval = LPFC_CMF_INTERVAL;
5959         uint32_t ms;
5960         struct lpfc_cgn_stat *cgs;
5961         int cpu;
5962
5963         /* Only restart the timer if congestion mgmt is on */
5964         if (phba->cmf_active_mode == LPFC_CFG_OFF ||
5965             !phba->cmf_latency.tv_sec) {
5966                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5967                                 "6224 CMF timer exit: %d %lld\n",
5968                                 phba->cmf_active_mode,
5969                                 (uint64_t)phba->cmf_latency.tv_sec);
5970                 return HRTIMER_NORESTART;
5971         }
5972
5973         /* If pport is not ready yet, just exit and wait for
5974          * the next timer cycle to hit.
5975          */
5976         if (!phba->pport)
5977                 goto skip;
5978
5979         /* Do not block SCSI IO while in the timer routine since
5980          * total_bytes will be cleared
5981          */
5982         atomic_set(&phba->cmf_stop_io, 1);
5983
5984         /* First we need to calculate the actual ms between
5985          * the last timer interrupt and this one. We ask for
5986          * LPFC_CMF_INTERVAL, however the actual time may
5987          * vary depending on system overhead.
5988          */
5989         ms = lpfc_calc_cmf_latency(phba);
5990
5991
5992         /* Immediately after we calculate the time since the last
5993          * timer interrupt, set the start time for the next
5994          * interrupt
5995          */
5996         ktime_get_real_ts64(&phba->cmf_latency);
5997
5998         phba->cmf_link_byte_count =
5999                 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
6000
6001         /* Collect all the stats from the prior timer interval */
6002         total = 0;
6003         io_cnt = 0;
6004         lat = 0;
6005         rcv = 0;
6006         for_each_present_cpu(cpu) {
6007                 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
6008                 total += atomic64_xchg(&cgs->total_bytes, 0);
6009                 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
6010                 lat += atomic64_xchg(&cgs->rx_latency, 0);
6011                 rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
6012         }
6013
6014         /* Before we issue another CMF_SYNC_WQE, retrieve the BW
6015          * returned from the last CMF_SYNC_WQE issued, from
6016          * cmf_last_sync_bw. This will be the target BW for
6017          * this next timer interval.
6018          */
6019         if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6020             phba->link_state != LPFC_LINK_DOWN &&
6021             phba->hba_flag & HBA_SETUP) {
6022                 mbpi = phba->cmf_last_sync_bw;
6023                 phba->cmf_last_sync_bw = 0;
6024                 extra = 0;
6025
6026                 /* Calculate any extra bytes needed to account for the
6027                  * timer accuracy. If we are less than LPFC_CMF_INTERVAL
6028                  * calculate the adjustment needed for total to reflect
6029                  * a full LPFC_CMF_INTERVAL.
6030                  */
6031                 if (ms && ms < LPFC_CMF_INTERVAL) {
6032                         cnt = div_u64(total, ms); /* bytes per ms */
6033                         cnt *= LPFC_CMF_INTERVAL; /* what total should be */
6034                         extra = cnt - total;
6035                 }
6036                 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
6037         } else {
6038                 /* For Monitor mode or link down we want mbpi
6039                  * to be the full link speed
6040                  */
6041                 mbpi = phba->cmf_link_byte_count;
6042                 extra = 0;
6043         }
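        /*
         * Worked example of the adjustment above (hypothetical numbers:
         * suppose the interval were 100 ms but the timer fired at
         * ms = 90 with total = 900000 bytes counted):
         *
         *	cnt   = 900000 / 90;		= 10000 bytes per ms
         *	cnt  *= 100;			= 1000000, a full interval
         *	extra = 1000000 - 900000;	= 100000-byte shortfall
         *
         * so total + extra reported via CMF_SYNC_WQE reflects a complete
         * interval even when the timer ran slightly early.
         */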
6044         phba->cmf_timer_cnt++;
6045
6046         if (io_cnt) {
6047                 /* Update congestion info buffer latency in us */
6048                 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6049                 atomic64_add(lat, &phba->cgn_latency_evt);
6050         }
6051         busy = atomic_xchg(&phba->cmf_busy, 0);
6052         max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6053
6054         /* Calculate MBPI for the next timer interval */
6055         if (mbpi) {
6056                 if (mbpi > phba->cmf_link_byte_count ||
6057                     phba->cmf_active_mode == LPFC_CFG_MONITOR)
6058                         mbpi = phba->cmf_link_byte_count;
6059
6060                 /* Change max_bytes_per_interval to what the prior
6061                  * CMF_SYNC_WQE cmpl indicated.
6062                  */
6063                 if (mbpi != phba->cmf_max_bytes_per_interval)
6064                         phba->cmf_max_bytes_per_interval = mbpi;
6065         }
6066
6067         /* Save rxmonitor information for debug */
6068         if (phba->rx_monitor) {
6069                 entry.total_bytes = total;
6070                 entry.cmf_bytes = total + extra;
6071                 entry.rcv_bytes = rcv;
6072                 entry.cmf_busy = busy;
6073                 entry.cmf_info = phba->cmf_active_info;
6074                 if (io_cnt) {
6075                         entry.avg_io_latency = div_u64(lat, io_cnt);
6076                         entry.avg_io_size = div_u64(rcv, io_cnt);
6077                 } else {
6078                         entry.avg_io_latency = 0;
6079                         entry.avg_io_size = 0;
6080                 }
6081                 entry.max_read_cnt = max_read;
6082                 entry.io_cnt = io_cnt;
6083                 entry.max_bytes_per_interval = mbpi;
6084                 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6085                         entry.timer_utilization = phba->cmf_last_ts;
6086                 else
6087                         entry.timer_utilization = ms;
6088                 entry.timer_interval = ms;
6089                 phba->cmf_last_ts = 0;
6090
6091                 lpfc_rx_monitor_record(phba->rx_monitor, &entry);
6092         }
6093
6094         if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6095                 /* If Monitor mode, check if we are oversubscribed
6096                  * against the full line rate.
6097                  */
6098                 if (mbpi && total > mbpi)
6099                         atomic_inc(&phba->cgn_driver_evt_cnt);
6100         }
6101         phba->rx_block_cnt += div_u64(rcv, 512);  /* save 512 byte block cnt */
6102
6103         /* Since total_bytes has already been zeroed, it's okay to unblock
6104          * after max_bytes_per_interval is setup.
6105          */
6106         if (atomic_xchg(&phba->cmf_bw_wait, 0))
6107                 queue_work(phba->wq, &phba->unblock_request_work);
6108
6109         /* SCSI IO is now unblocked */
6110         atomic_set(&phba->cmf_stop_io, 0);
6111
6112 skip:
6113         hrtimer_forward_now(timer,
6114                             ktime_set(0, timer_interval * NSEC_PER_MSEC));
6115         return HRTIMER_RESTART;
6116 }
6117
6118 #define trunk_link_status(__idx)\
6119         bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6120                ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6121                 "Link up" : "Link down") : "NA"
6122 /* Did port __idx report an error? */
6123 #define trunk_port_fault(__idx)\
6124         bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6125                (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
6126
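/*
 * Macro expansion sketch: the ## paste keys each lookup off a literal
 * port index, so trunk_link_status(0) expands (roughly) to:
 *
 *	bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc) ?
 *	       ((phba->trunk_link.link0.state == LPFC_LINK_UP) ?
 *		"Link up" : "Link down") : "NA"
 *
 * i.e. "NA" whenever port 0 is not part of the trunk configuration.
 */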
6127 static void
6128 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6129                               struct lpfc_acqe_fc_la *acqe_fc)
6130 {
6131         uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6132         uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6133         u8 cnt = 0;
6134
6135         phba->sli4_hba.link_state.speed =
6136                 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6137                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6138
6139         phba->sli4_hba.link_state.logical_speed =
6140                                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6141         /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6142         phba->fc_linkspeed =
6143                  lpfc_async_link_speed_to_read_top(
6144                                 phba,
6145                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6146
6147         if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6148                 phba->trunk_link.link0.state =
6149                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6150                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6151                 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6152                 cnt++;
6153         }
6154         if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6155                 phba->trunk_link.link1.state =
6156                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6157                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6158                 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6159                 cnt++;
6160         }
6161         if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6162                 phba->trunk_link.link2.state =
6163                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6164                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6165                 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6166                 cnt++;
6167         }
6168         if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6169                 phba->trunk_link.link3.state =
6170                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6171                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6172                 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6173                 cnt++;
6174         }
6175
6176         if (cnt)
6177                 phba->trunk_link.phy_lnk_speed =
6178                         phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
6179         else
6180                 phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;
6181
6182         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6183                         "2910 Async FC Trunking Event - Speed:%d\n"
6184                         "\tLogical speed:%d "
6185                         "port0: %s port1: %s port2: %s port3: %s\n",
6186                         phba->sli4_hba.link_state.speed,
6187                         phba->sli4_hba.link_state.logical_speed,
6188                         trunk_link_status(0), trunk_link_status(1),
6189                         trunk_link_status(2), trunk_link_status(3));
6190
6191         if (phba->cmf_active_mode != LPFC_CFG_OFF)
6192                 lpfc_cmf_signal_init(phba);
6193
6194         if (port_fault)
6195                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6196                                 "3202 trunk error:0x%x (%s) seen on port0:%s "
6197                                 /*
6198                                  * SLI-4: We have only 0xA error codes
6199                                  * defined as of now. print an appropriate
6200                                  * message in case driver needs to be updated.
6201                                  */
6202                                 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6203                                 "UNDEFINED. update driver." : trunk_errmsg[err],
6204                                 trunk_port_fault(0), trunk_port_fault(1),
6205                                 trunk_port_fault(2), trunk_port_fault(3));
6206 }
6207
6208
6209 /**
6210  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6211  * @phba: pointer to lpfc hba data structure.
6212  * @acqe_fc: pointer to the async fc completion queue entry.
6213  *
6214  * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6215  * that the event was received and then issue a read_topology mailbox command so
6216  * that the rest of the driver will treat it the same as SLI3.
6217  **/
6218 static void
6219 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6220 {
6221         LPFC_MBOXQ_t *pmb;
6222         MAILBOX_t *mb;
6223         struct lpfc_mbx_read_top *la;
6224         char *log_level;
6225         int rc;
6226
6227         if (bf_get(lpfc_trailer_type, acqe_fc) !=
6228             LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6229                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6230                                 "2895 Non FC link Event detected.(%d)\n",
6231                                 bf_get(lpfc_trailer_type, acqe_fc));
6232                 return;
6233         }
6234
6235         if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6236             LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6237                 lpfc_update_trunk_link_status(phba, acqe_fc);
6238                 return;
6239         }
6240
6241         /* Keep the link status for extra SLI4 state machine reference */
6242         phba->sli4_hba.link_state.speed =
6243                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6244                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6245         phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6246         phba->sli4_hba.link_state.topology =
6247                                 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6248         phba->sli4_hba.link_state.status =
6249                                 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6250         phba->sli4_hba.link_state.type =
6251                                 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6252         phba->sli4_hba.link_state.number =
6253                                 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6254         phba->sli4_hba.link_state.fault =
6255                                 bf_get(lpfc_acqe_link_fault, acqe_fc);
6256         phba->sli4_hba.link_state.link_status =
6257                                 bf_get(lpfc_acqe_fc_la_link_status, acqe_fc);
6258
6259         /*
6260          * Only select attention types need logical speed modification to what
6261          * was previously set.
6262          */
6263         if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP &&
6264             phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6265                 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6266                     LPFC_FC_LA_TYPE_LINK_DOWN)
6267                         phba->sli4_hba.link_state.logical_speed = 0;
6268                 else if (!phba->sli4_hba.conf_trunk)
6269                         phba->sli4_hba.link_state.logical_speed =
6270                                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6271         }
6272
6273         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6274                         "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6275                         "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6276                         "%dMbps Fault:x%x Link Status:x%x\n",
6277                         phba->sli4_hba.link_state.speed,
6278                         phba->sli4_hba.link_state.topology,
6279                         phba->sli4_hba.link_state.status,
6280                         phba->sli4_hba.link_state.type,
6281                         phba->sli4_hba.link_state.number,
6282                         phba->sli4_hba.link_state.logical_speed,
6283                         phba->sli4_hba.link_state.fault,
6284                         phba->sli4_hba.link_state.link_status);
6285
6286         /*
6287          * The following attention types are informational only, providing
6288          * further details about link status.  Overwrite the value of
6289          * link_state.status appropriately.  No further action is required.
6290          */
6291         if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6292                 switch (phba->sli4_hba.link_state.status) {
6293                 case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
6294                         log_level = KERN_WARNING;
6295                         phba->sli4_hba.link_state.status =
6296                                         LPFC_FC_LA_TYPE_LINK_DOWN;
6297                         break;
6298                 case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
6299                         /*
6300                          * During bb credit recovery establishment, receiving
6301                          * this attention type is normal.  Link Up attention
6302                          * type is expected to occur before this informational
6303                          * attention type so keep the Link Up status.
6304                          */
6305                         log_level = KERN_INFO;
6306                         phba->sli4_hba.link_state.status =
6307                                         LPFC_FC_LA_TYPE_LINK_UP;
6308                         break;
6309                 default:
6310                         log_level = KERN_INFO;
6311                         break;
6312                 }
6313                 lpfc_log_msg(phba, log_level, LOG_SLI,
6314                              "2992 Async FC event - Informational Link "
6315                              "Attention Type x%x\n",
6316                              bf_get(lpfc_acqe_fc_la_att_type, acqe_fc));
6317                 return;
6318         }
6319
6320         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6321         if (!pmb) {
6322                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6323                                 "2897 The mboxq allocation failed\n");
6324                 return;
6325         }
6326         rc = lpfc_mbox_rsrc_prep(phba, pmb);
6327         if (rc) {
6328                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6329                                 "2898 The mboxq prep failed\n");
6330                 goto out_free_pmb;
6331         }
6332
6333         /* Cleanup any outstanding ELS commands */
6334         lpfc_els_flush_all_cmd(phba);
6335
6336         /* Block ELS IOCBs until we have done process link event */
6337         phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6338
6339         /* Update link event statistics */
6340         phba->sli.slistat.link_event++;
6341
6342         /* Create lpfc_handle_latt mailbox command from link ACQE */
6343         lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
6344         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6345         pmb->vport = phba->pport;
6346
6347         if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6348                 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6349
6350                 switch (phba->sli4_hba.link_state.status) {
6351                 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6352                         phba->link_flag |= LS_MDS_LINK_DOWN;
6353                         break;
6354                 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6355                         phba->link_flag |= LS_MDS_LOOPBACK;
6356                         break;
6357                 default:
6358                         break;
6359                 }
6360
6361                 /* Initialize completion status */
6362                 mb = &pmb->u.mb;
6363                 mb->mbxStatus = MBX_SUCCESS;
6364
6365                 /* Parse port fault information field */
6366                 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6367
6368                 /* Parse and translate link attention fields */
6369                 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6370                 la->eventTag = acqe_fc->event_tag;
6371
6372                 if (phba->sli4_hba.link_state.status ==
6373                     LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6374                         bf_set(lpfc_mbx_read_top_att_type, la,
6375                                LPFC_FC_LA_TYPE_UNEXP_WWPN);
6376                 } else {
6377                         bf_set(lpfc_mbx_read_top_att_type, la,
6378                                LPFC_FC_LA_TYPE_LINK_DOWN);
6379                 }
6380                 /* Invoke the mailbox command callback function */
6381                 lpfc_mbx_cmpl_read_topology(phba, pmb);
6382
6383                 return;
6384         }
6385
6386         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6387         if (rc == MBX_NOT_FINISHED)
6388                 goto out_free_pmb;
6389         return;
6390
6391 out_free_pmb:
6392         lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6393 }
6394
6395 /**
6396  * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6397  * @phba: pointer to lpfc hba data structure.
6398  * @acqe_sli: pointer to the async SLI completion queue entry.
6399  *
6400  * This routine is to handle the SLI4 asynchronous SLI events.
6401  **/
6402 static void
6403 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6404 {
6405         char port_name;
6406         char message[128];
6407         uint8_t status;
6408         uint8_t evt_type;
6409         uint8_t operational = 0;
6410         struct temp_event temp_event_data;
6411         struct lpfc_acqe_misconfigured_event *misconfigured;
6412         struct lpfc_acqe_cgn_signal *cgn_signal;
6413         struct Scsi_Host  *shost;
6414         struct lpfc_vport **vports;
6415         int rc, i, cnt;
6416
6417         evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6418
6419         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6420                         "2901 Async SLI event - Type:%d, Event Data: x%08x "
6421                         "x%08x x%08x x%08x\n", evt_type,
6422                         acqe_sli->event_data1, acqe_sli->event_data2,
6423                         acqe_sli->event_data3, acqe_sli->trailer);
6424
6425         port_name = phba->Port[0];
6426         if (port_name == 0x00)
6427                 port_name = '?'; /* port name is empty */
6428
6429         switch (evt_type) {
6430         case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6431                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6432                 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6433                 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6434
6435                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6436                                 "3190 Over Temperature:%d Celsius- Port Name %c\n",
6437                                 acqe_sli->event_data1, port_name);
6438
6439                 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6440                 shost = lpfc_shost_from_vport(phba->pport);
6441                 fc_host_post_vendor_event(shost, fc_get_event_number(),
6442                                           sizeof(temp_event_data),
6443                                           (char *)&temp_event_data,
6444                                           SCSI_NL_VID_TYPE_PCI
6445                                           | PCI_VENDOR_ID_EMULEX);
6446                 break;
6447         case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6448                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6449                 temp_event_data.event_code = LPFC_NORMAL_TEMP;
6450                 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6451
6452                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT,
6453                                 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6454                                 acqe_sli->event_data1, port_name);
6455
6456                 shost = lpfc_shost_from_vport(phba->pport);
6457                 fc_host_post_vendor_event(shost, fc_get_event_number(),
6458                                           sizeof(temp_event_data),
6459                                           (char *)&temp_event_data,
6460                                           SCSI_NL_VID_TYPE_PCI
6461                                           | PCI_VENDOR_ID_EMULEX);
6462                 break;
6463         case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6464                 misconfigured = (struct lpfc_acqe_misconfigured_event *)
6465                                         &acqe_sli->event_data1;
6466
6467                 /* fetch the status for this port */
6468                 switch (phba->sli4_hba.lnk_info.lnk_no) {
6469                 case LPFC_LINK_NUMBER_0:
6470                         status = bf_get(lpfc_sli_misconfigured_port0_state,
6471                                         &misconfigured->theEvent);
6472                         operational = bf_get(lpfc_sli_misconfigured_port0_op,
6473                                         &misconfigured->theEvent);
6474                         break;
6475                 case LPFC_LINK_NUMBER_1:
6476                         status = bf_get(lpfc_sli_misconfigured_port1_state,
6477                                         &misconfigured->theEvent);
6478                         operational = bf_get(lpfc_sli_misconfigured_port1_op,
6479                                         &misconfigured->theEvent);
6480                         break;
6481                 case LPFC_LINK_NUMBER_2:
6482                         status = bf_get(lpfc_sli_misconfigured_port2_state,
6483                                         &misconfigured->theEvent);
6484                         operational = bf_get(lpfc_sli_misconfigured_port2_op,
6485                                         &misconfigured->theEvent);
6486                         break;
6487                 case LPFC_LINK_NUMBER_3:
6488                         status = bf_get(lpfc_sli_misconfigured_port3_state,
6489                                         &misconfigured->theEvent);
6490                         operational = bf_get(lpfc_sli_misconfigured_port3_op,
6491                                         &misconfigured->theEvent);
6492                         break;
6493                 default:
6494                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6495                                         "3296 "
6496                                         "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6497                                         "event: Invalid link %d",
6498                                         phba->sli4_hba.lnk_info.lnk_no);
6499                         return;
6500                 }
6501
6502                 /* Skip if optic state unchanged */
6503                 if (phba->sli4_hba.lnk_info.optic_state == status)
6504                         return;
6505
6506                 switch (status) {
6507                 case LPFC_SLI_EVENT_STATUS_VALID:
6508                         sprintf(message, "Physical Link is functional");
6509                         break;
6510                 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6511                         sprintf(message, "Optics faulted/incorrectly "
6512                                 "installed/not installed - Reseat optics, "
6513                                 "if issue not resolved, replace.");
6514                         break;
6515                 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6516                         sprintf(message,
6517                                 "Optics of two types installed - Remove one "
6518                                 "optic or install matching pair of optics.");
6519                         break;
6520                 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6521                         sprintf(message, "Incompatible optics - Replace with "
6522                                 "compatible optics for card to function.");
6523                         break;
6524                 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6525                         sprintf(message, "Unqualified optics - Replace with "
6526                                 "Avago optics for Warranty and Technical "
6527                                 "Support - Link is%s operational",
6528                                 (operational) ? " not" : "");
6529                         break;
6530                 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6531                         sprintf(message, "Uncertified optics - Replace with "
6532                                 "Avago-certified optics to enable link "
6533                                 "operation - Link is%s operational",
6534                                 (operational) ? " not" : "");
6535                         break;
6536                 default:
6537                         /* firmware is reporting a status we don't know about */
6538                         sprintf(message, "Unknown event status x%02x", status);
6539                         break;
6540                 }
6541
6542                 /* Issue READ_CONFIG mbox command to refresh supported speeds */
6543                 rc = lpfc_sli4_read_config(phba);
6544                 if (rc) {
6545                         phba->lmt = 0;
6546                         lpfc_printf_log(phba, KERN_ERR,
6547                                         LOG_TRACE_EVENT,
6548                                         "3194 Unable to retrieve supported "
6549                                         "speeds, rc = 0x%x\n", rc);
6550                 }
6551                 rc = lpfc_sli4_refresh_params(phba);
6552                 if (rc) {
6553                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6554                                         "3174 Unable to update pls support, "
6555                                         "rc x%x\n", rc);
6556                 }
6557                 vports = lpfc_create_vport_work_array(phba);
6558                 if (vports != NULL) {
6559                         for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6560                                         i++) {
6561                                 shost = lpfc_shost_from_vport(vports[i]);
6562                                 lpfc_host_supported_speeds_set(shost);
6563                         }
6564                 }
6565                 lpfc_destroy_vport_work_array(phba, vports);
6566
6567                 phba->sli4_hba.lnk_info.optic_state = status;
6568                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6569                                 "3176 Port Name %c %s\n", port_name, message);
6570                 break;
6571         case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6572                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6573                                 "3192 Remote DPort Test Initiated - "
6574                                 "Event Data1:x%08x Event Data2: x%08x\n",
6575                                 acqe_sli->event_data1, acqe_sli->event_data2);
6576                 break;
6577         case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6578                 /* Call FW to obtain active parms */
6579                 lpfc_sli4_cgn_parm_chg_evt(phba);
6580                 break;
6581         case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6582                 /* Misconfigured WWN. Reports that the SLI Port is configured
6583                  * to use FA-WWN, but the attached device doesn't support it.
6584                  * Event Data1 - N.A, Event Data2 - N.A
6585                  * This event only happens on the physical port.
6586                  */
6587                 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
6588                              "2699 Misconfigured FA-PWWN - Attached device "
6589                              "does not support FA-PWWN\n");
6590                 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
6591                 memset(phba->pport->fc_portname.u.wwn, 0,
6592                        sizeof(struct lpfc_name));
6593                 break;
6594         case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6595                 /* EEPROM failure. No driver action is required */
6596                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6597                              "2518 EEPROM failure - "
6598                              "Event Data1: x%08x Event Data2: x%08x\n",
6599                              acqe_sli->event_data1, acqe_sli->event_data2);
6600                 break;
6601         case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6602                 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6603                         break;
6604                 cgn_signal = (struct lpfc_acqe_cgn_signal *)
6605                                         &acqe_sli->event_data1;
6606                 phba->cgn_acqe_cnt++;
6607
6608                 cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6609                 atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6610                 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6611
6612                 /* no threshold for CMF, even 1 signal will trigger an event */
6613
6614                 /* Alarm overrides warning, so check that first */
6615                 if (cgn_signal->alarm_cnt) {
6616                         if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6617                                 /* Keep track of alarm cnt for CMF_SYNC_WQE */
6618                                 atomic_add(cgn_signal->alarm_cnt,
6619                                            &phba->cgn_sync_alarm_cnt);
6620                         }
6621                 } else if (cnt) {
6622                         /* signal action needs to be taken */
6623                         if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6624                             phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6625                                 /* Keep track of warning cnt for CMF_SYNC_WQE */
6626                                 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6627                         }
6628                 }
6629                 break;
6630         case LPFC_SLI_EVENT_TYPE_RD_SIGNAL:
6631                 /* May be accompanied by a temperature event */
6632                 lpfc_printf_log(phba, KERN_INFO,
6633                                 LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT,
6634                                 "2902 Remote Degrade Signaling: x%08x x%08x "
6635                                 "x%08x\n",
6636                                 acqe_sli->event_data1, acqe_sli->event_data2,
6637                                 acqe_sli->event_data3);
6638                 break;
6639         default:
6640                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6641                                 "3193 Unrecognized SLI event, type: 0x%x",
6642                                 evt_type);
6643                 break;
6644         }
6645 }
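/*
 * A minimal sketch of the bf_get()/bf_set() field-access scheme the handler
 * above relies on: each named field has generated SHIFT/MASK constants used
 * to isolate or merge its bits within a 32-bit word.  The demo_* names below
 * are illustrative only and do not exist in the driver.
 */
#if 0
#define demo_field_SHIFT	8
#define demo_field_MASK		0x000000ff

static inline uint32_t demo_bf_get(uint32_t word)
{
	/* Isolate the field: shift it down, then mask off the other bits */
	return (word >> demo_field_SHIFT) & demo_field_MASK;
}

static inline uint32_t demo_bf_set(uint32_t word, uint32_t val)
{
	/* Clear the field's bits, then merge in the new value */
	word &= ~(demo_field_MASK << demo_field_SHIFT);
	return word | ((val & demo_field_MASK) << demo_field_SHIFT);
}
#endif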
6646
6647 /**
6648  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6649  * @vport: pointer to vport data structure.
6650  *
6651  * This routine is to perform Clear Virtual Link (CVL) on a vport in
6652  * response to a CVL event.
6653  *
6654  * Return the pointer to the ndlp with the vport if successful, otherwise
6655  * return NULL.
6656  **/
6657 static struct lpfc_nodelist *
6658 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6659 {
6660         struct lpfc_nodelist *ndlp;
6661         struct Scsi_Host *shost;
6662         struct lpfc_hba *phba;
6663
6664         if (!vport)
6665                 return NULL;
6666         phba = vport->phba;
6667         if (!phba)
6668                 return NULL;
6669         ndlp = lpfc_findnode_did(vport, Fabric_DID);
6670         if (!ndlp) {
6671                 /* Cannot find existing Fabric ndlp, so allocate a new one */
6672                 ndlp = lpfc_nlp_init(vport, Fabric_DID);
6673                 if (!ndlp)
6674                         return NULL;
6675                 /* Set the node type */
6676                 ndlp->nlp_type |= NLP_FABRIC;
6677                 /* Put ndlp onto node list */
6678                 lpfc_enqueue_node(vport, ndlp);
6679         }
6680         if ((phba->pport->port_state < LPFC_FLOGI) &&
6681                 (phba->pport->port_state != LPFC_VPORT_FAILED))
6682                 return NULL;
6683         /* If virtual link is not yet instantiated ignore CVL */
6684         if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6685                 && (vport->port_state != LPFC_VPORT_FAILED))
6686                 return NULL;
6687         shost = lpfc_shost_from_vport(vport);
6688         if (!shost)
6689                 return NULL;
6690         lpfc_linkdown_port(vport);
6691         lpfc_cleanup_pending_mbox(vport);
6692         spin_lock_irq(shost->host_lock);
6693         vport->fc_flag |= FC_VPORT_CVL_RCVD;
6694         spin_unlock_irq(shost->host_lock);
6695
6696         return ndlp;
6697 }
6698
6699 /**
6700  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6701  * @phba: pointer to lpfc hba data structure.
6702  *
6703  * This routine is to perform Clear Virtual Link (CVL) on all vports in
6704  * response to a FCF dead event.
6705  **/
6706 static void
6707 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6708 {
6709         struct lpfc_vport **vports;
6710         int i;
6711
6712         vports = lpfc_create_vport_work_array(phba);
6713         if (vports)
6714                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6715                         lpfc_sli4_perform_vport_cvl(vports[i]);
6716         lpfc_destroy_vport_work_array(phba, vports);
6717 }
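/*
 * The create/destroy work-array pair used above is the driver's standard way
 * to take a snapshot of the active vports: the returned array is NULL
 * terminated and walked with the "i <= phba->max_vports && vports[i]" idiom.
 * A usage sketch follows; demo_per_vport_action() is hypothetical.
 */
#if 0
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			demo_per_vport_action(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
#endif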
6718
6719 /**
6720  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6721  * @phba: pointer to lpfc hba data structure.
6722  * @acqe_fip: pointer to the async fcoe completion queue entry.
6723  *
6724  * This routine is to handle the SLI4 asynchronous fcoe event.
6725  **/
6726 static void
6727 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6728                         struct lpfc_acqe_fip *acqe_fip)
6729 {
6730         uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6731         int rc;
6732         struct lpfc_vport *vport;
6733         struct lpfc_nodelist *ndlp;
6734         int active_vlink_present;
6735         struct lpfc_vport **vports;
6736         int i;
6737
6738         phba->fc_eventTag = acqe_fip->event_tag;
6739         phba->fcoe_eventtag = acqe_fip->event_tag;
6740         switch (event_type) {
6741         case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6742         case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6743                 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6744                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6745                                         "2546 New FCF event, evt_tag:x%x, "
6746                                         "index:x%x\n",
6747                                         acqe_fip->event_tag,
6748                                         acqe_fip->index);
6749                 else
6750                         lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6751                                         LOG_DISCOVERY,
6752                                         "2788 FCF param modified event, "
6753                                         "evt_tag:x%x, index:x%x\n",
6754                                         acqe_fip->event_tag,
6755                                         acqe_fip->index);
6756                 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6757                         /*
6758                          * During period of FCF discovery, read the FCF
6759                          * table record indexed by the event to update
6760                          * FCF roundrobin failover eligible FCF bmask.
6761                          */
6762                         lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6763                                         LOG_DISCOVERY,
6764                                         "2779 Read FCF (x%x) for updating "
6765                                         "roundrobin FCF failover bmask\n",
6766                                         acqe_fip->index);
6767                         rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6768                 }
6769
6770                 /* If the FCF discovery is in progress, do nothing. */
6771                 spin_lock_irq(&phba->hbalock);
6772                 if (phba->hba_flag & FCF_TS_INPROG) {
6773                         spin_unlock_irq(&phba->hbalock);
6774                         break;
6775                 }
6776                 /* If fast FCF failover rescan event is pending, do nothing */
6777                 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6778                         spin_unlock_irq(&phba->hbalock);
6779                         break;
6780                 }
6781
6782                 /* If the FCF has been in discovered state, do nothing. */
6783                 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6784                         spin_unlock_irq(&phba->hbalock);
6785                         break;
6786                 }
6787                 spin_unlock_irq(&phba->hbalock);
6788
6789                 /* Otherwise, scan the entire FCF table and re-discover SAN */
6790                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6791                                 "2770 Start FCF table scan per async FCF "
6792                                 "event, evt_tag:x%x, index:x%x\n",
6793                                 acqe_fip->event_tag, acqe_fip->index);
6794                 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6795                                                      LPFC_FCOE_FCF_GET_FIRST);
6796                 if (rc)
6797                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6798                                         "2547 Issue FCF scan read FCF mailbox "
6799                                         "command failed (x%x)\n", rc);
6800                 break;
6801
6802         case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6803                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6804                                 "2548 FCF Table full count 0x%x tag 0x%x\n",
6805                                 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6806                                 acqe_fip->event_tag);
6807                 break;
6808
6809         case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6810                 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6811                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6812                                 "2549 FCF (x%x) disconnected from network, "
6813                                  "tag:x%x\n", acqe_fip->index,
6814                                  acqe_fip->event_tag);
6815                 /*
6816                  * If we are in the middle of FCF failover process, clear
6817                  * the corresponding FCF bit in the roundrobin bitmap.
6818                  */
6819                 spin_lock_irq(&phba->hbalock);
6820                 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6821                     (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6822                         spin_unlock_irq(&phba->hbalock);
6823                         /* Update FLOGI FCF failover eligible FCF bmask */
6824                         lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6825                         break;
6826                 }
6827                 spin_unlock_irq(&phba->hbalock);
6828
6829                 /* If the event is not for currently used fcf do nothing */
6830                 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6831                         break;
6832
6833                 /*
6834                  * Otherwise, request the port to rediscover the entire FCF
6835                  * table for a fast recovery from the case that the current
6836                  * FCF is no longer valid, since we are not already in the
6837                  * middle of the FCF failover process.
6838                  */
6839                 spin_lock_irq(&phba->hbalock);
6840                 /* Mark the fast failover process in progress */
6841                 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6842                 spin_unlock_irq(&phba->hbalock);
6843
6844                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6845                                 "2771 Start FCF fast failover process due to "
6846                                 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6847                                 "\n", acqe_fip->event_tag, acqe_fip->index);
6848                 rc = lpfc_sli4_redisc_fcf_table(phba);
6849                 if (rc) {
6850                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6851                                         LOG_TRACE_EVENT,
6852                                         "2772 Issue FCF rediscover mailbox "
6853                                         "command failed, fail through to FCF "
6854                                         "dead event\n");
6855                         spin_lock_irq(&phba->hbalock);
6856                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6857                         spin_unlock_irq(&phba->hbalock);
6858                         /*
6859                          * Last resort will fail over by treating this
6860                          * as a link down to FCF registration.
6861                          */
6862                         lpfc_sli4_fcf_dead_failthrough(phba);
6863                 } else {
6864                         /* Reset FCF roundrobin bmask for new discovery */
6865                         lpfc_sli4_clear_fcf_rr_bmask(phba);
6866                         /*
6867                          * Handling fast FCF failover to a DEAD FCF event is
6868                          * considered equivalent to receiving CVL to all vports.
6869                          */
6870                         lpfc_sli4_perform_all_vport_cvl(phba);
6871                 }
6872                 break;
6873         case LPFC_FIP_EVENT_TYPE_CVL:
6874                 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6875                 lpfc_printf_log(phba, KERN_ERR,
6876                                 LOG_TRACE_EVENT,
6877                         "2718 Clear Virtual Link Received for VPI 0x%x"
6878                         " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6879
6880                 vport = lpfc_find_vport_by_vpid(phba,
6881                                                 acqe_fip->index);
6882                 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6883                 if (!ndlp)
6884                         break;
6885                 active_vlink_present = 0;
6886
6887                 vports = lpfc_create_vport_work_array(phba);
6888                 if (vports) {
6889                         for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6890                                         i++) {
6891                                 if ((!(vports[i]->fc_flag &
6892                                         FC_VPORT_CVL_RCVD)) &&
6893                                         (vports[i]->port_state > LPFC_FDISC)) {
6894                                         active_vlink_present = 1;
6895                                         break;
6896                                 }
6897                         }
6898                         lpfc_destroy_vport_work_array(phba, vports);
6899                 }
6900
6901                 /*
6902                  * Don't re-instantiate if vport is marked for deletion.
6903                  * If we are here first then vport_delete is going to wait
6904                  * for discovery to complete.
6905                  */
6906                 if (!(vport->load_flag & FC_UNLOADING) &&
6907                                         active_vlink_present) {
6908                         /*
6909                          * If there are other active VLinks present,
6910                          * re-instantiate the Vlink using FDISC.
6911                          */
6912                         mod_timer(&ndlp->nlp_delayfunc,
6913                                   jiffies + msecs_to_jiffies(1000));
6914                         spin_lock_irq(&ndlp->lock);
6915                         ndlp->nlp_flag |= NLP_DELAY_TMO;
6916                         spin_unlock_irq(&ndlp->lock);
6917                         ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6918                         vport->port_state = LPFC_FDISC;
6919                 } else {
6920                         /*
6921                          * Otherwise, we request the port to rediscover
6922                          * the entire FCF table for a fast recovery
6923                          * from the possible case that the current FCF
6924                          * is no longer valid, if we are not already
6925                          * in the FCF failover process.
6926                          */
6927                         spin_lock_irq(&phba->hbalock);
6928                         if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6929                                 spin_unlock_irq(&phba->hbalock);
6930                                 break;
6931                         }
6932                         /* Mark the fast failover process in progress */
6933                         phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6934                         spin_unlock_irq(&phba->hbalock);
6935                         lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6936                                         LOG_DISCOVERY,
6937                                         "2773 Start FCF failover per CVL, "
6938                                         "evt_tag:x%x\n", acqe_fip->event_tag);
6939                         rc = lpfc_sli4_redisc_fcf_table(phba);
6940                         if (rc) {
6941                                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6942                                                 LOG_TRACE_EVENT,
6943                                                 "2774 Issue FCF rediscover "
6944                                                 "mailbox command failed, "
6945                                                 "fail through to CVL event\n");
6946                                 spin_lock_irq(&phba->hbalock);
6947                                 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6948                                 spin_unlock_irq(&phba->hbalock);
6949                                 /*
6950                                  * Last resort will be a retry on the
6951                                  * currently registered FCF entry.
6952                                  */
6953                                 lpfc_retry_pport_discovery(phba);
6954                         } else
6955                                 /*
6956                                  * Reset FCF roundrobin bmask for new
6957                                  * discovery.
6958                                  */
6959                                 lpfc_sli4_clear_fcf_rr_bmask(phba);
6960                 }
6961                 break;
6962         default:
6963                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6964                                 "0288 Unknown FCoE event type 0x%x event tag "
6965                                 "0x%x\n", event_type, acqe_fip->event_tag);
6966                 break;
6967         }
6968 }
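/*
 * The FCF DEAD and CVL paths above maintain a per-index eligibility bitmap
 * for roundrobin FCF failover: a dead FCF's bit is cleared so failover skips
 * it, and the whole mask is reset before a fresh table scan.  A minimal model
 * of that bookkeeping is sketched below; the demo_* names are hypothetical
 * and the table-size bound is assumed.
 */
#if 0
static DECLARE_BITMAP(demo_fcf_rr_bmask, LPFC_SLI4_FCF_TBL_INDX_MAX);

static void demo_fcf_rr_index_clear(uint16_t fcf_indx)
{
	clear_bit(fcf_indx, demo_fcf_rr_bmask);	/* no longer eligible */
}

static void demo_fcf_rr_bmask_reset(void)
{
	/* Fresh discovery: every FCF index becomes eligible again */
	bitmap_zero(demo_fcf_rr_bmask, LPFC_SLI4_FCF_TBL_INDX_MAX);
}
#endif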
6969
6970 /**
6971  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6972  * @phba: pointer to lpfc hba data structure.
6973  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
6974  *
6975  * This routine is to handle the SLI4 asynchronous dcbx event.
6976  **/
6977 static void
6978 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6979                          struct lpfc_acqe_dcbx *acqe_dcbx)
6980 {
6981         phba->fc_eventTag = acqe_dcbx->event_tag;
6982         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6983                         "0290 The SLI4 DCBX asynchronous event is not "
6984                         "handled yet\n");
6985 }
6986
6987 /**
6988  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6989  * @phba: pointer to lpfc hba data structure.
6990  * @acqe_grp5: pointer to the async grp5 completion queue entry.
6991  *
6992  * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
6993  * is an asynchronous notification of a logical link speed change.  The Port
6994  * reports the logical link speed in units of 10Mbps.
6995  **/
6996 static void
6997 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
6998                          struct lpfc_acqe_grp5 *acqe_grp5)
6999 {
7000         uint16_t prev_ll_spd;
7001
7002         phba->fc_eventTag = acqe_grp5->event_tag;
7003         phba->fcoe_eventtag = acqe_grp5->event_tag;
7004         prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
7005         phba->sli4_hba.link_state.logical_speed =
7006                 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
7007         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7008                         "2789 GRP5 Async Event: Updating logical link speed "
7009                         "from %dMbps to %dMbps\n", prev_ll_spd,
7010                         phba->sli4_hba.link_state.logical_speed);
7011 }
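/*
 * Worked example of the 10 Mbps scaling above: a raw
 * lpfc_acqe_grp5_llink_spd value of 400 is stored as 400 * 10 = 4000 Mbps,
 * i.e. a 4 Gbps logical link speed.
 */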
7012
7013 /**
7014  * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
7015  * @phba: pointer to lpfc hba data structure.
7016  *
7017  * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
7018  * is an asynchronous notification of a request to reset CM stats.
7019  **/
7020 static void
7021 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7022 {
7023         if (!phba->cgn_i)
7024                 return;
7025         lpfc_init_congestion_stat(phba);
7026 }
7027
7028 /**
7029  * lpfc_cgn_params_val - Validate FW congestion parameters.
7030  * @phba: pointer to lpfc hba data structure.
7031  * @p_cfg_param: pointer to FW provided congestion parameters.
7032  *
7033  * This routine validates the congestion parameters passed
7034  * by the FW to the driver via an ACQE event.
7035  **/
7036 static void
7037 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7038 {
7039         spin_lock_irq(&phba->hbalock);
7040
7041         if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7042                              LPFC_CFG_MONITOR)) {
7043                 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7044                                 "6225 CMF mode param out of range: %d\n",
7045                                  p_cfg_param->cgn_param_mode);
7046                 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7047         }
7048
7049         spin_unlock_irq(&phba->hbalock);
7050 }
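/*
 * lpfc_rangecheck() above is an inclusive bounds test.  A sketch of the
 * expected semantics (illustrative; not the driver's actual definition):
 */
#if 0
static inline bool demo_rangecheck(uint32_t val, uint32_t min, uint32_t max)
{
	return val >= min && val <= max;	/* true when val is in [min, max] */
}
#endif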
7051
7052 static const char * const lpfc_cmf_mode_to_str[] = {
7053         "OFF",
7054         "MANAGED",
7055         "MONITOR",
7056 };
7057
7058 /**
7059  * lpfc_cgn_params_parse - Process a FW cong parm change event
7060  * @phba: pointer to lpfc hba data structure.
7061  * @p_cgn_param: pointer to a data buffer with the FW cong params.
7062  * @len: the size of pdata in bytes.
7063  *
7064  * This routine validates the congestion management buffer signature
7065  * from the FW, validates the contents and makes corrections for
7066  * valid, in-range values.  If the signature magic is correct and
7067  * after parameter validation, the contents are copied to the driver's
7068  * @phba structure. If the magic is incorrect, an error message is
7069  * logged.
7070  **/
7071 static void
7072 lpfc_cgn_params_parse(struct lpfc_hba *phba,
7073                       struct lpfc_cgn_param *p_cgn_param, uint32_t len)
7074 {
7075         struct lpfc_cgn_info *cp;
7076         uint32_t crc, oldmode;
7077         char acr_string[4] = {0};
7078
7079         /* Make sure the FW has encoded the correct magic number to
7080          * validate the congestion parameter in FW memory.
7081          */
7082         if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7083                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7084                                 "4668 FW cgn parm buffer data: "
7085                                 "magic 0x%x version %d mode %d "
7086                                 "level0 %d level1 %d "
7087                                 "level2 %d byte13 %d "
7088                                 "byte14 %d byte15 %d "
7089                                 "byte11 %d byte12 %d activeMode %d\n",
7090                                 p_cgn_param->cgn_param_magic,
7091                                 p_cgn_param->cgn_param_version,
7092                                 p_cgn_param->cgn_param_mode,
7093                                 p_cgn_param->cgn_param_level0,
7094                                 p_cgn_param->cgn_param_level1,
7095                                 p_cgn_param->cgn_param_level2,
7096                                 p_cgn_param->byte13,
7097                                 p_cgn_param->byte14,
7098                                 p_cgn_param->byte15,
7099                                 p_cgn_param->byte11,
7100                                 p_cgn_param->byte12,
7101                                 phba->cmf_active_mode);
7102
7103                 oldmode = phba->cmf_active_mode;
7104
7105                 /* Any parameters out of range are corrected to defaults
7106                  * by this routine.  No need to fail.
7107                  */
7108                 lpfc_cgn_params_val(phba, p_cgn_param);
7109
7110                 /* Parameters are verified, move them into driver storage */
7111                 spin_lock_irq(&phba->hbalock);
7112                 memcpy(&phba->cgn_p, p_cgn_param,
7113                        sizeof(struct lpfc_cgn_param));
7114
7115                 /* Update parameters in congestion info buffer now */
7116                 if (phba->cgn_i) {
7117                         cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7118                         cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7119                         cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7120                         cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7121                         cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7122                         crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
7123                                                   LPFC_CGN_CRC32_SEED);
7124                         cp->cgn_info_crc = cpu_to_le32(crc);
7125                 }
7126                 spin_unlock_irq(&phba->hbalock);
7127
7128                 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7129
7130                 switch (oldmode) {
7131                 case LPFC_CFG_OFF:
7132                         if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7133                                 /* Turning CMF on */
7134                                 lpfc_cmf_start(phba);
7135
7136                                 if (phba->link_state >= LPFC_LINK_UP) {
7137                                         phba->cgn_reg_fpin =
7138                                                 phba->cgn_init_reg_fpin;
7139                                         phba->cgn_reg_signal =
7140                                                 phba->cgn_init_reg_signal;
7141                                         lpfc_issue_els_edc(phba->pport, 0);
7142                                 }
7143                         }
7144                         break;
7145                 case LPFC_CFG_MANAGED:
7146                         switch (phba->cgn_p.cgn_param_mode) {
7147                         case LPFC_CFG_OFF:
7148                                 /* Turning CMF off */
7149                                 lpfc_cmf_stop(phba);
7150                                 if (phba->link_state >= LPFC_LINK_UP)
7151                                         lpfc_issue_els_edc(phba->pport, 0);
7152                                 break;
7153                         case LPFC_CFG_MONITOR:
7154                                 phba->cmf_max_bytes_per_interval =
7155                                         phba->cmf_link_byte_count;
7156
7157                                 /* Resume blocked IO - unblock on workqueue */
7158                                 queue_work(phba->wq,
7159                                            &phba->unblock_request_work);
7160                                 break;
7161                         }
7162                         break;
7163                 case LPFC_CFG_MONITOR:
7164                         switch (phba->cgn_p.cgn_param_mode) {
7165                         case LPFC_CFG_OFF:
7166                                 /* Turning CMF off */
7167                                 lpfc_cmf_stop(phba);
7168                                 if (phba->link_state >= LPFC_LINK_UP)
7169                                         lpfc_issue_els_edc(phba->pport, 0);
7170                                 break;
7171                         case LPFC_CFG_MANAGED:
7172                                 lpfc_cmf_signal_init(phba);
7173                                 break;
7174                         }
7175                         break;
7176                 }
7177                 if (oldmode != LPFC_CFG_OFF ||
7178                     oldmode != phba->cgn_p.cgn_param_mode) {
7179                         if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
7180                                 scnprintf(acr_string, sizeof(acr_string), "%u",
7181                                           phba->cgn_p.cgn_param_level0);
7182                         else
7183                                 scnprintf(acr_string, sizeof(acr_string), "NA");
7184
7185                         dev_info(&phba->pcidev->dev, "%d: "
7186                                  "4663 CMF: Mode %s acr %s\n",
7187                                  phba->brd_no,
7188                                  lpfc_cmf_mode_to_str
7189                                  [phba->cgn_p.cgn_param_mode],
7190                                  acr_string);
7191                 }
7192         } else {
7193                 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7194                                 "4669 FW cgn parm buf wrong magic 0x%x "
7195                                 "version %d\n", p_cgn_param->cgn_param_magic,
7196                                 p_cgn_param->cgn_param_version);
7197         }
7198 }
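/*
 * Update discipline used above for the shared congestion info buffer:
 * mutate the fields, recompute the CRC32 over the whole structure with the
 * fixed seed, and store it little-endian so both driver and firmware can
 * validate the buffer.  A sketch of that sequence follows;
 * demo_update_and_seal() is hypothetical, the helper and constants are the
 * ones used above.
 */
#if 0
static void demo_update_and_seal(struct lpfc_cgn_info *cp, uint8_t mode)
{
	uint32_t crc;

	cp->cgn_info_mode = mode;			/* 1. mutate fields */
	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,	/* 2. reseal        */
				  LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(crc);		/* 3. store LE CRC  */
}
#endif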
7199
7200 /**
7201  * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7202  * @phba: pointer to lpfc hba data structure.
7203  *
7204  * This routine issues a read_object mailbox command to
7205  * get the congestion management parameters from the FW,
7206  * parses them, and updates the driver maintained values.
7207  *
7208  * Returns
7209  *  0     if the object was empty
7210  *  negative errno value if an error was encountered
7211  *  Count if bytes were read from object
7212  **/
7213 int
7214 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7215 {
7216         int ret = 0;
7217         struct lpfc_cgn_param *p_cgn_param = NULL;
7218         u32 *pdata = NULL;
7219         u32 len = 0;
7220
7221         /* Find out if the FW has a new set of congestion parameters. */
7222         len = sizeof(struct lpfc_cgn_param);
7223         pdata = kzalloc(len, GFP_KERNEL);
7224         if (!pdata)
7225                 return -ENOMEM;
7226         ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7227                                pdata, len);
7228
7229         /* 0 means no data.  A negative means error.  A positive means
7230          * bytes were copied.
7231          */
7232         if (!ret) {
7233                 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7234                                 "4670 CGN RD OBJ returns no data\n");
7235                 goto rd_obj_err;
7236         } else if (ret < 0) {
7237                 /* Some error.  Just exit and return it to the caller.*/
7238                 goto rd_obj_err;
7239         }
7240
7241         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7242                         "6234 READ CGN PARAMS Successful %d\n", len);
7243
7244         /* Parse data pointer over len and update the phba congestion
7245          * parameters with values passed back.  The receive rate values
7246          * may have been altered in FW, but take no action here.
7247          */
7248         p_cgn_param = (struct lpfc_cgn_param *)pdata;
7249         lpfc_cgn_params_parse(phba, p_cgn_param, len);
7250
7251  rd_obj_err:
7252         kfree(pdata);
7253         return ret;
7254 }
7255
7256 /**
7257  * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7258  * @phba: pointer to lpfc hba data structure.
7259  *
7260  * The FW generated Async ACQE SLI event calls this routine when
7261  * the event type is an SLI Internal Port Event and the Event Code
7262  * indicates a change to the FW maintained congestion parameters.
7263  *
7264  * This routine executes a Read_Object mailbox call to obtain the
7265  * current congestion parameters maintained in FW and corrects
7266  * the driver's active congestion parameters.
7267  *
7268  * The acqe event is not passed because there is no further data
7269  * required.
7270  *
7271  * Returns nonzero error if event processing encountered an error.
7272  * Zero otherwise for success.
7273  **/
7274 static int
7275 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7276 {
7277         int ret = 0;
7278
7279         if (!phba->sli4_hba.pc_sli4_params.cmf) {
7280                 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7281                                 "4664 Cgn Evt when E2E off. Drop event\n");
7282                 return -EACCES;
7283         }
7284
7285         /* If the event is claiming an empty object, it's ok.  A write
7286          * could have cleared it.  Only error is a negative return
7287          * status.
7288          */
7289         ret = lpfc_sli4_cgn_params_read(phba);
7290         if (ret < 0) {
7291                 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7292                                 "4667 Error reading Cgn Params (%d)\n",
7293                                 ret);
7294         } else if (!ret) {
7295                 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7296                                 "4673 CGN Event empty object.\n");
7297         }
7298         return ret;
7299 }
7300
7301 /**
7302  * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
7303  * @phba: pointer to lpfc hba data structure.
7304  *
7305  * This routine is invoked by the worker thread to process all the pending
7306  * SLI4 asynchronous events.
7307  **/
7308 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7309 {
7310         struct lpfc_cq_event *cq_event;
7311         unsigned long iflags;
7312
7313         /* First, declare the async event has been handled */
7314         spin_lock_irqsave(&phba->hbalock, iflags);
7315         phba->hba_flag &= ~ASYNC_EVENT;
7316         spin_unlock_irqrestore(&phba->hbalock, iflags);
7317
7318         /* Now, handle all the async events */
7319         spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7320         while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7321                 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7322                                  cq_event, struct lpfc_cq_event, list);
7323                 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7324                                        iflags);
7325
7326                 /* Process the asynchronous event */
7327                 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7328                 case LPFC_TRAILER_CODE_LINK:
7329                         lpfc_sli4_async_link_evt(phba,
7330                                                  &cq_event->cqe.acqe_link);
7331                         break;
7332                 case LPFC_TRAILER_CODE_FCOE:
7333                         lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7334                         break;
7335                 case LPFC_TRAILER_CODE_DCBX:
7336                         lpfc_sli4_async_dcbx_evt(phba,
7337                                                  &cq_event->cqe.acqe_dcbx);
7338                         break;
7339                 case LPFC_TRAILER_CODE_GRP5:
7340                         lpfc_sli4_async_grp5_evt(phba,
7341                                                  &cq_event->cqe.acqe_grp5);
7342                         break;
7343                 case LPFC_TRAILER_CODE_FC:
7344                         lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7345                         break;
7346                 case LPFC_TRAILER_CODE_SLI:
7347                         lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7348                         break;
7349                 case LPFC_TRAILER_CODE_CMSTAT:
7350                         lpfc_sli4_async_cmstat_evt(phba);
7351                         break;
7352                 default:
7353                         lpfc_printf_log(phba, KERN_ERR,
7354                                         LOG_TRACE_EVENT,
7355                                         "1804 Invalid asynchronous event code: "
7356                                         "x%x\n", bf_get(lpfc_trailer_code,
7357                                         &cq_event->cqe.mcqe_cmpl));
7358                         break;
7359                 }
7360
7361                 /* Free the completion event processed to the free pool */
7362                 lpfc_sli4_cq_event_release(phba, cq_event);
7363                 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7364         }
7365         spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7366 }
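/*
 * The event loop above follows the common "drain under lock" discipline:
 * remove the list head while holding the spinlock, drop the lock for the
 * potentially long per-event processing, then reacquire it before testing
 * the list again.  A generic sketch with hypothetical demo_* names:
 */
#if 0
	spin_lock_irqsave(&demo_q.lock, iflags);
	while (!list_empty(&demo_q.head)) {
		entry = list_first_entry(&demo_q.head, struct demo_evt, list);
		list_del(&entry->list);
		spin_unlock_irqrestore(&demo_q.lock, iflags);

		demo_process(entry);		/* run without the lock held */

		spin_lock_irqsave(&demo_q.lock, iflags);
	}
	spin_unlock_irqrestore(&demo_q.lock, iflags);
#endif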
7367
7368 /**
7369  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7370  * @phba: pointer to lpfc hba data structure.
7371  *
7372  * This routine is invoked by the worker thread to process FCF table
7373  * rediscovery pending completion event.
7374  **/
7375 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7376 {
7377         int rc;
7378
7379         spin_lock_irq(&phba->hbalock);
7380         /* Clear FCF rediscovery timeout event */
7381         phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7382         /* Clear driver fast failover FCF record flag */
7383         phba->fcf.failover_rec.flag = 0;
7384         /* Set state for FCF fast failover */
7385         phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7386         spin_unlock_irq(&phba->hbalock);
7387
7388         /* Scan FCF table from the first entry to re-discover SAN */
7389         lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7390                         "2777 Start post-quiescent FCF table scan\n");
7391         rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7392         if (rc)
7393                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7394                                 "2747 Issue FCF scan read FCF mailbox "
7395                                 "command failed 0x%x\n", rc);
7396 }
7397
7398 /**
7399  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7400  * @phba: pointer to lpfc hba data structure.
7401  * @dev_grp: The HBA PCI-Device group number.
7402  *
7403  * This routine is invoked to set up the per HBA PCI-Device group function
7404  * API jump table entries.
7405  *
7406  * Return: 0 if success, otherwise -ENODEV
7407  **/
7408 int
7409 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7410 {
7411         int rc;
7412
7413         /* Set up lpfc PCI-device group */
7414         phba->pci_dev_grp = dev_grp;
7415
7416         /* The LPFC_PCI_DEV_OC uses SLI4 */
7417         if (dev_grp == LPFC_PCI_DEV_OC)
7418                 phba->sli_rev = LPFC_SLI_REV4;
7419
7420         /* Set up device INIT API function jump table */
7421         rc = lpfc_init_api_table_setup(phba, dev_grp);
7422         if (rc)
7423                 return -ENODEV;
7424         /* Set up SCSI API function jump table */
7425         rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7426         if (rc)
7427                 return -ENODEV;
7428         /* Set up SLI API function jump table */
7429         rc = lpfc_sli_api_table_setup(phba, dev_grp);
7430         if (rc)
7431                 return -ENODEV;
7432         /* Set up MBOX API function jump table */
7433         rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7434         if (rc)
7435                 return -ENODEV;
7436
7437         return 0;
7438 }
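/*
 * Each *_api_table_setup() call above installs SLI revision specific
 * function pointers into @phba.  A sketch of that shape follows, using a
 * hypothetical demo_op member and handlers; LPFC_PCI_DEV_LP and
 * LPFC_PCI_DEV_OC are the SLI-3 and SLI-4 device groups.
 */
#if 0
static int demo_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:				/* SLI-3 parts */
		phba->lpfc_demo_op = demo_op_s3;
		break;
	case LPFC_PCI_DEV_OC:				/* SLI-4 parts */
		phba->lpfc_demo_op = demo_op_s4;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}
#endif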
7439
7440 /**
7441  * lpfc_log_intr_mode - Log the active interrupt mode
7442  * @phba: pointer to lpfc hba data structure.
7443  * @intr_mode: active interrupt mode adopted.
7444  *
7445  * This routine is invoked to log the interrupt mode currently in use by
7446  * the device.
7447  **/
7448 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7449 {
7450         switch (intr_mode) {
7451         case 0:
7452                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7453                                 "0470 Enable INTx interrupt mode.\n");
7454                 break;
7455         case 1:
7456                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7457                                 "0481 Enabled MSI interrupt mode.\n");
7458                 break;
7459         case 2:
7460                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7461                                 "0480 Enabled MSI-X interrupt mode.\n");
7462                 break;
7463         default:
7464                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7465                                 "0482 Illegal interrupt mode.\n");
7466                 break;
7467         }
7468         return;
7469 }
7470
7471 /**
7472  * lpfc_enable_pci_dev - Enable a generic PCI device.
7473  * @phba: pointer to lpfc hba data structure.
7474  *
7475  * This routine is invoked to enable the PCI device that is common to all
7476  * PCI devices.
7477  *
7478  * Return codes
7479  *      0 - successful
7480  *      other values - error
7481  **/
7482 static int
7483 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7484 {
7485         struct pci_dev *pdev;
7486
7487         /* Obtain PCI device reference */
7488         if (!phba->pcidev)
7489                 goto out_error;
7490         else
7491                 pdev = phba->pcidev;
7492         /* Enable PCI device */
7493         if (pci_enable_device_mem(pdev))
7494                 goto out_error;
7495         /* Request PCI resource for the device */
7496         if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7497                 goto out_disable_device;
7498         /* Set up device as PCI master and save state for EEH */
7499         pci_set_master(pdev);
7500         pci_try_set_mwi(pdev);
7501         pci_save_state(pdev);
7502
7503         /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7504         if (pci_is_pcie(pdev))
7505                 pdev->needs_freset = 1;
7506
7507         return 0;
7508
7509 out_disable_device:
7510         pci_disable_device(pdev);
7511 out_error:
7512         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7513                         "1401 Failed to enable pci device\n");
7514         return -ENODEV;
7515 }
7516
7517 /**
7518  * lpfc_disable_pci_dev - Disable a generic PCI device.
7519  * @phba: pointer to lpfc hba data structure.
7520  *
7521  * This routine is invoked to disable the PCI device that is common to all
7522  * PCI devices.
7523  **/
7524 static void
7525 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7526 {
7527         struct pci_dev *pdev;
7528
7529         /* Obtain PCI device reference */
7530         if (!phba->pcidev)
7531                 return;
7532         else
7533                 pdev = phba->pcidev;
7534         /* Release PCI resource and disable PCI device */
7535         pci_release_mem_regions(pdev);
7536         pci_disable_device(pdev);
7537
7538         return;
7539 }
7540
7541 /**
7542  * lpfc_reset_hba - Reset a hba
7543  * @phba: pointer to lpfc hba data structure.
7544  *
7545  * This routine is invoked to reset a hba device. It brings the HBA
7546  * offline, performs a board restart, and then brings the board back
7547  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
7548  * on outstanding mailbox commands.
7549  **/
7550 void
7551 lpfc_reset_hba(struct lpfc_hba *phba)
7552 {
7553         /* If resets are disabled then set error state and return. */
7554         if (!phba->cfg_enable_hba_reset) {
7555                 phba->link_state = LPFC_HBA_ERROR;
7556                 return;
7557         }
7558
7559         /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7560         if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7561                 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7562         } else {
7563                 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7564                 lpfc_sli_flush_io_rings(phba);
7565         }
7566         lpfc_offline(phba);
7567         lpfc_sli_brdrestart(phba);
7568         lpfc_online(phba);
7569         lpfc_unblock_mgmt_io(phba);
7570 }
7571
7572 /**
7573  * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7574  * @phba: pointer to lpfc hba data structure.
7575  *
7576  * This function reads the PCI SR-IOV extended capability to determine the
7577  * total number of virtual functions (TotalVFs) the physical function
7578  * supports. It does not enable any virtual functions itself.
7579  *
7580  * Returns the TotalVFs count, or 0 if the device has no SR-IOV capability.
7581  **/
7582 uint16_t
7583 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7584 {
7585         struct pci_dev *pdev = phba->pcidev;
7586         uint16_t nr_virtfn;
7587         int pos;
7588
7589         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7590         if (pos == 0)
7591                 return 0;
7592
7593         pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7594         return nr_virtfn;
7595 }
7596
7597 /**
7598  * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7599  * @phba: pointer to lpfc hba data structure.
7600  * @nr_vfn: number of virtual functions to be enabled.
7601  *
7602  * This function enables the PCI SR-IOV virtual functions to a physical
7603  * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7604  * enable the number of virtual functions on the physical function. As
7605  * not all devices support SR-IOV, an error from the pci_enable_sriov()
7606  * API call is not considered an error condition for most devices.
7607  **/
7608 int
7609 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7610 {
7611         struct pci_dev *pdev = phba->pcidev;
7612         uint16_t max_nr_vfn;
7613         int rc;
7614
7615         max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7616         if (nr_vfn > max_nr_vfn) {
7617                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7618                                 "3057 Requested vfs (%d) greater than "
7619                                 "supported vfs (%d)\n", nr_vfn, max_nr_vfn);
7620                 return -EINVAL;
7621         }
7622
7623         rc = pci_enable_sriov(pdev, nr_vfn);
7624         if (rc) {
7625                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7626                                 "2806 Failed to enable SR-IOV on this device "
7627                                 "with vfn number nr_vf:%d, rc:%d\n",
7628                                 nr_vfn, rc);
7629         } else
7630                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7631                                 "2807 Successfully enabled SR-IOV on this "
7632                                 "device with vfn number nr_vf:%d\n", nr_vfn);
7633         return rc;
7634 }
7635
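/*
 * A minimal sketch (hypothetical, not part of the driver) of the
 * probe-time pattern used by the two SR-IOV routines above: query
 * TotalVFs, clamp the request, then enable. The helper name and the
 * requested count are illustrative.
 */
#if 0
static void example_enable_vfs(struct lpfc_hba *phba, int requested)
{
        uint16_t max_vfs = lpfc_sli_sriov_nr_virtfn_get(phba);

        /* 0 means the function has no SR-IOV capability. */
        if (!max_vfs)
                return;
        if (requested > max_vfs)
                requested = max_vfs;

        /* A failure here is logged by the routine but is not fatal. */
        lpfc_sli_probe_sriov_nr_virtfn(phba, requested);
}
#endif
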
7636 static void
7637 lpfc_unblock_requests_work(struct work_struct *work)
7638 {
7639         struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7640                                              unblock_request_work);
7641
7642         lpfc_unblock_requests(phba);
7643 }
7644
7645 /**
7646  * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
7647  * @phba: pointer to lpfc hba data structure.
7648  *
7649  * This routine is invoked to set up the driver internal resources before the
7650  * device specific resource setup to support the HBA device it is attached to.
7651  *
7652  * Return codes
7653  *      0 - successful
7654  *      other values - error
7655  **/
7656 static int
7657 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7658 {
7659         struct lpfc_sli *psli = &phba->sli;
7660
7661         /*
7662          * Driver resources common to all SLI revisions
7663          */
7664         atomic_set(&phba->fast_event_count, 0);
7665         atomic_set(&phba->dbg_log_idx, 0);
7666         atomic_set(&phba->dbg_log_cnt, 0);
7667         atomic_set(&phba->dbg_log_dmping, 0);
7668         spin_lock_init(&phba->hbalock);
7669
7670         /* Initialize port_list spinlock */
7671         spin_lock_init(&phba->port_list_lock);
7672         INIT_LIST_HEAD(&phba->port_list);
7673
7674         INIT_LIST_HEAD(&phba->work_list);
7675
7676         /* Initialize the wait queue head for the kernel thread */
7677         init_waitqueue_head(&phba->work_waitq);
7678
7679         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7680                         "1403 Protocols supported %s %s %s\n",
7681                         ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7682                                 "SCSI" : " "),
7683                         ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7684                                 "NVME" : " "),
7685                         (phba->nvmet_support ? "NVMET" : " "));
7686
7687         /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7688         spin_lock_init(&phba->scsi_buf_list_get_lock);
7689         INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7690         spin_lock_init(&phba->scsi_buf_list_put_lock);
7691         INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7692
7693         /* Initialize the fabric iocb list */
7694         INIT_LIST_HEAD(&phba->fabric_iocb_list);
7695
7696         /* Initialize list to save ELS buffers */
7697         INIT_LIST_HEAD(&phba->elsbuf);
7698
7699         /* Initialize FCF connection rec list */
7700         INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7701
7702         /* Initialize OAS configuration list */
7703         spin_lock_init(&phba->devicelock);
7704         INIT_LIST_HEAD(&phba->luns);
7705
7706         /* MBOX heartbeat timer */
7707         timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7708         /* Fabric block timer */
7709         timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7710         /* EA polling mode timer */
7711         timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7712         /* Heartbeat timer */
7713         timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7714
7715         INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7716
7717         INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7718                           lpfc_idle_stat_delay_work);
7719         INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7720         return 0;
7721 }
7722
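/*
 * A sketch (illustrative only) of the timer-callback shape wired up by
 * the timer_setup() calls in phase1 above: the handler recovers its
 * lpfc_hba from the embedded timer_list via from_timer() and defers the
 * real work to the worker thread, since timers run in softirq context.
 * The function name is hypothetical; lpfc_hb_timeout and friends follow
 * this pattern.
 */
#if 0
static void example_timeout(struct timer_list *t)
{
        /* hb_tmofunc is the timer_list member inside struct lpfc_hba. */
        struct lpfc_hba *phba = from_timer(phba, t, hb_tmofunc);

        /* Only lightweight state changes belong here; wake the worker. */
        lpfc_worker_wake_up(phba);
}
#endif
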
7723 /**
7724  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7725  * @phba: pointer to lpfc hba data structure.
7726  *
7727  * This routine is invoked to set up the driver internal resources specific to
7728  * support the SLI-3 HBA device it is attached to.
7729  *
7730  * Return codes
7731  * 0 - successful
7732  * other values - error
7733  **/
7734 static int
7735 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7736 {
7737         int rc, entry_sz;
7738
7739         /*
7740          * Initialize timers used by driver
7741          */
7742
7743         /* FCP polling mode timer */
7744         timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7745
7746         /* Host attention work mask setup */
7747         phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7748         phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7749
7750         /* Get all the module params for configuring this host */
7751         lpfc_get_cfgparam(phba);
7752         /* Set up phase-1 common device driver resources */
7753
7754         rc = lpfc_setup_driver_resource_phase1(phba);
7755         if (rc)
7756                 return -ENODEV;
7757
7758         if (!phba->sli.sli3_ring)
7759                 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7760                                               sizeof(struct lpfc_sli_ring),
7761                                               GFP_KERNEL);
7762         if (!phba->sli.sli3_ring)
7763                 return -ENOMEM;
7764
7765         /*
7766          * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
7767          * used to create the sg_dma_buf_pool must be dynamically calculated.
7768          */
7769
7770         if (phba->sli_rev == LPFC_SLI_REV4)
7771                 entry_sz = sizeof(struct sli4_sge);
7772         else
7773                 entry_sz = sizeof(struct ulp_bde64);
7774
7775         /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7776         if (phba->cfg_enable_bg) {
7777                 /*
7778                  * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7779                  * the FCP rsp, and a BDE for each. Since we have no control
7780                  * over how many protection data segments the SCSI layer
7781                  * will hand us (i.e., there could be one for every block
7782                  * in the IO), we just allocate enough BDEs to accommodate
7783                  * our max amount and we need to limit lpfc_sg_seg_cnt to
7784                  * minimize the risk of running out.
7785                  */
7786                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7787                         sizeof(struct fcp_rsp) +
7788                         (LPFC_MAX_SG_SEG_CNT * entry_sz);
7789
7790                 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7791                         phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7792
7793                 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7794                 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7795         } else {
7796                 /*
7797                  * The scsi_buf for a regular I/O will hold the FCP cmnd,
7798                  * the FCP rsp, a BDE for each, and a BDE for up to
7799                  * cfg_sg_seg_cnt data segments.
7800                  */
7801                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7802                         sizeof(struct fcp_rsp) +
7803                         ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7804
7805                 /* Total BDEs in BPL for scsi_sg_list */
7806                 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7807         }
7808
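        /*
         * A worked example of the sizing above, using assumed structure
         * sizes for illustration only (sizeof(struct fcp_cmnd) = 32,
         * sizeof(struct fcp_rsp) = 96, sizeof(struct ulp_bde64) = 12):
         * with cfg_sg_seg_cnt = 64 on a non-DIF port,
         *
         *      cfg_sg_dma_buf_size = 32 + 96 + (64 + 2) * 12 = 920 bytes
         *      cfg_total_seg_cnt   = 64 + 2 = 66
         *
         * i.e. the two reserved BDEs (FCP cmnd and FCP rsp) are always
         * counted on top of the data segments.
         */
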
7809         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7810                         "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7811                         phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7812                         phba->cfg_total_seg_cnt);
7813
7814         phba->max_vpi = LPFC_MAX_VPI;
7815         /* This will be set to correct value after config_port mbox */
7816         phba->max_vports = 0;
7817
7818         /*
7819          * Initialize the SLI Layer to run with lpfc HBAs.
7820          */
7821         lpfc_sli_setup(phba);
7822         lpfc_sli_queue_init(phba);
7823
7824         /* Allocate device driver memory */
7825         if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7826                 return -ENOMEM;
7827
7828         phba->lpfc_sg_dma_buf_pool =
7829                 dma_pool_create("lpfc_sg_dma_buf_pool",
7830                                 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7831                                 BPL_ALIGN_SZ, 0);
7832
7833         if (!phba->lpfc_sg_dma_buf_pool)
7834                 goto fail_free_mem;
7835
7836         phba->lpfc_cmd_rsp_buf_pool =
7837                         dma_pool_create("lpfc_cmd_rsp_buf_pool",
7838                                         &phba->pcidev->dev,
7839                                         sizeof(struct fcp_cmnd) +
7840                                         sizeof(struct fcp_rsp),
7841                                         BPL_ALIGN_SZ, 0);
7842
7843         if (!phba->lpfc_cmd_rsp_buf_pool)
7844                 goto fail_free_dma_buf_pool;
7845
7846         /*
7847          * Enable sr-iov virtual functions if supported and configured
7848          * through the module parameter.
7849          */
7850         if (phba->cfg_sriov_nr_virtfn > 0) {
7851                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7852                                                  phba->cfg_sriov_nr_virtfn);
7853                 if (rc) {
7854                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7855                                         "2808 Requested number of SR-IOV "
7856                                         "virtual functions (%d) is not "
7857                                         "supported\n",
7858                                         phba->cfg_sriov_nr_virtfn);
7859                         phba->cfg_sriov_nr_virtfn = 0;
7860                 }
7861         }
7862
7863         return 0;
7864
7865 fail_free_dma_buf_pool:
7866         dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7867         phba->lpfc_sg_dma_buf_pool = NULL;
7868 fail_free_mem:
7869         lpfc_mem_free(phba);
7870         return -ENOMEM;
7871 }
7872
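/*
 * The dma_pool_create() calls above are paired at I/O time with
 * dma_pool_alloc()/dma_pool_free(). A minimal sketch of that pairing
 * (hypothetical helper; mirrors how the SCSI buffer setup consumes the
 * pool, with lpfc_io_buf field names as used elsewhere in the driver):
 */
#if 0
static int example_alloc_io_buf(struct lpfc_hba *phba,
                                struct lpfc_io_buf *buf)
{
        /* One fixed-size buffer from the pool; the pool returns both the
         * kernel virtual address and the matching bus address.
         */
        buf->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL,
                                   &buf->dma_handle);
        if (!buf->data)
                return -ENOMEM;
        return 0;
}
#endif
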
7873 /**
7874  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7875  * @phba: pointer to lpfc hba data structure.
7876  *
7877  * This routine is invoked to unset the driver internal resources set up
7878  * specific for supporting the SLI-3 HBA device it is attached to.
7879  **/
7880 static void
7881 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7882 {
7883         /* Free device driver memory allocated */
7884         lpfc_mem_free_all(phba);
7885
7886         return;
7887 }
7888
7889 /**
7890  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7891  * @phba: pointer to lpfc hba data structure.
7892  *
7893  * This routine is invoked to set up the driver internal resources specific to
7894  * support the SLI-4 HBA device it is attached to.
7895  *
7896  * Return codes
7897  *      0 - successful
7898  *      other values - error
7899  **/
7900 static int
7901 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7902 {
7903         LPFC_MBOXQ_t *mboxq;
7904         MAILBOX_t *mb;
7905         int rc, i, max_buf_size;
7906         int longs;
7907         int extra;
7908         uint64_t wwn;
7909         u32 if_type;
7910         u32 if_fam;
7911
7912         phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7913         phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7914         phba->sli4_hba.curr_disp_cpu = 0;
7915
7916         /* Get all the module params for configuring this host */
7917         lpfc_get_cfgparam(phba);
7918
7919         /* Set up phase-1 common device driver resources */
7920         rc = lpfc_setup_driver_resource_phase1(phba);
7921         if (rc)
7922                 return -ENODEV;
7923
7924         /* Before proceed, wait for POST done and device ready */
7925         rc = lpfc_sli4_post_status_check(phba);
7926         if (rc)
7927                 return -ENODEV;
7928
7929         /* Allocate all driver workqueues here */
7930
7931         /* The lpfc_wq workqueue for deferred irq use */
7932         phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7933         if (!phba->wq)
7934                 return -ENOMEM;
7935
7936         /*
7937          * Initialize timers used by driver
7938          */
7939
7940         timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7941
7942         /* FCF rediscover timer */
7943         timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7944
7945         /* CMF congestion timer */
7946         hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7947         phba->cmf_timer.function = lpfc_cmf_timer;
7948         /* CMF 1 minute stats collection timer */
7949         hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7950         phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
7951
7952         /*
7953          * Control structure for handling external multi-buffer mailbox
7954          * command pass-through.
7955          */
7956         memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7957                 sizeof(struct lpfc_mbox_ext_buf_ctx));
7958         INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7959
7960         phba->max_vpi = LPFC_MAX_VPI;
7961
7962         /* This will be set to correct value after the read_config mbox */
7963         phba->max_vports = 0;
7964
7965         /* Program the default value of vlan_id and fc_map */
7966         phba->valid_vlan = 0;
7967         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7968         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7969         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7970
7971         /*
7972          * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7973          * we will associate a new ring, for each EQ/CQ/WQ tuple.
7974          * The WQ create will allocate the ring.
7975          */
7976
7977         /* Initialize buffer queue management fields */
7978         INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7979         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7980         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7981
7982         /* for VMID idle timeout if VMID is enabled */
7983         if (lpfc_is_vmid_enabled(phba))
7984                 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
7985
7986         /*
7987          * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
7988          */
7989         /* Initialize the Abort buffer list used by driver */
7990         spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
7991         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
7992
7993         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7994                 /* Initialize the Abort nvme buffer list used by driver */
7995                 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
7996                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7997                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
7998                 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
7999                 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8000         }
8001
8002         /* This abort list used by worker thread */
8003         spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8004         spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8005         spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8006         spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
8007
8008         /*
8009          * Initialize driver internal slow-path work queues
8010          */
8011
8012         /* Driver internal slow-path CQ Event pool */
8013         INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
8014         /* Response IOCB work queue list */
8015         INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
8016         /* Asynchronous event CQ Event work queue list */
8017         INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
8018         /* Slow-path XRI aborted CQ Event work queue list */
8019         INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
8020         /* Receive queue CQ Event work queue list */
8021         INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
8022
8023         /* Initialize extent block lists. */
8024         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8025         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8026         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8027         INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
8028
8029         /* Initialize mboxq lists. Even if the early init routines fail,
8030          * the cleanup paths expect these lists to be initialized.
8031          */
8032         INIT_LIST_HEAD(&phba->sli.mboxq);
8033         INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
8034
8035         /* initialize optic_state to 0xFF */
8036         phba->sli4_hba.lnk_info.optic_state = 0xff;
8037
8038         /* Allocate device driver memory */
8039         rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8040         if (rc)
8041                 goto out_destroy_workqueue;
8042
8043         /* IF Type 2 ports get initialized now. */
8044         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8045             LPFC_SLI_INTF_IF_TYPE_2) {
8046                 rc = lpfc_pci_function_reset(phba);
8047                 if (unlikely(rc)) {
8048                         rc = -ENODEV;
8049                         goto out_free_mem;
8050                 }
8051                 phba->temp_sensor_support = 1;
8052         }
8053
8054         /* Create the bootstrap mailbox command */
8055         rc = lpfc_create_bootstrap_mbox(phba);
8056         if (unlikely(rc))
8057                 goto out_free_mem;
8058
8059         /* Set up the host's endian order with the device. */
8060         rc = lpfc_setup_endian_order(phba);
8061         if (unlikely(rc))
8062                 goto out_free_bsmbx;
8063
8064         /* Set up the hba's configuration parameters. */
8065         rc = lpfc_sli4_read_config(phba);
8066         if (unlikely(rc))
8067                 goto out_free_bsmbx;
8068
8069         if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
8070                 /* The link is down at this point. If FA-PWWN is configured,
8071                  * the firmware will attempt FLOGI before the driver sees a
8072                  * link up. The driver is only notified on failure, via a
8073                  * MISCONFIGURED async event that clears this flag; success
8074                  * produces no notification at all. Therefore assume success
8075                  * here.
8076                  */
8077                 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8078         }
8079
8080         rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8081         if (unlikely(rc))
8082                 goto out_free_bsmbx;
8083
8084         /* IF Type 0 ports get initialized now. */
8085         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8086             LPFC_SLI_INTF_IF_TYPE_0) {
8087                 rc = lpfc_pci_function_reset(phba);
8088                 if (unlikely(rc))
8089                         goto out_free_bsmbx;
8090         }
8091
8092         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8093                                                        GFP_KERNEL);
8094         if (!mboxq) {
8095                 rc = -ENOMEM;
8096                 goto out_free_bsmbx;
8097         }
8098
8099         /* Check for NVMET being configured */
8100         phba->nvmet_support = 0;
8101         if (lpfc_enable_nvmet_cnt) {
8102
8103                 /* First get WWN of HBA instance */
8104                 lpfc_read_nv(phba, mboxq);
8105                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8106                 if (rc != MBX_SUCCESS) {
8107                         lpfc_printf_log(phba, KERN_ERR,
8108                                         LOG_TRACE_EVENT,
8109                                         "6016 Mailbox failed, mbxCmd x%x "
8110                                         "READ_NV, mbxStatus x%x\n",
8111                                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8112                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8113                         mempool_free(mboxq, phba->mbox_mem_pool);
8114                         rc = -EIO;
8115                         goto out_free_bsmbx;
8116                 }
8117                 mb = &mboxq->u.mb;
8118                 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8119                        sizeof(uint64_t));
8120                 wwn = cpu_to_be64(wwn);
8121                 phba->sli4_hba.wwnn.u.name = wwn;
8122                 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8123                        sizeof(uint64_t));
8124                 /* wwn is WWPN of HBA instance */
8125                 wwn = cpu_to_be64(wwn);
8126                 phba->sli4_hba.wwpn.u.name = wwn;
8127
8128                 /* Check to see if it matches any module parameter */
8129                 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
8130                         if (wwn == lpfc_enable_nvmet[i]) {
8131 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8132                                 if (lpfc_nvmet_mem_alloc(phba))
8133                                         break;
8134
8135                                 phba->nvmet_support = 1; /* a match */
8136
8137                                 lpfc_printf_log(phba, KERN_ERR,
8138                                                 LOG_TRACE_EVENT,
8139                                                 "6017 NVME Target %016llx\n",
8140                                                 wwn);
8141 #else
8142                                 lpfc_printf_log(phba, KERN_ERR,
8143                                                 LOG_TRACE_EVENT,
8144                                                 "6021 Can't enable NVME Target."
8145                                                 " NVME_TARGET_FC infrastructure"
8146                                                 " is not in kernel\n");
8147 #endif
8148                                 /* Not supported for NVMET */
8149                                 phba->cfg_xri_rebalancing = 0;
8150                                 if (phba->irq_chann_mode == NHT_MODE) {
8151                                         phba->cfg_irq_chann =
8152                                                 phba->sli4_hba.num_present_cpu;
8153                                         phba->cfg_hdw_queue =
8154                                                 phba->sli4_hba.num_present_cpu;
8155                                         phba->irq_chann_mode = NORMAL_MODE;
8156                                 }
8157                                 break;
8158                         }
8159                 }
8160         }
8161
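        /*
         * On the WWN handling above: the 8 name bytes arrive from the
         * mailbox in big-endian wire order, so after the raw memcpy() the
         * cpu_to_be64() swab (a no-op on big-endian hosts) yields a u64
         * whose numeric value matches the lpfc_enable_nvmet
         * module-parameter encoding. Standalone form of the conversion
         * (buffer name is hypothetical):
         *
         *      uint64_t wwn;
         *
         *      memcpy(&wwn, raw_name_bytes, sizeof(wwn));
         *      wwn = cpu_to_be64(wwn);
         */
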
8162         lpfc_nvme_mod_param_dep(phba);
8163
8164         /*
8165          * Get sli4 parameters that override parameters from Port capabilities.
8166          * If this call fails, it isn't critical unless the SLI4 parameters come
8167          * back in conflict.
8168          */
8169         rc = lpfc_get_sli4_parameters(phba, mboxq);
8170         if (rc) {
8171                 if_type = bf_get(lpfc_sli_intf_if_type,
8172                                  &phba->sli4_hba.sli_intf);
8173                 if_fam = bf_get(lpfc_sli_intf_sli_family,
8174                                 &phba->sli4_hba.sli_intf);
8175                 if (phba->sli4_hba.extents_in_use &&
8176                     phba->sli4_hba.rpi_hdrs_in_use) {
8177                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8178                                         "2999 Unsupported SLI4 Parameters "
8179                                         "Extents and RPI headers enabled.\n");
8180                         if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8181                             if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
8182                                 mempool_free(mboxq, phba->mbox_mem_pool);
8183                                 rc = -EIO;
8184                                 goto out_free_bsmbx;
8185                         }
8186                 }
8187                 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8188                       if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8189                         mempool_free(mboxq, phba->mbox_mem_pool);
8190                         rc = -EIO;
8191                         goto out_free_bsmbx;
8192                 }
8193         }
8194
8195         /*
8196          * 1 for cmd, 1 for rsp, NVME adds an extra one
8197          * for boundary conditions in its max_sgl_segment template.
8198          */
8199         extra = 2;
8200         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8201                 extra++;
8202
8203         /*
8204          * It doesn't matter what family our adapter is in, we are
8205          * limited to 2 Pages, 512 SGEs, for our SGL.
8206          * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8207          */
8208         max_buf_size = (2 * SLI4_PAGE_SIZE);
8209
8210         /*
8211          * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8212          * used to create the sg_dma_buf_pool must be calculated.
8213          */
8214         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8215                 /* Both cfg_enable_bg and cfg_external_dif code paths */
8216
8217                 /*
8218                  * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8219                  * the FCP rsp, and a SGE. Since we have no control
8220                  * over how many protection segments the SCSI layer
8221                  * will hand us (i.e., there could be one for every block
8222                  * in the IO), just allocate enough SGEs to accommodate
8223                  * our max amount and we need to limit lpfc_sg_seg_cnt
8224                  * to minimize the risk of running out.
8225                  */
8226                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8227                                 sizeof(struct fcp_rsp) + max_buf_size;
8228
8229                 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8230                 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8231
8232                 /*
8233                  * If supporting DIF, reduce the seg count for scsi to
8234                  * allow room for the DIF sges.
8235                  */
8236                 if (phba->cfg_enable_bg &&
8237                     phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8238                         phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8239                 else
8240                         phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8241
8242         } else {
8243                 /*
8244                  * The scsi_buf for a regular I/O holds the FCP cmnd,
8245                  * the FCP rsp, a SGE for each, and a SGE for up to
8246                  * cfg_sg_seg_cnt data segments.
8247                  */
8248                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8249                                 sizeof(struct fcp_rsp) +
8250                                 ((phba->cfg_sg_seg_cnt + extra) *
8251                                 sizeof(struct sli4_sge));
8252
8253                 /* Total SGEs for scsi_sg_list */
8254                 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8255                 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8256
8257                 /*
8258                  * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8259                  * need to post 1 page for the SGL.
8260                  */
8261         }
8262
8263         if (phba->cfg_xpsgl && !phba->nvmet_support)
8264                 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8265         else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
8266                 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8267         else
8268                 phba->cfg_sg_dma_buf_size =
8269                                 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8270
8271         phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8272                                sizeof(struct sli4_sge);
8273
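        /*
         * A worked example of the rounding above (illustrative, assuming
         * sizeof(struct sli4_sge) == 16 and SLI4_PAGE_SIZE == 4096): a
         * computed buffer size of, say, 4200 bytes is rounded up by
         * SLI4_PAGE_ALIGN() to 8192, and border_sge_num then becomes
         * 8192 / 16 = 512 SGEs per buffer.
         */
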
8274         /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8275         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8276                 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8277                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8278                                         "6300 Reducing NVME sg segment "
8279                                         "cnt to %d\n",
8280                                         LPFC_MAX_NVME_SEG_CNT);
8281                         phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8282                 } else
8283                         phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8284         }
8285
8286         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8287                         "9087 sg_seg_cnt:%d dmabuf_size:%d "
8288                         "total:%d scsi:%d nvme:%d\n",
8289                         phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8290                         phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
8291                         phba->cfg_nvme_seg_cnt);
8292
8293         if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8294                 i = phba->cfg_sg_dma_buf_size;
8295         else
8296                 i = SLI4_PAGE_SIZE;
8297
8298         phba->lpfc_sg_dma_buf_pool =
8299                         dma_pool_create("lpfc_sg_dma_buf_pool",
8300                                         &phba->pcidev->dev,
8301                                         phba->cfg_sg_dma_buf_size,
8302                                         i, 0);
8303         if (!phba->lpfc_sg_dma_buf_pool) {
8304                 rc = -ENOMEM;
8305                 goto out_free_bsmbx;
8306         }
8307
8308         phba->lpfc_cmd_rsp_buf_pool =
8309                         dma_pool_create("lpfc_cmd_rsp_buf_pool",
8310                                         &phba->pcidev->dev,
8311                                         sizeof(struct fcp_cmnd) +
8312                                         sizeof(struct fcp_rsp),
8313                                         i, 0);
8314         if (!phba->lpfc_cmd_rsp_buf_pool) {
8315                 rc = -ENOMEM;
8316                 goto out_free_sg_dma_buf;
8317         }
8318
8319         mempool_free(mboxq, phba->mbox_mem_pool);
8320
8321         /* Verify OAS is supported */
8322         lpfc_sli4_oas_verify(phba);
8323
8324         /* Verify RAS support on adapter */
8325         lpfc_sli4_ras_init(phba);
8326
8327         /* Verify all the SLI4 queues */
8328         rc = lpfc_sli4_queue_verify(phba);
8329         if (rc)
8330                 goto out_free_cmd_rsp_buf;
8331
8332         /* Create driver internal CQE event pool */
8333         rc = lpfc_sli4_cq_event_pool_create(phba);
8334         if (rc)
8335                 goto out_free_cmd_rsp_buf;
8336
8337         /* Initialize sgl lists per host */
8338         lpfc_init_sgl_list(phba);
8339
8340         /* Allocate and initialize active sgl array */
8341         rc = lpfc_init_active_sgl_array(phba);
8342         if (rc) {
8343                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8344                                 "1430 Failed to initialize sgl list.\n");
8345                 goto out_destroy_cq_event_pool;
8346         }
8347         rc = lpfc_sli4_init_rpi_hdrs(phba);
8348         if (rc) {
8349                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8350                                 "1432 Failed to initialize rpi headers.\n");
8351                 goto out_free_active_sgl;
8352         }
8353
8354         /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8355         longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
8356         phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8357                                          GFP_KERNEL);
8358         if (!phba->fcf.fcf_rr_bmask) {
8359                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8360                                 "2759 Failed to allocate memory for FCF "
8361                                 "round robin failover bmask\n");
8362                 rc = -ENOMEM;
8363                 goto out_remove_rpi_hdrs;
8364         }
8365
8366         phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8367                                             sizeof(struct lpfc_hba_eq_hdl),
8368                                             GFP_KERNEL);
8369         if (!phba->sli4_hba.hba_eq_hdl) {
8370                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8371                                 "2572 Failed to allocate memory for "
8372                                 "fast-path per-EQ handle array\n");
8373                 rc = -ENOMEM;
8374                 goto out_free_fcf_rr_bmask;
8375         }
8376
8377         phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8378                                         sizeof(struct lpfc_vector_map_info),
8379                                         GFP_KERNEL);
8380         if (!phba->sli4_hba.cpu_map) {
8381                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8382                                 "3327 Failed to allocate memory for msi-x "
8383                                 "interrupt vector mapping\n");
8384                 rc = -ENOMEM;
8385                 goto out_free_hba_eq_hdl;
8386         }
8387
8388         phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8389         if (!phba->sli4_hba.eq_info) {
8390                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8391                                 "3321 Failed allocation for per_cpu stats\n");
8392                 rc = -ENOMEM;
8393                 goto out_free_hba_cpu_map;
8394         }
8395
8396         phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8397                                            sizeof(*phba->sli4_hba.idle_stat),
8398                                            GFP_KERNEL);
8399         if (!phba->sli4_hba.idle_stat) {
8400                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8401                                 "3390 Failed allocation for idle_stat\n");
8402                 rc = -ENOMEM;
8403                 goto out_free_hba_eq_info;
8404         }
8405
8406 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8407         phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8408         if (!phba->sli4_hba.c_stat) {
8409                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8410                                 "3332 Failed allocating per cpu hdwq stats\n");
8411                 rc = -ENOMEM;
8412                 goto out_free_hba_idle_stat;
8413         }
8414 #endif
8415
8416         phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8417         if (!phba->cmf_stat) {
8418                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8419                                 "3331 Failed allocating per cpu cgn stats\n");
8420                 rc = -ENOMEM;
8421                 goto out_free_hba_hdwq_info;
8422         }
8423
8424         /*
8425          * Enable sr-iov virtual functions if supported and configured
8426          * through the module parameter.
8427          */
8428         if (phba->cfg_sriov_nr_virtfn > 0) {
8429                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8430                                                  phba->cfg_sriov_nr_virtfn);
8431                 if (rc) {
8432                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8433                                         "3020 Requested number of SR-IOV "
8434                                         "virtual functions (%d) is not "
8435                                         "supported\n",
8436                                         phba->cfg_sriov_nr_virtfn);
8437                         phba->cfg_sriov_nr_virtfn = 0;
8438                 }
8439         }
8440
8441         return 0;
8442
8443 out_free_hba_hdwq_info:
8444 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8445         free_percpu(phba->sli4_hba.c_stat);
8446 out_free_hba_idle_stat:
8447 #endif
8448         kfree(phba->sli4_hba.idle_stat);
8449 out_free_hba_eq_info:
8450         free_percpu(phba->sli4_hba.eq_info);
8451 out_free_hba_cpu_map:
8452         kfree(phba->sli4_hba.cpu_map);
8453 out_free_hba_eq_hdl:
8454         kfree(phba->sli4_hba.hba_eq_hdl);
8455 out_free_fcf_rr_bmask:
8456         kfree(phba->fcf.fcf_rr_bmask);
8457 out_remove_rpi_hdrs:
8458         lpfc_sli4_remove_rpi_hdrs(phba);
8459 out_free_active_sgl:
8460         lpfc_free_active_sgl(phba);
8461 out_destroy_cq_event_pool:
8462         lpfc_sli4_cq_event_pool_destroy(phba);
8463 out_free_cmd_rsp_buf:
8464         dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8465         phba->lpfc_cmd_rsp_buf_pool = NULL;
8466 out_free_sg_dma_buf:
8467         dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8468         phba->lpfc_sg_dma_buf_pool = NULL;
8469 out_free_bsmbx:
8470         lpfc_destroy_bootstrap_mbox(phba);
8471 out_free_mem:
8472         lpfc_mem_free(phba);
8473 out_destroy_workqueue:
8474         destroy_workqueue(phba->wq);
8475         phba->wq = NULL;
8476         return rc;
8477 }
8478
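/*
 * The alloc_percpu() allocations above (eq_info, c_stat, cmf_stat)
 * follow the usual per-CPU counter pattern. A minimal sketch of how a
 * hot path bumps one of these counters (the helper is hypothetical; the
 * eq_info/icnt fields are the ones allocated above):
 */
#if 0
static void example_count_eq_intr(struct lpfc_hba *phba)
{
        struct lpfc_eq_intr_info *eqi;

        /* Touch only this CPU's private copy; no lock is needed because
         * the caller runs in hard-IRQ context on this CPU.
         */
        eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
        eqi->icnt++;
}
#endif
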
8479 /**
8480  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8481  * @phba: pointer to lpfc hba data structure.
8482  *
8483  * This routine is invoked to unset the driver internal resources set up
8484  * specific for supporting the SLI-4 HBA device it is attached to.
8485  **/
8486 static void
8487 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8488 {
8489         struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8490
8491         free_percpu(phba->sli4_hba.eq_info);
8492 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8493         free_percpu(phba->sli4_hba.c_stat);
8494 #endif
8495         free_percpu(phba->cmf_stat);
8496         kfree(phba->sli4_hba.idle_stat);
8497
8498         /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8499         kfree(phba->sli4_hba.cpu_map);
8500         phba->sli4_hba.num_possible_cpu = 0;
8501         phba->sli4_hba.num_present_cpu = 0;
8502         phba->sli4_hba.curr_disp_cpu = 0;
8503         cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8504
8505         /* Free memory allocated for fast-path work queue handles */
8506         kfree(phba->sli4_hba.hba_eq_hdl);
8507
8508         /* Free the allocated rpi headers. */
8509         lpfc_sli4_remove_rpi_hdrs(phba);
8510         lpfc_sli4_remove_rpis(phba);
8511
8512         /* Free eligible FCF index bmask */
8513         kfree(phba->fcf.fcf_rr_bmask);
8514
8515         /* Free the ELS sgl list */
8516         lpfc_free_active_sgl(phba);
8517         lpfc_free_els_sgl_list(phba);
8518         lpfc_free_nvmet_sgl_list(phba);
8519
8520         /* Free the completion queue EQ event pool */
8521         lpfc_sli4_cq_event_release_all(phba);
8522         lpfc_sli4_cq_event_pool_destroy(phba);
8523
8524         /* Release resource identifiers. */
8525         lpfc_sli4_dealloc_resource_identifiers(phba);
8526
8527         /* Free the bsmbx region. */
8528         lpfc_destroy_bootstrap_mbox(phba);
8529
8530         /* Free the SLI Layer memory with SLI4 HBAs */
8531         lpfc_mem_free_all(phba);
8532
8533         /* Free the current connect table */
8534         list_for_each_entry_safe(conn_entry, next_conn_entry,
8535                 &phba->fcf_conn_rec_list, list) {
8536                 list_del_init(&conn_entry->list);
8537                 kfree(conn_entry);
8538         }
8539
8540         return;
8541 }
8542
8543 /**
8544  * lpfc_init_api_table_setup - Set up init api function jump table
8545  * @phba: The hba struct for which this call is being executed.
8546  * @dev_grp: The HBA PCI-Device group number.
8547  *
8548  * This routine sets up the device INIT interface API function jump table
8549  * in @phba struct.
8550  *
8551  * Returns: 0 - success, -ENODEV - failure.
8552  **/
8553 int
8554 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8555 {
8556         phba->lpfc_hba_init_link = lpfc_hba_init_link;
8557         phba->lpfc_hba_down_link = lpfc_hba_down_link;
8558         phba->lpfc_selective_reset = lpfc_selective_reset;
8559         switch (dev_grp) {
8560         case LPFC_PCI_DEV_LP:
8561                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8562                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8563                 phba->lpfc_stop_port = lpfc_stop_port_s3;
8564                 break;
8565         case LPFC_PCI_DEV_OC:
8566                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8567                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8568                 phba->lpfc_stop_port = lpfc_stop_port_s4;
8569                 break;
8570         default:
8571                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8572                                 "1431 Invalid HBA PCI-device group: 0x%x\n",
8573                                 dev_grp);
8574                 return -ENODEV;
8575         }
8576         return 0;
8577 }
8578
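/*
 * A minimal sketch (hypothetical call sites) of how the jump table set
 * up above is consumed: callers go through the function pointers, so
 * SLI-3 vs SLI-4 dispatch stays transparent to them.
 *
 *      phba->lpfc_stop_port(phba);     (lpfc_stop_port_s3 or _s4)
 *      phba->lpfc_handle_eratt(phba);  (per-family error attention)
 */
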
8579 /**
8580  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8581  * @phba: pointer to lpfc hba data structure.
8582  *
8583  * This routine is invoked to set up the driver internal resources after the
8584  * device specific resource setup to support the HBA device it attached to.
8585  *
8586  * Return codes
8587  *      0 - successful
8588  *      other values - error
8589  **/
8590 static int
8591 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8592 {
8593         int error;
8594
8595         /* Startup the kernel thread for this host adapter. */
8596         phba->worker_thread = kthread_run(lpfc_do_work, phba,
8597                                           "lpfc_worker_%d", phba->brd_no);
8598         if (IS_ERR(phba->worker_thread)) {
8599                 error = PTR_ERR(phba->worker_thread);
8600                 return error;
8601         }
8602
8603         return 0;
8604 }
8605
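/*
 * A sketch of the worker-thread shape started above (illustrative; the
 * real loop is lpfc_do_work()). Such a kthread typically sleeps on
 * phba->work_waitq and exits once kthread_stop() is called from
 * lpfc_unset_driver_resource_phase2() below.
 */
#if 0
static int example_do_work(void *p)
{
        struct lpfc_hba *phba = p;

        while (!kthread_should_stop()) {
                /* Sleep until work is posted or we are asked to stop. */
                wait_event_interruptible(phba->work_waitq,
                                         phba->work_ha ||
                                         kthread_should_stop());
                /* ... drain phba->work_list here ... */
        }
        return 0;
}
#endif
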
8606 /**
8607  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8608  * @phba: pointer to lpfc hba data structure.
8609  *
8610  * This routine is invoked to unset the driver internal resources set up after
8611  * the device specific resource setup for supporting the HBA device it
8612  * attached to.
8613  **/
8614 static void
8615 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8616 {
8617         if (phba->wq) {
8618                 destroy_workqueue(phba->wq);
8619                 phba->wq = NULL;
8620         }
8621
8622         /* Stop kernel worker thread */
8623         if (phba->worker_thread)
8624                 kthread_stop(phba->worker_thread);
8625 }
8626
8627 /**
8628  * lpfc_free_iocb_list - Free iocb list.
8629  * @phba: pointer to lpfc hba data structure.
8630  *
8631  * This routine is invoked to free the driver's IOCB list and memory.
8632  **/
8633 void
8634 lpfc_free_iocb_list(struct lpfc_hba *phba)
8635 {
8636         struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8637
8638         spin_lock_irq(&phba->hbalock);
8639         list_for_each_entry_safe(iocbq_entry, iocbq_next,
8640                                  &phba->lpfc_iocb_list, list) {
8641                 list_del(&iocbq_entry->list);
8642                 kfree(iocbq_entry);
8643                 phba->total_iocbq_bufs--;
8644         }
8645         spin_unlock_irq(&phba->hbalock);
8646
8647         return;
8648 }
8649
8650 /**
8651  * lpfc_init_iocb_list - Allocate and initialize iocb list.
8652  * @phba: pointer to lpfc hba data structure.
8653  * @iocb_count: number of requested iocbs
8654  *
8655  * This routine is invoked to allocate and initialize the driver's IOCB
8656  * list and set up the IOCB tag array accordingly.
8657  *
8658  * Return codes
8659  *      0 - successful
8660  *      other values - error
8661  **/
8662 int
8663 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8664 {
8665         struct lpfc_iocbq *iocbq_entry = NULL;
8666         uint16_t iotag;
8667         int i;
8668
8669         /* Initialize and populate the iocb list per host.  */
8670         INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8671         for (i = 0; i < iocb_count; i++) {
8672                 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8673                 if (iocbq_entry == NULL) {
8674                         printk(KERN_ERR "%s: only allocated %d iocbs of "
8675                                 "expected %d count. Unloading driver.\n",
8676                                 __func__, i, iocb_count);
8677                         goto out_free_iocbq;
8678                 }
8679
8680                 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8681                 if (iotag == 0) {
8682                         kfree(iocbq_entry);
8683                         printk(KERN_ERR "%s: failed to allocate IOTAG. "
8684                                 "Unloading driver.\n", __func__);
8685                         goto out_free_iocbq;
8686                 }
8687                 iocbq_entry->sli4_lxritag = NO_XRI;
8688                 iocbq_entry->sli4_xritag = NO_XRI;
8689
8690                 spin_lock_irq(&phba->hbalock);
8691                 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8692                 phba->total_iocbq_bufs++;
8693                 spin_unlock_irq(&phba->hbalock);
8694         }
8695
8696         return 0;
8697
8698 out_free_iocbq:
8699         lpfc_free_iocb_list(phba);
8700
8701         return -ENOMEM;
8702 }
8703
8704 /**
8705  * lpfc_free_sgl_list - Free a given sgl list.
8706  * @phba: pointer to lpfc hba data structure.
8707  * @sglq_list: pointer to the head of sgl list.
8708  *
8709  * This routine is invoked to free a given sgl list and its memory.
8710  **/
8711 void
8712 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8713 {
8714         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8715
8716         list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8717                 list_del(&sglq_entry->list);
8718                 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8719                 kfree(sglq_entry);
8720         }
8721 }
8722
8723 /**
8724  * lpfc_free_els_sgl_list - Free els sgl list.
8725  * @phba: pointer to lpfc hba data structure.
8726  *
8727  * This routine is invoked to free the driver's els sgl list and memory.
8728  **/
8729 static void
8730 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8731 {
8732         LIST_HEAD(sglq_list);
8733
8734         /* Retrieve all els sgls from driver list */
8735         spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8736         list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8737         spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8738
8739         /* Now free the sgl list */
8740         lpfc_free_sgl_list(phba, &sglq_list);
8741 }
8742
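/*
 * The splice-then-free sequence above is the standard teardown for a
 * lock-protected list: list_splice_init() moves every entry onto a
 * private list while the lock is held, and the per-entry frees then run
 * with the lock dropped. Generic form (names are placeholders):
 *
 *      LIST_HEAD(tmp);
 *
 *      spin_lock_irq(&lock);
 *      list_splice_init(&shared_list, &tmp);
 *      spin_unlock_irq(&lock);
 *      (free the entries on tmp without holding the lock)
 */
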
8743 /**
8744  * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8745  * @phba: pointer to lpfc hba data structure.
8746  *
8747  * This routine is invoked to free the driver's nvmet sgl list and memory.
8748  **/
8749 static void
8750 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8751 {
8752         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8753         LIST_HEAD(sglq_list);
8754
8755         /* Retrieve all nvmet sgls from driver list */
8756         spin_lock_irq(&phba->hbalock);
8757         spin_lock(&phba->sli4_hba.sgl_list_lock);
8758         list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8759         spin_unlock(&phba->sli4_hba.sgl_list_lock);
8760         spin_unlock_irq(&phba->hbalock);
8761
8762         /* Now free the sgl list */
8763         list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8764                 list_del(&sglq_entry->list);
8765                 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8766                 kfree(sglq_entry);
8767         }
8768
8769         /* Update the nvmet_xri_cnt to reflect no current sgls.
8770          * The next initialization cycle sets the count and allocates
8771          * the sgls over again.
8772          */
8773         phba->sli4_hba.nvmet_xri_cnt = 0;
8774 }
8775
8776 /**
8777  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8778  * @phba: pointer to lpfc hba data structure.
8779  *
8780  * This routine is invoked to allocate the driver's active sgl memory.
8781  * This array will hold the sglq_entry's for active IOs.
8782  **/
8783 static int
8784 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8785 {
8786         /* One slot per configurable XRI; kcalloc() also guards the
8787          * size multiplication against overflow.
8788          */
8789         phba->sli4_hba.lpfc_sglq_active_list =
8790                 kcalloc(phba->sli4_hba.max_cfg_param.max_xri,
8791                         sizeof(struct lpfc_sglq *), GFP_KERNEL);
8792         if (!phba->sli4_hba.lpfc_sglq_active_list)
8793                 return -ENOMEM;
8794         return 0;
8795 }
8796
8797 /**
8798  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8799  * @phba: pointer to lpfc hba data structure.
8800  *
8801  * This routine is invoked to walk through the array of active sglq entries
8802  * and free all of the resources.
8803  * This is just a placeholder for now.
8804  **/
8805 static void
8806 lpfc_free_active_sgl(struct lpfc_hba *phba)
8807 {
8808         kfree(phba->sli4_hba.lpfc_sglq_active_list);
8809 }
8810
8811 /**
8812  * lpfc_init_sgl_list - Allocate and initialize sgl list.
8813  * @phba: pointer to lpfc hba data structure.
8814  *
8815  * This routine is invoked to allocate and initialize the driver's sgl
8816  * list and set up the sgl xritag tag array accordingly.
8817  *
8818  **/
8819 static void
8820 lpfc_init_sgl_list(struct lpfc_hba *phba)
8821 {
8822         /* Initialize and populate the sglq list per host/VF. */
8823         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8824         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8825         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8826         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8827
8828         /* els xri-sgl book keeping */
8829         phba->sli4_hba.els_xri_cnt = 0;
8830
8831         /* nvme xri-buffer book keeping */
8832         phba->sli4_hba.io_xri_cnt = 0;
8833 }
8834
8835 /**
8836  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8837  * @phba: pointer to lpfc hba data structure.
8838  *
8839  * This routine is invoked to post rpi header templates to the
8840  * port for those SLI4 ports that do not support extents.  This routine
8841  * posts a PAGE_SIZE memory region to the port to hold up to
8842  * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
8843  * and should be called only when interrupts are disabled.
8844  *
8845  * Return codes
8846  *      0 - successful
8847  *      -ERROR - otherwise.
8848  **/
8849 int
8850 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8851 {
8852         int rc = 0;
8853         struct lpfc_rpi_hdr *rpi_hdr;
8854
8855         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8856         if (!phba->sli4_hba.rpi_hdrs_in_use)
8857                 return rc;
8858         if (phba->sli4_hba.extents_in_use)
8859                 return -EIO;
8860
8861         rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8862         if (!rpi_hdr) {
8863                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8864                                 "0391 Error during rpi post operation\n");
8865                 lpfc_sli4_remove_rpis(phba);
8866                 rc = -ENODEV;
8867         }
8868
8869         return rc;
8870 }
8871
8872 /**
8873  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8874  * @phba: pointer to lpfc hba data structure.
8875  *
8876  * This routine is invoked to allocate a single 4KB memory region to
8877  * support rpis and stores them in the phba.  This single region
8878  * provides support for up to 64 rpis.  The region is used globally
8879  * by the device.
8880  *
8881  * Returns:
8882  *   A valid rpi hdr on success.
8883  *   A NULL pointer on any failure.
8884  **/
8885 struct lpfc_rpi_hdr *
8886 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8887 {
8888         uint16_t rpi_limit, curr_rpi_range;
8889         struct lpfc_dmabuf *dmabuf;
8890         struct lpfc_rpi_hdr *rpi_hdr;
8891
8892         /*
8893          * If the SLI4 port supports extents, posting the rpi header isn't
8894          * required.  Set the expected maximum count and let the actual value
8895          * get set when extents are fully allocated.
8896          */
8897         if (!phba->sli4_hba.rpi_hdrs_in_use)
8898                 return NULL;
8899         if (phba->sli4_hba.extents_in_use)
8900                 return NULL;
8901
8902         /* The limit on the logical index is just the max_rpi count. */
8903         rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8904
8905         spin_lock_irq(&phba->hbalock);
8906         /*
8907          * Establish the starting RPI in this header block.  The starting
8908          * rpi is normalized to a zero base because the physical rpi is
8909          * port based.
8910          */
8911         curr_rpi_range = phba->sli4_hba.next_rpi;
8912         spin_unlock_irq(&phba->hbalock);
8913
8914         /* Reached full RPI range */
8915         if (curr_rpi_range == rpi_limit)
8916                 return NULL;
8917
8918         /*
8919          * First allocate the protocol header region for the port.  The
8920          * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8921          */
8922         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8923         if (!dmabuf)
8924                 return NULL;
8925
8926         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8927                                           LPFC_HDR_TEMPLATE_SIZE,
8928                                           &dmabuf->phys, GFP_KERNEL);
8929         if (!dmabuf->virt) {
8930                 rpi_hdr = NULL;
8931                 goto err_free_dmabuf;
8932         }
8933
8934         if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8935                 rpi_hdr = NULL;
8936                 goto err_free_coherent;
8937         }
8938
8939         /* Save the rpi header data for cleanup later. */
8940         rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8941         if (!rpi_hdr)
8942                 goto err_free_coherent;
8943
8944         rpi_hdr->dmabuf = dmabuf;
8945         rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8946         rpi_hdr->page_count = 1;
8947         spin_lock_irq(&phba->hbalock);
8948
8949         /* The rpi_hdr stores the logical index only. */
8950         rpi_hdr->start_rpi = curr_rpi_range;
8951         rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8952         list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8953
8954         spin_unlock_irq(&phba->hbalock);
8955         return rpi_hdr;
8956
8957  err_free_coherent:
8958         dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8959                           dmabuf->virt, dmabuf->phys);
8960  err_free_dmabuf:
8961         kfree(dmabuf);
8962         return NULL;
8963 }
8964
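/*
 * On the alignment check above: dma_alloc_coherent() guarantees
 * alignment to the smallest power-of-two page order covering the
 * request, so a 4KB allocation is naturally 4KB aligned; the
 * IS_ALIGNED() test defensively verifies the port's requirement.
 * For example:
 *
 *      IS_ALIGNED(0x1ffff000, 4096) is true
 *      IS_ALIGNED(0x1ffff800, 4096) is false
 */
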
8965 /**
8966  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8967  * @phba: pointer to lpfc hba data structure.
8968  *
8969  * This routine is invoked to remove all memory resources allocated
8970  * to support rpis for SLI4 ports not supporting extents. This routine
8971  * presumes the caller has released all rpis consumed by fabric or port
8972  * logins and is prepared to have the header pages removed.
8973  **/
8974 void
8975 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8976 {
8977         struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8978
8979         if (!phba->sli4_hba.rpi_hdrs_in_use)
8980                 goto exit;
8981
8982         list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8983                                  &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
8984                 list_del(&rpi_hdr->list);
8985                 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
8986                                   rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
8987                 kfree(rpi_hdr->dmabuf);
8988                 kfree(rpi_hdr);
8989         }
8990  exit:
8991         /* There are no rpis available to the port now. */
8992         phba->sli4_hba.next_rpi = 0;
8993 }
8994
8995 /**
8996  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
8997  * @pdev: pointer to pci device data structure.
8998  *
8999  * This routine is invoked to allocate the driver hba data structure for an
9000  * HBA device. If the allocation is successful, the phba reference to the
9001  * PCI device data structure is set.
9002  *
9003  * Return codes
9004  *      pointer to @phba - successful
9005  *      NULL - error
9006  **/
9007 static struct lpfc_hba *
9008 lpfc_hba_alloc(struct pci_dev *pdev)
9009 {
9010         struct lpfc_hba *phba;
9011
9012         /* Allocate memory for HBA structure */
9013         phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
9014         if (!phba) {
9015                 dev_err(&pdev->dev, "failed to allocate hba struct\n");
9016                 return NULL;
9017         }
9018
9019         /* Set reference to PCI device in HBA structure */
9020         phba->pcidev = pdev;
9021
9022         /* Assign an unused board number */
9023         phba->brd_no = lpfc_get_instance();
9024         if (phba->brd_no < 0) {
9025                 kfree(phba);
9026                 return NULL;
9027         }
9028         phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9029
9030         spin_lock_init(&phba->ct_ev_lock);
9031         INIT_LIST_HEAD(&phba->ct_ev_waiters);
9032
9033         return phba;
9034 }
9035
9036 /**
9037  * lpfc_hba_free - Free driver hba data structure with a device.
9038  * @phba: pointer to lpfc hba data structure.
9039  *
9040  * This routine is invoked to free the driver hba data structure with an
9041  * HBA device.
9042  **/
9043 static void
9044 lpfc_hba_free(struct lpfc_hba *phba)
9045 {
9046         if (phba->sli_rev == LPFC_SLI_REV4)
9047                 kfree(phba->sli4_hba.hdwq);
9048
9049         /* Release the driver assigned board number */
9050         idr_remove(&lpfc_hba_index, phba->brd_no);
9051
9052         /* Free memory allocated with sli3 rings */
9053         kfree(phba->sli.sli3_ring);
9054         phba->sli.sli3_ring = NULL;
9055
9056         kfree(phba);
9057         return;
9058 }
9059
9060 /**
9061  * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
9062  * @vport: pointer to lpfc vport data structure.
9063  *
9064  * This routine will set up the initial FDMI attribute masks for
9065  * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
9066  * to get these attributes first before falling back; the attribute
9067  * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1.
9068  **/
9069 void
9070 lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
9071 {
9072         struct lpfc_hba *phba = vport->phba;
9073
9074         vport->load_flag |= FC_ALLOW_FDMI;
9075         if (phba->cfg_enable_SmartSAN ||
9076             phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9077                 /* Setup appropriate attribute masks */
9078                 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9079                 if (phba->cfg_enable_SmartSAN)
9080                         vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9081                 else
9082                         vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9083         }
9084
9085         lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
9086                         "6077 Setup FDMI mask: hba x%x port x%x\n",
9087                         vport->fdmi_hba_mask, vport->fdmi_port_mask);
9088 }
9089
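/*
 * Editor's note (illustrative): the mask selection above resolves to
 *   cfg_enable_SmartSAN set          -> LPFC_FDMI2_HBA_ATTR + LPFC_FDMI2_SMART_ATTR
 *   cfg_fdmi_on == LPFC_FDMI_SUPPORT -> LPFC_FDMI2_HBA_ATTR + LPFC_FDMI2_PORT_ATTR
 *   neither                          -> masks keep their existing values
 */
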
9090 /**
9091  * lpfc_create_shost - Create hba physical port with associated scsi host.
9092  * @phba: pointer to lpfc hba data structure.
9093  *
9094  * This routine is invoked to create HBA physical port and associate a SCSI
9095  * host with it.
9096  *
9097  * Return codes
9098  *      0 - successful
9099  *      other values - error
9100  **/
9101 static int
9102 lpfc_create_shost(struct lpfc_hba *phba)
9103 {
9104         struct lpfc_vport *vport;
9105         struct Scsi_Host  *shost;
9106
9107         /* Initialize HBA FC structure */
9108         phba->fc_edtov = FF_DEF_EDTOV;
9109         phba->fc_ratov = FF_DEF_RATOV;
9110         phba->fc_altov = FF_DEF_ALTOV;
9111         phba->fc_arbtov = FF_DEF_ARBTOV;
9112
9113         atomic_set(&phba->sdev_cnt, 0);
9114         vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9115         if (!vport)
9116                 return -ENODEV;
9117
9118         shost = lpfc_shost_from_vport(vport);
9119         phba->pport = vport;
9120
9121         if (phba->nvmet_support) {
9122                 /* Only 1 vport (pport) will support NVME target */
9123                 phba->targetport = NULL;
9124                 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9125                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9126                                 "6076 NVME Target Found\n");
9127         }
9128
9129         lpfc_debugfs_initialize(vport);
9130         /* Put reference to SCSI host to driver's device private data */
9131         pci_set_drvdata(phba->pcidev, shost);
9132
9133         lpfc_setup_fdmi_mask(vport);
9134
9135         /*
9136          * At this point we are fully registered with PSA. In addition,
9137          * any initial discovery should be completed.
9138          */
9139         return 0;
9140 }
9141
9142 /**
9143  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9144  * @phba: pointer to lpfc hba data structure.
9145  *
9146  * This routine is invoked to destroy HBA physical port and the associated
9147  * SCSI host.
9148  **/
9149 static void
9150 lpfc_destroy_shost(struct lpfc_hba *phba)
9151 {
9152         struct lpfc_vport *vport = phba->pport;
9153
9154         /* Destroy physical port that associated with the SCSI host */
9155         destroy_port(vport);
9156
9157         return;
9158 }
9159
9160 /**
9161  * lpfc_setup_bg - Setup Block guard structures and debug areas.
9162  * @phba: pointer to lpfc hba data structure.
9163  * @shost: the shost to be used to detect Block guard settings.
9164  *
9165  * This routine sets up the local Block guard protocol settings for @shost.
9166  * This routine also allocates memory for debugging bg buffers.
9167  **/
9168 static void
9169 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9170 {
9171         uint32_t old_mask;
9172         uint32_t old_guard;
9173
9174         if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9175                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9176                                 "1478 Registering BlockGuard with the "
9177                                 "SCSI layer\n");
9178
9179                 old_mask = phba->cfg_prot_mask;
9180                 old_guard = phba->cfg_prot_guard;
9181
9182                 /* Only allow supported values */
9183                 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9184                         SHOST_DIX_TYPE0_PROTECTION |
9185                         SHOST_DIX_TYPE1_PROTECTION);
9186                 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9187                                          SHOST_DIX_GUARD_CRC);
9188
9189                 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
9190                 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9191                         phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9192
9193                 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9194                         if ((old_mask != phba->cfg_prot_mask) ||
9195                                 (old_guard != phba->cfg_prot_guard))
9196                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9197                                         "1475 Registering BlockGuard with the "
9198                                         "SCSI layer: mask %d  guard %d\n",
9199                                         phba->cfg_prot_mask,
9200                                         phba->cfg_prot_guard);
9201
9202                         scsi_host_set_prot(shost, phba->cfg_prot_mask);
9203                         scsi_host_set_guard(shost, phba->cfg_prot_guard);
9204                 } else
9205                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9206                                 "1479 Not Registering BlockGuard with the SCSI "
9207                                 "layer, Bad protection parameters: %d %d\n",
9208                                 old_mask, old_guard);
9209         }
9210 }
9211
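/*
 * Editor's worked example for the filtering above (illustrative): if a user
 * sets prot_mask = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION,
 * the &= against the supported set silently drops the TYPE2 bit; because the
 * result then differs from old_mask, the "1475" message logs the values
 * actually registered with the SCSI layer.
 */
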
9212 /**
9213  * lpfc_post_init_setup - Perform necessary device post initialization setup.
9214  * @phba: pointer to lpfc hba data structure.
9215  *
9216  * This routine is invoked to perform all the necessary post initialization
9217  * setup for the device.
9218  **/
9219 static void
9220 lpfc_post_init_setup(struct lpfc_hba *phba)
9221 {
9222         struct Scsi_Host  *shost;
9223         struct lpfc_adapter_event_header adapter_event;
9224
9225         /* Get the default values for Model Name and Description */
9226         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9227
9228         /*
9229          * hba setup may have changed the hba_queue_depth so we need to
9230          * adjust the value of can_queue.
9231          */
9232         shost = pci_get_drvdata(phba->pcidev);
9233         shost->can_queue = phba->cfg_hba_queue_depth - 10;
9234
9235         lpfc_host_attrib_init(shost);
9236
9237         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9238                 spin_lock_irq(shost->host_lock);
9239                 lpfc_poll_start_timer(phba);
9240                 spin_unlock_irq(shost->host_lock);
9241         }
9242
9243         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9244                         "0428 Perform SCSI scan\n");
9245         /* Send board arrival event to upper layer */
9246         adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9247         adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9248         fc_host_post_vendor_event(shost, fc_get_event_number(),
9249                                   sizeof(adapter_event),
9250                                   (char *) &adapter_event,
9251                                   LPFC_NL_VENDOR_ID);
9252         return;
9253 }
9254
9255 /**
9256  * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9257  * @phba: pointer to lpfc hba data structure.
9258  *
9259  * This routine is invoked to set up the PCI device memory space for device
9260  * with SLI-3 interface spec.
9261  *
9262  * Return codes
9263  *      0 - successful
9264  *      other values - error
9265  **/
9266 static int
9267 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9268 {
9269         struct pci_dev *pdev = phba->pcidev;
9270         unsigned long bar0map_len, bar2map_len;
9271         int i, hbq_count;
9272         void *ptr;
9273         int error;
9274
9275         if (!pdev)
9276                 return -ENODEV;
9277
9278         /* Set the device DMA mask size */
9279         error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9280         if (error)
9281                 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9282         if (error)
9283                 return error;
9284         error = -ENODEV;
9285
9286         /* Get the bus address of BAR0 and BAR2 and the number of bytes
9287          * required by each mapping.
9288          */
9289         phba->pci_bar0_map = pci_resource_start(pdev, 0);
9290         bar0map_len = pci_resource_len(pdev, 0);
9291
9292         phba->pci_bar2_map = pci_resource_start(pdev, 2);
9293         bar2map_len = pci_resource_len(pdev, 2);
9294
9295         /* Map HBA SLIM to a kernel virtual address. */
9296         phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9297         if (!phba->slim_memmap_p) {
9298                 dev_printk(KERN_ERR, &pdev->dev,
9299                            "ioremap failed for SLIM memory.\n");
9300                 goto out;
9301         }
9302
9303         /* Map HBA Control Registers to a kernel virtual address. */
9304         phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9305         if (!phba->ctrl_regs_memmap_p) {
9306                 dev_printk(KERN_ERR, &pdev->dev,
9307                            "ioremap failed for HBA control registers.\n");
9308                 goto out_iounmap_slim;
9309         }
9310
9311         /* Allocate memory for SLI-2 structures */
9312         phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9313                                                &phba->slim2p.phys, GFP_KERNEL);
9314         if (!phba->slim2p.virt)
9315                 goto out_iounmap;
9316
9317         phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9318         phba->mbox_ext = (phba->slim2p.virt +
9319                 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9320         phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9321         phba->IOCBs = (phba->slim2p.virt +
9322                        offsetof(struct lpfc_sli2_slim, IOCBs));
9323
9324         phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9325                                                  lpfc_sli_hbq_size(),
9326                                                  &phba->hbqslimp.phys,
9327                                                  GFP_KERNEL);
9328         if (!phba->hbqslimp.virt)
9329                 goto out_free_slim;
9330
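        /*
         * Editor's note: the loop below carves the single coherent hbqslimp
         * allocation into per-HBQ entry arrays; ptr advances by
         * entry_count * sizeof(struct lpfc_hbq_entry) for each HBQ.
         */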
9331         hbq_count = lpfc_sli_hbq_count();
9332         ptr = phba->hbqslimp.virt;
9333         for (i = 0; i < hbq_count; ++i) {
9334                 phba->hbqs[i].hbq_virt = ptr;
9335                 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9336                 ptr += (lpfc_hbq_defs[i]->entry_count *
9337                         sizeof(struct lpfc_hbq_entry));
9338         }
9339         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9340         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9341
9342         memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9343
9344         phba->MBslimaddr = phba->slim_memmap_p;
9345         phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9346         phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9347         phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9348         phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9349
9350         return 0;
9351
9352 out_free_slim:
9353         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9354                           phba->slim2p.virt, phba->slim2p.phys);
9355 out_iounmap:
9356         iounmap(phba->ctrl_regs_memmap_p);
9357 out_iounmap_slim:
9358         iounmap(phba->slim_memmap_p);
9359 out:
9360         return error;
9361 }
9362
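/*
 * Editor's sketch (not part of the driver) of the DMA-mask fallback pattern
 * used in lpfc_sli_pci_mem_setup() above: prefer full 64-bit addressing and
 * fall back to 32-bit. The helper name is hypothetical.
 */
static inline int lpfc_example_set_dma_mask(struct pci_dev *pdev)
{
	/* Try full 64-bit DMA first ... */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;
	/* ... and settle for 32-bit if the platform rejects it. */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}
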
9363 /**
9364  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9365  * @phba: pointer to lpfc hba data structure.
9366  *
9367  * This routine is invoked to unset the PCI device memory space for device
9368  * with SLI-3 interface spec.
9369  **/
9370 static void
9371 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9372 {
9373         struct pci_dev *pdev;
9374
9375         /* Obtain PCI device reference */
9376         if (!phba->pcidev)
9377                 return;
9378         pdev = phba->pcidev;
9380
9381         /* Free coherent DMA memory allocated */
9382         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9383                           phba->hbqslimp.virt, phba->hbqslimp.phys);
9384         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9385                           phba->slim2p.virt, phba->slim2p.phys);
9386
9387         /* I/O memory unmap */
9388         iounmap(phba->ctrl_regs_memmap_p);
9389         iounmap(phba->slim_memmap_p);
9390
9391         return;
9392 }
9393
9394 /**
9395  * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9396  * @phba: pointer to lpfc hba data structure.
9397  *
9398  * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9399  * done and check status.
9400  *
9401  * Return 0 if successful, otherwise -ENODEV.
9402  **/
9403 int
9404 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9405 {
9406         struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9407         struct lpfc_register reg_data;
9408         int i, port_error = 0;
9409         uint32_t if_type;
9410
9411         memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9412         memset(&reg_data, 0, sizeof(reg_data));
9413         if (!phba->sli4_hba.PSMPHRregaddr)
9414                 return -ENODEV;
9415
9416         /* Wait up to 30 seconds for SLI port POST to complete and report ready */
9417         for (i = 0; i < 3000; i++) {
9418                 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9419                         &portsmphr_reg.word0) ||
9420                         (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9421                         /* Port has a fatal POST error, break out */
9422                         port_error = -ENODEV;
9423                         break;
9424                 }
9425                 if (LPFC_POST_STAGE_PORT_READY ==
9426                     bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9427                         break;
9428                 msleep(10);
9429         }
9430
9431         /*
9432          * If there was a port error during POST, then don't proceed with
9433          * other register reads as the data may not be valid.  Just exit.
9434          */
9435         if (port_error) {
9436                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9437                         "1408 Port Failed POST - portsmphr=0x%x, "
9438                         "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9439                         "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9440                         portsmphr_reg.word0,
9441                         bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9442                         bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9443                         bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9444                         bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9445                         bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9446                         bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9447                         bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9448                         bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9449         } else {
9450                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9451                                 "2534 Device Info: SLIFamily=0x%x, "
9452                                 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9453                                 "SLIHint_2=0x%x, FT=0x%x\n",
9454                                 bf_get(lpfc_sli_intf_sli_family,
9455                                        &phba->sli4_hba.sli_intf),
9456                                 bf_get(lpfc_sli_intf_slirev,
9457                                        &phba->sli4_hba.sli_intf),
9458                                 bf_get(lpfc_sli_intf_if_type,
9459                                        &phba->sli4_hba.sli_intf),
9460                                 bf_get(lpfc_sli_intf_sli_hint1,
9461                                        &phba->sli4_hba.sli_intf),
9462                                 bf_get(lpfc_sli_intf_sli_hint2,
9463                                        &phba->sli4_hba.sli_intf),
9464                                 bf_get(lpfc_sli_intf_func_type,
9465                                        &phba->sli4_hba.sli_intf));
9466                 /*
9467                  * Check for other Port errors during the initialization
9468                  * process.  Fail the load if the port did not come up
9469                  * correctly.
9470                  */
9471                 if_type = bf_get(lpfc_sli_intf_if_type,
9472                                  &phba->sli4_hba.sli_intf);
9473                 switch (if_type) {
9474                 case LPFC_SLI_INTF_IF_TYPE_0:
9475                         phba->sli4_hba.ue_mask_lo =
9476                               readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9477                         phba->sli4_hba.ue_mask_hi =
9478                               readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9479                         uerrlo_reg.word0 =
9480                               readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9481                         uerrhi_reg.word0 =
9482                                 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9483                         if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9484                             (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9485                                 lpfc_printf_log(phba, KERN_ERR,
9486                                                 LOG_TRACE_EVENT,
9487                                                 "1422 Unrecoverable Error "
9488                                                 "Detected during POST "
9489                                                 "uerr_lo_reg=0x%x, "
9490                                                 "uerr_hi_reg=0x%x, "
9491                                                 "ue_mask_lo_reg=0x%x, "
9492                                                 "ue_mask_hi_reg=0x%x\n",
9493                                                 uerrlo_reg.word0,
9494                                                 uerrhi_reg.word0,
9495                                                 phba->sli4_hba.ue_mask_lo,
9496                                                 phba->sli4_hba.ue_mask_hi);
9497                                 port_error = -ENODEV;
9498                         }
9499                         break;
9500                 case LPFC_SLI_INTF_IF_TYPE_2:
9501                 case LPFC_SLI_INTF_IF_TYPE_6:
9502                         /* Final checks.  The port status should be clean. */
9503                         if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9504                                 &reg_data.word0) ||
9505                                 lpfc_sli4_unrecoverable_port(&reg_data)) {
9506                                 phba->work_status[0] =
9507                                         readl(phba->sli4_hba.u.if_type2.
9508                                               ERR1regaddr);
9509                                 phba->work_status[1] =
9510                                         readl(phba->sli4_hba.u.if_type2.
9511                                               ERR2regaddr);
9512                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9513                                         "2888 Unrecoverable port error "
9514                                         "following POST: port status reg "
9515                                         "0x%x, port_smphr reg 0x%x, "
9516                                         "error 1=0x%x, error 2=0x%x\n",
9517                                         reg_data.word0,
9518                                         portsmphr_reg.word0,
9519                                         phba->work_status[0],
9520                                         phba->work_status[1]);
9521                                 port_error = -ENODEV;
9522                                 break;
9523                         }
9524
9525                         if (lpfc_pldv_detect &&
9526                             bf_get(lpfc_sli_intf_sli_family,
9527                                    &phba->sli4_hba.sli_intf) ==
9528                                         LPFC_SLI_INTF_FAMILY_G6)
9529                                 pci_write_config_byte(phba->pcidev,
9530                                                       LPFC_SLI_INTF, CFG_PLD);
9531                         break;
9532                 case LPFC_SLI_INTF_IF_TYPE_1:
9533                 default:
9534                         break;
9535                 }
9536         }
9537         return port_error;
9538 }
9539
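/*
 * Editor's sketch (not part of the driver) of the bounded-poll idiom in
 * lpfc_sli4_post_status_check() above: 3000 iterations of msleep(10) cap the
 * wait at roughly 30 seconds. lpfc_example_wait_post() is a hypothetical
 * name, shown only to isolate the pattern.
 */
static inline int lpfc_example_wait_post(struct lpfc_hba *phba)
{
	struct lpfc_register smphr;
	int i;

	for (i = 0; i < 3000; i++) {
		/* A failed register read is treated as a fatal error. */
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, &smphr.word0))
			return -ENODEV;
		if (bf_get(lpfc_port_smphr_port_status, &smphr) ==
		    LPFC_POST_STAGE_PORT_READY)
			return 0;	/* POST completed */
		msleep(10);
	}
	return -ETIMEDOUT;	/* ~30 s elapsed without readiness */
}
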
9540 /**
9541  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9542  * @phba: pointer to lpfc hba data structure.
9543  * @if_type:  The SLI4 interface type getting configured.
9544  *
9545  * This routine is invoked to set up SLI4 BAR0 PCI config space register
9546  * memory map.
9547  **/
9548 static void
9549 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9550 {
9551         switch (if_type) {
9552         case LPFC_SLI_INTF_IF_TYPE_0:
9553                 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9554                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9555                 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9556                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9557                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9558                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9559                 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9560                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9561                 phba->sli4_hba.SLIINTFregaddr =
9562                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9563                 break;
9564         case LPFC_SLI_INTF_IF_TYPE_2:
9565                 phba->sli4_hba.u.if_type2.EQDregaddr =
9566                         phba->sli4_hba.conf_regs_memmap_p +
9567                                                 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9568                 phba->sli4_hba.u.if_type2.ERR1regaddr =
9569                         phba->sli4_hba.conf_regs_memmap_p +
9570                                                 LPFC_CTL_PORT_ER1_OFFSET;
9571                 phba->sli4_hba.u.if_type2.ERR2regaddr =
9572                         phba->sli4_hba.conf_regs_memmap_p +
9573                                                 LPFC_CTL_PORT_ER2_OFFSET;
9574                 phba->sli4_hba.u.if_type2.CTRLregaddr =
9575                         phba->sli4_hba.conf_regs_memmap_p +
9576                                                 LPFC_CTL_PORT_CTL_OFFSET;
9577                 phba->sli4_hba.u.if_type2.STATUSregaddr =
9578                         phba->sli4_hba.conf_regs_memmap_p +
9579                                                 LPFC_CTL_PORT_STA_OFFSET;
9580                 phba->sli4_hba.SLIINTFregaddr =
9581                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9582                 phba->sli4_hba.PSMPHRregaddr =
9583                         phba->sli4_hba.conf_regs_memmap_p +
9584                                                 LPFC_CTL_PORT_SEM_OFFSET;
9585                 phba->sli4_hba.RQDBregaddr =
9586                         phba->sli4_hba.conf_regs_memmap_p +
9587                                                 LPFC_ULP0_RQ_DOORBELL;
9588                 phba->sli4_hba.WQDBregaddr =
9589                         phba->sli4_hba.conf_regs_memmap_p +
9590                                                 LPFC_ULP0_WQ_DOORBELL;
9591                 phba->sli4_hba.CQDBregaddr =
9592                         phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9593                 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9594                 phba->sli4_hba.MQDBregaddr =
9595                         phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9596                 phba->sli4_hba.BMBXregaddr =
9597                         phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9598                 break;
9599         case LPFC_SLI_INTF_IF_TYPE_6:
9600                 phba->sli4_hba.u.if_type2.EQDregaddr =
9601                         phba->sli4_hba.conf_regs_memmap_p +
9602                                                 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9603                 phba->sli4_hba.u.if_type2.ERR1regaddr =
9604                         phba->sli4_hba.conf_regs_memmap_p +
9605                                                 LPFC_CTL_PORT_ER1_OFFSET;
9606                 phba->sli4_hba.u.if_type2.ERR2regaddr =
9607                         phba->sli4_hba.conf_regs_memmap_p +
9608                                                 LPFC_CTL_PORT_ER2_OFFSET;
9609                 phba->sli4_hba.u.if_type2.CTRLregaddr =
9610                         phba->sli4_hba.conf_regs_memmap_p +
9611                                                 LPFC_CTL_PORT_CTL_OFFSET;
9612                 phba->sli4_hba.u.if_type2.STATUSregaddr =
9613                         phba->sli4_hba.conf_regs_memmap_p +
9614                                                 LPFC_CTL_PORT_STA_OFFSET;
9615                 phba->sli4_hba.PSMPHRregaddr =
9616                         phba->sli4_hba.conf_regs_memmap_p +
9617                                                 LPFC_CTL_PORT_SEM_OFFSET;
9618                 phba->sli4_hba.BMBXregaddr =
9619                         phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9620                 break;
9621         case LPFC_SLI_INTF_IF_TYPE_1:
9622         default:
9623                 dev_printk(KERN_ERR, &phba->pcidev->dev,
9624                            "FATAL - unsupported SLI4 interface type - %d\n",
9625                            if_type);
9626                 break;
9627         }
9628 }
9629
9630 /**
9631  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9632  * @phba: pointer to lpfc hba data structure.
9633  * @if_type: sli if type to operate on.
9634  *
9635  * This routine is invoked to set up SLI4 BAR1 register memory map.
9636  **/
9637 static void
9638 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9639 {
9640         switch (if_type) {
9641         case LPFC_SLI_INTF_IF_TYPE_0:
9642                 phba->sli4_hba.PSMPHRregaddr =
9643                         phba->sli4_hba.ctrl_regs_memmap_p +
9644                         LPFC_SLIPORT_IF0_SMPHR;
9645                 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9646                         LPFC_HST_ISR0;
9647                 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9648                         LPFC_HST_IMR0;
9649                 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9650                         LPFC_HST_ISCR0;
9651                 break;
9652         case LPFC_SLI_INTF_IF_TYPE_6:
9653                 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9654                         LPFC_IF6_RQ_DOORBELL;
9655                 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9656                         LPFC_IF6_WQ_DOORBELL;
9657                 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9658                         LPFC_IF6_CQ_DOORBELL;
9659                 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9660                         LPFC_IF6_EQ_DOORBELL;
9661                 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9662                         LPFC_IF6_MQ_DOORBELL;
9663                 break;
9664         case LPFC_SLI_INTF_IF_TYPE_2:
9665         case LPFC_SLI_INTF_IF_TYPE_1:
9666         default:
9667                 dev_err(&phba->pcidev->dev,
9668                            "FATAL - unsupported SLI4 interface type - %d\n",
9669                            if_type);
9670                 break;
9671         }
9672 }
9673
9674 /**
9675  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9676  * @phba: pointer to lpfc hba data structure.
9677  * @vf: virtual function number
9678  *
9679  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9680  * based on the given virtual function number, @vf.
9681  *
9682  * Return 0 if successful, otherwise -ENODEV.
9683  **/
9684 static int
9685 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9686 {
9687         if (vf > LPFC_VIR_FUNC_MAX)
9688                 return -ENODEV;
9689
9690         phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9691                                 vf * LPFC_VFR_PAGE_SIZE +
9692                                         LPFC_ULP0_RQ_DOORBELL);
9693         phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9694                                 vf * LPFC_VFR_PAGE_SIZE +
9695                                         LPFC_ULP0_WQ_DOORBELL);
9696         phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9697                                 vf * LPFC_VFR_PAGE_SIZE +
9698                                         LPFC_EQCQ_DOORBELL);
9699         phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9700         phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9701                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9702         phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9703                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9704         return 0;
9705 }
9706
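/*
 * Editor's worked example (illustrative): each virtual function owns one
 * doorbell page, so for vf = 2 the RQ doorbell above resolves to
 * drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL,
 * i.e. the same register layout replicated at a per-VF page offset.
 */
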
9707 /**
9708  * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9709  * @phba: pointer to lpfc hba data structure.
9710  *
9711  * This routine is invoked to create the bootstrap mailbox
9712  * region consistent with the SLI-4 interface spec.  This
9713  * routine allocates all memory necessary to communicate
9714  * mailbox commands to the port and sets up all alignment
9715  * needs.  No locks are expected to be held when calling
9716  * this routine.
9717  *
9718  * Return codes
9719  *      0 - successful
9720  *      -ENOMEM - could not allocate memory.
9721  **/
9722 static int
9723 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9724 {
9725         uint32_t bmbx_size;
9726         struct lpfc_dmabuf *dmabuf;
9727         struct dma_address *dma_address;
9728         uint32_t pa_addr;
9729         uint64_t phys_addr;
9730
9731         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9732         if (!dmabuf)
9733                 return -ENOMEM;
9734
9735         /*
9736          * The bootstrap mailbox region consists of two parts
9737          * plus an alignment restriction of 16 bytes.
9738          */
9739         bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9740         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9741                                           &dmabuf->phys, GFP_KERNEL);
9742         if (!dmabuf->virt) {
9743                 kfree(dmabuf);
9744                 return -ENOMEM;
9745         }
9746
9747         /*
9748          * Initialize the bootstrap mailbox pointers now so that the register
9749          * operations are simple later.  The mailbox dma address is required
9750          * to be 16-byte aligned.  Also align the virtual memory as each
9751          * mailbox is copied into the bmbx mailbox region before issuing the
9752          * command to the port.
9753          */
9754         phba->sli4_hba.bmbx.dmabuf = dmabuf;
9755         phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9756
9757         phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9758                                               LPFC_ALIGN_16_BYTE);
9759         phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9760                                               LPFC_ALIGN_16_BYTE);
9761
9762         /*
9763          * Set the high and low physical addresses now.  The SLI4 alignment
9764          * requirement is 16 bytes and the mailbox is posted to the port
9765          * as two 30-bit addresses.  The other data is a bit marking whether
9766          * the 30-bit address is the high or low address.
9767          * Upcast bmbx aphys to 64bits so shift instruction compiles
9768          * clean on 32 bit machines.
9769          */
9770         dma_address = &phba->sli4_hba.bmbx.dma_address;
9771         phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9772         pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9773         dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9774                                            LPFC_BMBX_BIT1_ADDR_HI);
9775
9776         pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9777         dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9778                                            LPFC_BMBX_BIT1_ADDR_LO);
9779         return 0;
9780 }
9781
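/*
 * Editor's worked example for lpfc_create_bootstrap_mbox() (illustrative,
 * assuming virt and phys share their low-order bits, which holds for
 * dma_alloc_coherent() memory): had dmabuf->phys been 0x1008, ALIGN(.., 16)
 * yields aphys = 0x1010; over-allocating by LPFC_ALIGN_16_BYTE - 1 = 15
 * bytes guarantees the aligned mailbox still fits, and PTR_ALIGN() applies
 * the same 8-byte skip to avirt. With 16-byte alignment, bits 3:0 of aphys
 * are zero, so addr_lo carries bits 33:4 and addr_hi carries bits 63:34,
 * each tagged in its two low bits as the high or low half.
 */
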
9782 /**
9783  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9784  * @phba: pointer to lpfc hba data structure.
9785  *
9786  * This routine is invoked to teardown the bootstrap mailbox
9787  * region and release all host resources. This routine requires
9788  * the caller to ensure that all outstanding mailbox commands have
9789  * completed, that no additional mailbox commands will be sent, and
9790  * that interrupts are disabled before calling this routine.
9791  *
9792  **/
9793 static void
9794 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9795 {
9796         dma_free_coherent(&phba->pcidev->dev,
9797                           phba->sli4_hba.bmbx.bmbx_size,
9798                           phba->sli4_hba.bmbx.dmabuf->virt,
9799                           phba->sli4_hba.bmbx.dmabuf->phys);
9800
9801         kfree(phba->sli4_hba.bmbx.dmabuf);
9802         memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9803 }
9804
9805 static const char * const lpfc_topo_to_str[] = {
9806         "Loop then P2P",
9807         "Loopback",
9808         "P2P Only",
9809         "Unsupported",
9810         "Loop Only",
9811         "Unsupported",
9812         "P2P then Loop",
9813 };
9814
9815 #define LINK_FLAGS_DEF  0x0
9816 #define LINK_FLAGS_P2P  0x1
9817 #define LINK_FLAGS_LOOP 0x2
9818 /**
9819  * lpfc_map_topology - Map the topology read from READ_CONFIG
9820  * @phba: pointer to lpfc hba data structure.
9821  * @rd_config: pointer to read config data
9822  *
9823  * This routine is invoked to map the topology values as read
9824  * from the read config mailbox command. If the persistent
9825  * topology feature is supported, the firmware will provide the
9826  * saved topology information to be used in INIT_LINK
9827  **/
9828 static void
9829 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9830 {
9831         u8 ptv, tf, pt;
9832
9833         ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9834         tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9835         pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9836
9837         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9838                         "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
9839                          ptv, tf, pt);
9840         if (!ptv) {
9841                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9842                                 "2019 FW does not support persistent topology "
9843                                 "Using driver parameter defined value [%s]",
9844                                 lpfc_topo_to_str[phba->cfg_topology]);
9845                 return;
9846         }
9847         /* FW supports persistent topology - override module parameter value */
9848         phba->hba_flag |= HBA_PERSISTENT_TOPO;
9849
9850         /* if ASIC_GEN_NUM >= 0xC */
9851         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9852                     LPFC_SLI_INTF_IF_TYPE_6) ||
9853             (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9854                     LPFC_SLI_INTF_FAMILY_G6)) {
9855                 if (!tf) {
9856                         phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9857                                         ? FLAGS_TOPOLOGY_MODE_LOOP
9858                                         : FLAGS_TOPOLOGY_MODE_PT_PT);
9859                 } else {
9860                         phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9861                 }
9862         } else { /* G5 */
9863                 if (tf) {
9864                         /* If topology failover set - pt is '0' or '1' */
9865                         phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9866                                               FLAGS_TOPOLOGY_MODE_LOOP_PT);
9867                 } else {
9868                         phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9869                                         ? FLAGS_TOPOLOGY_MODE_PT_PT
9870                                         : FLAGS_TOPOLOGY_MODE_LOOP);
9871                 }
9872         }
9873         if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9874                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9875                                 "2020 Using persistent topology value [%s]",
9876                                 lpfc_topo_to_str[phba->cfg_topology]);
9877         } else {
9878                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9879                                 "2021 Invalid topology values from FW "
9880                                 "Using driver parameter defined value [%s]",
9881                                 lpfc_topo_to_str[phba->cfg_topology]);
9882         }
9883 }
9884
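/*
 * Editor's note (illustrative) - decode table for lpfc_map_topology():
 *   G6 ASICs, tf = 0: pt == LINK_FLAGS_LOOP ? MODE_LOOP : MODE_PT_PT
 *   G6 ASICs, tf = 1: HBA_PERSISTENT_TOPO is cleared (not supported)
 *   G5 ASICs, tf = 1: pt ? MODE_PT_LOOP : MODE_LOOP_PT
 *   G5 ASICs, tf = 0: pt == LINK_FLAGS_P2P ? MODE_PT_PT : MODE_LOOP
 * where MODE_* abbreviates FLAGS_TOPOLOGY_MODE_*.
 */
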
9885 /**
9886  * lpfc_sli4_read_config - Get the config parameters.
9887  * @phba: pointer to lpfc hba data structure.
9888  *
9889  * This routine is invoked to read the configuration parameters from the HBA.
9890  * The configuration parameters are used to set the base and maximum values
9891  * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
9892  * allocation for the port.
9893  *
9894  * Return codes
9895  *      0 - successful
9896  *      -ENOMEM - No available memory
9897  *      -EIO - The mailbox failed to complete successfully.
9898  **/
9899 int
9900 lpfc_sli4_read_config(struct lpfc_hba *phba)
9901 {
9902         LPFC_MBOXQ_t *pmb;
9903         struct lpfc_mbx_read_config *rd_config;
9904         union  lpfc_sli4_cfg_shdr *shdr;
9905         uint32_t shdr_status, shdr_add_status;
9906         struct lpfc_mbx_get_func_cfg *get_func_cfg;
9907         struct lpfc_rsrc_desc_fcfcoe *desc;
9908         char *pdesc_0;
9909         uint16_t forced_link_speed;
9910         uint32_t if_type, qmin, fawwpn;
9911         int length, i, rc = 0, rc2;
9912
9913         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9914         if (!pmb) {
9915                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9916                                 "2011 Unable to allocate memory for issuing "
9917                                 "SLI_CONFIG_SPECIAL mailbox command\n");
9918                 return -ENOMEM;
9919         }
9920
9921         lpfc_read_config(phba, pmb);
9922
9923         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9924         if (rc != MBX_SUCCESS) {
9925                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9926                                 "2012 Mailbox failed , mbxCmd x%x "
9927                                 "READ_CONFIG, mbxStatus x%x\n",
9928                                 bf_get(lpfc_mqe_command, &pmb->u.mqe),
9929                                 bf_get(lpfc_mqe_status, &pmb->u.mqe));
9930                 rc = -EIO;
9931         } else {
9932                 rd_config = &pmb->u.mqe.un.rd_config;
9933                 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9934                         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9935                         phba->sli4_hba.lnk_info.lnk_tp =
9936                                 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9937                         phba->sli4_hba.lnk_info.lnk_no =
9938                                 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9939                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9940                                         "3081 lnk_type:%d, lnk_numb:%d\n",
9941                                         phba->sli4_hba.lnk_info.lnk_tp,
9942                                         phba->sli4_hba.lnk_info.lnk_no);
9943                 } else
9944                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9945                                         "3082 Mailbox (x%x) returned ldv:x0\n",
9946                                         bf_get(lpfc_mqe_command, &pmb->u.mqe));
9947                 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9948                         phba->bbcredit_support = 1;
9949                         phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9950                 }
9951
9952                 fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
9953
9954                 if (fawwpn) {
9955                         lpfc_printf_log(phba, KERN_INFO,
9956                                         LOG_INIT | LOG_DISCOVERY,
9957                                         "2702 READ_CONFIG: FA-PWWN is "
9958                                         "configured on\n");
9959                         phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
9960                 } else {
9961                         /* Clear FW configured flag, preserve driver flag */
9962                         phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
9963                 }
9964
9965                 phba->sli4_hba.conf_trunk =
9966                         bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9967                 phba->sli4_hba.extents_in_use =
9968                         bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9969
9970                 phba->sli4_hba.max_cfg_param.max_xri =
9971                         bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9972                 /* Reduce resource usage in kdump environment */
9973                 if (is_kdump_kernel() &&
9974                     phba->sli4_hba.max_cfg_param.max_xri > 512)
9975                         phba->sli4_hba.max_cfg_param.max_xri = 512;
9976                 phba->sli4_hba.max_cfg_param.xri_base =
9977                         bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9978                 phba->sli4_hba.max_cfg_param.max_vpi =
9979                         bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9980                 /* Limit the max we support */
9981                 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9982                         phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9983                 phba->sli4_hba.max_cfg_param.vpi_base =
9984                         bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
9985                 phba->sli4_hba.max_cfg_param.max_rpi =
9986                         bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
9987                 phba->sli4_hba.max_cfg_param.rpi_base =
9988                         bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
9989                 phba->sli4_hba.max_cfg_param.max_vfi =
9990                         bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
9991                 phba->sli4_hba.max_cfg_param.vfi_base =
9992                         bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
9993                 phba->sli4_hba.max_cfg_param.max_fcfi =
9994                         bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
9995                 phba->sli4_hba.max_cfg_param.max_eq =
9996                         bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
9997                 phba->sli4_hba.max_cfg_param.max_rq =
9998                         bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
9999                 phba->sli4_hba.max_cfg_param.max_wq =
10000                         bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
10001                 phba->sli4_hba.max_cfg_param.max_cq =
10002                         bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
10003                 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
10004                 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
10005                 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
10006                 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
10007                 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
10008                                 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
10009                 phba->max_vports = phba->max_vpi;
10010
10011                 /* Next decide on FPIN or Signal E2E CGN support
10012                  * For congestion alarms and warnings valid combination are:
10013                  * 1. FPIN alarms / FPIN warnings
10014                  * 2. Signal alarms / Signal warnings
10015                  * 3. FPIN alarms / Signal warnings
10016                  * 4. Signal alarms / FPIN warnings
10017                  *
10018                  * Initialize the adapter frequency to 100 mSecs
10019                  */
10020                 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10021                 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10022                 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10023
10024                 if (lpfc_use_cgn_signal) {
10025                         if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
10026                                 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10027                                 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10028                         }
10029                         if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
10030                                 /* MUST support both alarm and warning
10031                                  * because EDC does not support alarm alone.
10032                                  */
10033                                 if (phba->cgn_reg_signal !=
10034                                     EDC_CG_SIG_WARN_ONLY) {
10035                                         /* Must support both or none */
10036                                         phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10037                                         phba->cgn_reg_signal =
10038                                                 EDC_CG_SIG_NOTSUPPORTED;
10039                                 } else {
10040                                         phba->cgn_reg_signal =
10041                                                 EDC_CG_SIG_WARN_ALARM;
10042                                         phba->cgn_reg_fpin =
10043                                                 LPFC_CGN_FPIN_NONE;
10044                                 }
10045                         }
10046                 }
10047
10048                 /* Set the congestion initial signal and fpin values. */
10049                 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10050                 phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10051
10052                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
10053                                 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
10054                                 phba->cgn_reg_signal, phba->cgn_reg_fpin);
10055
10056                 lpfc_map_topology(phba, rd_config);
10057                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10058                                 "2003 cfg params Extents? %d "
10059                                 "XRI(B:%d M:%d), "
10060                                 "VPI(B:%d M:%d) "
10061                                 "VFI(B:%d M:%d) "
10062                                 "RPI(B:%d M:%d) "
10063                                 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
10064                                 phba->sli4_hba.extents_in_use,
10065                                 phba->sli4_hba.max_cfg_param.xri_base,
10066                                 phba->sli4_hba.max_cfg_param.max_xri,
10067                                 phba->sli4_hba.max_cfg_param.vpi_base,
10068                                 phba->sli4_hba.max_cfg_param.max_vpi,
10069                                 phba->sli4_hba.max_cfg_param.vfi_base,
10070                                 phba->sli4_hba.max_cfg_param.max_vfi,
10071                                 phba->sli4_hba.max_cfg_param.rpi_base,
10072                                 phba->sli4_hba.max_cfg_param.max_rpi,
10073                                 phba->sli4_hba.max_cfg_param.max_fcfi,
10074                                 phba->sli4_hba.max_cfg_param.max_eq,
10075                                 phba->sli4_hba.max_cfg_param.max_cq,
10076                                 phba->sli4_hba.max_cfg_param.max_wq,
10077                                 phba->sli4_hba.max_cfg_param.max_rq,
10078                                 phba->lmt);
10079
10080                 /*
10081                  * Calculate queue resources based on how
10082                  * many WQ/CQ/EQs are available.
10083                  */
10084                 qmin = phba->sli4_hba.max_cfg_param.max_wq;
10085                 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10086                         qmin = phba->sli4_hba.max_cfg_param.max_cq;
10087                 /*
10088                  * Reserve 4 (ELS, NVME LS, MBOX, plus one extra) and
10089                  * the remainder can be used for NVME / FCP.
10090                  */
10091                 qmin -= 4;
10092                 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10093                         qmin = phba->sli4_hba.max_cfg_param.max_eq;
10094
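                /*
                 * Editor's worked example (illustrative): with max_wq = 128,
                 * max_cq = 160 and max_eq = 64, qmin becomes
                 * min(128, 160) - 4 = 124, then min(124, 64) = 64, and
                 * cfg_irq_chann / cfg_hdw_queue are clamped to 64 below.
                 */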
10095                 /* Check to see if there is enough for default cfg */
10096                 if ((phba->cfg_irq_chann > qmin) ||
10097                     (phba->cfg_hdw_queue > qmin)) {
10098                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10099                                         "2005 Reducing Queues - "
10100                                         "FW resource limitation: "
10101                                         "WQ %d CQ %d EQ %d: min %d: "
10102                                         "IRQ %d HDWQ %d\n",
10103                                         phba->sli4_hba.max_cfg_param.max_wq,
10104                                         phba->sli4_hba.max_cfg_param.max_cq,
10105                                         phba->sli4_hba.max_cfg_param.max_eq,
10106                                         qmin, phba->cfg_irq_chann,
10107                                         phba->cfg_hdw_queue);
10108
10109                         if (phba->cfg_irq_chann > qmin)
10110                                 phba->cfg_irq_chann = qmin;
10111                         if (phba->cfg_hdw_queue > qmin)
10112                                 phba->cfg_hdw_queue = qmin;
10113                 }
10114         }
10115
10116         if (rc)
10117                 goto read_cfg_out;
10118
10119         /* Update link speed if forced link speed is supported */
10120         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10121         if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10122                 forced_link_speed =
10123                         bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
10124                 if (forced_link_speed) {
10125                         phba->hba_flag |= HBA_FORCED_LINK_SPEED;
10126
10127                         switch (forced_link_speed) {
10128                         case LINK_SPEED_1G:
10129                                 phba->cfg_link_speed =
10130                                         LPFC_USER_LINK_SPEED_1G;
10131                                 break;
10132                         case LINK_SPEED_2G:
10133                                 phba->cfg_link_speed =
10134                                         LPFC_USER_LINK_SPEED_2G;
10135                                 break;
10136                         case LINK_SPEED_4G:
10137                                 phba->cfg_link_speed =
10138                                         LPFC_USER_LINK_SPEED_4G;
10139                                 break;
10140                         case LINK_SPEED_8G:
10141                                 phba->cfg_link_speed =
10142                                         LPFC_USER_LINK_SPEED_8G;
10143                                 break;
10144                         case LINK_SPEED_10G:
10145                                 phba->cfg_link_speed =
10146                                         LPFC_USER_LINK_SPEED_10G;
10147                                 break;
10148                         case LINK_SPEED_16G:
10149                                 phba->cfg_link_speed =
10150                                         LPFC_USER_LINK_SPEED_16G;
10151                                 break;
10152                         case LINK_SPEED_32G:
10153                                 phba->cfg_link_speed =
10154                                         LPFC_USER_LINK_SPEED_32G;
10155                                 break;
10156                         case LINK_SPEED_64G:
10157                                 phba->cfg_link_speed =
10158                                         LPFC_USER_LINK_SPEED_64G;
10159                                 break;
10160                         case 0xffff:
10161                                 phba->cfg_link_speed =
10162                                         LPFC_USER_LINK_SPEED_AUTO;
10163                                 break;
10164                         default:
10165                                 lpfc_printf_log(phba, KERN_ERR,
10166                                                 LOG_TRACE_EVENT,
10167                                                 "0047 Unrecognized link "
10168                                                 "speed : %d\n",
10169                                                 forced_link_speed);
10170                                 phba->cfg_link_speed =
10171                                         LPFC_USER_LINK_SPEED_AUTO;
10172                         }
10173                 }
10174         }
10175
10176         /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
10177         length = phba->sli4_hba.max_cfg_param.max_xri -
10178                         lpfc_sli4_get_els_iocb_cnt(phba);
10179         if (phba->cfg_hba_queue_depth > length) {
10180                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10181                                 "3361 HBA queue depth changed from %d to %d\n",
10182                                 phba->cfg_hba_queue_depth, length);
10183                 phba->cfg_hba_queue_depth = length;
10184         }
10185
10186         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10187             LPFC_SLI_INTF_IF_TYPE_2)
10188                 goto read_cfg_out;
10189
10190         /* get the pf# and vf# for SLI4 if_type 2 port */
10191         length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10192                   sizeof(struct lpfc_sli4_cfg_mhdr));
10193         lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10194                          LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
10195                          length, LPFC_SLI4_MBX_EMBED);
10196
10197         rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10198         shdr = (union lpfc_sli4_cfg_shdr *)
10199                                 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10200         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10201         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10202         if (rc2 || shdr_status || shdr_add_status) {
10203                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10204                                 "3026 Mailbox failed, mbxCmd x%x "
10205                                 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10206                                 bf_get(lpfc_mqe_command, &pmb->u.mqe),
10207                                 bf_get(lpfc_mqe_status, &pmb->u.mqe));
10208                 goto read_cfg_out;
10209         }
10210
10211         /* search for the fc_fcoe resource descriptor */
10212         get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10213
10214         pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10215         desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10216         length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10217         if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10218                 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10219         else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10220                 goto read_cfg_out;
10221
10222         for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10223                 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10224                 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10225                     bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10226                         phba->sli4_hba.iov.pf_number =
10227                                 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10228                         phba->sli4_hba.iov.vf_number =
10229                                 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10230                         break;
10231                 }
10232         }
10233
10234         if (i < LPFC_RSRC_DESC_MAX_NUM)
10235                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10236                                 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10237                                 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10238                                 phba->sli4_hba.iov.vf_number);
10239         else
10240                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10241                                 "3028 GET_FUNCTION_CONFIG: failed to find "
10242                                 "Resource Descriptor:x%x\n",
10243                                 LPFC_RSRC_DESC_TYPE_FCFCOE);
10244
10245 read_cfg_out:
10246         mempool_free(pmb, phba->mbox_mem_pool);
10247         return rc;
10248 }
10249
10250 /**
10251  * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10252  * @phba: pointer to lpfc hba data structure.
10253  *
10254  * This routine is invoked to setup the port-side endian order when
10255  * the port if_type is 0.  This routine has no function for other
10256  * if_types.
10257  *
10258  * Return codes
10259  *      0 - successful
10260  *      -ENOMEM - No available memory
10261  *      -EIO - The mailbox failed to complete successfully.
10262  **/
10263 static int
10264 lpfc_setup_endian_order(struct lpfc_hba *phba)
10265 {
10266         LPFC_MBOXQ_t *mboxq;
10267         uint32_t if_type, rc = 0;
10268         uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10269                                       HOST_ENDIAN_HIGH_WORD1};
10270
10271         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10272         switch (if_type) {
10273         case LPFC_SLI_INTF_IF_TYPE_0:
10274                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10275                                                        GFP_KERNEL);
10276                 if (!mboxq) {
10277                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10278                                         "0492 Unable to allocate memory for "
10279                                         "issuing SLI_CONFIG_SPECIAL mailbox "
10280                                         "command\n");
10281                         return -ENOMEM;
10282                 }
10283
10284                 /*
10285                  * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10286                  * two words to contain special data values and no other data.
10287                  */
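                /*
                 * Sketch of the assumed detection scheme: the port knows
                 * the expected magic words (HOST_ENDIAN_LOW_WORD0 /
                 * HOST_ENDIAN_HIGH_WORD1), so a host of the opposite byte
                 * order delivers them byte-swapped, letting the port set
                 * up its swizzling before any other mailbox traffic.
                 */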
10288                 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10289                 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10290                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10291                 if (rc != MBX_SUCCESS) {
10292                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10293                                         "0493 SLI_CONFIG_SPECIAL mailbox "
10294                                         "failed with status x%x\n",
10295                                         rc);
10296                         rc = -EIO;
10297                 }
10298                 mempool_free(mboxq, phba->mbox_mem_pool);
10299                 break;
10300         case LPFC_SLI_INTF_IF_TYPE_6:
10301         case LPFC_SLI_INTF_IF_TYPE_2:
10302         case LPFC_SLI_INTF_IF_TYPE_1:
10303         default:
10304                 break;
10305         }
10306         return rc;
10307 }
10308
10309 /**
10310  * lpfc_sli4_queue_verify - Verify and update EQ counts
10311  * @phba: pointer to lpfc hba data structure.
10312  *
10313  * This routine is invoked to check the user settable queue counts for EQs.
10314  * After this routine is called the counts will be set to valid values that
10315  * adhere to the constraints of the system's interrupt vectors and the port's
10316  * queue resources.
10317  *
10318  * Return codes
10319  *      0 - successful
10320  *      -ENOMEM - No available memory
10322 static int
10323 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10324 {
10325         /*
10326          * Sanity check for configured queue parameters against the run-time
10327          * device parameters
10328          */
10329
10330         if (phba->nvmet_support) {
10331                 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10332                         phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10333                 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10334                         phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10335         }
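        /*
         * Example with hypothetical values: cfg_hdw_queue = 8 and
         * cfg_nvmet_mrq = 16 clamps MRQ down to 8; LPFC_NVMET_MRQ_MAX
         * then caps whatever survives the first clamp.
         */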
10336
10337         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10338                         "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10339                         phba->cfg_hdw_queue, phba->cfg_irq_chann,
10340                         phba->cfg_nvmet_mrq);
10341
10342         /* Get EQ depth from module parameter, fake the default for now */
10343         phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10344         phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10345
10346         /* Get CQ depth from module parameter, fake the default for now */
10347         phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10348         phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10349         return 0;
10350 }
10351
10352 static int
10353 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10354 {
10355         struct lpfc_queue *qdesc;
10356         u32 wqesize;
10357         int cpu;
10358
10359         cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10360         /* Create Fast Path IO CQs */
10361         if (phba->enab_exp_wqcq_pages)
10362                 /* Increase the CQ size when WQEs contain an embedded cdb */
10363                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10364                                               phba->sli4_hba.cq_esize,
10365                                               LPFC_CQE_EXP_COUNT, cpu);
10367         else
10368                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10369                                               phba->sli4_hba.cq_esize,
10370                                               phba->sli4_hba.cq_ecount, cpu);
10371         if (!qdesc) {
10372                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10373                                 "0499 Failed allocate fast-path IO CQ (%d)\n",
10374                                 idx);
10375                 return 1;
10376         }
10377         qdesc->qe_valid = 1;
10378         qdesc->hdwq = idx;
10379         qdesc->chann = cpu;
10380         phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10381
10382         /* Create Fast Path IO WQs */
10383         if (phba->enab_exp_wqcq_pages) {
10384                 /* Increase the WQ size when WQEs contain an embedded cdb */
10385                 wqesize = (phba->fcp_embed_io) ?
10386                         LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10387                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10388                                               wqesize,
10389                                               LPFC_WQE_EXP_COUNT, cpu);
10390         } else
10391                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10392                                               phba->sli4_hba.wq_esize,
10393                                               phba->sli4_hba.wq_ecount, cpu);
10394
10395         if (!qdesc) {
10396                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10397                                 "0503 Failed allocate fast-path IO WQ (%d)\n",
10398                                 idx);
10399                 return 1;
10400         }
10401         qdesc->hdwq = idx;
10402         qdesc->chann = cpu;
10403         phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10404         list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10405         return 0;
10406 }
10407
10408 /**
10409  * lpfc_sli4_queue_create - Create all the SLI4 queues
10410  * @phba: pointer to lpfc hba data structure.
10411  *
10412  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10413  * operation. For each SLI4 queue type, the parameters such as queue entry
10414  * count (queue depth) shall be taken from the module parameter. For now,
10415  * we just use some constant number as place holder.
10416  * we just use some constant number as a placeholder.
10417  * Return codes
10418  *      0 - successful
10419  *      -ENOMEM - No available memory
10421  **/
10422 int
10423 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10424 {
10425         struct lpfc_queue *qdesc;
10426         int idx, cpu, eqcpu;
10427         struct lpfc_sli4_hdw_queue *qp;
10428         struct lpfc_vector_map_info *cpup;
10429         struct lpfc_vector_map_info *eqcpup;
10430         struct lpfc_eq_intr_info *eqi;
10431
10432         /*
10433          * Create HBA Record arrays.
10434          * Both NVME and FCP will share the same vectors / EQs
10435          */
10436         phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10437         phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10438         phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10439         phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10440         phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10441         phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10442         phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10443         phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10444         phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10445         phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10446
10447         if (!phba->sli4_hba.hdwq) {
10448                 phba->sli4_hba.hdwq = kcalloc(
10449                         phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10450                         GFP_KERNEL);
10451                 if (!phba->sli4_hba.hdwq) {
10452                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10453                                         "6427 Failed allocate memory for "
10454                                         "fast-path Hardware Queue array\n");
10455                         goto out_error;
10456                 }
10457                 /* Prepare hardware queues to take IO buffers */
10458                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10459                         qp = &phba->sli4_hba.hdwq[idx];
10460                         spin_lock_init(&qp->io_buf_list_get_lock);
10461                         spin_lock_init(&qp->io_buf_list_put_lock);
10462                         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10463                         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10464                         qp->get_io_bufs = 0;
10465                         qp->put_io_bufs = 0;
10466                         qp->total_io_bufs = 0;
10467                         spin_lock_init(&qp->abts_io_buf_list_lock);
10468                         INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10469                         qp->abts_scsi_io_bufs = 0;
10470                         qp->abts_nvme_io_bufs = 0;
10471                         INIT_LIST_HEAD(&qp->sgl_list);
10472                         INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10473                         spin_lock_init(&qp->hdwq_lock);
10474                 }
10475         }
10476
10477         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10478                 if (phba->nvmet_support) {
10479                         phba->sli4_hba.nvmet_cqset = kcalloc(
10480                                         phba->cfg_nvmet_mrq,
10481                                         sizeof(struct lpfc_queue *),
10482                                         GFP_KERNEL);
10483                         if (!phba->sli4_hba.nvmet_cqset) {
10484                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10485                                         "3121 Failed allocate memory for "
10486                                         "fast-path CQ set array\n");
10487                                 goto out_error;
10488                         }
10489                         phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10490                                         phba->cfg_nvmet_mrq,
10491                                         sizeof(struct lpfc_queue *),
10492                                         GFP_KERNEL);
10493                         if (!phba->sli4_hba.nvmet_mrq_hdr) {
10494                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10495                                         "3122 Failed allocate memory for "
10496                                         "fast-path RQ set hdr array\n");
10497                                 goto out_error;
10498                         }
10499                         phba->sli4_hba.nvmet_mrq_data = kcalloc(
10500                                         phba->cfg_nvmet_mrq,
10501                                         sizeof(struct lpfc_queue *),
10502                                         GFP_KERNEL);
10503                         if (!phba->sli4_hba.nvmet_mrq_data) {
10504                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10505                                         "3124 Failed allocate memory for "
10506                                         "fast-path RQ set data array\n");
10507                                 goto out_error;
10508                         }
10509                 }
10510         }
10511
10512         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10513
10514         /* Create HBA Event Queues (EQs) */
10515         for_each_present_cpu(cpu) {
10516         /* We only want to create 1 EQ per vector, even though
10517          * multiple CPUs might be using that vector, so only
10518          * select the CPUs flagged LPFC_CPU_FIRST_IRQ.
10519                  */
10520                 cpup = &phba->sli4_hba.cpu_map[cpu];
10521                 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10522                         continue;
10523
10524                 /* Get a ptr to the Hardware Queue associated with this CPU */
10525                 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10526
10527                 /* Allocate an EQ */
10528                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10529                                               phba->sli4_hba.eq_esize,
10530                                               phba->sli4_hba.eq_ecount, cpu);
10531                 if (!qdesc) {
10532                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10533                                         "0497 Failed allocate EQ (%d)\n",
10534                                         cpup->hdwq);
10535                         goto out_error;
10536                 }
10537                 qdesc->qe_valid = 1;
10538                 qdesc->hdwq = cpup->hdwq;
10539                 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10540                 qdesc->last_cpu = qdesc->chann;
10541
10542                 /* Save the allocated EQ in the Hardware Queue */
10543                 qp->hba_eq = qdesc;
10544
10545                 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10546                 list_add(&qdesc->cpu_list, &eqi->list);
10547         }
10548
10549         /* Now populate each remaining Hardware Queue that shares an
10550          * IRQ vector with the EQ ptr already allocated for that vector.
10551          */
10552         for_each_present_cpu(cpu) {
10553                 cpup = &phba->sli4_hba.cpu_map[cpu];
10554
10555                 /* Check for EQ already allocated in previous loop */
10556                 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10557                         continue;
10558
10559                 /* Check for multiple CPUs per hdwq */
10560                 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10561                 if (qp->hba_eq)
10562                         continue;
10563
10564                 /* We need to share an EQ for this hdwq */
10565                 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10566                 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10567                 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10568         }
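        /*
         * Illustration (hypothetical topology): with SMT siblings 0/1 on
         * vector 0 and 2/3 on vector 1, only CPUs 0 and 2 carry
         * LPFC_CPU_FIRST_IRQ, so the loop above points the hdwqs of
         * CPUs 1 and 3 at the EQs already allocated for CPUs 0 and 2.
         */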
10569
10570         /* Allocate IO Path SLI4 CQ/WQs */
10571         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10572                 if (lpfc_alloc_io_wq_cq(phba, idx))
10573                         goto out_error;
10574         }
10575
10576         if (phba->nvmet_support) {
10577                 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10578                         cpu = lpfc_find_cpu_handle(phba, idx,
10579                                                    LPFC_FIND_BY_HDWQ);
10580                         qdesc = lpfc_sli4_queue_alloc(phba,
10581                                                       LPFC_DEFAULT_PAGE_SIZE,
10582                                                       phba->sli4_hba.cq_esize,
10583                                                       phba->sli4_hba.cq_ecount,
10584                                                       cpu);
10585                         if (!qdesc) {
10586                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10587                                                 "3142 Failed allocate NVME "
10588                                                 "CQ Set (%d)\n", idx);
10589                                 goto out_error;
10590                         }
10591                         qdesc->qe_valid = 1;
10592                         qdesc->hdwq = idx;
10593                         qdesc->chann = cpu;
10594                         phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10595                 }
10596         }
10597
10598         /*
10599          * Create Slow Path Completion Queues (CQs)
10600          */
10601
10602         cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10603         /* Create slow-path Mailbox Command Complete Queue */
10604         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10605                                       phba->sli4_hba.cq_esize,
10606                                       phba->sli4_hba.cq_ecount, cpu);
10607         if (!qdesc) {
10608                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10609                                 "0500 Failed allocate slow-path mailbox CQ\n");
10610                 goto out_error;
10611         }
10612         qdesc->qe_valid = 1;
10613         phba->sli4_hba.mbx_cq = qdesc;
10614
10615         /* Create slow-path ELS Complete Queue */
10616         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10617                                       phba->sli4_hba.cq_esize,
10618                                       phba->sli4_hba.cq_ecount, cpu);
10619         if (!qdesc) {
10620                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10621                                 "0501 Failed allocate slow-path ELS CQ\n");
10622                 goto out_error;
10623         }
10624         qdesc->qe_valid = 1;
10625         qdesc->chann = cpu;
10626         phba->sli4_hba.els_cq = qdesc;
10627
10628
10629         /*
10630          * Create Slow Path Work Queues (WQs)
10631          */
10632
10633         /* Create Mailbox Command Queue */
10634
10635         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10636                                       phba->sli4_hba.mq_esize,
10637                                       phba->sli4_hba.mq_ecount, cpu);
10638         if (!qdesc) {
10639                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10640                                 "0505 Failed allocate slow-path MQ\n");
10641                 goto out_error;
10642         }
10643         qdesc->chann = cpu;
10644         phba->sli4_hba.mbx_wq = qdesc;
10645
10646         /*
10647          * Create ELS Work Queues
10648          */
10649
10650         /* Create slow-path ELS Work Queue */
10651         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10652                                       phba->sli4_hba.wq_esize,
10653                                       phba->sli4_hba.wq_ecount, cpu);
10654         if (!qdesc) {
10655                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10656                                 "0504 Failed allocate slow-path ELS WQ\n");
10657                 goto out_error;
10658         }
10659         qdesc->chann = cpu;
10660         phba->sli4_hba.els_wq = qdesc;
10661         list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10662
10663         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10664                 /* Create NVME LS Complete Queue */
10665                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10666                                               phba->sli4_hba.cq_esize,
10667                                               phba->sli4_hba.cq_ecount, cpu);
10668                 if (!qdesc) {
10669                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10670                                         "6079 Failed allocate NVME LS CQ\n");
10671                         goto out_error;
10672                 }
10673                 qdesc->chann = cpu;
10674                 qdesc->qe_valid = 1;
10675                 phba->sli4_hba.nvmels_cq = qdesc;
10676
10677                 /* Create NVME LS Work Queue */
10678                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10679                                               phba->sli4_hba.wq_esize,
10680                                               phba->sli4_hba.wq_ecount, cpu);
10681                 if (!qdesc) {
10682                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10683                                         "6080 Failed allocate NVME LS WQ\n");
10684                         goto out_error;
10685                 }
10686                 qdesc->chann = cpu;
10687                 phba->sli4_hba.nvmels_wq = qdesc;
10688                 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10689         }
10690
10691         /*
10692          * Create Receive Queue (RQ)
10693          */
10694
10695         /* Create Receive Queue for header */
10696         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10697                                       phba->sli4_hba.rq_esize,
10698                                       phba->sli4_hba.rq_ecount, cpu);
10699         if (!qdesc) {
10700                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10701                                 "0506 Failed allocate receive HRQ\n");
10702                 goto out_error;
10703         }
10704         phba->sli4_hba.hdr_rq = qdesc;
10705
10706         /* Create Receive Queue for data */
10707         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10708                                       phba->sli4_hba.rq_esize,
10709                                       phba->sli4_hba.rq_ecount, cpu);
10710         if (!qdesc) {
10711                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10712                                 "0507 Failed allocate receive DRQ\n");
10713                 goto out_error;
10714         }
10715         phba->sli4_hba.dat_rq = qdesc;
10716
10717         if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10718             phba->nvmet_support) {
10719                 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10720                         cpu = lpfc_find_cpu_handle(phba, idx,
10721                                                    LPFC_FIND_BY_HDWQ);
10722                         /* Create NVMET Receive Queue for header */
10723                         qdesc = lpfc_sli4_queue_alloc(phba,
10724                                                       LPFC_DEFAULT_PAGE_SIZE,
10725                                                       phba->sli4_hba.rq_esize,
10726                                                       LPFC_NVMET_RQE_DEF_COUNT,
10727                                                       cpu);
10728                         if (!qdesc) {
10729                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10730                                                 "3146 Failed allocate "
10731                                                 "receive HRQ\n");
10732                                 goto out_error;
10733                         }
10734                         qdesc->hdwq = idx;
10735                         phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10736
10737                         /* Only needed for header of RQ pair */
10738                         qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10739                                                    GFP_KERNEL,
10740                                                    cpu_to_node(cpu));
10741                         if (!qdesc->rqbp) {
10742                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10743                                                 "6131 Failed allocate "
10744                                                 "Header RQBP\n");
10745                                 goto out_error;
10746                         }
10747
10748                         /* Put list in known state in case driver load fails. */
10749                         INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10750
10751                         /* Create NVMET Receive Queue for data */
10752                         qdesc = lpfc_sli4_queue_alloc(phba,
10753                                                       LPFC_DEFAULT_PAGE_SIZE,
10754                                                       phba->sli4_hba.rq_esize,
10755                                                       LPFC_NVMET_RQE_DEF_COUNT,
10756                                                       cpu);
10757                         if (!qdesc) {
10758                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10759                                                 "3156 Failed allocate "
10760                                                 "receive DRQ\n");
10761                                 goto out_error;
10762                         }
10763                         qdesc->hdwq = idx;
10764                         phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10765                 }
10766         }
10767
10768         /* Clear NVME stats */
10769         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10770                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10771                         memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10772                                sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10773                 }
10774         }
10775
10776         /* Clear SCSI stats */
10777         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10778                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10779                         memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10780                                sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10781                 }
10782         }
10783
10784         return 0;
10785
10786 out_error:
10787         lpfc_sli4_queue_destroy(phba);
10788         return -ENOMEM;
10789 }
10790
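/* Free a single queue, if allocated, and NULL the caller's pointer. */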
10791 static inline void
10792 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
10793 {
10794         if (*qp) {
10795                 lpfc_sli4_queue_free(*qp);
10796                 *qp = NULL;
10797         }
10798 }
10799
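/* Free every queue in an array of queue pointers, then the array itself. */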
10800 static inline void
10801 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10802 {
10803         int idx;
10804
10805         if (*qs == NULL)
10806                 return;
10807
10808         for (idx = 0; idx < max; idx++)
10809                 __lpfc_sli4_release_queue(&(*qs)[idx]);
10810
10811         kfree(*qs);
10812         *qs = NULL;
10813 }
10814
10815 static inline void
10816 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10817 {
10818         struct lpfc_sli4_hdw_queue *hdwq;
10819         struct lpfc_queue *eq;
10820         uint32_t idx;
10821
10822         hdwq = phba->sli4_hba.hdwq;
10823
10824         /* Loop thru all Hardware Queues */
10825         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10826                 /* Free the CQ/WQ corresponding to the Hardware Queue */
10827                 lpfc_sli4_queue_free(hdwq[idx].io_cq);
10828                 lpfc_sli4_queue_free(hdwq[idx].io_wq);
10829                 hdwq[idx].hba_eq = NULL;
10830                 hdwq[idx].io_cq = NULL;
10831                 hdwq[idx].io_wq = NULL;
10832                 if (phba->cfg_xpsgl && !phba->nvmet_support)
10833                         lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10834                 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10835         }
10836         /* Loop thru all IRQ vectors */
10837         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10838                 /* Free the EQ corresponding to the IRQ vector */
10839                 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10840                 lpfc_sli4_queue_free(eq);
10841                 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10842         }
10843 }
10844
10845 /**
10846  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10847  * @phba: pointer to lpfc hba data structure.
10848  *
10849  * This routine is invoked to release all the SLI4 queues used by the
10850  * FCoE HBA operation.
10856  **/
10857 void
10858 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10859 {
10860         /*
10861          * Set FREE_INIT before beginning to free the queues.
10862          * Wait until all users of the queues have acknowledged the
10863          * release by clearing FREE_WAIT.
10864          */
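        /*
         * Assumed consumer pattern (sketch): a queue user takes hbalock,
         * bails out if FREE_INIT is already set, otherwise sets
         * LPFC_QUEUE_FREE_WAIT for the duration of its queue walk, so the
         * loop below cannot start freeing while a walk is in flight.
         */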
10865         spin_lock_irq(&phba->hbalock);
10866         phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10867         while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10868                 spin_unlock_irq(&phba->hbalock);
10869                 msleep(20);
10870                 spin_lock_irq(&phba->hbalock);
10871         }
10872         spin_unlock_irq(&phba->hbalock);
10873
10874         lpfc_sli4_cleanup_poll_list(phba);
10875
10876         /* Release HBA eqs */
10877         if (phba->sli4_hba.hdwq)
10878                 lpfc_sli4_release_hdwq(phba);
10879
10880         if (phba->nvmet_support) {
10881                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10882                                          phba->cfg_nvmet_mrq);
10883
10884                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10885                                          phba->cfg_nvmet_mrq);
10886                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10887                                          phba->cfg_nvmet_mrq);
10888         }
10889
10890         /* Release mailbox command work queue */
10891         __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10892
10893         /* Release ELS work queue */
10894         __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10895
10896         /* Release NVME LS work queue */
10897         __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10898
10899         /* Release unsolicited receive queue */
10900         __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10901         __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10902
10903         /* Release ELS complete queue */
10904         __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10905
10906         /* Release NVME LS complete queue */
10907         __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10908
10909         /* Release mailbox command complete queue */
10910         __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10911
10912         /* Everything on this list has been freed */
10913         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10914
10915         /* Done with freeing the queues */
10916         spin_lock_irq(&phba->hbalock);
10917         phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10918         spin_unlock_irq(&phba->hbalock);
10919 }
10920
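/*
 * Drain an unsolicited RQ: pop every buffer off the rqb_buffer_list and
 * hand it back through the queue's rqb_free_buffer callback, keeping
 * buffer_count in step.
 */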
10921 int
10922 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10923 {
10924         struct lpfc_rqb *rqbp;
10925         struct lpfc_dmabuf *h_buf;
10926         struct rqb_dmabuf *rqb_buffer;
10927
10928         rqbp = rq->rqbp;
10929         while (!list_empty(&rqbp->rqb_buffer_list)) {
10930                 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10931                                  struct lpfc_dmabuf, list);
10932
10933                 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10934                 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10935                 rqbp->buffer_count--;
10936         }
10937         return 1;
10938 }
10939
10940 static int
10941 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10942         struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10943         int qidx, uint32_t qtype)
10944 {
10945         struct lpfc_sli_ring *pring;
10946         int rc;
10947
10948         if (!eq || !cq || !wq) {
10949                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10950                         "6085 Fast-path %s (%d) not allocated\n",
10951                         ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10952                 return -ENOMEM;
10953         }
10954
10955         /* create the CQ first */
10956         rc = lpfc_cq_create(phba, cq, eq,
10957                         (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10958         if (rc) {
10959                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10960                                 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
10961                                 qidx, (uint32_t)rc);
10962                 return rc;
10963         }
10964
10965         if (qtype != LPFC_MBOX) {
10966                 /* Setup cq_map for fast lookup */
10967                 if (cq_map)
10968                         *cq_map = cq->queue_id;
10969
10970                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10971                         "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10972                         qidx, cq->queue_id, qidx, eq->queue_id);
10973
10974                 /* create the WQ */
10975                 rc = lpfc_wq_create(phba, wq, cq, qtype);
10976                 if (rc) {
10977                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10978                                 "4618 Failed setup of fastpath WQ (%d), rc = 0x%x\n",
10979                                 qidx, (uint32_t)rc);
10980                         /* no need to tear down cq - caller will do so */
10981                         return rc;
10982                 }
10983
10984                 /* Bind this CQ/WQ to the NVME ring */
10985                 pring = wq->pring;
10986                 pring->sli.sli4.wqp = (void *)wq;
10987                 cq->pring = pring;
10988
10989                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10990                         "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
10991                         qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
10992         } else {
10993                 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
10994                 if (rc) {
10995                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10996                                         "0539 Failed setup of slow-path MQ: "
10997                                         "rc = 0x%x\n", rc);
10998                         /* no need to tear down cq - caller will do so */
10999                         return rc;
11000                 }
11001
11002                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11003                         "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
11004                         phba->sli4_hba.mbx_wq->queue_id,
11005                         phba->sli4_hba.mbx_cq->queue_id);
11006         }
11007
11008         return 0;
11009 }
11010
11011 /**
11012  * lpfc_setup_cq_lookup - Setup the CQ lookup table
11013  * @phba: pointer to lpfc hba data structure.
11014  *
11015  * This routine will populate the cq_lookup table with all
11016  * available CQ queue_ids.
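 *
 * (Assumed consumer: the table lets the fast-path EQ handler translate
 * the CQID carried in an EQE straight to its CQ without walking the
 * EQ's child list on every interrupt.)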
11017  **/
11018 static void
11019 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
11020 {
11021         struct lpfc_queue *eq, *childq;
11022         int qidx;
11023
11024         memset(phba->sli4_hba.cq_lookup, 0,
11025                (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
11026         /* Loop thru all IRQ vectors */
11027         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11028                 /* Get the EQ corresponding to the IRQ vector */
11029                 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11030                 if (!eq)
11031                         continue;
11032                 /* Loop through all CQs associated with that EQ */
11033                 list_for_each_entry(childq, &eq->child_list, list) {
11034                         if (childq->queue_id > phba->sli4_hba.cq_max)
11035                                 continue;
11036                         if (childq->subtype == LPFC_IO)
11037                                 phba->sli4_hba.cq_lookup[childq->queue_id] =
11038                                         childq;
11039                 }
11040         }
11041 }
11042
11043 /**
11044  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
11045  * @phba: pointer to lpfc hba data structure.
11046  *
11047  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
11048  * operation.
11049  *
11050  * Return codes
11051  *      0 - successful
11052  *      -ENOMEM - No available memory
11053  *      -EIO - The mailbox failed to complete successfully.
11054  **/
11055 int
11056 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
11057 {
11058         uint32_t shdr_status, shdr_add_status;
11059         union lpfc_sli4_cfg_shdr *shdr;
11060         struct lpfc_vector_map_info *cpup;
11061         struct lpfc_sli4_hdw_queue *qp;
11062         LPFC_MBOXQ_t *mboxq;
11063         int qidx, cpu;
11064         uint32_t length, usdelay;
11065         int rc = -ENOMEM;
11066
11067         /* Check for dual-ULP support */
11068         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11069         if (!mboxq) {
11070                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11071                                 "3249 Unable to allocate memory for "
11072                                 "QUERY_FW_CFG mailbox command\n");
11073                 return -ENOMEM;
11074         }
11075         length = (sizeof(struct lpfc_mbx_query_fw_config) -
11076                   sizeof(struct lpfc_sli4_cfg_mhdr));
11077         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11078                          LPFC_MBOX_OPCODE_QUERY_FW_CFG,
11079                          length, LPFC_SLI4_MBX_EMBED);
11080
11081         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11082
11083         shdr = (union lpfc_sli4_cfg_shdr *)
11084                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11085         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11086         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11087         if (shdr_status || shdr_add_status || rc) {
11088                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11089                                 "3250 QUERY_FW_CFG mailbox failed with status "
11090                                 "x%x add_status x%x, mbx status x%x\n",
11091                                 shdr_status, shdr_add_status, rc);
11092                 mempool_free(mboxq, phba->mbox_mem_pool);
11093                 rc = -ENXIO;
11094                 goto out_error;
11095         }
11096
11097         phba->sli4_hba.fw_func_mode =
11098                         mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
11099         phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
11100         phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
11101         phba->sli4_hba.physical_port =
11102                         mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
11103         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11104                         "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
11105                         "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
11106                         phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
11107
11108         mempool_free(mboxq, phba->mbox_mem_pool);
11109
11110         /*
11111          * Set up HBA Event Queues (EQs)
11112          */
11113         qp = phba->sli4_hba.hdwq;
11114
11115         /* Set up HBA event queue */
11116         if (!qp) {
11117                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11118                                 "3147 Fast-path EQs not allocated\n");
11119                 rc = -ENOMEM;
11120                 goto out_error;
11121         }
11122
11123         /* Loop thru all IRQ vectors */
11124         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11125                 /* Create HBA Event Queues (EQs) in order */
11126                 for_each_present_cpu(cpu) {
11127                         cpup = &phba->sli4_hba.cpu_map[cpu];
11128
11129                         /* Look for the CPU that's using that vector with
11130                          * LPFC_CPU_FIRST_IRQ set.
11131                          */
11132                         if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11133                                 continue;
11134                         if (qidx != cpup->eq)
11135                                 continue;
11136
11137                         /* Create an EQ for that vector */
11138                         rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11139                                             phba->cfg_fcp_imax);
11140                         if (rc) {
11141                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11142                                                 "0523 Failed setup of fast-path"
11143                                                 " EQ (%d), rc = 0x%x\n",
11144                                                 cpup->eq, (uint32_t)rc);
11145                                 goto out_destroy;
11146                         }
11147
11148                         /* Save the EQ for that vector in the hba_eq_hdl */
11149                         phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11150                                 qp[cpup->hdwq].hba_eq;
11151
11152                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11153                                         "2584 HBA EQ setup: queue[%d]-id=%d\n",
11154                                         cpup->eq,
11155                                         qp[cpup->hdwq].hba_eq->queue_id);
11156                 }
11157         }
11158
11159         /* Loop thru all Hardware Queues */
11160         for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11161                 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
11162                 cpup = &phba->sli4_hba.cpu_map[cpu];
11163
11164                 /* Create the CQ/WQ corresponding to the Hardware Queue */
11165                 rc = lpfc_create_wq_cq(phba,
11166                                        phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11167                                        qp[qidx].io_cq,
11168                                        qp[qidx].io_wq,
11169                                        &phba->sli4_hba.hdwq[qidx].io_cq_map,
11170                                        qidx,
11171                                        LPFC_IO);
11172                 if (rc) {
11173                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11174                                         "0535 Failed to setup fastpath "
11175                                         "IO WQ/CQ (%d), rc = 0x%x\n",
11176                                         qidx, (uint32_t)rc);
11177                         goto out_destroy;
11178                 }
11179         }
11180
11181         /*
11182          * Set up Slow Path Complete Queues (CQs)
11183          */
11184
11185         /* Set up slow-path MBOX CQ/MQ */
11186
11187         if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11188                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11189                                 "0528 %s not allocated\n",
11190                                 phba->sli4_hba.mbx_cq ?
11191                                 "Mailbox WQ" : "Mailbox CQ");
11192                 rc = -ENOMEM;
11193                 goto out_destroy;
11194         }
11195
11196         rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11197                                phba->sli4_hba.mbx_cq,
11198                                phba->sli4_hba.mbx_wq,
11199                                NULL, 0, LPFC_MBOX);
11200         if (rc) {
11201                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11202                         "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11203                         (uint32_t)rc);
11204                 goto out_destroy;
11205         }
11206         if (phba->nvmet_support) {
11207                 if (!phba->sli4_hba.nvmet_cqset) {
11208                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11209                                         "3165 Fast-path NVME CQ Set "
11210                                         "array not allocated\n");
11211                         rc = -ENOMEM;
11212                         goto out_destroy;
11213                 }
11214                 if (phba->cfg_nvmet_mrq > 1) {
11215                         rc = lpfc_cq_create_set(phba,
11216                                         phba->sli4_hba.nvmet_cqset,
11217                                         qp,
11218                                         LPFC_WCQ, LPFC_NVMET);
11219                         if (rc) {
11220                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11221                                                 "3164 Failed setup of NVME CQ "
11222                                                 "Set, rc = 0x%x\n",
11223                                                 (uint32_t)rc);
11224                                 goto out_destroy;
11225                         }
11226                 } else {
11227                         /* Set up NVMET Receive Complete Queue */
11228                         rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11229                                             qp[0].hba_eq,
11230                                             LPFC_WCQ, LPFC_NVMET);
11231                         if (rc) {
11232                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11233                                                 "6089 Failed setup NVMET CQ: "
11234                                                 "rc = 0x%x\n", (uint32_t)rc);
11235                                 goto out_destroy;
11236                         }
11237                         phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11238
11239                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11240                                         "6090 NVMET CQ setup: cq-id=%d, "
11241                                         "parent eq-id=%d\n",
11242                                         phba->sli4_hba.nvmet_cqset[0]->queue_id,
11243                                         qp[0].hba_eq->queue_id);
11244                 }
11245         }
11246
11247         /* Set up slow-path ELS WQ/CQ */
11248         if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11249                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11250                                 "0530 ELS %s not allocated\n",
11251                                 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11252                 rc = -ENOMEM;
11253                 goto out_destroy;
11254         }
11255         rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11256                                phba->sli4_hba.els_cq,
11257                                phba->sli4_hba.els_wq,
11258                                NULL, 0, LPFC_ELS);
11259         if (rc) {
11260                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11261                                 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11262                                 (uint32_t)rc);
11263                 goto out_destroy;
11264         }
11265         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11266                         "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11267                         phba->sli4_hba.els_wq->queue_id,
11268                         phba->sli4_hba.els_cq->queue_id);
11269
11270         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11271                 /* Set up NVME LS Complete Queue */
11272                 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11273                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11274                                         "6091 LS %s not allocated\n",
11275                                         phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11276                         rc = -ENOMEM;
11277                         goto out_destroy;
11278                 }
11279                 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11280                                        phba->sli4_hba.nvmels_cq,
11281                                        phba->sli4_hba.nvmels_wq,
11282                                        NULL, 0, LPFC_NVME_LS);
11283                 if (rc) {
11284                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11285                                         "0526 Failed setup of NVME LS WQ/CQ: "
11286                                         "rc = 0x%x\n", (uint32_t)rc);
11287                         goto out_destroy;
11288                 }
11289
11290                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11291                                 "6096 NVME LS WQ setup: wq-id=%d, "
11292                                 "parent cq-id=%d\n",
11293                                 phba->sli4_hba.nvmels_wq->queue_id,
11294                                 phba->sli4_hba.nvmels_cq->queue_id);
11295         }
11296
11297         /*
11298          * Create NVMET Receive Queue (RQ)
11299          */
11300         if (phba->nvmet_support) {
11301                 if ((!phba->sli4_hba.nvmet_cqset) ||
11302                     (!phba->sli4_hba.nvmet_mrq_hdr) ||
11303                     (!phba->sli4_hba.nvmet_mrq_data)) {
11304                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11305                                         "6130 MRQ CQ Queues not "
11306                                         "allocated\n");
11307                         rc = -ENOMEM;
11308                         goto out_destroy;
11309                 }
11310                 if (phba->cfg_nvmet_mrq > 1) {
11311                         rc = lpfc_mrq_create(phba,
11312                                              phba->sli4_hba.nvmet_mrq_hdr,
11313                                              phba->sli4_hba.nvmet_mrq_data,
11314                                              phba->sli4_hba.nvmet_cqset,
11315                                              LPFC_NVMET);
11316                         if (rc) {
11317                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11318                                                 "6098 Failed setup of NVMET "
11319                                                 "MRQ: rc = 0x%x\n",
11320                                                 (uint32_t)rc);
11321                                 goto out_destroy;
11322                         }
11323
11324                 } else {
11325                         rc = lpfc_rq_create(phba,
11326                                             phba->sli4_hba.nvmet_mrq_hdr[0],
11327                                             phba->sli4_hba.nvmet_mrq_data[0],
11328                                             phba->sli4_hba.nvmet_cqset[0],
11329                                             LPFC_NVMET);
11330                         if (rc) {
11331                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11332                                                 "6057 Failed setup of NVMET "
11333                                                 "Receive Queue: rc = 0x%x\n",
11334                                                 (uint32_t)rc);
11335                                 goto out_destroy;
11336                         }
11337
11338                         lpfc_printf_log(
11339                                 phba, KERN_INFO, LOG_INIT,
11340                                 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11341                                 "dat-rq-id=%d parent cq-id=%d\n",
11342                                 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11343                                 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11344                                 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11345
11346                 }
11347         }
11348
11349         if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11350                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11351                                 "0540 Receive Queue not allocated\n");
11352                 rc = -ENOMEM;
11353                 goto out_destroy;
11354         }
11355
11356         rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11357                             phba->sli4_hba.els_cq, LPFC_USOL);
11358         if (rc) {
11359                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11360                                 "0541 Failed setup of Receive Queue: "
11361                                 "rc = 0x%x\n", (uint32_t)rc);
11362                 goto out_destroy;
11363         }
11364
11365         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11366                         "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11367                         "parent cq-id=%d\n",
11368                         phba->sli4_hba.hdr_rq->queue_id,
11369                         phba->sli4_hba.dat_rq->queue_id,
11370                         phba->sli4_hba.els_cq->queue_id);
11371
11372         if (phba->cfg_fcp_imax)
11373                 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11374         else
11375                 usdelay = 0;
11376
11377         for (qidx = 0; qidx < phba->cfg_irq_chann;
11378              qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11379                 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11380                                          usdelay);
11381
11382         if (phba->sli4_hba.cq_max) {
11383                 kfree(phba->sli4_hba.cq_lookup);
11384                 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11385                         sizeof(struct lpfc_queue *), GFP_KERNEL);
11386                 if (!phba->sli4_hba.cq_lookup) {
11387                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11388                                         "0549 Failed setup of CQ Lookup table: "
11389                                         "size 0x%x\n", phba->sli4_hba.cq_max);
11390                         rc = -ENOMEM;
11391                         goto out_destroy;
11392                 }
11393                 lpfc_setup_cq_lookup(phba);
11394         }
11395         return 0;
11396
11397 out_destroy:
11398         lpfc_sli4_queue_unset(phba);
11399 out_error:
11400         return rc;
11401 }
11402
11403 /**
11404  * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11405  * @phba: pointer to lpfc hba data structure.
11406  *
11407  * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
11408  * operation.
11409  *
11410  * Return: none. This routine returns void; failures while destroying
11411  * individual queues are handled and logged by the respective queue
11412  * destroy routines.
11413  **/
11415 void
11416 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11417 {
11418         struct lpfc_sli4_hdw_queue *qp;
11419         struct lpfc_queue *eq;
11420         int qidx;
11421
11422         /* Unset mailbox command work queue */
11423         if (phba->sli4_hba.mbx_wq)
11424                 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11425
11426         /* Unset NVME LS work queue */
11427         if (phba->sli4_hba.nvmels_wq)
11428                 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11429
11430         /* Unset ELS work queue */
11431         if (phba->sli4_hba.els_wq)
11432                 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11433
11434         /* Unset unsolicited receive queue */
11435         if (phba->sli4_hba.hdr_rq)
11436                 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11437                                 phba->sli4_hba.dat_rq);
11438
11439         /* Unset mailbox command complete queue */
11440         if (phba->sli4_hba.mbx_cq)
11441                 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11442
11443         /* Unset ELS complete queue */
11444         if (phba->sli4_hba.els_cq)
11445                 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11446
11447         /* Unset NVME LS complete queue */
11448         if (phba->sli4_hba.nvmels_cq)
11449                 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11450
11451         if (phba->nvmet_support) {
11452                 /* Unset NVMET MRQ queue */
11453                 if (phba->sli4_hba.nvmet_mrq_hdr) {
11454                         for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11455                                 lpfc_rq_destroy(
11456                                         phba,
11457                                         phba->sli4_hba.nvmet_mrq_hdr[qidx],
11458                                         phba->sli4_hba.nvmet_mrq_data[qidx]);
11459                 }
11460
11461                 /* Unset NVMET CQ Set complete queue */
11462                 if (phba->sli4_hba.nvmet_cqset) {
11463                         for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11464                                 lpfc_cq_destroy(
11465                                         phba, phba->sli4_hba.nvmet_cqset[qidx]);
11466                 }
11467         }
11468
11469         /* Unset fast-path SLI4 queues */
11470         if (phba->sli4_hba.hdwq) {
11471                 /* Loop thru all Hardware Queues */
11472                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11473                         /* Destroy the CQ/WQ corresponding to Hardware Queue */
11474                         qp = &phba->sli4_hba.hdwq[qidx];
11475                         lpfc_wq_destroy(phba, qp->io_wq);
11476                         lpfc_cq_destroy(phba, qp->io_cq);
11477                 }
11478                 /* Loop thru all IRQ vectors */
11479                 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11480                         /* Destroy the EQ corresponding to the IRQ vector */
11481                         eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11482                         lpfc_eq_destroy(phba, eq);
11483                 }
11484         }
11485
11486         kfree(phba->sli4_hba.cq_lookup);
11487         phba->sli4_hba.cq_lookup = NULL;
11488         phba->sli4_hba.cq_max = 0;
11489 }
11490
11491 /**
11492  * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11493  * @phba: pointer to lpfc hba data structure.
11494  *
11495  * This routine is invoked to allocate and set up a pool of completion queue
11496  * events. The body of the completion queue event is a completion queue entry
11497  * (CQE). For now, this pool is used for the interrupt service routine to queue
11498  * the following HBA completion queue events for the worker thread to process:
11499  *   - Mailbox asynchronous events
11500  *   - Receive queue completion unsolicited events
11501  * Later, this can be used for all the slow-path events.
11502  *
11503  * Return codes
11504  *      0 - successful
11505  *      -ENOMEM - No available memory
11506  **/
11507 static int
11508 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11509 {
11510         struct lpfc_cq_event *cq_event;
11511         int i;
11512
11513         for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11514                 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11515                 if (!cq_event)
11516                         goto out_pool_create_fail;
11517                 list_add_tail(&cq_event->list,
11518                               &phba->sli4_hba.sp_cqe_event_pool);
11519         }
11520         return 0;
11521
11522 out_pool_create_fail:
11523         lpfc_sli4_cq_event_pool_destroy(phba);
11524         return -ENOMEM;
11525 }
11526
11527 /**
11528  * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11529  * @phba: pointer to lpfc hba data structure.
11530  *
11531  * This routine is invoked to free the pool of completion queue events at
11532  * driver unload time. Note that it is the responsibility of the driver
11533  * cleanup routine to return all outstanding completion-queue events
11534  * allocated from this pool back to the pool before invoking this routine
11535  * to destroy the pool.
11536  **/
11537 static void
11538 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11539 {
11540         struct lpfc_cq_event *cq_event, *next_cq_event;
11541
11542         list_for_each_entry_safe(cq_event, next_cq_event,
11543                                  &phba->sli4_hba.sp_cqe_event_pool, list) {
11544                 list_del(&cq_event->list);
11545                 kfree(cq_event);
11546         }
11547 }
11548
11549 /**
11550  * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11551  * @phba: pointer to lpfc hba data structure.
11552  *
11553  * This routine is the lock-free version of the API invoked to allocate a
11554  * completion-queue event from the free pool.
11555  *
11556  * Return: Pointer to the newly allocated completion-queue event if successful
11557  *         NULL otherwise.
11558  **/
11559 struct lpfc_cq_event *
11560 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11561 {
11562         struct lpfc_cq_event *cq_event = NULL;
11563
11564         list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11565                          struct lpfc_cq_event, list);
11566         return cq_event;
11567 }
11568
11569 /**
11570  * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11571  * @phba: pointer to lpfc hba data structure.
11572  *
11573  * This routine is the locked version of the API invoked to allocate a
11574  * completion-queue event from the free pool.
11575  *
11576  * Return: Pointer to the newly allocated completion-queue event if successful
11577  *         NULL otherwise.
11578  **/
11579 struct lpfc_cq_event *
11580 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11581 {
11582         struct lpfc_cq_event *cq_event;
11583         unsigned long iflags;
11584
11585         spin_lock_irqsave(&phba->hbalock, iflags);
11586         cq_event = __lpfc_sli4_cq_event_alloc(phba);
11587         spin_unlock_irqrestore(&phba->hbalock, iflags);
11588         return cq_event;
11589 }
11590
11591 /**
11592  * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11593  * @phba: pointer to lpfc hba data structure.
11594  * @cq_event: pointer to the completion queue event to be freed.
11595  *
11596  * This routine is the lock-free version of the API invoked to release a
11597  * completion-queue event back into the free pool.
11598  **/
11599 void
11600 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11601                              struct lpfc_cq_event *cq_event)
11602 {
11603         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11604 }
11605
11606 /**
11607  * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11608  * @phba: pointer to lpfc hba data structure.
11609  * @cq_event: pointer to the completion queue event to be freed.
11610  *
11611  * This routine is the locked version of the API invoked to release a
11612  * completion-queue event back into the free pool.
11613  **/
11614 void
11615 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11616                            struct lpfc_cq_event *cq_event)
11617 {
11618         unsigned long iflags;
11619         spin_lock_irqsave(&phba->hbalock, iflags);
11620         __lpfc_sli4_cq_event_release(phba, cq_event);
11621         spin_unlock_irqrestore(&phba->hbalock, iflags);
11622 }
11623
11624 /**
11625  * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11626  * @phba: pointer to lpfc hba data structure.
11627  *
11628  * This routine is invoked to release all the pending completion-queue
11629  * events back into the free pool for a device reset.
11630  **/
11631 static void
11632 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11633 {
11634         LIST_HEAD(cq_event_list);
11635         struct lpfc_cq_event *cq_event;
11636         unsigned long iflags;
11637
11638         /* Retrieve all the pending WCQEs from pending WCQE lists */
11639
11640         /* Pending ELS XRI abort events */
11641         spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11642         list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11643                          &cq_event_list);
11644         spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11645
11646         /* Pending async events */
11647         spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11648         list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11649                          &cq_event_list);
11650         spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11651
11652         while (!list_empty(&cq_event_list)) {
11653                 list_remove_head(&cq_event_list, cq_event,
11654                                  struct lpfc_cq_event, list);
11655                 lpfc_sli4_cq_event_release(phba, cq_event);
11656         }
11657 }
11658
11659 /**
11660  * lpfc_pci_function_reset - Reset pci function.
11661  * @phba: pointer to lpfc hba data structure.
11662  *
11663  * This routine is invoked to request a PCI function reset. It destroys
11664  * all resources assigned to the PCI function which originates this request.
11665  *
11666  * Return codes
11667  *      0 - successful
11668  *      -ENOMEM - No available memory
11669  *      -EIO - The mailbox failed to complete successfully.
11670  **/
11671 int
11672 lpfc_pci_function_reset(struct lpfc_hba *phba)
11673 {
11674         LPFC_MBOXQ_t *mboxq;
11675         uint32_t rc = 0, if_type;
11676         uint32_t shdr_status, shdr_add_status;
11677         uint32_t rdy_chk;
11678         uint32_t port_reset = 0;
11679         union lpfc_sli4_cfg_shdr *shdr;
11680         struct lpfc_register reg_data;
11681         uint16_t devid;
11682
11683         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11684         switch (if_type) {
11685         case LPFC_SLI_INTF_IF_TYPE_0:
11686                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11687                                                        GFP_KERNEL);
11688                 if (!mboxq) {
11689                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11690                                         "0494 Unable to allocate memory for "
11691                                         "issuing SLI_FUNCTION_RESET mailbox "
11692                                         "command\n");
11693                         return -ENOMEM;
11694                 }
11695
11696                 /* Setup PCI function reset mailbox-ioctl command */
11697                 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11698                                  LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11699                                  LPFC_SLI4_MBX_EMBED);
11700                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11701                 shdr = (union lpfc_sli4_cfg_shdr *)
11702                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11703                 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11704                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11705                                          &shdr->response);
11706                 mempool_free(mboxq, phba->mbox_mem_pool);
11707                 if (shdr_status || shdr_add_status || rc) {
11708                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11709                                         "0495 SLI_FUNCTION_RESET mailbox "
11710                                         "failed with status x%x add_status x%x,"
11711                                         " mbx status x%x\n",
11712                                         shdr_status, shdr_add_status, rc);
11713                         rc = -ENXIO;
11714                 }
11715                 break;
11716         case LPFC_SLI_INTF_IF_TYPE_2:
11717         case LPFC_SLI_INTF_IF_TYPE_6:
11718 wait:
11719                 /*
11720                  * Poll the Port Status Register and wait for RDY for
11721                  * up to 30 seconds. If the port doesn't respond, treat
11722                  * it as an error.
11723                  */
11724                 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11725                         if (lpfc_readl(phba->sli4_hba.u.if_type2.
11726                                 STATUSregaddr, &reg_data.word0)) {
11727                                 rc = -ENODEV;
11728                                 goto out;
11729                         }
11730                         if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11731                                 break;
11732                         msleep(20);
11733                 }
11734
11735                 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11736                         phba->work_status[0] = readl(
11737                                 phba->sli4_hba.u.if_type2.ERR1regaddr);
11738                         phba->work_status[1] = readl(
11739                                 phba->sli4_hba.u.if_type2.ERR2regaddr);
11740                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11741                                         "2890 Port not ready, port status reg "
11742                                         "0x%x error 1=0x%x, error 2=0x%x\n",
11743                                         reg_data.word0,
11744                                         phba->work_status[0],
11745                                         phba->work_status[1]);
11746                         rc = -ENODEV;
11747                         goto out;
11748                 }
11749
11750                 if (bf_get(lpfc_sliport_status_pldv, &reg_data))
11751                         lpfc_pldv_detect = true;
11752
11753                 if (!port_reset) {
11754                         /*
11755                          * Reset the port now
11756                          */
11757                         reg_data.word0 = 0;
11758                         bf_set(lpfc_sliport_ctrl_end, &reg_data,
11759                                LPFC_SLIPORT_LITTLE_ENDIAN);
11760                         bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11761                                LPFC_SLIPORT_INIT_PORT);
11762                         writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11763                                CTRLregaddr);
11764                         /* flush */
11765                         pci_read_config_word(phba->pcidev,
11766                                              PCI_DEVICE_ID, &devid);
11767
11768                         port_reset = 1;
11769                         msleep(20);
11770                         goto wait;
11771                 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11772                         rc = -ENODEV;
11773                         goto out;
11774                 }
11775                 break;
11776
11777         case LPFC_SLI_INTF_IF_TYPE_1:
11778         default:
11779                 break;
11780         }
11781
11782 out:
11783         /* Catch the not-ready port failure after a port reset. */
11784         if (rc) {
11785                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11786                                 "3317 HBA not functional: IP Reset Failed "
11787                                 "try: echo fw_reset > board_mode\n");
11788                 rc = -ENODEV;
11789         }
11790
11791         return rc;
11792 }
11793
11794 /**
11795  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11796  * @phba: pointer to lpfc hba data structure.
11797  *
11798  * This routine is invoked to set up the PCI device memory space for device
11799  * with SLI-4 interface spec.
11800  *
11801  * Return codes
11802  *      0 - successful
11803  *      other values - error
11804  **/
11805 static int
11806 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11807 {
11808         struct pci_dev *pdev = phba->pcidev;
11809         unsigned long bar0map_len, bar1map_len, bar2map_len;
11810         int error;
11811         uint32_t if_type;
11812
11813         if (!pdev)
11814                 return -ENODEV;
11815
11816         /* Set the device DMA mask size */
11817         error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11818         if (error)
11819                 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11820         if (error)
11821                 return error;
11822
11823         /*
11824          * The BARs and register set definitions and offset locations are
11825          * dependent on the if_type.
11826          */
11827         if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11828                                   &phba->sli4_hba.sli_intf.word0)) {
11829                 return -ENODEV;
11830         }
11831
11832         /* There is no SLI3 failback for SLI4 devices. */
11833         if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11834             LPFC_SLI_INTF_VALID) {
11835                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11836                                 "2894 SLI_INTF reg contents invalid "
11837                                 "sli_intf reg 0x%x\n",
11838                                 phba->sli4_hba.sli_intf.word0);
11839                 return -ENODEV;
11840         }
11841
11842         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11843         /*
11844          * Get the bus address of SLI4 device Bar regions and the
11845          * number of bytes required by each mapping. The mapping of the
11846  * particular PCI BAR regions is dependent on the type of
11847          * SLI4 device.
11848          */
11849         if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11850                 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11851                 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11852
11853                 /*
11854                  * Map SLI4 PCI Config Space Register base to a kernel virtual
11855                  * addr
11856                  */
11857                 phba->sli4_hba.conf_regs_memmap_p =
11858                         ioremap(phba->pci_bar0_map, bar0map_len);
11859                 if (!phba->sli4_hba.conf_regs_memmap_p) {
11860                         dev_printk(KERN_ERR, &pdev->dev,
11861                                    "ioremap failed for SLI4 PCI config "
11862                                    "registers.\n");
11863                         return -ENODEV;
11864                 }
11865                 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11866                 /* Set up BAR0 PCI config space register memory map */
11867                 lpfc_sli4_bar0_register_memmap(phba, if_type);
11868         } else {
11869                 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11870                 bar0map_len = pci_resource_len(pdev, 1);
11871                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11872                         dev_printk(KERN_ERR, &pdev->dev,
11873                            "FATAL - No BAR0 mapping for SLI4, if_type %d\n", if_type);
11874                         return -ENODEV;
11875                 }
11876                 phba->sli4_hba.conf_regs_memmap_p =
11877                                 ioremap(phba->pci_bar0_map, bar0map_len);
11878                 if (!phba->sli4_hba.conf_regs_memmap_p) {
11879                         dev_printk(KERN_ERR, &pdev->dev,
11880                                 "ioremap failed for SLI4 PCI config "
11881                                 "registers.\n");
11882                         return -ENODEV;
11883                 }
11884                 lpfc_sli4_bar0_register_memmap(phba, if_type);
11885         }
11886
11887         if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11888                 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11889                         /*
11890                          * Map SLI4 if type 0 HBA Control Register base to a
11891                          * kernel virtual address and setup the registers.
11892                          */
11893                         phba->pci_bar1_map = pci_resource_start(pdev,
11894                                                                 PCI_64BIT_BAR2);
11895                         bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11896                         phba->sli4_hba.ctrl_regs_memmap_p =
11897                                         ioremap(phba->pci_bar1_map,
11898                                                 bar1map_len);
11899                         if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11900                                 dev_err(&pdev->dev,
11901                                            "ioremap failed for SLI4 HBA "
11902                                             "control registers.\n");
11903                                 error = -ENOMEM;
11904                                 goto out_iounmap_conf;
11905                         }
11906                         phba->pci_bar2_memmap_p =
11907                                          phba->sli4_hba.ctrl_regs_memmap_p;
11908                         lpfc_sli4_bar1_register_memmap(phba, if_type);
11909                 } else {
11910                         error = -ENOMEM;
11911                         goto out_iounmap_conf;
11912                 }
11913         }
11914
11915         if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11916             (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11917                 /*
11918                  * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11919                  * virtual address and setup the registers.
11920                  */
11921                 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11922                 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11923                 phba->sli4_hba.drbl_regs_memmap_p =
11924                                 ioremap(phba->pci_bar1_map, bar1map_len);
11925                 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11926                         dev_err(&pdev->dev,
11927                            "ioremap failed for SLI4 HBA doorbell registers.\n");
11928                         error = -ENOMEM;
11929                         goto out_iounmap_conf;
11930                 }
11931                 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11932                 lpfc_sli4_bar1_register_memmap(phba, if_type);
11933         }
11934
11935         if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11936                 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11937                         /*
11938                          * Map SLI4 if type 0 HBA Doorbell Register base to
11939                          * a kernel virtual address and setup the registers.
11940                          */
11941                         phba->pci_bar2_map = pci_resource_start(pdev,
11942                                                                 PCI_64BIT_BAR4);
11943                         bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11944                         phba->sli4_hba.drbl_regs_memmap_p =
11945                                         ioremap(phba->pci_bar2_map,
11946                                                 bar2map_len);
11947                         if (!phba->sli4_hba.drbl_regs_memmap_p) {
11948                                 dev_err(&pdev->dev,
11949                                            "ioremap failed for SLI4 HBA"
11950                                            " doorbell registers.\n");
11951                                 error = -ENOMEM;
11952                                 goto out_iounmap_ctrl;
11953                         }
11954                         phba->pci_bar4_memmap_p =
11955                                         phba->sli4_hba.drbl_regs_memmap_p;
11956                         error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11957                         if (error)
11958                                 goto out_iounmap_all;
11959                 } else {
11960                         error = -ENOMEM;
11961                         goto out_iounmap_ctrl;
11962                 }
11963         }
11964
11965         if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11966             pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11967                 /*
11968                  * Map SLI4 if type 6 HBA DPP Register base to a kernel
11969                  * virtual address and setup the registers.
11970                  */
11971                 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11972                 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11973                 phba->sli4_hba.dpp_regs_memmap_p =
11974                                 ioremap(phba->pci_bar2_map, bar2map_len);
11975                 if (!phba->sli4_hba.dpp_regs_memmap_p) {
11976                         dev_err(&pdev->dev,
11977                            "ioremap failed for SLI4 HBA dpp registers.\n");
11978                         error = -ENOMEM;
11979                         goto out_iounmap_all;
11980                 }
11981                 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11982         }
11983
11984         /* Set up the EQ/CQ register handling functions now */
11985         switch (if_type) {
11986         case LPFC_SLI_INTF_IF_TYPE_0:
11987         case LPFC_SLI_INTF_IF_TYPE_2:
11988                 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
11989                 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
11990                 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
11991                 break;
11992         case LPFC_SLI_INTF_IF_TYPE_6:
11993                 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
11994                 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
11995                 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
11996                 break;
11997         default:
11998                 break;
11999         }
12000
12001         return 0;
12002
12003 out_iounmap_all:
12004         if (phba->sli4_hba.drbl_regs_memmap_p)
12005                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12006 out_iounmap_ctrl:
12007         if (phba->sli4_hba.ctrl_regs_memmap_p)
12008                 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12009 out_iounmap_conf:
12010         iounmap(phba->sli4_hba.conf_regs_memmap_p);
12011
12012         return error;
12013 }
12014
12015 /**
12016  * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
12017  * @phba: pointer to lpfc hba data structure.
12018  *
12019  * This routine is invoked to unset the PCI device memory space for device
12020  * with SLI-4 interface spec.
12021  **/
12022 static void
12023 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
12024 {
12025         uint32_t if_type;
12026         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12027
12028         switch (if_type) {
12029         case LPFC_SLI_INTF_IF_TYPE_0:
12030                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12031                 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12032                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12033                 break;
12034         case LPFC_SLI_INTF_IF_TYPE_2:
12035                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12036                 break;
12037         case LPFC_SLI_INTF_IF_TYPE_6:
12038                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12039                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12040                 if (phba->sli4_hba.dpp_regs_memmap_p)
12041                         iounmap(phba->sli4_hba.dpp_regs_memmap_p);
12042                 break;
12043         case LPFC_SLI_INTF_IF_TYPE_1:
12044                 break;
12045         default:
12046                 dev_printk(KERN_ERR, &phba->pcidev->dev,
12047                            "FATAL - unsupported SLI4 interface type - %d\n",
12048                            if_type);
12049                 break;
12050         }
12051 }
12052
12053 /**
12054  * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
12055  * @phba: pointer to lpfc hba data structure.
12056  *
12057  * This routine is invoked to enable the MSI-X interrupt vectors to device
12058  * with SLI-3 interface specs.
12059  *
12060  * Return codes
12061  *   0 - successful
12062  *   other values - error
12063  **/
12064 static int
12065 lpfc_sli_enable_msix(struct lpfc_hba *phba)
12066 {
12067         int rc;
12068         LPFC_MBOXQ_t *pmb;
12069
12070         /* Set up MSI-X multi-message vectors */
12071         rc = pci_alloc_irq_vectors(phba->pcidev,
12072                         LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
12073         if (rc < 0) {
12074                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12075                                 "0420 PCI enable MSI-X failed (%d)\n", rc);
12076                 goto vec_fail_out;
12077         }
12078
12079         /*
12080          * Assign MSI-X vectors to interrupt handlers
12081          */
12082
12083         /* vector-0 is associated to slow-path handler */
12084         rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12085                          &lpfc_sli_sp_intr_handler, 0,
12086                          LPFC_SP_DRIVER_HANDLER_NAME, phba);
12087         if (rc) {
12088                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12089                                 "0421 MSI-X slow-path request_irq failed "
12090                                 "(%d)\n", rc);
12091                 goto msi_fail_out;
12092         }
12093
12094         /* vector-1 is associated to fast-path handler */
12095         rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12096                          &lpfc_sli_fp_intr_handler, 0,
12097                          LPFC_FP_DRIVER_HANDLER_NAME, phba);
12098
12099         if (rc) {
12100                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12101                                 "0429 MSI-X fast-path request_irq failed "
12102                                 "(%d)\n", rc);
12103                 goto irq_fail_out;
12104         }
12105
12106         /*
12107          * Configure HBA MSI-X attention conditions to messages
12108          */
12109         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12110
12111         if (!pmb) {
12112                 rc = -ENOMEM;
12113                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12114                                 "0474 Unable to allocate memory for issuing "
12115                                 "MBOX_CONFIG_MSI command\n");
12116                 goto mem_fail_out;
12117         }
12118         rc = lpfc_config_msi(phba, pmb);
12119         if (rc)
12120                 goto mbx_fail_out;
12121         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12122         if (rc != MBX_SUCCESS) {
12123                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12124                                 "0351 Config MSI mailbox command failed, "
12125                                 "mbxCmd x%x, mbxStatus x%x\n",
12126                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12127                 goto mbx_fail_out;
12128         }
12129
12130         /* Free memory allocated for mailbox command */
12131         mempool_free(pmb, phba->mbox_mem_pool);
12132         return rc;
12133
12134 mbx_fail_out:
12135         /* Free memory allocated for mailbox command */
12136         mempool_free(pmb, phba->mbox_mem_pool);
12137
12138 mem_fail_out:
12139         /* free the irq already requested */
12140         free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12141
12142 irq_fail_out:
12143         /* free the irq already requested */
12144         free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12145
12146 msi_fail_out:
12147         /* Unconfigure MSI-X capability structure */
12148         pci_free_irq_vectors(phba->pcidev);
12149
12150 vec_fail_out:
12151         return rc;
12152 }
12153
12154 /**
12155  * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
12156  * @phba: pointer to lpfc hba data structure.
12157  *
12158  * This routine is invoked to enable the MSI interrupt mode to device with
12159  * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
12160  * enable the MSI vector. The device driver is responsible for calling
12161  * request_irq() to register the MSI vector with an interrupt handler,
12162  * which is done in this function.
12163  *
12164  * Return codes
12165  *      0 - successful
12166  *      other values - error
12167  */
12168 static int
12169 lpfc_sli_enable_msi(struct lpfc_hba *phba)
12170 {
12171         int rc;
12172
12173         rc = pci_enable_msi(phba->pcidev);
12174         if (!rc)
12175                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12176                                 "0012 PCI enable MSI mode success.\n");
12177         else {
12178                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12179                                 "0471 PCI enable MSI mode failed (%d)\n", rc);
12180                 return rc;
12181         }
12182
12183         rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12184                          0, LPFC_DRIVER_NAME, phba);
12185         if (rc) {
12186                 pci_disable_msi(phba->pcidev);
12187                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12188                                 "0478 MSI request_irq failed (%d)\n", rc);
12189         }
12190         return rc;
12191 }
12192
12193 /**
12194  * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
12195  * @phba: pointer to lpfc hba data structure.
12196  * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12197  *
12198  * This routine is invoked to enable device interrupt and associate driver's
12199  * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
12200  * spec. Depending on the interrupt mode configured for the driver, the
12201  * driver will try to fall back from the configured interrupt mode to an
12202  * interrupt mode which is supported by the platform, kernel, and device,
12203  * in the order:
12204  * MSI-X -> MSI -> IRQ.
12205  *
12206  * Return codes
12207  *   0 - successful
12208  *   other values - error
12209  **/
12210 static uint32_t
12211 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12212 {
12213         uint32_t intr_mode = LPFC_INTR_ERROR;
12214         int retval;
12215
12216         /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
12217         retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12218         if (retval)
12219                 return intr_mode;
12220         phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12221
12222         if (cfg_mode == 2) {
12223                 /* Now, try to enable MSI-X interrupt mode */
12224                 retval = lpfc_sli_enable_msix(phba);
12225                 if (!retval) {
12226                         /* Indicate initialization to MSI-X mode */
12227                         phba->intr_type = MSIX;
12228                         intr_mode = 2;
12229                 }
12230         }
12231
12232         /* Fall back to MSI if MSI-X initialization failed */
12233         if (cfg_mode >= 1 && phba->intr_type == NONE) {
12234                 retval = lpfc_sli_enable_msi(phba);
12235                 if (!retval) {
12236                         /* Indicate initialization to MSI mode */
12237                         phba->intr_type = MSI;
12238                         intr_mode = 1;
12239                 }
12240         }
12241
12242         /* Fall back to INTx if both MSI-X/MSI initialization failed */
12243         if (phba->intr_type == NONE) {
12244                 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12245                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12246                 if (!retval) {
12247                         /* Indicate initialization to INTx mode */
12248                         phba->intr_type = INTx;
12249                         intr_mode = 0;
12250                 }
12251         }
12252         return intr_mode;
12253 }
12254
12255 /**
12256  * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12257  * @phba: pointer to lpfc hba data structure.
12258  *
12259  * This routine is invoked to disable device interrupt and disassociate the
12260  * driver's interrupt handler(s) from interrupt vector(s) to device with
12261  * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12262  * release the interrupt vector(s) for the message signaled interrupt.
12263  **/
12264 static void
12265 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12266 {
12267         int nr_irqs, i;
12268
12269         if (phba->intr_type == MSIX)
12270                 nr_irqs = LPFC_MSIX_VECTORS;
12271         else
12272                 nr_irqs = 1;
12273
12274         for (i = 0; i < nr_irqs; i++)
12275                 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12276         pci_free_irq_vectors(phba->pcidev);
12277
12278         /* Reset interrupt management states */
12279         phba->intr_type = NONE;
12280         phba->sli.slistat.sli_intr = 0;
12281 }
12282
12283 /**
12284  * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12285  * @phba: pointer to lpfc hba data structure.
12286  * @id: EQ vector index or Hardware Queue index
12287  * @match: LPFC_FIND_BY_EQ = match by EQ
12288  *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
12289  * Return the CPU that matches the selection criteria
12290  */
12291 static uint16_t
12292 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12293 {
12294         struct lpfc_vector_map_info *cpup;
12295         int cpu;
12296
12297         /* Loop through all CPUs */
12298         for_each_present_cpu(cpu) {
12299                 cpup = &phba->sli4_hba.cpu_map[cpu];
12300
12301                 /* If we are matching by EQ, there may be multiple CPUs
12302                  * using the same vector, so select the one with
12303                  * LPFC_CPU_FIRST_IRQ set.
12304                  */
12305                 if ((match == LPFC_FIND_BY_EQ) &&
12306                     (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12307                     (cpup->eq == id))
12308                         return cpu;
12309
12310                 /* If matching by HDWQ, select the first CPU that matches */
12311                 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12312                         return cpu;
12313         }
12314         return 0;
12315 }
12316
12317 #ifdef CONFIG_X86
12318 /**
12319  * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12320  * @phba: pointer to lpfc hba data structure.
12321  * @cpu: CPU map index
12322  * @phys_id: CPU package physical id
12323  * @core_id: CPU core id
12324  */
12325 static int
12326 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12327                 uint16_t phys_id, uint16_t core_id)
12328 {
12329         struct lpfc_vector_map_info *cpup;
12330         int idx;
12331
12332         for_each_present_cpu(idx) {
12333                 cpup = &phba->sli4_hba.cpu_map[idx];
12334                 /* Does the cpup match the one we are looking for */
12335                 if ((cpup->phys_id == phys_id) &&
12336                     (cpup->core_id == core_id) &&
12337                     (cpu != idx))
12338                         return 1;
12339         }
12340         return 0;
12341 }
12342 #endif
12343
12344 /**
12345  * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12346  * @phba: pointer to lpfc hba data structure.
12347  * @eqidx: index for eq and irq vector
12348  * @flag: flags to set for vector_map structure
12349  * @cpu: cpu used to index vector_map structure
12350  *
12351  * The routine assigns eq info into vector_map structure
12352  */
12353 static inline void
12354 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12355                         unsigned int cpu)
12356 {
12357         struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12358         struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12359
12360         cpup->eq = eqidx;
12361         cpup->flag |= flag;
12362
12363         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12364                         "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12365                         cpu, eqhdl->irq, cpup->eq, cpup->flag);
12366 }
12367
12368 /**
12369  * lpfc_cpu_map_array_init - Initialize cpu_map structure
12370  * @phba: pointer to lpfc hba data structure.
12371  *
12372  * The routine initializes the cpu_map array structure
12373  */
12374 static void
12375 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12376 {
12377         struct lpfc_vector_map_info *cpup;
12378         struct lpfc_eq_intr_info *eqi;
12379         int cpu;
12380
12381         for_each_possible_cpu(cpu) {
12382                 cpup = &phba->sli4_hba.cpu_map[cpu];
12383                 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12384                 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12385                 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12386                 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12387                 cpup->flag = 0;
12388                 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12389                 INIT_LIST_HEAD(&eqi->list);
12390                 eqi->icnt = 0;
12391         }
12392 }
12393
12394 /**
12395  * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12396  * @phba: pointer to lpfc hba data structure.
12397  *
12398  * The routine initializes the hba_eq_hdl array structure
12399  */
12400 static void
12401 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12402 {
12403         struct lpfc_hba_eq_hdl *eqhdl;
12404         int i;
12405
12406         for (i = 0; i < phba->cfg_irq_chann; i++) {
12407                 eqhdl = lpfc_get_eq_hdl(i);
12408                 eqhdl->irq = LPFC_IRQ_EMPTY;
12409                 eqhdl->phba = phba;
12410         }
12411 }
12412
12413 /**
12414  * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12415  * @phba: pointer to lpfc hba data structure.
12416  * @vectors: number of msix vectors allocated.
12417  *
12418  * The routine will figure out the CPU affinity assignment for every
12419  * MSI-X vector allocated for the HBA.
12420  * In addition, the CPU to IO channel mapping will be calculated
12421  * and the phba->sli4_hba.cpu_map array will reflect this.
12422  */
12423 static void
12424 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12425 {
12426         int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12427         int max_phys_id, min_phys_id;
12428         int max_core_id, min_core_id;
12429         struct lpfc_vector_map_info *cpup;
12430         struct lpfc_vector_map_info *new_cpup;
12431 #ifdef CONFIG_X86
12432         struct cpuinfo_x86 *cpuinfo;
12433 #endif
12434 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12435         struct lpfc_hdwq_stat *c_stat;
12436 #endif
12437
12438         max_phys_id = 0;
12439         min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12440         max_core_id = 0;
12441         min_core_id = LPFC_VECTOR_MAP_EMPTY;
12442
12443         /* Update CPU map with physical id and core id of each CPU */
12444         for_each_present_cpu(cpu) {
12445                 cpup = &phba->sli4_hba.cpu_map[cpu];
12446 #ifdef CONFIG_X86
12447                 cpuinfo = &cpu_data(cpu);
12448                 cpup->phys_id = cpuinfo->phys_proc_id;
12449                 cpup->core_id = cpuinfo->cpu_core_id;
12450                 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12451                         cpup->flag |= LPFC_CPU_MAP_HYPER;
12452 #else
12453                 /* No distinction between CPUs for other platforms */
12454                 cpup->phys_id = 0;
12455                 cpup->core_id = cpu;
12456 #endif
12457
12458                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12459                                 "3328 CPU %d physid %d coreid %d flag x%x\n",
12460                                 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12461
12462                 if (cpup->phys_id > max_phys_id)
12463                         max_phys_id = cpup->phys_id;
12464                 if (cpup->phys_id < min_phys_id)
12465                         min_phys_id = cpup->phys_id;
12466
12467                 if (cpup->core_id > max_core_id)
12468                         max_core_id = cpup->core_id;
12469                 if (cpup->core_id < min_core_id)
12470                         min_core_id = cpup->core_id;
12471         }
12472
12473         /* After looking at each irq vector assigned to this pcidev, it's
12474          * possible to see that not ALL CPUs have been accounted for.
12475          * Next we will set any unassigned (unaffinitized) cpu map
12476          * entries to an IRQ on the same phys_id.
12477          */
12478         first_cpu = cpumask_first(cpu_present_mask);
12479         start_cpu = first_cpu;
12480
12481         for_each_present_cpu(cpu) {
12482                 cpup = &phba->sli4_hba.cpu_map[cpu];
12483
12484                 /* Is this CPU entry unassigned */
12485                 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12486                         /* Mark CPU as IRQ not assigned by the kernel */
12487                         cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12488
12489                         /* If so, find a new_cpup that is on the SAME
12490                          * phys_id as cpup. start_cpu will start where we
12491                          * left off so all unassigned entries don't get assigned
12492                          * the IRQ of the first entry.
12493                          */
12494                         new_cpu = start_cpu;
12495                         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12496                                 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12497                                 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12498                                     (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12499                                     (new_cpup->phys_id == cpup->phys_id))
12500                                         goto found_same;
12501                                 new_cpu = cpumask_next(
12502                                         new_cpu, cpu_present_mask);
12503                                 if (new_cpu >= nr_cpu_ids)
12504                                         new_cpu = first_cpu;
12505                         }
12506                         /* At this point, we leave the CPU as unassigned */
12507                         continue;
12508 found_same:
12509                         /* We found a matching phys_id, so copy the IRQ info */
12510                         cpup->eq = new_cpup->eq;
12511
12512                         /* Bump start_cpu to the next slot to minimize the
12513                          * chance of having multiple unassigned CPU entries
12514                          * selecting the same IRQ.
12515                          */
12516                         start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12517                         if (start_cpu >= nr_cpu_ids)
12518                                 start_cpu = first_cpu;
12519
12520                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12521                                         "3337 Set Affinity: CPU %d "
12522                                         "eq %d from peer cpu %d same "
12523                                         "phys_id (%d)\n",
12524                                         cpu, cpup->eq, new_cpu,
12525                                         cpup->phys_id);
12526                 }
12527         }
12528
12529         /* Set any unassigned cpu map entries to an IRQ on any phys_id */
12530         start_cpu = first_cpu;
12531
12532         for_each_present_cpu(cpu) {
12533                 cpup = &phba->sli4_hba.cpu_map[cpu];
12534
12535                 /* Is this entry unassigned */
12536                 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12537                         /* Mark it as IRQ not assigned by the kernel */
12538                         cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12539
12540                         /* If so, find a new_cpup that's on ANY phys_id
12541                          * as the cpup. start_cpu will start where we
12542                          * left off so all unassigned entries don't get
12543                          * assigned the IRQ of the first entry.
12544                          */
12545                         new_cpu = start_cpu;
12546                         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12547                                 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12548                                 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12549                                     (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12550                                         goto found_any;
12551                                 new_cpu = cpumask_next(
12552                                         new_cpu, cpu_present_mask);
12553                                 if (new_cpu >= nr_cpu_ids)
12554                                         new_cpu = first_cpu;
12555                         }
12556                         /* We should never leave an entry unassigned */
12557                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12558                                         "3339 Set Affinity: CPU %d "
12559                                         "eq %d UNASSIGNED\n",
12560                                         cpu, cpup->eq);
12561                         continue;
12562 found_any:
12563                         /* We found an available entry, copy the IRQ info */
12564                         cpup->eq = new_cpup->eq;
12565
12566                         /* Bump start_cpu to the next slot to minimize the
12567                          * chance of having multiple unassigned CPU entries
12568                          * selecting the same IRQ.
12569                          */
12570                         start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12571                         if (start_cpu >= nr_cpu_ids)
12572                                 start_cpu = first_cpu;
12573
12574                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12575                                         "3338 Set Affinity: CPU %d "
12576                                         "eq %d from peer cpu %d (%d/%d)\n",
12577                                         cpu, cpup->eq, new_cpu,
12578                                         new_cpup->phys_id, new_cpup->core_id);
12579                 }
12580         }
12581
12582         /* Assign hdwq indices that are unique across all cpus in the map
12583          * that are also FIRST_CPUs.
12584          */
12585         idx = 0;
12586         for_each_present_cpu(cpu) {
12587                 cpup = &phba->sli4_hba.cpu_map[cpu];
12588
12589                 /* Only FIRST IRQs get a hdwq index assignment. */
12590                 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12591                         continue;
12592
12593                 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12594                 cpup->hdwq = idx;
12595                 idx++;
12596                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12597                                 "3333 Set Affinity: CPU %d (phys %d core %d): "
12598                                 "hdwq %d eq %d flg x%x\n",
12599                                 cpu, cpup->phys_id, cpup->core_id,
12600                                 cpup->hdwq, cpup->eq, cpup->flag);
12601         }
12602         /* Associate a hdwq with each cpu_map entry
12603          * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12604          * hardware queues than CPUs. In that case we will just round-robin
12605          * the available hardware queues as they get assigned to CPUs.
12606          * The next_idx is the idx from the FIRST_CPU loop above to account
12607          * for irq_chann < hdwq.  The idx is used for round-robin assignments
12608          * and needs to start at 0.
12609          */
12610         next_idx = idx;
12611         start_cpu = 0;
12612         idx = 0;
12613         for_each_present_cpu(cpu) {
12614                 cpup = &phba->sli4_hba.cpu_map[cpu];
12615
12616                 /* FIRST cpus are already mapped. */
12617                 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12618                         continue;
12619
12620                 /* If cfg_irq_chann < cfg_hdw_queue, set the hdwq
12621                  * of the unassigned cpus to the next idx so that all
12622                  * hdw queues are fully utilized.
12623                  */
12624                 if (next_idx < phba->cfg_hdw_queue) {
12625                         cpup->hdwq = next_idx;
12626                         next_idx++;
12627                         continue;
12628                 }
12629
12630                 /* Not a First CPU and all hdw_queues are used.  Reuse a
12631                  * Hardware Queue for another CPU, so be smart about it
12632                  * and pick one that has its IRQ/EQ mapped to the same phys_id
12633                  * (CPU package) and core_id.
12634                  */
12635                 new_cpu = start_cpu;
12636                 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12637                         new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12638                         if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12639                             new_cpup->phys_id == cpup->phys_id &&
12640                             new_cpup->core_id == cpup->core_id) {
12641                                 goto found_hdwq;
12642                         }
12643                         new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12644                         if (new_cpu >= nr_cpu_ids)
12645                                 new_cpu = first_cpu;
12646                 }
12647
12648                 /* If we can't match both phys_id and core_id,
12649                  * settle for just a phys_id match.
12650                  */
12651                 new_cpu = start_cpu;
12652                 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12653                         new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12654                         if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12655                             new_cpup->phys_id == cpup->phys_id)
12656                                 goto found_hdwq;
12657
12658                         new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12659                         if (new_cpu >= nr_cpu_ids)
12660                                 new_cpu = first_cpu;
12661                 }
12662
12663                 /* Otherwise just round robin on cfg_hdw_queue */
12664                 cpup->hdwq = idx % phba->cfg_hdw_queue;
12665                 idx++;
12666                 goto logit;
12667  found_hdwq:
12668                 /* We found an available entry, copy the IRQ info */
12669                 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12670                 if (start_cpu >= nr_cpu_ids)
12671                         start_cpu = first_cpu;
12672                 cpup->hdwq = new_cpup->hdwq;
12673  logit:
12674                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12675                                 "3335 Set Affinity: CPU %d (phys %d core %d): "
12676                                 "hdwq %d eq %d flg x%x\n",
12677                                 cpu, cpup->phys_id, cpup->core_id,
12678                                 cpup->hdwq, cpup->eq, cpup->flag);
12679         }
12680
12681         /*
12682          * Initialize the cpu_map slots for not-present cpus in case
12683          * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12684          */
12685         idx = 0;
12686         for_each_possible_cpu(cpu) {
12687                 cpup = &phba->sli4_hba.cpu_map[cpu];
12688 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12689                 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12690                 c_stat->hdwq_no = cpup->hdwq;
12691 #endif
12692                 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12693                         continue;
12694
12695                 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12696 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12697                 c_stat->hdwq_no = cpup->hdwq;
12698 #endif
12699                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12700                                 "3340 Set Affinity: not present "
12701                                 "CPU %d hdwq %d\n",
12702                                 cpu, cpup->hdwq);
12703         }
12704
12705         /* The cpu_map array will be used later during initialization
12706          * when EQ / CQ / WQs are allocated and configured.
12707          */
12708         return;
12709 }
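
/*
 * Illustrative sketch (hypothetical 4-CPU topology, not taken from any
 * specific system): with 2 IRQ vectors and 2 hardware queues, the passes
 * above could leave cpu_map looking like:
 *
 *   cpu 0: phys 0 core 0  eq 0  hdwq 0  flg LPFC_CPU_FIRST_IRQ
 *   cpu 1: phys 0 core 1  eq 1  hdwq 1  flg LPFC_CPU_FIRST_IRQ
 *   cpu 2: phys 0 core 0  eq 0  hdwq 0  (reuses core 0's EQ and hdwq)
 *   cpu 3: phys 0 core 1  eq 1  hdwq 1  (reuses core 1's EQ and hdwq)
 */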
12710
12711 /**
12712  * lpfc_cpuhp_get_eq - get the eqs to poll when @cpu goes offline
12713  * @phba:   pointer to lpfc hba data structure.
12714  * @cpu:    cpu going offline
12715  * @eqlist: eq list to append to
12716  * Return: 0 on success, -ENOMEM on cpumask allocation failure.
12717  */
12718 static int
12719 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12720                   struct list_head *eqlist)
12721 {
12722         const struct cpumask *maskp;
12723         struct lpfc_queue *eq;
12724         struct cpumask *tmp;
12725         u16 idx;
12726
12727         tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12728         if (!tmp)
12729                 return -ENOMEM;
12730
12731         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12732                 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12733                 if (!maskp)
12734                         continue;
12735                 /*
12736                  * if irq is not affinitized to the cpu going
12737                  * offline then we don't need to poll the eq
12738                  * attached to it.
12739                  */
12740                 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12741                         continue;
12742                 /* Get the cpus that are online and are
12743                  * affinitized to this irq vector.  If the count
12744                  * is more than 1, then cpuhp is not going to
12745                  * shut down this vector.  Since this cpu has not
12746                  * gone offline yet, we need >1.
12747                  */
12748                 cpumask_and(tmp, maskp, cpu_online_mask);
12749                 if (cpumask_weight(tmp) > 1)
12750                         continue;
12751
12752                 /* Now that we have an irq to shut down, get the eq
12753                  * mapped to this irq.  Note: multiple hdwq's in
12754                  * the software can share an eq, but eventually
12755                  * only one eq will be mapped to this vector.
12756                  */
12757                 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12758                 list_add(&eq->_poll_list, eqlist);
12759         }
12760         kfree(tmp);
12761         return 0;
12762 }
12763
12764 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12765 {
12766         if (phba->sli_rev != LPFC_SLI_REV4)
12767                 return;
12768
12769         cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12770                                             &phba->cpuhp);
12771         /*
12772          * unregistering the instance doesn't stop the polling
12773          * timer. Wait for the poll timer to retire.
12774          */
12775         synchronize_rcu();
12776         del_timer_sync(&phba->cpuhp_poll_timer);
12777 }
12778
12779 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12780 {
12781         if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE))
12782                 return;
12783
12784         __lpfc_cpuhp_remove(phba);
12785 }
12786
12787 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12788 {
12789         if (phba->sli_rev != LPFC_SLI_REV4)
12790                 return;
12791
12792         rcu_read_lock();
12793
12794         if (!list_empty(&phba->poll_list))
12795                 mod_timer(&phba->cpuhp_poll_timer,
12796                           jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12797
12798         rcu_read_unlock();
12799
12800         cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12801                                          &phba->cpuhp);
12802 }
12803
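/*
 * __lpfc_cpuhp_checks - common early-exit checks for the cpuhp callbacks:
 * returns true when the callback should bail out immediately with *retval
 * set (-EAGAIN while the port is unloading, 0 for non-SLI4 ports), and
 * false when the hotplug event should be processed.
 */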
12804 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12805 {
12806         if (phba->pport->load_flag & FC_UNLOADING) {
12807                 *retval = -EAGAIN;
12808                 return true;
12809         }
12810
12811         if (phba->sli_rev != LPFC_SLI_REV4) {
12812                 *retval = 0;
12813                 return true;
12814         }
12815
12816         /* proceed with the hotplug */
12817         return false;
12818 }
12819
12820 /**
12821  * lpfc_irq_set_aff - set IRQ affinity
12822  * @eqhdl: EQ handle
12823  * @cpu: cpu to set affinity
12824  *
12825  **/
12826 static inline void
12827 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12828 {
12829         cpumask_clear(&eqhdl->aff_mask);
12830         cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12831         irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12832         irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
12833 }
12834
12835 /**
12836  * lpfc_irq_clear_aff - clear IRQ affinity
12837  * @eqhdl: EQ handle
12838  *
12839  **/
12840 static inline void
12841 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12842 {
12843         cpumask_clear(&eqhdl->aff_mask);
12844         irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12845 }
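
/*
 * Usage note: lpfc_irq_set_aff() pins an EQ's IRQ to a single cpu and sets
 * IRQ_NO_BALANCING so irqbalance leaves the vector alone, while
 * lpfc_irq_clear_aff() drops the flag so the vector may be balanced again.
 * lpfc_irq_rebalance() below relies on this pairing as cpus go
 * offline/online.
 */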
12846
12847 /**
12848  * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12849  * @phba: pointer to HBA context object.
12850  * @cpu: cpu going offline/online
12851  * @offline: true, cpu is going offline. false, cpu is coming online.
12852  *
12853  * If cpu is going offline, we'll make a best effort to find the next
12854  * online cpu on the phba's original_mask and migrate all offlining IRQ
12855  * affinities.
12856  *
12857  * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12858  *
12859  * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12860  *       PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12861  *
12862  **/
12863 static void
12864 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12865 {
12866         struct lpfc_vector_map_info *cpup;
12867         struct cpumask *aff_mask;
12868         unsigned int cpu_select, cpu_next, idx;
12869         const struct cpumask *orig_mask;
12870
12871         if (phba->irq_chann_mode == NORMAL_MODE)
12872                 return;
12873
12874         orig_mask = &phba->sli4_hba.irq_aff_mask;
12875
12876         if (!cpumask_test_cpu(cpu, orig_mask))
12877                 return;
12878
12879         cpup = &phba->sli4_hba.cpu_map[cpu];
12880
12881         if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12882                 return;
12883
12884         if (offline) {
12885                 /* Find next online CPU on original mask */
12886                 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12887                 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12888
12889                 /* Found a valid CPU */
12890                 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12891                         /* Go through each eqhdl and ensure offlining
12892                          * cpu aff_mask is migrated
12893                          */
12894                         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12895                                 aff_mask = lpfc_get_aff_mask(idx);
12896
12897                                 /* Migrate affinity */
12898                                 if (cpumask_test_cpu(cpu, aff_mask))
12899                                         lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12900                                                          cpu_select);
12901                         }
12902                 } else {
12903                         /* Rely on irqbalance if no online CPUs left on NUMA */
12904                         for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12905                                 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12906                 }
12907         } else {
12908                 /* Migrate affinity back to this CPU */
12909                 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12910         }
12911 }
12912
12913 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12914 {
12915         struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12916         struct lpfc_queue *eq, *next;
12917         LIST_HEAD(eqlist);
12918         int retval;
12919
12920         if (!phba) {
12921                 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12922                 return 0;
12923         }
12924
12925         if (__lpfc_cpuhp_checks(phba, &retval))
12926                 return retval;
12927
12928         lpfc_irq_rebalance(phba, cpu, true);
12929
12930         retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12931         if (retval)
12932                 return retval;
12933
12934         /* start polling on these eq's */
12935         list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12936                 list_del_init(&eq->_poll_list);
12937                 lpfc_sli4_start_polling(eq);
12938         }
12939
12940         return 0;
12941 }
12942
12943 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12944 {
12945         struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12946         struct lpfc_queue *eq, *next;
12947         unsigned int n;
12948         int retval;
12949
12950         if (!phba) {
12951                 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12952                 return 0;
12953         }
12954
12955         if (__lpfc_cpuhp_checks(phba, &retval))
12956                 return retval;
12957
12958         lpfc_irq_rebalance(phba, cpu, false);
12959
12960         list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12961                 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12962                 if (n == cpu)
12963                         lpfc_sli4_stop_polling(eq);
12964         }
12965
12966         return 0;
12967 }
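
/*
 * Taken together, the two callbacks above keep EQ servicing alive across
 * hotplug events: lpfc_cpu_offline() switches to software polling any EQ
 * whose vector is losing its last online cpu, and lpfc_cpu_online() stops
 * polling the EQs that map back to a cpu that has come back online.
 */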
12968
12969 /**
12970  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12971  * @phba: pointer to lpfc hba data structure.
12972  *
12973  * This routine is invoked to enable the MSI-X interrupt vectors to device
12974  * with SLI-4 interface spec.  It also allocates MSI-X vectors and maps them
12975  * to cpus on the system.
12976  *
12977  * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
12978  * the number of cpus on the same numa node as this adapter.  The vectors are
12979  * allocated without requesting OS affinity mapping.  A vector will be
12980  * allocated and assigned to each online and offline cpu.  If the cpu is
12981  * online, then affinity will be set to that cpu.  If the cpu is offline, then
12982  * affinity will be set to the nearest peer cpu within the numa node that is
12983  * online.  If there are no online cpus within the numa node, affinity is not
12984  * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
12985  * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
12986  * configured.
12987  *
12988  * If numa mode is not enabled and there is more than 1 vector allocated, then
12989  * the driver relies on the managed irq interface where the OS assigns vector to
12990  * cpu affinity.  The driver will then use that affinity mapping to setup its
12991  * cpu mapping table.
12992  *
12993  * Return codes
12994  * 0 - successful
12995  * other values - error
12996  **/
12997 static int
12998 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
12999 {
13000         int vectors, rc, index;
13001         char *name;
13002         const struct cpumask *aff_mask = NULL;
13003         unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
13004         struct lpfc_vector_map_info *cpup;
13005         struct lpfc_hba_eq_hdl *eqhdl;
13006         const struct cpumask *maskp;
13007         unsigned int flags = PCI_IRQ_MSIX;
13008
13009         /* Set up MSI-X multi-message vectors */
13010         vectors = phba->cfg_irq_chann;
13011
13012         if (phba->irq_chann_mode != NORMAL_MODE)
13013                 aff_mask = &phba->sli4_hba.irq_aff_mask;
13014
13015         if (aff_mask) {
13016                 cpu_cnt = cpumask_weight(aff_mask);
13017                 vectors = min(phba->cfg_irq_chann, cpu_cnt);
13018
13019                 /* cpu: iterates over aff_mask including offline or online
13020                  * cpu_select: iterates over online aff_mask to set affinity
13021                  */
13022                 cpu = cpumask_first(aff_mask);
13023                 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13024         } else {
13025                 flags |= PCI_IRQ_AFFINITY;
13026         }
13027
13028         rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13029         if (rc < 0) {
13030                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13031                                 "0484 PCI enable MSI-X failed (%d)\n", rc);
13032                 goto vec_fail_out;
13033         }
13034         vectors = rc;
13035
13036         /* Assign MSI-X vectors to interrupt handlers */
13037         for (index = 0; index < vectors; index++) {
13038                 eqhdl = lpfc_get_eq_hdl(index);
13039                 name = eqhdl->handler_name;
13040                 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
13041                 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
13042                          LPFC_DRIVER_HANDLER_NAME"%d", index);
13043
13044                 eqhdl->idx = index;
13045                 rc = pci_irq_vector(phba->pcidev, index);
13046                 if (rc < 0) {
13047                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13048                                         "0489 MSI-X fast-path (%d) "
13049                                         "pci_irq_vec failed (%d)\n", index, rc);
13050                         goto cfg_fail_out;
13051                 }
13052                 eqhdl->irq = rc;
13053
13054                 rc = request_threaded_irq(eqhdl->irq,
13055                                           &lpfc_sli4_hba_intr_handler,
13056                                           &lpfc_sli4_hba_intr_handler_th,
13057                                           IRQF_ONESHOT, name, eqhdl);
13058                 if (rc) {
13059                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13060                                         "0486 MSI-X fast-path (%d) "
13061                                         "request_irq failed (%d)\n", index, rc);
13062                         goto cfg_fail_out;
13063                 }
13064
13065                 if (aff_mask) {
13066                         /* If found a neighboring online cpu, set affinity */
13067                         if (cpu_select < nr_cpu_ids)
13068                                 lpfc_irq_set_aff(eqhdl, cpu_select);
13069
13070                         /* Assign EQ to cpu_map */
13071                         lpfc_assign_eq_map_info(phba, index,
13072                                                 LPFC_CPU_FIRST_IRQ,
13073                                                 cpu);
13074
13075                         /* Iterate to next offline or online cpu in aff_mask */
13076                         cpu = cpumask_next(cpu, aff_mask);
13077
13078                         /* Find next online cpu in aff_mask to set affinity */
13079                         cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13080                 } else if (vectors == 1) {
13081                         cpu = cpumask_first(cpu_present_mask);
13082                         lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
13083                                                 cpu);
13084                 } else {
13085                         maskp = pci_irq_get_affinity(phba->pcidev, index);
13086
13087                         /* Loop through all CPUs associated with vector index */
13088                         for_each_cpu_and(cpu, maskp, cpu_present_mask) {
13089                                 cpup = &phba->sli4_hba.cpu_map[cpu];
13090
13091                                 /* If this is the first CPU that's assigned to
13092                                  * this vector, set LPFC_CPU_FIRST_IRQ.
13093                                  *
13094                                  * On certain platforms it's possible that irq
13095                                  * vectors are affinitized to all the cpus.
13096                                  * This can result in each cpu_map.eq being set
13097                                  * to the last vector, overwriting all the
13098                                  * previous cpu_map.eq entries.  Ensure that
13099                                  * each vector receives a place in cpu_map.
13100                                  * A later call to lpfc_cpu_affinity_check will
13101                                  * ensure we are nicely balanced out.
13102                                  */
13103                                 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
13104                                         continue;
13105                                 lpfc_assign_eq_map_info(phba, index,
13106                                                         LPFC_CPU_FIRST_IRQ,
13107                                                         cpu);
13108                                 break;
13109                         }
13110                 }
13111         }
13112
13113         if (vectors != phba->cfg_irq_chann) {
13114                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13115                                 "3238 Reducing IO channels to match number of "
13116                                 "MSI-X vectors, requested %d got %d\n",
13117                                 phba->cfg_irq_chann, vectors);
13118                 if (phba->cfg_irq_chann > vectors)
13119                         phba->cfg_irq_chann = vectors;
13120         }
13121
13122         return rc;
13123
13124 cfg_fail_out:
13125         /* free the irqs already requested */
13126         for (--index; index >= 0; index--) {
13127                 eqhdl = lpfc_get_eq_hdl(index);
13128                 lpfc_irq_clear_aff(eqhdl);
13129                 free_irq(eqhdl->irq, eqhdl);
13130         }
13131
13132         /* Unconfigure MSI-X capability structure */
13133         pci_free_irq_vectors(phba->pcidev);
13134
13135 vec_fail_out:
13136         return rc;
13137 }
13138
13139 /**
13140  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
13141  * @phba: pointer to lpfc hba data structure.
13142  *
13143  * This routine is invoked to enable the MSI interrupt mode to device with
13144  * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
13145  * called to enable the MSI vector. The device driver is responsible for
13146  * calling request_irq() to register the MSI vector with an interrupt
13147  * handler, which is done in this function.
13148  *
13149  * Return codes
13150  *      0 - successful
13151  *      other values - error
13152  **/
13153 static int
13154 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13155 {
13156         int rc, index;
13157         unsigned int cpu;
13158         struct lpfc_hba_eq_hdl *eqhdl;
13159
13160         rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13161                                    PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
13162         if (rc > 0)
13163                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13164                                 "0487 PCI enable MSI mode success.\n");
13165         else {
13166                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13167                                 "0488 PCI enable MSI mode failed (%d)\n", rc);
13168                 return rc ? rc : -1;
13169         }
13170
13171         rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13172                          0, LPFC_DRIVER_NAME, phba);
13173         if (rc) {
13174                 pci_free_irq_vectors(phba->pcidev);
13175                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13176                                 "0490 MSI request_irq failed (%d)\n", rc);
13177                 return rc;
13178         }
13179
13180         eqhdl = lpfc_get_eq_hdl(0);
13181         rc = pci_irq_vector(phba->pcidev, 0);
13182         if (rc < 0) {
13183                 pci_free_irq_vectors(phba->pcidev);
13184                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13185                                 "0496 MSI pci_irq_vec failed (%d)\n", rc);
13186                 return rc;
13187         }
13188         eqhdl->irq = rc;
13189
13190         cpu = cpumask_first(cpu_present_mask);
13191         lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
13192
13193         for (index = 0; index < phba->cfg_irq_chann; index++) {
13194                 eqhdl = lpfc_get_eq_hdl(index);
13195                 eqhdl->idx = index;
13196         }
13197
13198         return 0;
13199 }
13200
13201 /**
13202  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
13203  * @phba: pointer to lpfc hba data structure.
13204  * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
13205  *
13206  * This routine is invoked to enable device interrupts and associate the
13207  * driver's interrupt handler(s) with interrupt vector(s) on a device with
13208  * an SLI-4 interface spec. Depending on the interrupt mode configured for
13209  * the driver, the driver will try to fall back from the configured
13210  * interrupt mode to an interrupt mode supported by the platform, kernel,
13211  * and device, in the order of:
13212  * MSI-X -> MSI -> IRQ.
13213  *
13214  * Return codes
13215  *      Interrupt mode (2, 1, 0) - successful
13216  *      LPFC_INTR_ERROR - error
13217  **/
13218 static uint32_t
13219 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13220 {
13221         uint32_t intr_mode = LPFC_INTR_ERROR;
13222         int retval, idx;
13223
13224         if (cfg_mode == 2) {
13225                 /* Preparation before conf_msi mbox cmd */
13226                 retval = 0;
13227                 if (!retval) {
13228                         /* Now, try to enable MSI-X interrupt mode */
13229                         retval = lpfc_sli4_enable_msix(phba);
13230                         if (!retval) {
13231                                 /* Indicate initialization to MSI-X mode */
13232                                 phba->intr_type = MSIX;
13233                                 intr_mode = 2;
13234                         }
13235                 }
13236         }
13237
13238         /* Fall back to MSI if MSI-X initialization failed */
13239         if (cfg_mode >= 1 && phba->intr_type == NONE) {
13240                 retval = lpfc_sli4_enable_msi(phba);
13241                 if (!retval) {
13242                         /* Indicate initialization to MSI mode */
13243                         phba->intr_type = MSI;
13244                         intr_mode = 1;
13245                 }
13246         }
13247
13248         /* Fall back to INTx if both MSI-X and MSI initialization failed */
13249         if (phba->intr_type == NONE) {
13250                 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13251                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13252                 if (!retval) {
13253                         struct lpfc_hba_eq_hdl *eqhdl;
13254                         unsigned int cpu;
13255
13256                         /* Indicate initialization to INTx mode */
13257                         phba->intr_type = INTx;
13258                         intr_mode = 0;
13259
13260                         eqhdl = lpfc_get_eq_hdl(0);
13261                         retval = pci_irq_vector(phba->pcidev, 0);
13262                         if (retval < 0) {
13263                                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13264                                         "0502 INTR pci_irq_vec failed (%d)\n",
13265                                          retval);
13266                                 return LPFC_INTR_ERROR;
13267                         }
13268                         eqhdl->irq = retval;
13269
13270                         cpu = cpumask_first(cpu_present_mask);
13271                         lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13272                                                 cpu);
13273                         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13274                                 eqhdl = lpfc_get_eq_hdl(idx);
13275                                 eqhdl->idx = idx;
13276                         }
13277                 }
13278         }
13279         return intr_mode;
13280 }
13281
13282 /**
13283  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13284  * @phba: pointer to lpfc hba data structure.
13285  *
13286  * This routine is invoked to disable device interrupt and disassociate
13287  * the driver's interrupt handler(s) from interrupt vector(s) to device
13288  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13289  * will release the interrupt vector(s) for the message signaled interrupt.
13290  **/
13291 static void
13292 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13293 {
13294         /* Disable the currently initialized interrupt mode */
13295         if (phba->intr_type == MSIX) {
13296                 int index;
13297                 struct lpfc_hba_eq_hdl *eqhdl;
13298
13299                 /* Free up MSI-X multi-message vectors */
13300                 for (index = 0; index < phba->cfg_irq_chann; index++) {
13301                         eqhdl = lpfc_get_eq_hdl(index);
13302                         lpfc_irq_clear_aff(eqhdl);
13303                         free_irq(eqhdl->irq, eqhdl);
13304                 }
13305         } else {
13306                 free_irq(phba->pcidev->irq, phba);
13307         }
13308
13309         pci_free_irq_vectors(phba->pcidev);
13310
13311         /* Reset interrupt management states */
13312         phba->intr_type = NONE;
13313         phba->sli.slistat.sli_intr = 0;
13314 }
13315
13316 /**
13317  * lpfc_unset_hba - Unset SLI3 hba device initialization
13318  * @phba: pointer to lpfc hba data structure.
13319  *
13320  * This routine is invoked to unset the HBA device initialization steps
13321  * performed on a device with an SLI-3 interface spec.
13322  **/
13323 static void
13324 lpfc_unset_hba(struct lpfc_hba *phba)
13325 {
13326         struct lpfc_vport *vport = phba->pport;
13327         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
13328
13329         spin_lock_irq(shost->host_lock);
13330         vport->load_flag |= FC_UNLOADING;
13331         spin_unlock_irq(shost->host_lock);
13332
13333         kfree(phba->vpi_bmask);
13334         kfree(phba->vpi_ids);
13335
13336         lpfc_stop_hba_timers(phba);
13337
13338         phba->pport->work_port_events = 0;
13339
13340         lpfc_sli_hba_down(phba);
13341
13342         lpfc_sli_brdrestart(phba);
13343
13344         lpfc_sli_disable_intr(phba);
13345
13346         return;
13347 }
13348
13349 /**
13350  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13351  * @phba: Pointer to HBA context object.
13352  *
13353  * This function is called in the SLI4 code path to wait for completion
13354  * of the device's XRI exchange busy events. It will check the XRI
13355  * exchange busy state of outstanding FCP and ELS I/Os every 10ms for up
13356  * to 10 seconds; after that, it will check every 30 seconds, log an
13357  * error message, and wait forever. Only when all XRI exchange busy
13358  * events complete will the driver unload proceed, invoking the function
13359  * reset mailbox command to the CNA and releasing the rest of the driver
13360  * unload resources.
13361  **/
13362 static void
13363 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13364 {
13365         struct lpfc_sli4_hdw_queue *qp;
13366         int idx, ccnt;
13367         int wait_time = 0;
13368         int io_xri_cmpl = 1;
13369         int nvmet_xri_cmpl = 1;
13370         int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13371
13372         /* Driver just aborted IOs during the hba_unset process.  Pause
13373          * here to give the HBA time to complete the IO and get entries
13374          * into the abts lists.
13375          */
13376         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13377
13378         /* Wait for NVME pending IO to flush back to transport. */
13379         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13380                 lpfc_nvme_wait_for_io_drain(phba);
13381
13382         ccnt = 0;
13383         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13384                 qp = &phba->sli4_hba.hdwq[idx];
13385                 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13386                 if (!io_xri_cmpl) /* if list is NOT empty */
13387                         ccnt++;
13388         }
13389         if (ccnt)
13390                 io_xri_cmpl = 0;
13391
13392         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13393                 nvmet_xri_cmpl =
13394                         list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13395         }
13396
13397         while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13398                 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13399                         if (!nvmet_xri_cmpl)
13400                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13401                                                 "6424 NVMET XRI exchange busy "
13402                                                 "wait time: %d seconds.\n",
13403                                                 wait_time/1000);
13404                         if (!io_xri_cmpl)
13405                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13406                                                 "6100 IO XRI exchange busy "
13407                                                 "wait time: %d seconds.\n",
13408                                                 wait_time/1000);
13409                         if (!els_xri_cmpl)
13410                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13411                                                 "2878 ELS XRI exchange busy "
13412                                                 "wait time: %d seconds.\n",
13413                                                 wait_time/1000);
13414                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13415                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13416                 } else {
13417                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13418                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13419                 }
13420
13421                 ccnt = 0;
13422                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13423                         qp = &phba->sli4_hba.hdwq[idx];
13424                         io_xri_cmpl = list_empty(
13425                             &qp->lpfc_abts_io_buf_list);
13426                         if (!io_xri_cmpl) /* if list is NOT empty */
13427                                 ccnt++;
13428                 }
13429                 if (ccnt)
13430                         io_xri_cmpl = 0;
13431
13432                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13433                         nvmet_xri_cmpl = list_empty(
13434                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13435                 }
13436                 els_xri_cmpl =
13437                         list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13438
13439         }
13440 }
13441
13442 /**
13443  * lpfc_sli4_hba_unset - Unset the fcoe hba
13444  * @phba: Pointer to HBA context object.
13445  *
13446  * This function is called in the SLI4 code path to reset the HBA's FCoE
13447  * function. The caller is not required to hold any lock. This routine
13448  * issues a PCI function reset mailbox command to reset the FCoE function.
13449  * At the end of the function, it calls the lpfc_hba_down_post function to
13450  * free any pending commands.
13451  **/
13452 static void
13453 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13454 {
13455         int wait_cnt = 0;
13456         LPFC_MBOXQ_t *mboxq;
13457         struct pci_dev *pdev = phba->pcidev;
13458
13459         lpfc_stop_hba_timers(phba);
13460         hrtimer_cancel(&phba->cmf_stats_timer);
13461         hrtimer_cancel(&phba->cmf_timer);
13462
13463         if (phba->pport)
13464                 phba->sli4_hba.intr_enable = 0;
13465
13466         /*
13467          * Gracefully wait out any potentially outstanding asynchronous
13468          * mailbox command.
13469          */
13470
13471         /* First, block any pending async mailbox command from being posted */
13472         spin_lock_irq(&phba->hbalock);
13473         phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13474         spin_unlock_irq(&phba->hbalock);
13475         /* Now, try to wait it out if we can */
13476         while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13477                 msleep(10);
13478                 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13479                         break;
13480         }
13481         /* Forcefully release the outstanding mailbox command if timed out */
13482         if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13483                 spin_lock_irq(&phba->hbalock);
13484                 mboxq = phba->sli.mbox_active;
13485                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13486                 __lpfc_mbox_cmpl_put(phba, mboxq);
13487                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13488                 phba->sli.mbox_active = NULL;
13489                 spin_unlock_irq(&phba->hbalock);
13490         }
13491
13492         /* Abort all iocbs associated with the hba */
13493         lpfc_sli_hba_iocb_abort(phba);
13494
13495         if (!pci_channel_offline(phba->pcidev))
13496                 /* Wait for completion of device XRI exchange busy */
13497                 lpfc_sli4_xri_exchange_busy_wait(phba);
13498
13499         /* per-phba callback de-registration for hotplug event */
13500         if (phba->pport)
13501                 lpfc_cpuhp_remove(phba);
13502
13503         /* Disable PCI subsystem interrupt */
13504         lpfc_sli4_disable_intr(phba);
13505
13506         /* Disable SR-IOV if enabled */
13507         if (phba->cfg_sriov_nr_virtfn)
13508                 pci_disable_sriov(pdev);
13509
13510         /* The kthread_stop() signal will trigger work_done one more time */
13511         kthread_stop(phba->worker_thread);
13512
13513         /* Disable FW logging to host memory */
13514         lpfc_ras_stop_fwlog(phba);
13515
13516         /* Reset SLI4 HBA FCoE function */
13517         lpfc_pci_function_reset(phba);
13518
13519         /* Release all allocated queue resources. */
13520         lpfc_sli4_queue_destroy(phba);
13521
13522         /* Free RAS DMA memory */
13523         if (phba->ras_fwlog.ras_enabled)
13524                 lpfc_sli4_ras_dma_free(phba);
13525
13526         /* Stop the SLI4 device port */
13527         if (phba->pport)
13528                 phba->pport->work_port_events = 0;
13529 }
13530
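/*
 * Fold one byte into a running CRC, one bit per iteration: the byte is
 * consumed LSB first; the crc shifts left and, whenever the outgoing MSB
 * differs from the incoming data bit, is XORed with
 * LPFC_CGN_CRC32_MAGIC_NUMBER and has its low bit set.
 */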
13531 static uint32_t
13532 lpfc_cgn_crc32(uint32_t crc, u8 byte)
13533 {
13534         uint32_t msb = 0;
13535         uint32_t bit;
13536
13537         for (bit = 0; bit < 8; bit++) {
13538                 msb = (crc >> 31) & 1;
13539                 crc <<= 1;
13540
13541                 if (msb ^ (byte & 1)) {
13542                         crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13543                         crc |= 1;
13544                 }
13545                 byte >>= 1;
13546         }
13547         return crc;
13548 }
13549
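/* Reverse the bit order of a 32-bit word (bit 0 <-> bit 31, and so on). */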
13550 static uint32_t
13551 lpfc_cgn_reverse_bits(uint32_t wd)
13552 {
13553         uint32_t result = 0;
13554         uint32_t i;
13555
13556         for (i = 0; i < 32; i++) {
13557                 result <<= 1;
13558                 result |= (1 & (wd >> i));
13559         }
13560         return result;
13561 }
13562
13563 /*
13564  * This routine matches the algorithm the HBA firmware
13565  * uses to validate data integrity.
13566  */
13567 uint32_t
13568 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13569 {
13570         uint32_t  i;
13571         uint32_t result;
13572         uint8_t  *data = (uint8_t *)ptr;
13573
13574         for (i = 0; i < byteLen; ++i)
13575                 crc = lpfc_cgn_crc32(crc, data[i]);
13576
13577         result = ~lpfc_cgn_reverse_bits(crc);
13578         return result;
13579 }
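
/*
 * Example usage, as in lpfc_init_congestion_buf() below: compute the CRC
 * over the whole congestion info buffer with the fixed seed, then store it
 * little-endian for the firmware to verify:
 *
 *   crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *   cp->cgn_info_crc = cpu_to_le32(crc);
 */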
13580
13581 void
13582 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13583 {
13584         struct lpfc_cgn_info *cp;
13585         uint16_t size;
13586         uint32_t crc;
13587
13588         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13589                         "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13590
13591         if (!phba->cgn_i)
13592                 return;
13593         cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13594
13595         atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13596         atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13597         atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13598         atomic_set(&phba->cgn_sync_warn_cnt, 0);
13599
13600         atomic_set(&phba->cgn_driver_evt_cnt, 0);
13601         atomic_set(&phba->cgn_latency_evt_cnt, 0);
13602         atomic64_set(&phba->cgn_latency_evt, 0);
13603         phba->cgn_evt_minute = 0;
13604
13605         memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
13606         cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13607         cp->cgn_info_version = LPFC_CGN_INFO_V4;
13608
13609         /* cgn parameters */
13610         cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13611         cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13612         cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13613         cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13614
13615         lpfc_cgn_update_tstamp(phba, &cp->base_time);
13616
13617         /* Fill in default LUN qdepth */
13618         if (phba->pport) {
13619                 size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13620                 cp->cgn_lunq = cpu_to_le16(size);
13621         }
13622
13623         /* last used Index initialized to 0xff already */
13624
13625         cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13626         cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13627         crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13628         cp->cgn_info_crc = cpu_to_le32(crc);
13629
13630         phba->cgn_evt_timestamp = jiffies +
13631                 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13632 }
13633
13634 void
13635 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13636 {
13637         struct lpfc_cgn_info *cp;
13638         uint32_t crc;
13639
13640         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13641                         "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13642
13643         if (!phba->cgn_i)
13644                 return;
13645
13646         cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13647         memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
13648
13649         lpfc_cgn_update_tstamp(phba, &cp->stat_start);
13650         crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13651         cp->cgn_info_crc = cpu_to_le32(crc);
13652 }
13653
13654 /**
13655  * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13656  * @phba: Pointer to hba context object.
13657  * @reg: nonzero to register the buffer, zero to unregister it.
13658  */
13659 static int
13660 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13661 {
13662         struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13663         union  lpfc_sli4_cfg_shdr *shdr;
13664         uint32_t shdr_status, shdr_add_status;
13665         LPFC_MBOXQ_t *mboxq;
13666         int length, rc;
13667
13668         if (!phba->cgn_i)
13669                 return -ENXIO;
13670
13671         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13672         if (!mboxq) {
13673                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13674                                 "2641 REG_CONGESTION_BUF mbox allocation fail: "
13675                                 "HBA state x%x reg %d\n",
13676                                 phba->pport->port_state, reg);
13677                 return -ENOMEM;
13678         }
13679
13680         length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13681                 sizeof(struct lpfc_sli4_cfg_mhdr));
13682         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13683                          LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13684                          LPFC_SLI4_MBX_EMBED);
13685         reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13686         bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13687         if (reg > 0)
13688                 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13689         else
13690                 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13691         reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13692         reg_congestion_buf->addr_lo =
13693                 putPaddrLow(phba->cgn_i->phys);
13694         reg_congestion_buf->addr_hi =
13695                 putPaddrHigh(phba->cgn_i->phys);
13696
13697         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13698         shdr = (union lpfc_sli4_cfg_shdr *)
13699                 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13700         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13701         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13702                                  &shdr->response);
13703         mempool_free(mboxq, phba->mbox_mem_pool);
13704         if (shdr_status || shdr_add_status || rc) {
13705                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13706                                 "2642 REG_CONGESTION_BUF mailbox "
13707                                 "failed with status x%x add_status x%x,"
13708                                 " mbx status x%x reg %d\n",
13709                                 shdr_status, shdr_add_status, rc, reg);
13710                 return -ENXIO;
13711         }
13712         return 0;
13713 }
13714
13715 int
13716 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13717 {
13718         lpfc_cmf_stop(phba);
13719         return __lpfc_reg_congestion_buf(phba, 0);
13720 }
13721
13722 int
13723 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13724 {
13725         return __lpfc_reg_congestion_buf(phba, 1);
13726 }
13727
13728 /**
13729  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13730  * @phba: Pointer to HBA context object.
13731  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13732  *
13733  * This function is called in the SLI4 code path to read the port's
13734  * sli4 capabilities.
13735  *
13736  * This function may be called from any context that can block-wait
13737  * for the completion.  The expectation is that this routine is called
13738  * typically from probe_one or from the online routine.
13739  **/
13740 int
13741 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13742 {
13743         int rc;
13744         struct lpfc_mqe *mqe = &mboxq->u.mqe;
13745         struct lpfc_pc_sli4_params *sli4_params;
13746         uint32_t mbox_tmo;
13747         int length;
13748         bool exp_wqcq_pages = true;
13749         struct lpfc_sli4_parameters *mbx_sli4_parameters;
13750
13751         /*
13752          * By default, the driver assumes the SLI4 port requires RPI
13753          * header postings.  The SLI4_PARAM response will correct this
13754          * assumption.
13755          */
13756         phba->sli4_hba.rpi_hdrs_in_use = 1;
13757
13758         /* Read the port's SLI4 Config Parameters */
13759         length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13760                   sizeof(struct lpfc_sli4_cfg_mhdr));
13761         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13762                          LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13763                          length, LPFC_SLI4_MBX_EMBED);
13764         if (!phba->sli4_hba.intr_enable)
13765                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13766         else {
13767                 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13768                 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13769         }
13770         if (unlikely(rc))
13771                 return rc;
13772         sli4_params = &phba->sli4_hba.pc_sli4_params;
13773         mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13774         sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13775         sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13776         sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13777         sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13778                                              mbx_sli4_parameters);
13779         sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13780                                              mbx_sli4_parameters);
13781         if (bf_get(cfg_phwq, mbx_sli4_parameters))
13782                 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13783         else
13784                 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13785         sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13786         sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13787                                            mbx_sli4_parameters);
13788         sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13789         sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13790         sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13791         sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13792         sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13793         sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13794         sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13795         sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13796         sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13797         sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13798         sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13799                                             mbx_sli4_parameters);
13800         sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13801         sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13802                                            mbx_sli4_parameters);
13803         phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13804         phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13805         sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters);
13806
13807         /* Check for Extended Pre-Registered SGL support */
13808         phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13809
13810         /* Check for firmware nvme support */
13811         rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13812                      bf_get(cfg_xib, mbx_sli4_parameters));
13813
13814         if (rc) {
13815                 /* Save this to indicate the Firmware supports NVME */
13816                 sli4_params->nvme = 1;
13817
13818                 /* Firmware NVME support, check driver FC4 NVME support */
13819                 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13820                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13821                                         "6133 Disabling NVME support: "
13822                                         "FC4 type not supported: x%x\n",
13823                                         phba->cfg_enable_fc4_type);
13824                         goto fcponly;
13825                 }
13826         } else {
13827                 /* No firmware NVME support, check driver FC4 NVME support */
13828                 sli4_params->nvme = 0;
13829                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13830                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13831                                         "6101 Disabling NVME support: Not "
13832                                         "supported by firmware (%d %d) x%x\n",
13833                                         bf_get(cfg_nvme, mbx_sli4_parameters),
13834                                         bf_get(cfg_xib, mbx_sli4_parameters),
13835                                         phba->cfg_enable_fc4_type);
13836 fcponly:
13837                         phba->nvmet_support = 0;
13838                         phba->cfg_nvmet_mrq = 0;
13839                         phba->cfg_nvme_seg_cnt = 0;
13840
13841                         /* If no FC4 type support, move to just SCSI support */
13842                         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13843                                 return -ENODEV;
13844                         phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13845                 }
13846         }
13847
13848         /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13849          * accommodate 512K and 1M IOs in a single nvme buf.
13850          */
13851         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13852                 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13853
13854         /* Enable embedded Payload BDE if support is indicated */
13855         if (bf_get(cfg_pbde, mbx_sli4_parameters))
13856                 phba->cfg_enable_pbde = 1;
13857         else
13858                 phba->cfg_enable_pbde = 0;
13859
13860         /*
13861          * To support the Suppress Response feature, we must satisfy 3 conditions:
13862          * lpfc_suppress_rsp module parameter must be set (default).
13863          * In SLI4-Parameters Descriptor:
13864          * Extended Inline Buffers (XIB) must be supported.
13865          * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
13866          * (double negative).
13867          */
13868         if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13869             !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13870                 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13871         else
13872                 phba->cfg_suppress_rsp = 0;
13873
13874         if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13875                 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13876
13877         /* Make sure that sge_supp_len can be handled by the driver */
13878         if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13879                 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13880
13881         rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
13882         if (unlikely(rc)) {
13883                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13884                                 "6400 Can't set dma maximum segment size\n");
13885                 return rc;
13886         }
13887
13888         /*
13889          * Check whether the adapter supports an embedded copy of the
13890          * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13891          * to use this option, 128-byte WQEs must be used.
13892          */
13893         if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13894                 phba->fcp_embed_io = 1;
13895         else
13896                 phba->fcp_embed_io = 0;
13897
13898         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13899                         "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13900                         bf_get(cfg_xib, mbx_sli4_parameters),
13901                         phba->cfg_enable_pbde,
13902                         phba->fcp_embed_io, sli4_params->nvme,
13903                         phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13904
13905         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13906             LPFC_SLI_INTF_IF_TYPE_2) &&
13907             (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13908                  LPFC_SLI_INTF_FAMILY_LNCR_A0))
13909                 exp_wqcq_pages = false;
13910
13911         if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13912             (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13913             exp_wqcq_pages &&
13914             (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13915                 phba->enab_exp_wqcq_pages = 1;
13916         else
13917                 phba->enab_exp_wqcq_pages = 0;
13918         /*
13919          * Check if the SLI port supports MDS Diagnostics
13920          */
13921         if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13922                 phba->mds_diags_support = 1;
13923         else
13924                 phba->mds_diags_support = 0;
13925
13926         /*
13927          * Check if the SLI port supports NSLER
13928          */
13929         if (bf_get(cfg_nsler, mbx_sli4_parameters))
13930                 phba->nsler = 1;
13931         else
13932                 phba->nsler = 0;
13933
13934         return 0;
13935 }
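/*
 * Sketch (illustrative, not driver code): the FC4-type fallback applied by
 * the routine above, restated in isolation.  Assumes only the
 * LPFC_ENABLE_FCP/LPFC_ENABLE_NVME bits used there; the helper name is
 * hypothetical.
 */
static inline int __maybe_unused
lpfc_example_fc4_fallback(uint32_t cfg_fc4_type, bool fw_nvme)
{
	if (!fw_nvme && (cfg_fc4_type & LPFC_ENABLE_NVME)) {
		/* Firmware lacks NVME: fall back to FCP-only if possible */
		if (!(cfg_fc4_type & LPFC_ENABLE_FCP))
			return -ENODEV;	/* no usable FC4 type remains */
		cfg_fc4_type = LPFC_ENABLE_FCP;
	}
	return cfg_fc4_type;
}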
13936
13937 /**
13938  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13939  * @pdev: pointer to PCI device
13940  * @pid: pointer to PCI device identifier
13941  *
13942  * This routine is to be called to attach a device with SLI-3 interface spec
13943  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13944  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13945  * information of the device and the driver to determine whether the driver
13946  * can support this kind of device. If the match is successful, the driver core
13947  * invokes this routine. If this routine determines it can claim the HBA, it
13948  * does all the initialization that it needs to do to handle the HBA properly.
13949  *
13950  * Return code
13951  *      0 - driver can claim the device
13952  *      negative value - driver can not claim the device
13953  **/
13954 static int
13955 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13956 {
13957         struct lpfc_hba   *phba;
13958         struct lpfc_vport *vport = NULL;
13959         struct Scsi_Host  *shost = NULL;
13960         int error;
13961         uint32_t cfg_mode, intr_mode;
13962
13963         /* Allocate memory for HBA structure */
13964         phba = lpfc_hba_alloc(pdev);
13965         if (!phba)
13966                 return -ENOMEM;
13967
13968         /* Perform generic PCI device enabling operation */
13969         error = lpfc_enable_pci_dev(phba);
13970         if (error)
13971                 goto out_free_phba;
13972
13973         /* Set up SLI API function jump table for PCI-device group-0 HBAs */
13974         error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13975         if (error)
13976                 goto out_disable_pci_dev;
13977
13978         /* Set up SLI-3 specific device PCI memory space */
13979         error = lpfc_sli_pci_mem_setup(phba);
13980         if (error) {
13981                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13982                                 "1402 Failed to set up pci memory space.\n");
13983                 goto out_disable_pci_dev;
13984         }
13985
13986         /* Set up SLI-3 specific device driver resources */
13987         error = lpfc_sli_driver_resource_setup(phba);
13988         if (error) {
13989                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13990                                 "1404 Failed to set up driver resource.\n");
13991                 goto out_unset_pci_mem_s3;
13992         }
13993
13994         /* Initialize and populate the iocb list per host */
13995
13996         error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
13997         if (error) {
13998                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13999                                 "1405 Failed to initialize iocb list.\n");
14000                 goto out_unset_driver_resource_s3;
14001         }
14002
14003         /* Set up common device driver resources */
14004         error = lpfc_setup_driver_resource_phase2(phba);
14005         if (error) {
14006                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14007                                 "1406 Failed to set up driver resource.\n");
14008                 goto out_free_iocb_list;
14009         }
14010
14011         /* Get the default values for Model Name and Description */
14012         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14013
14014         /* Create SCSI host to the physical port */
14015         error = lpfc_create_shost(phba);
14016         if (error) {
14017                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14018                                 "1407 Failed to create scsi host.\n");
14019                 goto out_unset_driver_resource;
14020         }
14021
14022         /* Configure sysfs attributes */
14023         vport = phba->pport;
14024         error = lpfc_alloc_sysfs_attr(vport);
14025         if (error) {
14026                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14027                                 "1476 Failed to allocate sysfs attr\n");
14028                 goto out_destroy_shost;
14029         }
14030
14031         shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14032         /* Now, try to enable interrupts and bring up the device */
14033         cfg_mode = phba->cfg_use_msi;
14034         while (true) {
14035                 /* Put device to a known state before enabling interrupt */
14036                 lpfc_stop_port(phba);
14037                 /* Configure and enable interrupt */
14038                 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
14039                 if (intr_mode == LPFC_INTR_ERROR) {
14040                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14041                                         "0431 Failed to enable interrupt.\n");
14042                         error = -ENODEV;
14043                         goto out_free_sysfs_attr;
14044                 }
14045                 /* SLI-3 HBA setup */
14046                 if (lpfc_sli_hba_setup(phba)) {
14047                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14048                                         "1477 Failed to set up hba\n");
14049                         error = -ENODEV;
14050                         goto out_remove_device;
14051                 }
14052
14053                 /* Wait 50ms for interrupts from previous mailbox commands */
14054                 msleep(50);
14055                 /* Check active interrupts on message signaled interrupts */
14056                 if (intr_mode == 0 ||
14057                     phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
14058                         /* Log the current active interrupt mode */
14059                         phba->intr_mode = intr_mode;
14060                         lpfc_log_intr_mode(phba, intr_mode);
14061                         break;
14062                 } else {
14063                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14064                                         "0447 Configure interrupt mode (%d) "
14065                                         "failed active interrupt test.\n",
14066                                         intr_mode);
14067                         /* Disable the current interrupt mode */
14068                         lpfc_sli_disable_intr(phba);
14069                         /* Try next level of interrupt mode */
14070                         cfg_mode = --intr_mode;
14071                 }
14072         }
14073
14074         /* Perform post initialization setup */
14075         lpfc_post_init_setup(phba);
14076
14077         /* Check if there are static vports to be created. */
14078         lpfc_create_static_vport(phba);
14079
14080         return 0;
14081
14082 out_remove_device:
14083         lpfc_unset_hba(phba);
14084 out_free_sysfs_attr:
14085         lpfc_free_sysfs_attr(vport);
14086 out_destroy_shost:
14087         lpfc_destroy_shost(phba);
14088 out_unset_driver_resource:
14089         lpfc_unset_driver_resource_phase2(phba);
14090 out_free_iocb_list:
14091         lpfc_free_iocb_list(phba);
14092 out_unset_driver_resource_s3:
14093         lpfc_sli_driver_resource_unset(phba);
14094 out_unset_pci_mem_s3:
14095         lpfc_sli_pci_mem_unset(phba);
14096 out_disable_pci_dev:
14097         lpfc_disable_pci_dev(phba);
14098         if (shost)
14099                 scsi_host_put(shost);
14100 out_free_phba:
14101         lpfc_hba_free(phba);
14102         return error;
14103 }
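/*
 * Sketch (illustrative, not driver code): the active-interrupt test from
 * the probe loop above.  INTx (mode 0) is trusted outright; MSI/MSI-X must
 * observe more than LPFC_MSIX_VECTORS interrupts within the 50ms settle
 * window, or the probe steps down one interrupt mode and retries.  The
 * helper name is hypothetical.
 */
static inline bool __maybe_unused
lpfc_example_intr_mode_ok(uint32_t intr_mode, uint64_t observed_intrs)
{
	return intr_mode == 0 || observed_intrs > LPFC_MSIX_VECTORS;
}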
14104
14105 /**
14106  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
14107  * @pdev: pointer to PCI device
14108  *
14109  * This routine is to be called to detach a device with SLI-3 interface
14110  * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
14111  * removed from PCI bus, it performs all the necessary cleanup for the HBA
14112  * device to be removed from the PCI subsystem properly.
14113  **/
14114 static void
14115 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
14116 {
14117         struct Scsi_Host  *shost = pci_get_drvdata(pdev);
14118         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14119         struct lpfc_vport **vports;
14120         struct lpfc_hba   *phba = vport->phba;
14121         int i;
14122
14123         spin_lock_irq(&phba->hbalock);
14124         vport->load_flag |= FC_UNLOADING;
14125         spin_unlock_irq(&phba->hbalock);
14126
14127         lpfc_free_sysfs_attr(vport);
14128
14129         /* Release all the vports against this physical port */
14130         vports = lpfc_create_vport_work_array(phba);
14131         if (vports != NULL)
14132                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14133                         if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14134                                 continue;
14135                         fc_vport_terminate(vports[i]->fc_vport);
14136                 }
14137         lpfc_destroy_vport_work_array(phba, vports);
14138
14139         /* Remove FC host with the physical port */
14140         fc_remove_host(shost);
14141         scsi_remove_host(shost);
14142
14143         /* Clean up all nodes, mailboxes and IOs. */
14144         lpfc_cleanup(vport);
14145
14146         /*
14147          * Bring down the SLI Layer. This step disables all interrupts,
14148          * clears the rings, discards all mailbox commands, and resets
14149          * the HBA.
14150          */
14151
14152         /* HBA interrupt will be disabled after this call */
14153         lpfc_sli_hba_down(phba);
14154         /* Stopping the kthread will trigger work_done one more time */
14155         kthread_stop(phba->worker_thread);
14156         /* Final cleanup of txcmplq and reset the HBA */
14157         lpfc_sli_brdrestart(phba);
14158
14159         kfree(phba->vpi_bmask);
14160         kfree(phba->vpi_ids);
14161
14162         lpfc_stop_hba_timers(phba);
14163         spin_lock_irq(&phba->port_list_lock);
14164         list_del_init(&vport->listentry);
14165         spin_unlock_irq(&phba->port_list_lock);
14166
14167         lpfc_debugfs_terminate(vport);
14168
14169         /* Disable SR-IOV if enabled */
14170         if (phba->cfg_sriov_nr_virtfn)
14171                 pci_disable_sriov(pdev);
14172
14173         /* Disable interrupt */
14174         lpfc_sli_disable_intr(phba);
14175
14176         scsi_host_put(shost);
14177
14178         /*
14179          * Call scsi_free before mem_free since scsi bufs are released to their
14180          * corresponding pools here.
14181          */
14182         lpfc_scsi_free(phba);
14183         lpfc_free_iocb_list(phba);
14184
14185         lpfc_mem_free_all(phba);
14186
14187         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
14188                           phba->hbqslimp.virt, phba->hbqslimp.phys);
14189
14190         /* Free resources associated with SLI2 interface */
14191         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
14192                           phba->slim2p.virt, phba->slim2p.phys);
14193
14194         /* unmap adapter SLIM and Control Registers */
14195         iounmap(phba->ctrl_regs_memmap_p);
14196         iounmap(phba->slim_memmap_p);
14197
14198         lpfc_hba_free(phba);
14199
14200         pci_release_mem_regions(pdev);
14201         pci_disable_device(pdev);
14202 }
14203
14204 /**
14205  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
14206  * @dev_d: pointer to device
14207  *
14208  * This routine is to be called from the kernel's PCI subsystem to support
14209  * system Power Management (PM) for a device with SLI-3 interface spec. When
14210  * PM invokes this method, it quiesces the device by stopping the driver's
14211  * worker thread for the device, turning off the device's interrupts and DMA,
14212  * and bringing the device offline. Because the driver implements only the
14213  * minimum PM requirements for a power-aware driver, all possible PM messages
14214  * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated as
14215  * SUSPEND, and the driver fully reinitializes its device during the resume()
14216  * method call; the driver therefore sets the device to the PCI_D3hot state
14217  * in PCI config space instead of setting it according to the @msg provided
14218  * by the PM.
14219  *
14220  * Return code
14221  *      0 - driver suspended the device
14222  *      Error otherwise
14223  **/
14224 static int __maybe_unused
14225 lpfc_pci_suspend_one_s3(struct device *dev_d)
14226 {
14227         struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14228         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14229
14230         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14231                         "0473 PCI device Power Management suspend.\n");
14232
14233         /* Bring down the device */
14234         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14235         lpfc_offline(phba);
14236         kthread_stop(phba->worker_thread);
14237
14238         /* Disable interrupt from device */
14239         lpfc_sli_disable_intr(phba);
14240
14241         return 0;
14242 }
14243
14244 /**
14245  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14246  * @dev_d: pointer to device
14247  *
14248  * This routine is to be called from the kernel's PCI subsystem to support
14249  * system Power Management (PM) for a device with SLI-3 interface spec. When
14250  * PM invokes this method, it restores the device's PCI config space state
14251  * and fully reinitializes the device and brings it online. Because the
14252  * driver implements only the minimum PM requirements for a power-aware
14253  * driver, all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
14254  * the suspend() method are treated as SUSPEND and the driver fully
14255  * reinitializes its device during the resume() method call; the device is
14256  * therefore set to PCI_D0 directly in PCI config space before restoring
14257  * the state.
14258  *
14259  * Return code
14260  *      0 - driver resumed the device
14261  *      Error otherwise
14262  **/
14263 static int __maybe_unused
14264 lpfc_pci_resume_one_s3(struct device *dev_d)
14265 {
14266         struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14267         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14268         uint32_t intr_mode;
14269         int error;
14270
14271         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14272                         "0452 PCI device Power Management resume.\n");
14273
14274         /* Start up the kernel thread for this host adapter. */
14275         phba->worker_thread = kthread_run(lpfc_do_work, phba,
14276                                         "lpfc_worker_%d", phba->brd_no);
14277         if (IS_ERR(phba->worker_thread)) {
14278                 error = PTR_ERR(phba->worker_thread);
14279                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14280                                 "0434 PM resume failed to start worker "
14281                                 "thread: error=x%x.\n", error);
14282                 return error;
14283         }
14284
14285         /* Init cpu_map array */
14286         lpfc_cpu_map_array_init(phba);
14287         /* Init hba_eq_hdl array */
14288         lpfc_hba_eq_hdl_array_init(phba);
14289         /* Configure and enable interrupt */
14290         intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14291         if (intr_mode == LPFC_INTR_ERROR) {
14292                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14293                                 "0430 PM resume Failed to enable interrupt\n");
14294                 return -EIO;
14295         } else
14296                 phba->intr_mode = intr_mode;
14297
14298         /* Restart HBA and bring it online */
14299         lpfc_sli_brdrestart(phba);
14300         lpfc_online(phba);
14301
14302         /* Log the current active interrupt mode */
14303         lpfc_log_intr_mode(phba, phba->intr_mode);
14304
14305         return 0;
14306 }
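/*
 * Note: this resume path mirrors lpfc_pci_suspend_one_s3() in reverse --
 * restart the worker thread, re-init the cpu_map and hba_eq_hdl arrays,
 * re-enable the previously active interrupt mode, then restart and online
 * the HBA.
 */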
14307
14308 /**
14309  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14310  * @phba: pointer to lpfc hba data structure.
14311  *
14312  * This routine is called to prepare the SLI3 device for PCI slot recover. It
14313  * aborts all the outstanding SCSI I/Os to the pci device.
14314  **/
14315 static void
14316 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14317 {
14318         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14319                         "2723 PCI channel I/O abort preparing for recovery\n");
14320
14321         /*
14322          * There may be errored I/Os through HBA, abort all I/Os on txcmplq
14323          * and let the SCSI mid-layer retry them to recover.
14324          */
14325         lpfc_sli_abort_fcp_rings(phba);
14326 }
14327
14328 /**
14329  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14330  * @phba: pointer to lpfc hba data structure.
14331  *
14332  * This routine is called to prepare the SLI3 device for PCI slot reset. It
14333  * disables the device interrupt and pci device, and aborts the internal FCP
14334  * pending I/Os.
14335  **/
14336 static void
14337 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14338 {
14339         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14340                         "2710 PCI channel disable preparing for reset\n");
14341
14342         /* Block any management I/Os to the device */
14343         lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14344
14345         /* Block all SCSI devices' I/Os on the host */
14346         lpfc_scsi_dev_block(phba);
14347
14348         /* Flush all driver's outstanding SCSI I/Os as we are to reset */
14349         lpfc_sli_flush_io_rings(phba);
14350
14351         /* stop all timers */
14352         lpfc_stop_hba_timers(phba);
14353
14354         /* Disable interrupt and pci device */
14355         lpfc_sli_disable_intr(phba);
14356         pci_disable_device(phba->pcidev);
14357 }
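/*
 * Note: the quiesce sequence above blocks management I/O first, then SCSI
 * device I/O, then flushes outstanding I/Os and stops the timers, and only
 * then disables the interrupt and the PCI device.
 */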
14358
14359 /**
14360  * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14361  * @phba: pointer to lpfc hba data structure.
14362  *
14363  * This routine is called to prepare the SLI3 device for PCI slot permanently
14364  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
14365  * pending I/Os.
14366  **/
14367 static void
14368 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14369 {
14370         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14371                         "2711 PCI channel permanent disable for failure\n");
14372         /* Block all SCSI devices' I/Os on the host */
14373         lpfc_scsi_dev_block(phba);
14374         lpfc_sli4_prep_dev_for_reset(phba);
14375
14376         /* stop all timers */
14377         lpfc_stop_hba_timers(phba);
14378
14379         /* Clean up all driver's outstanding SCSI I/Os */
14380         lpfc_sli_flush_io_rings(phba);
14381 }
14382
14383 /**
14384  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14385  * @pdev: pointer to PCI device.
14386  * @state: the current PCI connection state.
14387  *
14388  * This routine is called from the PCI subsystem for I/O error handling on a
14389  * device with SLI-3 interface spec. This function is called by the PCI
14390  * subsystem after a PCI bus error affecting this device has been detected.
14391  * When this function is invoked, it will need to stop all the I/Os and
14392  * interrupt(s) to the device. Once that is done, it will return
14393  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
14394  * as desired.
14395  *
14396  * Return codes
14397  *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
14398  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14399  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14400  **/
14401 static pci_ers_result_t
14402 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
14403 {
14404         struct Scsi_Host *shost = pci_get_drvdata(pdev);
14405         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14406
14407         switch (state) {
14408         case pci_channel_io_normal:
14409                 /* Non-fatal error, prepare for recovery */
14410                 lpfc_sli_prep_dev_for_recover(phba);
14411                 return PCI_ERS_RESULT_CAN_RECOVER;
14412         case pci_channel_io_frozen:
14413                 /* Fatal error, prepare for slot reset */
14414                 lpfc_sli_prep_dev_for_reset(phba);
14415                 return PCI_ERS_RESULT_NEED_RESET;
14416         case pci_channel_io_perm_failure:
14417                 /* Permanent failure, prepare for device down */
14418                 lpfc_sli_prep_dev_for_perm_failure(phba);
14419                 return PCI_ERS_RESULT_DISCONNECT;
14420         default:
14421                 /* Unknown state, prepare and request slot reset */
14422                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14423                                 "0472 Unknown PCI error state: x%x\n", state);
14424                 lpfc_sli_prep_dev_for_reset(phba);
14425                 return PCI_ERS_RESULT_NEED_RESET;
14426         }
14427 }
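/*
 * For reference, the decision table implemented above:
 *   pci_channel_io_normal       -> abort FCP rings,   CAN_RECOVER
 *   pci_channel_io_frozen       -> quiesce + disable, NEED_RESET
 *   pci_channel_io_perm_failure -> block and flush,   DISCONNECT
 *   unknown state               -> treated as frozen, NEED_RESET
 */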
14428
14429 /**
14430  * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14431  * @pdev: pointer to PCI device.
14432  *
14433  * This routine is called from the PCI subsystem for error handling on a
14434  * device with SLI-3 interface spec. It is called after the PCI bus has been
14435  * reset to restart the PCI card from scratch, as if from a cold-boot.
14436  * During the PCI subsystem error recovery, after driver returns
14437  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
14438  * recovery and then call this routine before calling the .resume method
14439  * to recover the device. This function will initialize the HBA device,
14440  * enable the interrupt, but it will just put the HBA to offline state
14441  * enable the interrupt, but it will leave the HBA in the offline state
14442  *
14443  * Return codes
14444  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
14445  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14446  */
14447 static pci_ers_result_t
14448 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
14449 {
14450         struct Scsi_Host *shost = pci_get_drvdata(pdev);
14451         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14452         struct lpfc_sli *psli = &phba->sli;
14453         uint32_t intr_mode;
14454
14455         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14456         if (pci_enable_device_mem(pdev)) {
14457                 printk(KERN_ERR "lpfc: Cannot re-enable "
14458                         "PCI device after reset.\n");
14459                 return PCI_ERS_RESULT_DISCONNECT;
14460         }
14461
14462         pci_restore_state(pdev);
14463
14464         /*
14465          * pci_restore_state() clears the device's saved_state flag, so the
14466          * restored state must be saved again.
14467          */
14468         pci_save_state(pdev);
14469
14470         if (pdev->is_busmaster)
14471                 pci_set_master(pdev);
14472
14473         spin_lock_irq(&phba->hbalock);
14474         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14475         spin_unlock_irq(&phba->hbalock);
14476
14477         /* Configure and enable interrupt */
14478         intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14479         if (intr_mode == LPFC_INTR_ERROR) {
14480                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14481                                 "0427 Cannot re-enable interrupt after "
14482                                 "slot reset.\n");
14483                 return PCI_ERS_RESULT_DISCONNECT;
14484         } else
14485                 phba->intr_mode = intr_mode;
14486
14487         /* Take device offline, it will perform cleanup */
14488         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14489         lpfc_offline(phba);
14490         lpfc_sli_brdrestart(phba);
14491
14492         /* Log the current active interrupt mode */
14493         lpfc_log_intr_mode(phba, phba->intr_mode);
14494
14495         return PCI_ERS_RESULT_RECOVERED;
14496 }
14497
14498 /**
14499  * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14500  * @pdev: pointer to PCI device
14501  *
14502  * This routine is called from the PCI subsystem for error handling on a device
14503  * with SLI-3 interface spec. It is called when kernel error recovery tells
14504  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
14505  * error recovery. After this call, traffic can start to flow from this device
14506  * again.
14507  */
14508 static void
14509 lpfc_io_resume_s3(struct pci_dev *pdev)
14510 {
14511         struct Scsi_Host *shost = pci_get_drvdata(pdev);
14512         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14513
14514         /* Bring device online; this is a no-op for non-fatal error resume */
14515         lpfc_online(phba);
14516 }
14517
14518 /**
14519  * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14520  * @phba: pointer to lpfc hba data structure.
14521  *
14522  * returns the number of ELS/CT IOCBs to reserve
14523  **/
14524 int
14525 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14526 {
14527         int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14528
14529         if (phba->sli_rev == LPFC_SLI_REV4) {
14530                 if (max_xri <= 100)
14531                         return 10;
14532                 else if (max_xri <= 256)
14533                         return 25;
14534                 else if (max_xri <= 512)
14535                         return 50;
14536                 else if (max_xri <= 1024)
14537                         return 100;
14538                 else if (max_xri <= 1536)
14539                         return 150;
14540                 else if (max_xri <= 2048)
14541                         return 200;
14542                 else
14543                         return 250;
14544         } else
14545                 return 0;
14546 }
14547
14548 /**
14549  * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14550  * @phba: pointer to lpfc hba data structure.
14551  *
14552  * returns the number of ELS/CT + NVMET IOCBs to reserve
14553  **/
14554 int
14555 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14556 {
14557         int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14558
14559         if (phba->nvmet_support)
14560                 max_xri += LPFC_NVMET_BUF_POST;
14561         return max_xri;
14562 }
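/*
 * For reference, the reservation tiers implemented above:
 *   max_xri <= 100 -> 10    max_xri <= 1024 -> 100
 *   max_xri <= 256 -> 25    max_xri <= 1536 -> 150
 *   max_xri <= 512 -> 50    max_xri <= 2048 -> 200
 *   otherwise      -> 250   (non-SLI4: always 0)
 * lpfc_sli4_get_iocb_cnt() adds LPFC_NVMET_BUF_POST when nvmet_support
 * is set.
 */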
14563
14564
14565 static int
14566 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14567         uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
14568         const struct firmware *fw)
14569 {
14570         int rc;
14571         u8 sli_family;
14572
14573         sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14574         /* Three cases:  (1) FW was not supported on the detected adapter.
14575          * (2) FW update has been locked out administratively.
14576          * (3) Some other error during FW update.
14577          * In each case, an unmaskable message is written to the console
14578          * for admin diagnosis.
14579          */
14580         if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
14581             (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
14582              magic_number != MAGIC_NUMBER_G6) ||
14583             (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
14584              magic_number != MAGIC_NUMBER_G7) ||
14585             (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
14586              magic_number != MAGIC_NUMBER_G7P)) {
14587                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14588                                 "3030 This firmware version is not supported on"
14589                                 " this HBA model. Device:%x Magic:%x Type:%x "
14590                                 "ID:%x Size %d %zd\n",
14591                                 phba->pcidev->device, magic_number, ftype, fid,
14592                                 fsize, fw->size);
14593                 rc = -EINVAL;
14594         } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
14595                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14596                                 "3021 Firmware downloads have been prohibited "
14597                                 "by a system configuration setting on "
14598                                 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14599                                 "%zd\n",
14600                                 phba->pcidev->device, magic_number, ftype, fid,
14601                                 fsize, fw->size);
14602                 rc = -EACCES;
14603         } else {
14604                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14605                                 "3022 FW Download failed. Add Status x%x "
14606                                 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14607                                 "%zd\n",
14608                                 offset, phba->pcidev->device, magic_number,
14609                                 ftype, fid, fsize, fw->size);
14610                 rc = -EIO;
14611         }
14612         return rc;
14613 }
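/*
 * For reference, the add-status to errno mapping implemented above:
 *   image not supported / magic mismatch   -> -EINVAL
 *   download administratively prohibited   -> -EACCES
 *   any other write-object failure         -> -EIO
 */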
14614
14615 /**
14616  * lpfc_write_firmware - attempt to write a firmware image to the port
14617  * @fw: pointer to firmware image returned from request_firmware.
14618  * @context: pointer to the lpfc hba data structure passed in by the caller.
14619  *
14620  **/
14621 static void
14622 lpfc_write_firmware(const struct firmware *fw, void *context)
14623 {
14624         struct lpfc_hba *phba = (struct lpfc_hba *)context;
14625         char fwrev[FW_REV_STR_SIZE];
14626         struct lpfc_grp_hdr *image;
14627         struct list_head dma_buffer_list;
14628         int i, rc = 0;
14629         struct lpfc_dmabuf *dmabuf, *next;
14630         uint32_t offset = 0, temp_offset = 0;
14631         uint32_t magic_number, ftype, fid, fsize;
14632
14633         /* fw can be NULL in no-wait mode; sanity check */
14634         if (!fw) {
14635                 rc = -ENXIO;
14636                 goto out;
14637         }
14638         image = (struct lpfc_grp_hdr *)fw->data;
14639
14640         magic_number = be32_to_cpu(image->magic_number);
14641         ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
14642         fid = bf_get_be32(lpfc_grp_hdr_id, image);
14643         fsize = be32_to_cpu(image->size);
14644
14645         INIT_LIST_HEAD(&dma_buffer_list);
14646         lpfc_decode_firmware_rev(phba, fwrev, 1);
14647         if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14648                 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14649                              "3023 Updating Firmware, Current Version:%s "
14650                              "New Version:%s\n",
14651                              fwrev, image->revision);
14652                 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
14653                         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
14654                                          GFP_KERNEL);
14655                         if (!dmabuf) {
14656                                 rc = -ENOMEM;
14657                                 goto release_out;
14658                         }
14659                         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14660                                                           SLI4_PAGE_SIZE,
14661                                                           &dmabuf->phys,
14662                                                           GFP_KERNEL);
14663                         if (!dmabuf->virt) {
14664                                 kfree(dmabuf);
14665                                 rc = -ENOMEM;
14666                                 goto release_out;
14667                         }
14668                         list_add_tail(&dmabuf->list, &dma_buffer_list);
14669                 }
14670                 while (offset < fw->size) {
14671                         temp_offset = offset;
14672                         list_for_each_entry(dmabuf, &dma_buffer_list, list) {
14673                                 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14674                                         memcpy(dmabuf->virt,
14675                                                fw->data + temp_offset,
14676                                                fw->size - temp_offset);
14677                                         temp_offset = fw->size;
14678                                         break;
14679                                 }
14680                                 memcpy(dmabuf->virt, fw->data + temp_offset,
14681                                        SLI4_PAGE_SIZE);
14682                                 temp_offset += SLI4_PAGE_SIZE;
14683                         }
14684                         rc = lpfc_wr_object(phba, &dma_buffer_list,
14685                                     (fw->size - offset), &offset);
14686                         if (rc) {
14687                                 rc = lpfc_log_write_firmware_error(phba, offset,
14688                                                                    magic_number,
14689                                                                    ftype,
14690                                                                    fid,
14691                                                                    fsize,
14692                                                                    fw);
14693                                 goto release_out;
14694                         }
14695                 }
14696                 rc = offset;
14697         } else
14698                 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14699                              "3029 Skipped Firmware update, Current "
14700                              "Version:%s New Version:%s\n",
14701                              fwrev, image->revision);
14702
14703 release_out:
14704         list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
14705                 list_del(&dmabuf->list);
14706                 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14707                                   dmabuf->virt, dmabuf->phys);
14708                 kfree(dmabuf);
14709         }
14710         release_firmware(fw);
14711 out:
14712         if (rc < 0)
14713                 lpfc_log_msg(phba, KERN_ERR, LOG_INIT | LOG_SLI,
14714                              "3062 Firmware update error, status %d.\n", rc);
14715         else
14716                 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14717                              "3024 Firmware update success: size %d.\n", rc);
14718 }
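/*
 * Sketch (illustrative, not driver code): the page-sized chunking rule
 * used by the copy loop in lpfc_write_firmware() above.  The final partial
 * page is capped at the image tail so the source buffer is never overread.
 * The helper name is hypothetical.
 */
static inline size_t __maybe_unused
lpfc_example_fw_chunk_len(size_t img_len, size_t off)
{
	return (off + SLI4_PAGE_SIZE > img_len) ? img_len - off
						: SLI4_PAGE_SIZE;
}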
14719
14720 /**
14721  * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14722  * @phba: pointer to lpfc hba data structure.
14723  * @fw_upgrade: upgrade mode, INT_FW_UPGRADE or RUN_FW_UPGRADE.
14724  *
14725  * This routine is called to perform a Linux generic firmware upgrade on a
14726  * device that supports this feature.
14727  **/
14728 int
14729 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14730 {
14731         uint8_t file_name[ELX_MODEL_NAME_SIZE];
14732         int ret;
14733         const struct firmware *fw;
14734
14735         /* Only supported on SLI4 interface type 2 for now */
14736         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14737             LPFC_SLI_INTF_IF_TYPE_2)
14738                 return -EPERM;
14739
14740         snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
14741
14742         if (fw_upgrade == INT_FW_UPGRADE) {
14743                 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14744                                         file_name, &phba->pcidev->dev,
14745                                         GFP_KERNEL, (void *)phba,
14746                                         lpfc_write_firmware);
14747         } else if (fw_upgrade == RUN_FW_UPGRADE) {
14748                 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14749                 if (!ret)
14750                         lpfc_write_firmware(fw, (void *)phba);
14751         } else {
14752                 ret = -EINVAL;
14753         }
14754
14755         return ret;
14756 }
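/*
 * Sketch (illustrative, not driver code): a hypothetical caller requesting
 * an asynchronous, uevent-driven update; the firmware loader looks up
 * "<ModelName>.grp" as built above.
 */
static inline int __maybe_unused
lpfc_example_request_fw(struct lpfc_hba *phba)
{
	return lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
}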
14757
14758 /**
14759  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14760  * @pdev: pointer to PCI device
14761  * @pid: pointer to PCI device identifier
14762  *
14763  * This routine is called from the kernel's PCI subsystem to attach a device
14764  * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14765  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
14766  * information of the device and the driver to determine whether the driver
14767  * can support this kind of device. If the match is successful, the driver
14768  * core invokes this routine. If this routine determines it can claim the HBA,
14769  * it does all the initialization that it needs to do to handle the HBA
14770  * properly.
14771  *
14772  * Return code
14773  *      0 - driver can claim the device
14774  *      negative value - driver can not claim the device
14775  **/
14776 static int
14777 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
14778 {
14779         struct lpfc_hba   *phba;
14780         struct lpfc_vport *vport = NULL;
14781         struct Scsi_Host  *shost = NULL;
14782         int error;
14783         uint32_t cfg_mode, intr_mode;
14784
14785         /* Allocate memory for HBA structure */
14786         phba = lpfc_hba_alloc(pdev);
14787         if (!phba)
14788                 return -ENOMEM;
14789
14790         INIT_LIST_HEAD(&phba->poll_list);
14791
14792         /* Perform generic PCI device enabling operation */
14793         error = lpfc_enable_pci_dev(phba);
14794         if (error)
14795                 goto out_free_phba;
14796
14797         /* Set up SLI API function jump table for PCI-device group-1 HBAs */
14798         error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14799         if (error)
14800                 goto out_disable_pci_dev;
14801
14802         /* Set up SLI-4 specific device PCI memory space */
14803         error = lpfc_sli4_pci_mem_setup(phba);
14804         if (error) {
14805                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14806                                 "1410 Failed to set up pci memory space.\n");
14807                 goto out_disable_pci_dev;
14808         }
14809
14810         /* Set up SLI-4 Specific device driver resources */
14811         error = lpfc_sli4_driver_resource_setup(phba);
14812         if (error) {
14813                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14814                                 "1412 Failed to set up driver resource.\n");
14815                 goto out_unset_pci_mem_s4;
14816         }
14817
14818         INIT_LIST_HEAD(&phba->active_rrq_list);
14819         INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14820
14821         /* Set up common device driver resources */
14822         error = lpfc_setup_driver_resource_phase2(phba);
14823         if (error) {
14824                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14825                                 "1414 Failed to set up driver resource.\n");
14826                 goto out_unset_driver_resource_s4;
14827         }
14828
14829         /* Get the default values for Model Name and Description */
14830         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14831
14832         /* Now, try to enable interrupts and bring up the device */
14833         cfg_mode = phba->cfg_use_msi;
14834
14835         /* Put device to a known state before enabling interrupt */
14836         phba->pport = NULL;
14837         lpfc_stop_port(phba);
14838
14839         /* Init cpu_map array */
14840         lpfc_cpu_map_array_init(phba);
14841
14842         /* Init hba_eq_hdl array */
14843         lpfc_hba_eq_hdl_array_init(phba);
14844
14845         /* Configure and enable interrupt */
14846         intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14847         if (intr_mode == LPFC_INTR_ERROR) {
14848                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14849                                 "0426 Failed to enable interrupt.\n");
14850                 error = -ENODEV;
14851                 goto out_unset_driver_resource;
14852         }
14853         /* Default to single EQ for non-MSI-X */
14854         if (phba->intr_type != MSIX) {
14855                 phba->cfg_irq_chann = 1;
14856                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14857                         if (phba->nvmet_support)
14858                                 phba->cfg_nvmet_mrq = 1;
14859                 }
14860         }
14861         lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14862
14863         /* Create SCSI host to the physical port */
14864         error = lpfc_create_shost(phba);
14865         if (error) {
14866                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14867                                 "1415 Failed to create scsi host.\n");
14868                 goto out_disable_intr;
14869         }
14870         vport = phba->pport;
14871         shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14872
14873         /* Configure sysfs attributes */
14874         error = lpfc_alloc_sysfs_attr(vport);
14875         if (error) {
14876                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14877                                 "1416 Failed to allocate sysfs attr\n");
14878                 goto out_destroy_shost;
14879         }
14880
14881         /* Set up SLI-4 HBA */
14882         if (lpfc_sli4_hba_setup(phba)) {
14883                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14884                                 "1421 Failed to set up hba\n");
14885                 error = -ENODEV;
14886                 goto out_free_sysfs_attr;
14887         }
14888
14889         /* Log the current active interrupt mode */
14890         phba->intr_mode = intr_mode;
14891         lpfc_log_intr_mode(phba, intr_mode);
14892
14893         /* Perform post initialization setup */
14894         lpfc_post_init_setup(phba);
14895
14896         /* Firmware NVME support was checked earlier in the driver load and
14897          * the FC4 type was corrected there, so nvme_support is not re-checked.
14898          */
14899         if (phba->nvmet_support == 0) {
14900                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14901                         /* Create NVME binding with nvme_fc_transport. This
14902                          * ensures the vport is initialized.  If the localport
14903                          * create fails, it should not unload the driver to
14904                          * support field issues.
14905                          */
14906                         error = lpfc_nvme_create_localport(vport);
14907                         if (error) {
14908                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14909                                                 "6004 NVME registration "
14910                                                 "failed, error x%x\n",
14911                                                 error);
14912                         }
14913                 }
14914         }
14915
14916         /* check for firmware upgrade or downgrade */
14917         if (phba->cfg_request_firmware_upgrade)
14918                 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14919
14920         /* Check if there are static vports to be created. */
14921         lpfc_create_static_vport(phba);
14922
14923         timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14924         cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
14925
14926         return 0;
14927
14928 out_free_sysfs_attr:
14929         lpfc_free_sysfs_attr(vport);
14930 out_destroy_shost:
14931         lpfc_destroy_shost(phba);
14932 out_disable_intr:
14933         lpfc_sli4_disable_intr(phba);
14934 out_unset_driver_resource:
14935         lpfc_unset_driver_resource_phase2(phba);
14936 out_unset_driver_resource_s4:
14937         lpfc_sli4_driver_resource_unset(phba);
14938 out_unset_pci_mem_s4:
14939         lpfc_sli4_pci_mem_unset(phba);
14940 out_disable_pci_dev:
14941         lpfc_disable_pci_dev(phba);
14942         if (shost)
14943                 scsi_host_put(shost);
14944 out_free_phba:
14945         lpfc_hba_free(phba);
14946         return error;
14947 }
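/*
 * Note: unlike the SLI-3 probe earlier in this file, this SLI-4 path
 * enables interrupts and sizes the IRQ channels before creating the SCSI
 * host, and registers the cpuhp instance only as the final step, once the
 * HBA is fully initialized.
 */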
14948
14949 /**
14950  * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14951  * @pdev: pointer to PCI device
14952  *
14953  * This routine is called from the kernel's PCI subsystem to detach a device
14954  * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14955  * removed from PCI bus, it performs all the necessary cleanup for the HBA
14956  * device to be removed from the PCI subsystem properly.
14957  **/
14958 static void
14959 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
14960 {
14961         struct Scsi_Host *shost = pci_get_drvdata(pdev);
14962         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14963         struct lpfc_vport **vports;
14964         struct lpfc_hba *phba = vport->phba;
14965         int i;
14966
14967         /* Mark the device unloading flag */
14968         spin_lock_irq(&phba->hbalock);
14969         vport->load_flag |= FC_UNLOADING;
14970         spin_unlock_irq(&phba->hbalock);
14971         if (phba->cgn_i)
14972                 lpfc_unreg_congestion_buf(phba);
14973
14974         lpfc_free_sysfs_attr(vport);
14975
14976         /* Release all the vports against this physical port */
14977         vports = lpfc_create_vport_work_array(phba);
14978         if (vports != NULL)
14979                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14980                         if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14981                                 continue;
14982                         fc_vport_terminate(vports[i]->fc_vport);
14983                 }
14984         lpfc_destroy_vport_work_array(phba, vports);
14985
14986         /* Remove FC host with the physical port */
14987         fc_remove_host(shost);
14988         scsi_remove_host(shost);
14989
14990         /* Perform ndlp cleanup on the physical port.  The nvme and nvmet
14991          * localports are destroyed afterwards to clean up all transport memory.
14992          */
14993         lpfc_cleanup(vport);
14994         lpfc_nvmet_destroy_targetport(phba);
14995         lpfc_nvme_destroy_localport(vport);
14996
14997         /* De-allocate multi-XRI pools */
14998         if (phba->cfg_xri_rebalancing)
14999                 lpfc_destroy_multixri_pools(phba);
15000
15001         /*
15002          * Bring down the SLI Layer. This step disables all interrupts,
15003          * clears the rings, discards all mailbox commands, and resets
15004          * the HBA FCoE function.
15005          */
15006         lpfc_debugfs_terminate(vport);
15007
15008         lpfc_stop_hba_timers(phba);
15009         spin_lock_irq(&phba->port_list_lock);
15010         list_del_init(&vport->listentry);
15011         spin_unlock_irq(&phba->port_list_lock);
15012
15013         /* Perform scsi free before driver resource_unset since scsi
15014          * buffers are released to their corresponding pools here.
15015          */
15016         lpfc_io_free(phba);
15017         lpfc_free_iocb_list(phba);
15018         lpfc_sli4_hba_unset(phba);
15019
15020         lpfc_unset_driver_resource_phase2(phba);
15021         lpfc_sli4_driver_resource_unset(phba);
15022
15023         /* Unmap adapter Control and Doorbell registers */
15024         lpfc_sli4_pci_mem_unset(phba);
15025
15026         /* Release PCI resources and disable device's PCI function */
15027         scsi_host_put(shost);
15028         lpfc_disable_pci_dev(phba);
15029
15030         /* Finally, free the driver's device data structure */
15031         lpfc_hba_free(phba);
15032
15033         return;
15034 }
15035
15036 /**
15037  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
15038  * @dev_d: pointer to device
15039  *
15040  * This routine is called from the kernel's PCI subsystem to support system
15041  * Power Management (PM) for a device with SLI-4 interface spec. When PM
15042  * invokes this method, it quiesces the device by stopping the driver's
15043  * worker thread for the device, turning off the device's interrupts and
15044  * DMA, and bringing the device offline. Because the driver implements only
15045  * the minimum PM requirements for a power-aware driver, all possible PM
15046  * messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are
15047  * treated as SUSPEND and the driver fully reinitializes its device during
15048  * the resume() method call; the driver therefore sets the device to the
15049  * PCI_D3hot state in PCI config space instead of setting it according to
15050  * the @msg provided by the PM.
15051  *
15052  * Return code
15053  *      0 - driver suspended the device
15054  *      Error otherwise
15055  **/
15056 static int __maybe_unused
15057 lpfc_pci_suspend_one_s4(struct device *dev_d)
15058 {
15059         struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15060         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15061
15062         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15063                         "2843 PCI device Power Management suspend.\n");
15064
15065         /* Bring down the device */
15066         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15067         lpfc_offline(phba);
15068         kthread_stop(phba->worker_thread);
15069
15070         /* Disable interrupt from device */
15071         lpfc_sli4_disable_intr(phba);
15072         lpfc_sli4_queue_destroy(phba);
15073
15074         return 0;
15075 }
15076
15077 /**
15078  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
15079  * @dev_d: pointer to device
15080  *
15081  * This routine is called from the kernel's PCI subsystem to support system
15082  * Power Management (PM) for a device with SLI-4 interface spec. When PM
15083  * invokes this method, it restores the device's PCI config space state and
15084  * fully reinitializes the device and brings it online. Because the driver
15085  * implements only the minimum PM requirements for a power-aware driver, all
15086  * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
15087  * method are treated as SUSPEND and the driver fully reinitializes its
15088  * device during the resume() method call; the device is therefore set to
15089  * PCI_D0 directly in PCI config space before restoring the
15090  * state.
15091  *
15092  * Return code
15093  *      0 - driver resumed the device
15094  *      Error otherwise
15095  **/
15096 static int __maybe_unused
15097 lpfc_pci_resume_one_s4(struct device *dev_d)
15098 {
15099         struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15100         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15101         uint32_t intr_mode;
15102         int error;
15103
15104         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15105                         "0292 PCI device Power Management resume.\n");
15106
15107         /* Start up the kernel thread for this host adapter. */
15108         phba->worker_thread = kthread_run(lpfc_do_work, phba,
15109                                         "lpfc_worker_%d", phba->brd_no);
15110         if (IS_ERR(phba->worker_thread)) {
15111                 error = PTR_ERR(phba->worker_thread);
15112                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15113                                 "0293 PM resume failed to start worker "
15114                                 "thread: error=x%x.\n", error);
15115                 return error;
15116         }
15117
15118         /* Configure and enable interrupt */
15119         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15120         if (intr_mode == LPFC_INTR_ERROR) {
15121                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15122                                 "0294 PM resume Failed to enable interrupt\n");
15123                 return -EIO;
15124         } else
15125                 phba->intr_mode = intr_mode;
15126
15127         /* Restart HBA and bring it online */
15128         lpfc_sli_brdrestart(phba);
15129         lpfc_online(phba);
15130
15131         /* Log the current active interrupt mode */
15132         lpfc_log_intr_mode(phba, phba->intr_mode);
15133
15134         return 0;
15135 }
15136
15137 /**
15138  * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
15139  * @phba: pointer to lpfc hba data structure.
15140  *
15141  * This routine is called to prepare the SLI4 device for PCI slot recover. It
15142  * aborts all the outstanding SCSI I/Os to the pci device.
15143  **/
15144 static void
15145 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
15146 {
15147         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15148                         "2828 PCI channel I/O abort preparing for recovery\n");
15149         /*
15150          * There may be errored I/Os pending in the HBA; abort all I/Os on
15151          * the txcmplq and let the SCSI mid-layer retry them to recover.
15152          */
15153         lpfc_sli_abort_fcp_rings(phba);
15154 }
15155
15156 /**
15157  * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
15158  * @phba: pointer to lpfc hba data structure.
15159  *
15160  * This routine is called to prepare the SLI4 device for PCI slot reset. It
15161  * disables the device interrupt and pci device, and aborts the internal FCP
15162  * pending I/Os.
15163  **/
15164 static void
15165 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
15166 {
15167         int offline =  pci_channel_offline(phba->pcidev);
15168
15169         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15170                         "2826 PCI channel disable preparing for reset offline"
15171                         " %d\n", offline);
15172
15173         /* Block any management I/Os to the device */
15174         lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
15175
15177         /* HBA_PCI_ERR was set in io_error_detect */
15178         lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
15179         /* Flush all driver's outstanding I/Os as we are to reset */
15180         lpfc_sli_flush_io_rings(phba);
15181         lpfc_offline(phba);
15182
15183         /* stop all timers */
15184         lpfc_stop_hba_timers(phba);
15185
15186         lpfc_sli4_queue_destroy(phba);
15187         /* Disable interrupt and pci device */
15188         lpfc_sli4_disable_intr(phba);
15189         pci_disable_device(phba->pcidev);
15190 }
15191
15192 /**
15193  * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
15194  * @phba: pointer to lpfc hba data structure.
15195  *
15196  * This routine is called to prepare the SLI4 device for PCI slot permanently
15197  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
15198  * pending I/Os.
15199  **/
15200 static void
15201 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
15202 {
15203         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15204                         "2827 PCI channel permanent disable for failure\n");
15205
15206         /* Block all SCSI devices' I/Os on the host */
15207         lpfc_scsi_dev_block(phba);
15208
15209         /* stop all timers */
15210         lpfc_stop_hba_timers(phba);
15211
15212         /* Clean up all driver's outstanding I/Os */
15213         lpfc_sli_flush_io_rings(phba);
15214 }
15215
15216 /**
15217  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15218  * @pdev: pointer to PCI device.
15219  * @state: the current PCI connection state.
15220  *
15221  * This routine is called from the PCI subsystem for error handling to a
15222  * device with SLI-4 interface spec. It is called by the PCI subsystem
15223  * after a PCI bus error affecting this device has been detected. When this
15224  * function is invoked, it will need to stop all the I/Os and interrupt(s)
15225  * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
15226  * for the PCI subsystem to perform proper recovery as desired.
15227  *
15228  * Return codes
15229  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15230  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15231  **/
15232 static pci_ers_result_t
15233 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15234 {
15235         struct Scsi_Host *shost = pci_get_drvdata(pdev);
15236         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15237         bool hba_pci_err;
15238
15239         switch (state) {
15240         case pci_channel_io_normal:
15241                 /* Non-fatal error, prepare for recovery */
15242                 lpfc_sli4_prep_dev_for_recover(phba);
15243                 return PCI_ERS_RESULT_CAN_RECOVER;
15244         case pci_channel_io_frozen:
15245                 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15246                 /* Fatal error, prepare for slot reset */
15247                 if (!hba_pci_err)
15248                         lpfc_sli4_prep_dev_for_reset(phba);
15249                 else
15250                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15251                                         "2832  Already handling PCI error "
15252                                         "state: x%x\n", state);
15253                 return PCI_ERS_RESULT_NEED_RESET;
15254         case pci_channel_io_perm_failure:
15255                 set_bit(HBA_PCI_ERR, &phba->bit_flags);
15256                 /* Permanent failure, prepare for device down */
15257                 lpfc_sli4_prep_dev_for_perm_failure(phba);
15258                 return PCI_ERS_RESULT_DISCONNECT;
15259         default:
15260                 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15261                 /* Unknown state, prepare and request slot reset */
15262                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15263                                 "2825 Unknown PCI error state: x%x\n", state);
15264                 if (!hba_pci_err)
15265                         lpfc_sli4_prep_dev_for_reset(phba);
15267                 return PCI_ERS_RESULT_NEED_RESET;
15268         }
15269 }
15270
15271 /**
15272  * lpfc_io_slot_reset_s4 - Method to restart a PCI SLI-4 device from scratch
15273  * @pdev: pointer to PCI device.
15274  *
15275  * This routine is called from the PCI subsystem for error handling to a
15276  * device with SLI-4 interface spec. It is called after the PCI bus has been
15277  * reset to restart the PCI card from scratch, as if from a cold boot. During
15278  * PCI subsystem error recovery, after the driver returns
15279  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
15280  * recovery and then call this routine before calling the .resume method to
15281  * recover the device. This function will initialize the HBA device, enable
15282  * the interrupt, but it will just put the HBA to offline state without
15283  * passing any I/O traffic.
15284  *
15285  * Return codes
15286  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
15287  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15288  **/
15289 static pci_ers_result_t
15290 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
15291 {
15292         struct Scsi_Host *shost = pci_get_drvdata(pdev);
15293         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15294         struct lpfc_sli *psli = &phba->sli;
15295         uint32_t intr_mode;
15296         bool hba_pci_err;
15297
15298         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15299         if (pci_enable_device_mem(pdev)) {
15300                 printk(KERN_ERR "lpfc: Cannot re-enable "
15301                        "PCI device after reset.\n");
15302                 return PCI_ERS_RESULT_DISCONNECT;
15303         }
15304
15305         pci_restore_state(pdev);
15306
15307         hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
15308         if (!hba_pci_err)
15309                 dev_info(&pdev->dev,
15310                          "hba_pci_err was not set, recovering slot reset.\n");
15311         /*
15312          * Newer kernels' pci_restore_state() clears the device's saved_state
15313          * flag, so the restored state must be saved again.
15314          */
15315         pci_save_state(pdev);
15316
15317         if (pdev->is_busmaster)
15318                 pci_set_master(pdev);
15319
15320         spin_lock_irq(&phba->hbalock);
15321         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15322         spin_unlock_irq(&phba->hbalock);
15323
15324         /* Init cpu_map array */
15325         lpfc_cpu_map_array_init(phba);
15326         /* Configure and enable interrupt */
15327         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15328         if (intr_mode == LPFC_INTR_ERROR) {
15329                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15330                                 "2824 Cannot re-enable interrupt after "
15331                                 "slot reset.\n");
15332                 return PCI_ERS_RESULT_DISCONNECT;
15333         } else
15334                 phba->intr_mode = intr_mode;
15335         lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
15336
15337         /* Log the current active interrupt mode */
15338         lpfc_log_intr_mode(phba, phba->intr_mode);
15339
15340         return PCI_ERS_RESULT_RECOVERED;
15341 }
15342
15343 /**
15344  * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15345  * @pdev: pointer to PCI device
15346  *
15347  * This routine is called from the PCI subsystem for error handling to a
15348  * device with SLI-4 interface spec. It is called when kernel error recovery
15349  * tells the lpfc driver that it is OK to resume normal PCI operation after
15350  * PCI bus error recovery. After this call, traffic can start to flow from
15351  * this device again.
15352  **/
15353 static void
15354 lpfc_io_resume_s4(struct pci_dev *pdev)
15355 {
15356         struct Scsi_Host *shost = pci_get_drvdata(pdev);
15357         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15358
15359         /*
15360          * In case of slot reset, the function reset is performed through a
15361          * mailbox command, which needs DMA to be enabled, so the operation
15362          * has to be deferred to this io-resume phase. Taking the device
15363          * offline performs the necessary cleanup.
15364          */
15365         if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15366                 /* Perform device reset */
15367                 lpfc_sli_brdrestart(phba);
15368                 /* Bring the device back online */
15369                 lpfc_online(phba);
15370         }
15371 }
15372
15373 /**
15374  * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15375  * @pdev: pointer to PCI device
15376  * @pid: pointer to PCI device identifier
15377  *
15378  * This routine is to be registered to the kernel's PCI subsystem. When an
15379  * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
15380  * at PCI device-specific information of the device and driver to see if
15381  * the driver can support this kind of device. If the match is
15382  * successful, the driver core invokes this routine. This routine dispatches
15383  * the action to the proper SLI-3 or SLI-4 device probing routine, which will
15384  * do all the initialization that it needs to do to handle the HBA device
15385  * properly.
15386  *
15387  * Return code
15388  *      0 - driver can claim the device
15389  *      negative value - driver can not claim the device
15390  **/
15391 static int
15392 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
15393 {
15394         int rc;
15395         struct lpfc_sli_intf intf;
15396
15397         if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
15398                 return -ENODEV;
15399
15400         if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
15401             (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
15402                 rc = lpfc_pci_probe_one_s4(pdev, pid);
15403         else
15404                 rc = lpfc_pci_probe_one_s3(pdev, pid);
15405
15406         return rc;
15407 }
15408
15409 /**
15410  * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15411  * @pdev: pointer to PCI device
15412  *
15413  * This routine is to be registered to the kernel's PCI subsystem. When an
15414  * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
15415  * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15416  * remove routine, which will perform all the necessary cleanup for the
15417  * device to be removed from the PCI subsystem properly.
15418  **/
15419 static void
15420 lpfc_pci_remove_one(struct pci_dev *pdev)
15421 {
15422         struct Scsi_Host *shost = pci_get_drvdata(pdev);
15423         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15424
15425         switch (phba->pci_dev_grp) {
15426         case LPFC_PCI_DEV_LP:
15427                 lpfc_pci_remove_one_s3(pdev);
15428                 break;
15429         case LPFC_PCI_DEV_OC:
15430                 lpfc_pci_remove_one_s4(pdev);
15431                 break;
15432         default:
15433                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15434                                 "1424 Invalid PCI device group: 0x%x\n",
15435                                 phba->pci_dev_grp);
15436                 break;
15437         }
15438         return;
15439 }
15440
15441 /**
15442  * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15443  * @dev: pointer to device
15444  *
15445  * This routine is to be registered to the kernel's PCI subsystem to support
15446  * system Power Management (PM). When PM invokes this method, it dispatches
15447  * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15448  * suspend the device.
15449  *
15450  * Return code
15451  *      0 - driver suspended the device
15452  *      Error otherwise
15453  **/
15454 static int __maybe_unused
15455 lpfc_pci_suspend_one(struct device *dev)
15456 {
15457         struct Scsi_Host *shost = dev_get_drvdata(dev);
15458         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15459         int rc = -ENODEV;
15460
15461         switch (phba->pci_dev_grp) {
15462         case LPFC_PCI_DEV_LP:
15463                 rc = lpfc_pci_suspend_one_s3(dev);
15464                 break;
15465         case LPFC_PCI_DEV_OC:
15466                 rc = lpfc_pci_suspend_one_s4(dev);
15467                 break;
15468         default:
15469                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15470                                 "1425 Invalid PCI device group: 0x%x\n",
15471                                 phba->pci_dev_grp);
15472                 break;
15473         }
15474         return rc;
15475 }
15476
15477 /**
15478  * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15479  * @dev: pointer to device
15480  *
15481  * This routine is to be registered to the kernel's PCI subsystem to support
15482  * system Power Management (PM). When PM invokes this method, it dispatches
15483  * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15484  * resume the device.
15485  *
15486  * Return code
15487  *      0 - driver resumed the device
15488  *      Error otherwise
15489  **/
15490 static int __maybe_unused
15491 lpfc_pci_resume_one(struct device *dev)
15492 {
15493         struct Scsi_Host *shost = dev_get_drvdata(dev);
15494         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15495         int rc = -ENODEV;
15496
15497         switch (phba->pci_dev_grp) {
15498         case LPFC_PCI_DEV_LP:
15499                 rc = lpfc_pci_resume_one_s3(dev);
15500                 break;
15501         case LPFC_PCI_DEV_OC:
15502                 rc = lpfc_pci_resume_one_s4(dev);
15503                 break;
15504         default:
15505                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15506                                 "1426 Invalid PCI device group: 0x%x\n",
15507                                 phba->pci_dev_grp);
15508                 break;
15509         }
15510         return rc;
15511 }
15512
15513 /**
15514  * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15515  * @pdev: pointer to PCI device.
15516  * @state: the current PCI connection state.
15517  *
15518  * This routine is registered to the PCI subsystem for error handling. This
15519  * function is called by the PCI subsystem after a PCI bus error affecting
15520  * this device has been detected. When this routine is invoked, it dispatches
15521  * the action to the proper SLI-3 or SLI-4 device error detected handling
15522  * routine, which will perform the proper error detected operation.
15523  *
15524  * Return codes
15525  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15526  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15527  **/
15528 static pci_ers_result_t
15529 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
15530 {
15531         struct Scsi_Host *shost = pci_get_drvdata(pdev);
15532         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15533         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15534
15535         if (phba->link_state == LPFC_HBA_ERROR &&
15536             phba->hba_flag & HBA_IOQ_FLUSH)
15537                 return PCI_ERS_RESULT_NEED_RESET;
15538
15539         switch (phba->pci_dev_grp) {
15540         case LPFC_PCI_DEV_LP:
15541                 rc = lpfc_io_error_detected_s3(pdev, state);
15542                 break;
15543         case LPFC_PCI_DEV_OC:
15544                 rc = lpfc_io_error_detected_s4(pdev, state);
15545                 break;
15546         default:
15547                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15548                                 "1427 Invalid PCI device group: 0x%x\n",
15549                                 phba->pci_dev_grp);
15550                 break;
15551         }
15552         return rc;
15553 }
15554
15555 /**
15556  * lpfc_io_slot_reset - lpfc method to restart a PCI device from scratch
15557  * @pdev: pointer to PCI device.
15558  *
15559  * This routine is registered to the PCI subsystem for error handling. This
15560  * function is called after PCI bus has been reset to restart the PCI card
15561  * from scratch, as if from a cold-boot. When this routine is invoked, it
15562  * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15563  * routine, which will perform the proper device reset.
15564  *
15565  * Return codes
15566  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
15567  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15568  **/
15569 static pci_ers_result_t
15570 lpfc_io_slot_reset(struct pci_dev *pdev)
15571 {
15572         struct Scsi_Host *shost = pci_get_drvdata(pdev);
15573         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15574         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15575
15576         switch (phba->pci_dev_grp) {
15577         case LPFC_PCI_DEV_LP:
15578                 rc = lpfc_io_slot_reset_s3(pdev);
15579                 break;
15580         case LPFC_PCI_DEV_OC:
15581                 rc = lpfc_io_slot_reset_s4(pdev);
15582                 break;
15583         default:
15584                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15585                                 "1428 Invalid PCI device group: 0x%x\n",
15586                                 phba->pci_dev_grp);
15587                 break;
15588         }
15589         return rc;
15590 }
15591
15592 /**
15593  * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15594  * @pdev: pointer to PCI device
15595  *
15596  * This routine is registered to the PCI subsystem for error handling. It
15597  * is called when kernel error recovery tells the lpfc driver that it is
15598  * OK to resume normal PCI operation after PCI bus error recovery. When
15599  * this routine is invoked, it dispatches the action to the proper SLI-3
15600  * or SLI-4 device io_resume routine, which will resume the device operation.
15601  **/
15602 static void
15603 lpfc_io_resume(struct pci_dev *pdev)
15604 {
15605         struct Scsi_Host *shost = pci_get_drvdata(pdev);
15606         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15607
15608         switch (phba->pci_dev_grp) {
15609         case LPFC_PCI_DEV_LP:
15610                 lpfc_io_resume_s3(pdev);
15611                 break;
15612         case LPFC_PCI_DEV_OC:
15613                 lpfc_io_resume_s4(pdev);
15614                 break;
15615         default:
15616                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15617                                 "1429 Invalid PCI device group: 0x%x\n",
15618                                 phba->pci_dev_grp);
15619                 break;
15620         }
15621         return;
15622 }
15623
15624 /**
15625  * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15626  * @phba: pointer to lpfc hba data structure.
15627  *
15628  * This routine checks to see if OAS is supported for this adapter. If
15629  * supported, the Flash Optimized Fabric (fof) configuration flag is set.
15630  * Otherwise, the flag is cleared and the pool created for OAS device data
15631  * is destroyed.
15632  *
15633  **/
15634 static void
15635 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
15636 {
15638         if (!phba->cfg_EnableXLane)
15639                 return;
15640
15641         if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15642                 phba->cfg_fof = 1;
15643         } else {
15644                 phba->cfg_fof = 0;
15645                 mempool_destroy(phba->device_data_mem_pool);
15646                 phba->device_data_mem_pool = NULL;
15647         }
15648
15649         return;
15650 }
15651
15652 /**
15653  * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15654  * @phba: pointer to lpfc hba data structure.
15655  *
15656  * This routine checks to see if RAS is supported by the adapter and, if
15657  * so, whether firmware logging should be enabled on this PCI function.
15658  **/
15659 void
15660 lpfc_sli4_ras_init(struct lpfc_hba *phba)
15661 {
15662         /* if ASIC_GEN_NUM >= 0xC */
15663         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15664                     LPFC_SLI_INTF_IF_TYPE_6) ||
15665             (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15666                     LPFC_SLI_INTF_FAMILY_G6)) {
15667                 phba->ras_fwlog.ras_hwsupport = true;
15668                 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15669                     phba->cfg_ras_fwlog_buffsize)
15670                         phba->ras_fwlog.ras_enabled = true;
15671                 else
15672                         phba->ras_fwlog.ras_enabled = false;
15673         } else {
15674                 phba->ras_fwlog.ras_hwsupport = false;
15675         }
15676 }
15677
15678
15679 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
15680
15681 static const struct pci_error_handlers lpfc_err_handler = {
15682         .error_detected = lpfc_io_error_detected,
15683         .slot_reset = lpfc_io_slot_reset,
15684         .resume = lpfc_io_resume,
15685 };
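
/*
 * A sketch of how the PCI core drives these hooks during error recovery
 * (per Documentation/PCI/pci-error-recovery.rst); the callees are the lpfc
 * dispatchers defined above:
 *
 *	res = lpfc_io_error_detected(pdev, state);
 *	if (res == PCI_ERS_RESULT_NEED_RESET) {
 *		// the core resets the slot/link, then:
 *		if (lpfc_io_slot_reset(pdev) == PCI_ERS_RESULT_RECOVERED)
 *			lpfc_io_resume(pdev);	// traffic may flow again
 *	}
 *	// PCI_ERS_RESULT_DISCONNECT ends recovery; the device is detached
 */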
15686
15687 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
15688                          lpfc_pci_suspend_one,
15689                          lpfc_pci_resume_one);
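
/*
 * SIMPLE_DEV_PM_OPS() above is roughly equivalent to open-coding the
 * dev_pm_ops below (a sketch; the real macro also compiles the callbacks
 * away under !CONFIG_PM_SLEEP). It matches the "treat every PM message as
 * SUSPEND" policy noted in the comments earlier in this file:
 *
 *	static const struct dev_pm_ops lpfc_pci_pm_ops_one = {
 *		.suspend  = lpfc_pci_suspend_one,
 *		.freeze   = lpfc_pci_suspend_one,
 *		.poweroff = lpfc_pci_suspend_one,
 *		.resume   = lpfc_pci_resume_one,
 *		.thaw     = lpfc_pci_resume_one,
 *		.restore  = lpfc_pci_resume_one,
 *	};
 */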
15690
15691 static struct pci_driver lpfc_driver = {
15692         .name           = LPFC_DRIVER_NAME,
15693         .id_table       = lpfc_id_table,
15694         .probe          = lpfc_pci_probe_one,
15695         .remove         = lpfc_pci_remove_one,
15696         .shutdown       = lpfc_pci_remove_one,
15697         .driver.pm      = &lpfc_pci_pm_ops_one,
15698         .err_handler    = &lpfc_err_handler,
15699 };
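
/*
 * Design note: .shutdown reuses lpfc_pci_remove_one(), so a reboot or kexec
 * tears the HBA down along the same path as a hot-remove; this appears
 * intended to guarantee that no DMA or interrupts remain active when the
 * kernel hands control off.
 */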
15700
15701 static const struct file_operations lpfc_mgmt_fop = {
15702         .owner = THIS_MODULE,
15703 };
15704
15705 static struct miscdevice lpfc_mgmt_dev = {
15706         .minor = MISC_DYNAMIC_MINOR,
15707         .name = "lpfcmgmt",
15708         .fops = &lpfc_mgmt_fop,
15709 };
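
/*
 * Registering the miscdevice above (done in lpfc_init() below) creates
 * /dev/lpfcmgmt with a kernel-assigned minor. With an empty file_operations
 * the node supports no I/O of its own; it effectively gives management
 * utilities a stable device node whose presence signals that the lpfc
 * driver is loaded.
 */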
15710
15711 /**
15712  * lpfc_init - lpfc module initialization routine
15713  *
15714  * This routine is to be invoked when the lpfc module is loaded into the
15715  * kernel. The special kernel macro module_init() is used to indicate the
15716  * role of this routine to the kernel as lpfc module entry point.
15717  *
15718  * Return codes
15719  *   0 - successful
15720  *   -ENOMEM - FC attach transport failed
15721  *   all others - failed
15722  **/
15723 static int __init
15724 lpfc_init(void)
15725 {
15726         int error = 0;
15727
15728         pr_info(LPFC_MODULE_DESC "\n");
15729         pr_info(LPFC_COPYRIGHT "\n");
15730
15731         error = misc_register(&lpfc_mgmt_dev);
15732         if (error)
15733                 printk(KERN_ERR "Could not register lpfcmgmt device, "
15734                         "misc_register returned with status %d\n", error);
15735
15736         error = -ENOMEM;
15737         lpfc_transport_functions.vport_create = lpfc_vport_create;
15738         lpfc_transport_functions.vport_delete = lpfc_vport_delete;
15739         lpfc_transport_template =
15740                                 fc_attach_transport(&lpfc_transport_functions);
15741         if (lpfc_transport_template == NULL)
15742                 goto unregister;
15743         lpfc_vport_transport_template =
15744                 fc_attach_transport(&lpfc_vport_transport_functions);
15745         if (lpfc_vport_transport_template == NULL) {
15746                 fc_release_transport(lpfc_transport_template);
15747                 goto unregister;
15748         }
15749         lpfc_wqe_cmd_template();
15750         lpfc_nvmet_cmd_template();
15751
15752         /* Initialize in case vector mapping is needed */
15753         lpfc_present_cpu = num_present_cpus();
15754
15755         lpfc_pldv_detect = false;
15756
15757         error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
15758                                         "lpfc/sli4:online",
15759                                         lpfc_cpu_online, lpfc_cpu_offline);
15760         if (error < 0)
15761                 goto cpuhp_failure;
15762         lpfc_cpuhp_state = error;
15763
15764         error = pci_register_driver(&lpfc_driver);
15765         if (error)
15766                 goto unwind;
15767
15768         return error;
15769
15770 unwind:
15771         cpuhp_remove_multi_state(lpfc_cpuhp_state);
15772 cpuhp_failure:
15773         fc_release_transport(lpfc_transport_template);
15774         fc_release_transport(lpfc_vport_transport_template);
15775 unregister:
15776         misc_deregister(&lpfc_mgmt_dev);
15777
15778         return error;
15779 }
15780
15781 void lpfc_dmp_dbg(struct lpfc_hba *phba)
15782 {
15783         unsigned int start_idx;
15784         unsigned int dbg_cnt;
15785         unsigned int temp_idx;
15786         int i;
15787         int j = 0;
15788         unsigned long rem_nsec;
15789
15790         if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
15791                 return;
15792
15793         start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15794         dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
15795         if (!dbg_cnt)
15796                 goto out;
15797         temp_idx = start_idx;
15798         if (dbg_cnt >= DBG_LOG_SZ) {
15799                 dbg_cnt = DBG_LOG_SZ;
15800                 temp_idx -= 1;
15801         } else {
15802                 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
15803                         temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
15804                 } else {
15805                         if (start_idx < dbg_cnt)
15806                                 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
15807                         else
15808                                 start_idx -= dbg_cnt;
15809                 }
15810         }
15811         dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15812                  start_idx, temp_idx, dbg_cnt);
15813
15814         for (i = 0; i < dbg_cnt; i++) {
15815                 if ((start_idx + i) < DBG_LOG_SZ)
15816                         temp_idx = (start_idx + i) % DBG_LOG_SZ;
15817                 else
15818                         temp_idx = j++;
15819                 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
15820                 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
15821                          temp_idx,
15822                          (unsigned long)phba->dbg_log[temp_idx].t_ns,
15823                          rem_nsec / 1000,
15824                          phba->dbg_log[temp_idx].log);
15825         }
15826 out:
15827         atomic_set(&phba->dbg_log_cnt, 0);
15828         atomic_set(&phba->dbg_log_dmping, 0);
15829 }
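
/*
 * Worked example of the index math above, assuming a DBG_LOG_SZ of 256
 * (illustrative value): with start_idx == 10 (the next slot to be written)
 * and dbg_cnt == 20, fewer messages than the ring holds have arrived, so
 * the oldest entry sits at 256 - (20 - 10) = 246. The print loop then
 * walks slots 246..255 directly and picks up slots 0..9 via the j counter,
 * emitting all 20 messages oldest-first.
 */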
15830
15831 __printf(2, 3)
15832 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
15833 {
15834         unsigned int idx;
15835         va_list args;
15836         int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15837         struct va_format vaf;
15838
15839
15841         if (unlikely(dbg_dmping)) {
15842                 vaf.fmt = fmt;
15843                 vaf.va = &args;
15844                 dev_info(&phba->pcidev->dev, "%pV", &vaf);
15845                 va_end(args);
15846                 return;
15847         }
15848         idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
15849                 DBG_LOG_SZ;
15850
15851         atomic_inc(&phba->dbg_log_cnt);
15852
15853         vscnprintf(phba->dbg_log[idx].log,
15854                    sizeof(phba->dbg_log[idx].log), fmt, args);
15855         va_end(args);
15856
15857         phba->dbg_log[idx].t_ns = local_clock();
15858 }
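
/*
 * Typical usage (a hypothetical call site): lpfc_dbg_print() is a cheap
 * ring-buffer write until something dumps the ring, e.g.:
 *
 *	lpfc_dbg_print(phba, "io error, state x%x\n", phba->link_state);
 *	...
 *	lpfc_dmp_dbg(phba);	// on an error path, flush the ring to console
 */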
15859
15860 /**
15861  * lpfc_exit - lpfc module removal routine
15862  *
15863  * This routine is invoked when the lpfc module is removed from the kernel.
15864  * The special kernel macro module_exit() is used to indicate the role of
15865  * this routine to the kernel as lpfc module exit point.
15866  **/
15867 static void __exit
15868 lpfc_exit(void)
15869 {
15870         misc_deregister(&lpfc_mgmt_dev);
15871         pci_unregister_driver(&lpfc_driver);
15872         cpuhp_remove_multi_state(lpfc_cpuhp_state);
15873         fc_release_transport(lpfc_transport_template);
15874         fc_release_transport(lpfc_vport_transport_template);
15875         idr_destroy(&lpfc_hba_index);
15876 }
15877
15878 module_init(lpfc_init);
15879 module_exit(lpfc_exit);
15880 MODULE_LICENSE("GPL");
15881 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
15882 MODULE_AUTHOR("Broadcom");
15883 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);