/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) needed to prepare the HBA
 * configuration.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
        lpfc_vpd_t *vp = &phba->vpd;
        int i = 0, rc;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        char *lpfc_vpd_data = NULL;
        uint16_t offset = 0;
        static char licensed[56] =
                    "key unlock for use with gnu public licensed code only\0";
        static int init_key = 1;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        mb = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;

        if (lpfc_is_LC_HBA(phba->pcidev->device)) {
                if (init_key) {
                        uint32_t *ptext = (uint32_t *) licensed;

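                        /* Byte-swap the license key string into big-endian
                         * words; init_key guards this so it runs only once.
                         */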
                        for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
                                *ptext = cpu_to_be32(*ptext);
                        init_key = 0;
                }

                lpfc_read_nv(phba, pmb);
                memset((char*)mb->un.varRDnvp.rsvd3, 0,
                        sizeof (mb->un.varRDnvp.rsvd3));
                memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
                         sizeof (licensed));

                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0324 Config Port initialization "
                                        "error, mbxCmd x%x READ_NVPARM, "
                                        "mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -ERESTART;
                }
                memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
                       sizeof(phba->wwnn));
                memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
                       sizeof(phba->wwpn));
        }

        /*
         * Clear all option bits except LPFC_SLI3_BG_ENABLED,
         * which was already set in lpfc_get_cfgparam()
         */
        phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

        /* Setup and issue mailbox READ REV command */
        lpfc_read_rev(phba, pmb);
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        if (rc != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0439 Adapter failed to init, mbxCmd x%x "
                                "READ_REV, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        /*
         * The value of rr must be 1 since the driver sets the cv field to 1.
         * This setting requires the FW to set all revision fields.
         */
        if (mb->un.varRdRev.rr == 0) {
                vp->rev.rBit = 0;
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0440 Adapter failed to init, READ_REV has "
                                "missing revision information.\n");
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EINVAL;
        }

        /* Save information as VPD data */
        vp->rev.rBit = 1;
        memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
        vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
        memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
        vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
        memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
        vp->rev.biuRev = mb->un.varRdRev.biuRev;
        vp->rev.smRev = mb->un.varRdRev.smRev;
        vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
        vp->rev.endecRev = mb->un.varRdRev.endecRev;
        vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
        vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
        vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
        vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
        vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
        vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

        /* If the sli feature level is less than 9, we must
         * tear down all RPIs and VPIs on link down if NPIV
         * is enabled.
         */
        if (vp->rev.feaLevelHigh < 9)
                phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

        if (lpfc_is_LC_HBA(phba->pcidev->device))
                memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
                                                sizeof (phba->RandomData));

        /* Get adapter VPD information */
        lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
        if (!lpfc_vpd_data)
                goto out_free_mbox;
        do {
                lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "0441 VPD not present on adapter, "
                                        "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mb->un.varDmp.word_cnt = 0;
                }
                /* dump mem may return a zero when finished or we got a
                 * mailbox error; either way we are done.
                 */
                if (mb->un.varDmp.word_cnt == 0)
                        break;
                if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
                        mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
                                      lpfc_vpd_data + offset,
                                      mb->un.varDmp.word_cnt);
                offset += mb->un.varDmp.word_cnt;
        } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
        lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

        kfree(lpfc_vpd_data);
out_free_mbox:
        mempool_free(pmb, phba->mbox_mem_pool);
        return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, it sets the internal async event support
 * flag to 1; otherwise, it sets the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
        if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
                phba->temp_sensor_support = 1;
        else
                phba->temp_sensor_support = 0;
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command that retrieves
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        struct prog_id *prg;
        uint32_t prog_id_word;
        char dist = ' ';
        /* character array used for decoding dist type. */
        char dist_char[] = "nabx";

        if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
                mempool_free(pmboxq, phba->mbox_mem_pool);
                return;
        }

        prg = (struct prog_id *) &prog_id_word;

        /* word 7 contains the option rom version */
        prog_id_word = pmboxq->u.mb.un.varWords[7];

        /* Decode the Option rom version word to a readable string */
        if (prg->dist < 4)
                dist = dist_char[prg->dist];

        if ((prg->dist == 3) && (prg->num == 0))
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
                        prg->ver, prg->rev, prg->lev);
        else
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
                        prg->ver, prg->rev, prg->lev,
                        dist, prg->num);
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *      cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
        uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
        u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

        /* If the soft name exists then update it using the service params */
        if (vport->phba->cfg_soft_wwnn)
                u64_to_wwn(vport->phba->cfg_soft_wwnn,
                           vport->fc_sparam.nodeName.u.wwn);
        if (vport->phba->cfg_soft_wwpn)
                u64_to_wwn(vport->phba->cfg_soft_wwpn,
                           vport->fc_sparam.portName.u.wwn);

        /*
         * If the name is empty or there exists a soft name
         * then copy the service params name, otherwise use the fc name
         */
        if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
                memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
                        sizeof(struct lpfc_name));
        else
                memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
                        sizeof(struct lpfc_name));

        /*
         * If the port name has changed, then set the Param changes flag
         * to unreg the login
         */
        if (vport->fc_portname.u.wwn[0] != 0 &&
                memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
                        sizeof(struct lpfc_name)))
                vport->vport_flag |= FAWWPN_PARAM_CHG;

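        /* Take the WWPN from the service params when the port name is
         * unset, a soft WWPN is configured, or the fabric assigned the
         * WWPN (FA-PWWN vendor key present, or FAWWPN_SET already flagged).
         */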
        if (vport->fc_portname.u.wwn[0] == 0 ||
            vport->phba->cfg_soft_wwpn ||
            (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
            vport->vport_flag & FAWWPN_SET) {
                memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
                        sizeof(struct lpfc_name));
                vport->vport_flag &= ~FAWWPN_SET;
                if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
                        vport->vport_flag |= FAWWPN_SET;
        } else
                memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
                        sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t status, timeout;
        int i, j;
        int rc;

        spin_lock_irq(&phba->hbalock);
        /*
         * If the Config port completed correctly the HBA is not
         * overheated any more.
         */
        if (phba->over_temp_state == HBA_OVER_TEMP)
                phba->over_temp_state = HBA_NORMAL_TEMP;
        spin_unlock_irq(&phba->hbalock);

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;

        /* Get login parameters for NID.  */
        rc = lpfc_read_sparam(phba, pmb, 0);
        if (rc) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ENOMEM;
        }

        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0448 Adapter failed init, mbxCmd x%x "
                                "READ_SPARM mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                return -EIO;
        }

        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

        memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        pmb->ctx_buf = NULL;
        lpfc_update_vport_wwn(vport);

        /* Update the fc_host data structures with new wwn. */
        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
        fc_host_max_npiv_vports(shost) = phba->max_vpi;

        /* If no serial number in VPD data, use low 6 bytes of WWNN */
        /* This should be consolidated into parse_vpd ? - mr */
        if (phba->SerialNumber[0] == 0) {
                uint8_t *outptr;

                outptr = &vport->fc_nodename.u.s.IEEE[0];
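                /* Encode each WWN byte as two characters: '0'-'9' for
                 * nibble values 0-9 and 'a'-'f' for values 10-15.
                 */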
                for (i = 0; i < 12; i++) {
                        status = *outptr++;
                        j = ((status & 0xf0) >> 4);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                        i++;
                        j = (status & 0xf);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                }
        }

        lpfc_read_config(phba, pmb);
        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0453 Adapter failed to init, mbxCmd x%x "
                                "READ_CONFIG, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }

        /* Check if the port is disabled */
        lpfc_sli_read_link_ste(phba);

        /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
        if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "3359 HBA queue depth changed from %d to %d\n",
                                phba->cfg_hba_queue_depth,
                                mb->un.varRdConfig.max_xri);
                phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
        }

        phba->lmt = mb->un.varRdConfig.lmt;

        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

        phba->link_state = LPFC_LINK_DOWN;

        /* Only process IOCBs on ELS ring until hba_state is READY */
        if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
                psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
                psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

        /* Post receive buffers for desired rings */
        if (phba->sli_rev != 3)
                lpfc_post_rcv_buf(phba);

        /*
         * Configure HBA MSI-X attention conditions to messages if MSI-X mode
         */
        if (phba->intr_type == MSIX) {
                rc = lpfc_config_msi(phba, pmb);
                if (rc) {
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0352 Config MSI mailbox command "
                                        "failed, mbxCmd x%x, mbxStatus x%x\n",
                                        pmb->u.mb.mbxCommand,
                                        pmb->u.mb.mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        }

        spin_lock_irq(&phba->hbalock);
        /* Initialize ERATT handling flag */
        phba->hba_flag &= ~HBA_ERATT_HANDLED;

        /* Enable appropriate host interrupts */
        if (lpfc_readl(phba->HCregaddr, &status)) {
                spin_unlock_irq(&phba->hbalock);
                return -EIO;
        }
        status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
        if (psli->num_rings > 0)
                status |= HC_R0INT_ENA;
        if (psli->num_rings > 1)
                status |= HC_R1INT_ENA;
        if (psli->num_rings > 2)
                status |= HC_R2INT_ENA;
        if (psli->num_rings > 3)
                status |= HC_R3INT_ENA;

        if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
            (phba->cfg_poll & DISABLE_FCP_RING_INT))
                status &= ~(HC_R0INT_ENA);

        writel(status, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
        mod_timer(&vport->els_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * timeout));
        /* Set up heart beat (HB) timer */
        mod_timer(&phba->hb_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
        mod_timer(&phba->eratt_poll,
                  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2598 Adapter Link is disabled.\n");
                lpfc_down_link(phba, pmb);
                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "2599 Adapter failed to issue DOWN_LINK"
                                        " mbox command rc 0x%x\n", rc);

                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
                mempool_free(pmb, phba->mbox_mem_pool);
                rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
                if (rc)
                        return rc;
        }
        /* MBOX buffer will be freed in mbox compl */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_config_async(phba, pmb, LPFC_ELS_RING);
        pmb->mbox_cmpl = lpfc_config_async_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0456 Adapter failed to issue "
                                "ASYNCEVT_ENABLE mbox status x%x\n",
                                rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        /* Get Option rom version */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_dump_wakeup_param(phba, pmb);
        pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
                                "to get Option ROM version status x%x\n", rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
        return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
                               uint32_t flag)
{
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;
        pmb->vport = vport;

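        /* Reject any configured link speed that the adapter's link-mode
         * mask (lmt) does not support.
         */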
        if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
             !(phba->lmt & LMT_1Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
             !(phba->lmt & LMT_2Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
             !(phba->lmt & LMT_4Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
             !(phba->lmt & LMT_8Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
             !(phba->lmt & LMT_10Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
             !(phba->lmt & LMT_16Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
             !(phba->lmt & LMT_32Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
             !(phba->lmt & LMT_64Gb))) {
                /* Reset link speed to auto */
                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                        "1302 Invalid speed for this board:%d "
                        "Reset link speed to auto.\n",
                        phba->cfg_link_speed);
                phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
        }
        lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        if (phba->sli_rev < LPFC_SLI_REV4)
                lpfc_set_loopback_flag(phba);
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0498 Adapter failed to init, mbxCmd x%x "
                        "INIT_LINK, mbxStatus x%x\n",
                        mb->mbxCommand, mb->mbxStatus);
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        /* Clear all interrupt enable conditions */
                        writel(0, phba->HCregaddr);
                        readl(phba->HCregaddr); /* flush */
                        /* Clear all pending interrupts */
                        writel(0xffffffff, phba->HAregaddr);
                        readl(phba->HAregaddr); /* flush */
                }
                phba->link_state = LPFC_HBA_ERROR;
                if (rc != MBX_BUSY || flag == MBX_POLL)
                        mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0491 Adapter Link is disabled.\n");
        lpfc_down_link(phba, pmb);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2522 Adapter failed to issue DOWN_LINK"
                                " mbox command rc 0x%x\n", rc);

                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        if (phba->sli_rev <= LPFC_SLI_REV3) {
                /* Disable interrupts */
                writel(0, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        if (phba->pport->load_flag & FC_UNLOADING)
                lpfc_cleanup_discovery_resources(phba->pport);
        else {
                vports = lpfc_create_vport_work_array(phba);
                if (vports != NULL)
                        for (i = 0; i <= phba->max_vports &&
                                vports[i] != NULL; i++)
                                lpfc_cleanup_discovery_resources(vports[i]);
                lpfc_destroy_vport_work_array(phba, vports);
        }
        return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *rspiocbq;
        struct hbq_dmabuf *dmabuf;
        struct lpfc_cq_event *cq_event;

        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
        spin_unlock_irq(&phba->hbalock);

        while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
                /* Get the response iocb from the head of work queue */
                spin_lock_irq(&phba->hbalock);
                list_remove_head(&phba->sli4_hba.sp_queue_event,
                                 cq_event, struct lpfc_cq_event, list);
                spin_unlock_irq(&phba->hbalock);

                switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
                case CQE_CODE_COMPL_WQE:
                        rspiocbq = container_of(cq_event, struct lpfc_iocbq,
                                                 cq_event);
                        lpfc_sli_release_iocbq(phba, rspiocbq);
                        break;
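                /* Received frames hold an HBQ buffer that must be
                 * returned to the pool.
                 */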
                case CQE_CODE_RECEIVE:
                case CQE_CODE_RECEIVE_V1:
                        dmabuf = container_of(cq_event, struct hbq_dmabuf,
                                              cq_event);
                        lpfc_in_buf_free(phba, &dmabuf->dbuf);
                }
        }
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        struct lpfc_dmabuf *mp, *next_mp;
        LIST_HEAD(buflist);
        int count;

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
                lpfc_sli_hbqbuf_free_all(phba);
        else {
                /* Cleanup preposted buffers on the ELS ring */
                pring = &psli->sli3_ring[LPFC_ELS_RING];
                spin_lock_irq(&phba->hbalock);
                list_splice_init(&pring->postbufq, &buflist);
                spin_unlock_irq(&phba->hbalock);

                count = 0;
                list_for_each_entry_safe(mp, next_mp, &buflist, list) {
                        list_del(&mp->list);
                        count++;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }

                spin_lock_irq(&phba->hbalock);
                pring->postbufq_cnt -= count;
                spin_unlock_irq(&phba->hbalock);
        }
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_queue *qp = NULL;
        struct lpfc_sli_ring *pring;
        LIST_HEAD(completions);
        int i;
        struct lpfc_iocbq *piocb, *next_iocb;

        if (phba->sli_rev != LPFC_SLI_REV4) {
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->sli3_ring[i];
                        spin_lock_irq(&phba->hbalock);
                        /* At this point in time the HBA is either reset or
                         * DOA. Nothing should be on txcmplq as it will
                         * NEVER complete.
                         */
                        list_splice_init(&pring->txcmplq, &completions);
                        pring->txcmplq_cnt = 0;
                        spin_unlock_irq(&phba->hbalock);

                        lpfc_sli_abort_iocb_ring(phba, pring);
                }
                /* Cancel all the IOCBs from the completions list */
                lpfc_sli_cancel_iocbs(phba, &completions,
                                      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
                return;
        }
        list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
                pring = qp->pring;
                if (!pring)
                        continue;
                spin_lock_irq(&pring->ring_lock);
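                /* Clear the on-txcmplq marker on each IOCB before splicing
                 * the list off the ring.
                 */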
                list_for_each_entry_safe(piocb, next_iocb,
                                         &pring->txcmplq, list)
                        piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
                list_splice_init(&pring->txcmplq, &completions);
                pring->txcmplq_cnt = 0;
                spin_unlock_irq(&pring->ring_lock);
                lpfc_sli_abort_iocb_ring(phba, pring);
        }
        /* Cancel all the IOCBs from the completions list */
        lpfc_sli_cancel_iocbs(phba, &completions,
                              IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
        lpfc_hba_free_post_buf(phba);
        lpfc_hba_clean_txcmplq(phba);
        return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
        struct lpfc_io_buf *psb, *psb_next;
        struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
        struct lpfc_sli4_hdw_queue *qp;
        LIST_HEAD(aborts);
        LIST_HEAD(nvme_aborts);
        LIST_HEAD(nvmet_aborts);
        struct lpfc_sglq *sglq_entry = NULL;
        int cnt, idx;

        lpfc_sli_hbqbuf_free_all(phba);
        lpfc_hba_clean_txcmplq(phba);

        /* At this point in time the HBA is either reset or DOA. Either
         * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
         * on the lpfc_els_sgl_list so that it can either be freed if the
         * driver is unloading or reposted if the driver is restarting
         * the port.
         */
        spin_lock_irq(&phba->hbalock);  /* required for lpfc_els_sgl_list and */
                                        /* scsl_buf_list */
        /* sgl_list_lock required because worker thread uses this
         * list.
         */
        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_for_each_entry(sglq_entry,
                &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
                sglq_entry->state = SGL_FREED;

        list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
                        &phba->sli4_hba.lpfc_els_sgl_list);

        spin_unlock(&phba->sli4_hba.sgl_list_lock);

        /* abts_xxxx_buf_list_lock required because worker thread uses this
         * list.
         */
        cnt = 0;
        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                qp = &phba->sli4_hba.hdwq[idx];

                spin_lock(&qp->abts_io_buf_list_lock);
                list_splice_init(&qp->lpfc_abts_io_buf_list,
                                 &aborts);

                list_for_each_entry_safe(psb, psb_next, &aborts, list) {
                        psb->pCmd = NULL;
                        psb->status = IOSTAT_SUCCESS;
                        cnt++;
                }
                spin_lock(&qp->io_buf_list_put_lock);
                list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
                qp->put_io_bufs += qp->abts_scsi_io_bufs;
                qp->put_io_bufs += qp->abts_nvme_io_bufs;
                qp->abts_scsi_io_bufs = 0;
                qp->abts_nvme_io_bufs = 0;
                spin_unlock(&qp->io_buf_list_put_lock);
                spin_unlock(&qp->abts_io_buf_list_lock);
        }
        spin_unlock_irq(&phba->hbalock);

        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 &nvmet_aborts);
                spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
                        ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
                        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
                }
        }

        lpfc_sli4_free_sp_events(phba);
        return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
        return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: pointer to the timer that fired; used to look up the lpfc hba data
 * structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event is posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
        struct lpfc_hba *phba;
        uint32_t tmo_posted;
        unsigned long iflag;

        phba = from_timer(phba, t, hb_tmofunc);

        /* Check for heart beat timeout conditions */
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
        if (!tmo_posted)
                phba->pport->work_port_events |= WORKER_HB_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        /* Tell the worker thread there is work to do */
        if (!tmo_posted)
                lpfc_worker_wake_up(phba);
        return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: pointer to the timer that fired; used to look up the lpfc hba data
 * structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event is posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
        struct lpfc_hba *phba;
        unsigned long iflag;

        phba = from_timer(phba, t, rrq_tmr);
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        if (!(phba->pport->load_flag & FC_UNLOADING))
                phba->hba_flag |= HBA_RRQ_ACTIVE;
        else
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        if (!(phba->pport->load_flag & FC_UNLOADING))
                lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver sets up a heart-beat
 * timeout timer of LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks
 * the heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions are detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
        unsigned long drvr_flag;

        spin_lock_irqsave(&phba->hbalock, drvr_flag);
        phba->hb_outstanding = 0;
        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

        /* Check and reset the heart-beat timer if necessary */
        mempool_free(pmboxq, phba->mbox_mem_pool);
        if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
                !(phba->link_state == LPFC_HBA_ERROR) &&
                !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
                          jiffies +
                          msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        return;
}

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
        struct lpfc_hba *phba = container_of(to_delayed_work(work),
                                             struct lpfc_hba, eq_delay_work);
        struct lpfc_eq_intr_info *eqi, *eqi_new;
        struct lpfc_queue *eq, *eq_next;
        unsigned char *ena_delay = NULL;
        uint32_t usdelay;
        int i;

        if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
                return;

        if (phba->link_state == LPFC_HBA_ERROR ||
            phba->pport->fc_flag & FC_OFFLINE_MODE)
                goto requeue;

        ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
                            GFP_KERNEL);
        if (!ena_delay)
                goto requeue;

        for (i = 0; i < phba->cfg_irq_chann; i++) {
                /* Get the EQ corresponding to the IRQ vector */
                eq = phba->sli4_hba.hba_eq_hdl[i].eq;
                if (!eq)
                        continue;
                if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
                        eq->q_flag &= ~HBA_EQ_DELAY_CHK;
                        ena_delay[eq->last_cpu] = 1;
                }
        }

        for_each_present_cpu(i) {
                eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
                if (ena_delay[i]) {
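                        /* Scale the per-CPU interrupt count (in units of
                         * 1024 events) into a delay step, capped at the
                         * maximum auto EQ delay.
                         */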
                        usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
                        if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
                                usdelay = LPFC_MAX_AUTO_EQ_DELAY;
                } else {
                        usdelay = 0;
                }

                eqi->icnt = 0;

                list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
                        if (unlikely(eq->last_cpu != i)) {
                                eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
                                                      eq->last_cpu);
                                list_move_tail(&eq->cpu_list, &eqi_new->list);
                                continue;
                        }
                        if (usdelay != eq->q_mode)
                                lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
                                                         usdelay);
                }
        }

        kfree(ena_delay);

requeue:
        queue_delayed_work(phba->wq, &phba->eq_delay_work,
                           msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine applies some heuristics to adjust the
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
        u32 i;
        u32 hwq_count;

        hwq_count = phba->cfg_hdw_queue;
        for (i = 0; i < hwq_count; i++) {
                /* Adjust XRIs in private pool */
                lpfc_adjust_pvt_pool_count(phba, i);

                /* Adjust high watermark */
                lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
                /* Snapshot pbl, pvt and busy count */
                lpfc_snapshot_mxp(phba, i);
#endif
        }
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to, either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets the
 * timer for the next timeout period. If the lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer set properly. Otherwise, if
 * there has been a heart-beat mailbox command outstanding, the HBA is put
 * offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        LPFC_MBOXQ_t *pmboxq;
        struct lpfc_dmabuf *buf_ptr;
        int retval, i;
        struct lpfc_sli *psli = &phba->sli;
        LIST_HEAD(completions);

        if (phba->cfg_xri_rebalancing) {
                /* Multi-XRI pools handler */
                lpfc_hb_mxp_handler(phba);
        }

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        lpfc_rcv_seq_check_edtov(vports[i]);
                        lpfc_fdmi_change_check(vports[i]);
                }
        lpfc_destroy_vport_work_array(phba, vports);

        if ((phba->link_state == LPFC_HBA_ERROR) ||
                (phba->pport->load_flag & FC_UNLOADING) ||
                (phba->pport->fc_flag & FC_OFFLINE_MODE))
                return;

        spin_lock_irq(&phba->pport->work_port_lock);

        if (time_after(phba->last_completion_time +
                        msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
                        jiffies)) {
                spin_unlock_irq(&phba->pport->work_port_lock);
                if (!phba->hb_outstanding)
                        mod_timer(&phba->hb_tmofunc,
                                jiffies +
                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
                else
                        mod_timer(&phba->hb_tmofunc,
                                jiffies +
                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                return;
        }
        spin_unlock_irq(&phba->pport->work_port_lock);

1380         if (phba->elsbuf_cnt &&
1381                 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1382                 spin_lock_irq(&phba->hbalock);
1383                 list_splice_init(&phba->elsbuf, &completions);
1384                 phba->elsbuf_cnt = 0;
1385                 phba->elsbuf_prev_cnt = 0;
1386                 spin_unlock_irq(&phba->hbalock);
1387
1388                 while (!list_empty(&completions)) {
1389                         list_remove_head(&completions, buf_ptr,
1390                                 struct lpfc_dmabuf, list);
1391                         lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1392                         kfree(buf_ptr);
1393                 }
1394         }
1395         phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1396
1397         /* If there is no heart beat outstanding, issue a heartbeat command */
1398         if (phba->cfg_enable_hba_heartbeat) {
1399                 if (!phba->hb_outstanding) {
1400                         if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1401                                 (list_empty(&psli->mboxq))) {
1402                                 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1403                                                         GFP_KERNEL);
1404                                 if (!pmboxq) {
1405                                         mod_timer(&phba->hb_tmofunc,
1406                                                  jiffies +
1407                                                  msecs_to_jiffies(1000 *
1408                                                  LPFC_HB_MBOX_INTERVAL));
1409                                         return;
1410                                 }
1411
1412                                 lpfc_heart_beat(phba, pmboxq);
1413                                 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1414                                 pmboxq->vport = phba->pport;
1415                                 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1416                                                 MBX_NOWAIT);
1417
1418                                 if (retval != MBX_BUSY &&
1419                                         retval != MBX_SUCCESS) {
1420                                         mempool_free(pmboxq,
1421                                                         phba->mbox_mem_pool);
1422                                         mod_timer(&phba->hb_tmofunc,
1423                                                 jiffies +
1424                                                 msecs_to_jiffies(1000 *
1425                                                 LPFC_HB_MBOX_INTERVAL));
1426                                         return;
1427                                 }
1428                                 phba->skipped_hb = 0;
1429                                 phba->hb_outstanding = 1;
1430                         } else if (time_before_eq(phba->last_completion_time,
1431                                         phba->skipped_hb)) {
1432                                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1433                                         "2857 Last completion time not "
1434                                         "updated in %d ms\n",
1435                                         jiffies_to_msecs(jiffies
1436                                                  - phba->last_completion_time));
1437                         } else
1438                                 phba->skipped_hb = jiffies;
1439
1440                         mod_timer(&phba->hb_tmofunc,
1441                                  jiffies +
1442                                  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1443                         return;
1444                 } else {
1445                         /*
1446                          * If the heart-beat timeout fires with hb_outstanding
1447                          * set, we need to give the hb mailbox cmd a chance to
1448                          * complete or time out.
1449                          */
1450                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1451                                         "0459 Adapter heartbeat still "
1452                                         "outstanding: last compl time was %d ms.\n",
1453                                         jiffies_to_msecs(jiffies
1454                                                  - phba->last_completion_time));
1455                         mod_timer(&phba->hb_tmofunc,
1456                                 jiffies +
1457                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1458                 }
1459         } else {
1460                 mod_timer(&phba->hb_tmofunc,
1461                           jiffies +
1462                           msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1463         }
1464 }
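
/*
 * Illustrative timing (a sketch, assuming the header values
 * LPFC_HB_MBOX_INTERVAL = 5 and LPFC_HB_MBOX_TIMEOUT = 30): while completions
 * are flowing, the handler re-arms hb_tmofunc for jiffies +
 * msecs_to_jiffies(5000); once a heart-beat mailbox command is outstanding,
 * it allows up to 30 seconds for completion before treating the heartbeat as
 * lost.
 */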
1465
1466 /**
1467  * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1468  * @phba: pointer to lpfc hba data structure.
1469  *
1470  * This routine is called to bring the HBA offline when an HBA hardware
1471  * error other than Port Error 6 has been detected.
1472  **/
1473 static void
1474 lpfc_offline_eratt(struct lpfc_hba *phba)
1475 {
1476         struct lpfc_sli   *psli = &phba->sli;
1477
1478         spin_lock_irq(&phba->hbalock);
1479         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1480         spin_unlock_irq(&phba->hbalock);
1481         lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1482
1483         lpfc_offline(phba);
1484         lpfc_reset_barrier(phba);
1485         spin_lock_irq(&phba->hbalock);
1486         lpfc_sli_brdreset(phba);
1487         spin_unlock_irq(&phba->hbalock);
1488         lpfc_hba_down_post(phba);
1489         lpfc_sli_brdready(phba, HS_MBRDY);
1490         lpfc_unblock_mgmt_io(phba);
1491         phba->link_state = LPFC_HBA_ERROR;
1492         return;
1493 }
1494
1495 /**
1496  * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1497  * @phba: pointer to lpfc hba data structure.
1498  *
1499  * This routine is called to bring an SLI4 HBA offline when an HBA hardware
1500  * error other than Port Error 6 has been detected.
1501  **/
1502 void
1503 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1504 {
1505         spin_lock_irq(&phba->hbalock);
1506         phba->link_state = LPFC_HBA_ERROR;
1507         spin_unlock_irq(&phba->hbalock);
1508
1509         lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1510         lpfc_sli_flush_io_rings(phba);
1511         lpfc_offline(phba);
1512         lpfc_hba_down_post(phba);
1513         lpfc_unblock_mgmt_io(phba);
1514 }
1515
1516 /**
1517  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1518  * @phba: pointer to lpfc hba data structure.
1519  *
1520  * This routine is invoked to handle the deferred HBA hardware error
1521  * conditions. This type of error is indicated by the HBA setting ER1
1522  * and another ER bit in the host status register. The driver waits
1523  * until the ER1 bit clears before handling the error condition.
1524  **/
1525 static void
1526 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1527 {
1528         uint32_t old_host_status = phba->work_hs;
1529         struct lpfc_sli *psli = &phba->sli;
1530
1531         /* If the pci channel is offline, ignore possible errors,
1532          * since we cannot communicate with the pci card anyway.
1533          */
1534         if (pci_channel_offline(phba->pcidev)) {
1535                 spin_lock_irq(&phba->hbalock);
1536                 phba->hba_flag &= ~DEFER_ERATT;
1537                 spin_unlock_irq(&phba->hbalock);
1538                 return;
1539         }
1540
1541         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1542                 "0479 Deferred Adapter Hardware Error "
1543                 "Data: x%x x%x x%x\n",
1544                 phba->work_hs,
1545                 phba->work_status[0], phba->work_status[1]);
1546
1547         spin_lock_irq(&phba->hbalock);
1548         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1549         spin_unlock_irq(&phba->hbalock);
1550
1552         /*
1553          * The firmware stops when it triggers an error attention, which can
1554          * cause I/Os to be dropped by the firmware. Error out the iocbs on
1555          * the txcmplq and let the SCSI layer retry them after link recovery.
1556          */
1557         lpfc_sli_abort_fcp_rings(phba);
1558
1559         /*
1560          * There was a firmware error. Take the hba offline and then
1561          * attempt to restart it.
1562          */
1563         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1564         lpfc_offline(phba);
1565
1566         /* Wait for the ER1 bit to clear. */
1567         while (phba->work_hs & HS_FFER1) {
1568                 msleep(100);
1569                 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1570                         phba->work_hs = UNPLUG_ERR;
1571                         break;
1572                 }
1573                 /* If driver is unloading let the worker thread continue */
1574                 if (phba->pport->load_flag & FC_UNLOADING) {
1575                         phba->work_hs = 0;
1576                         break;
1577                 }
1578         }
1579
1580         /*
1581          * This is to protect against a race condition in which the
1582          * first write to the host attention register clears the
1583          * host status register.
1584          */
1585         if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1586                 phba->work_hs = old_host_status & ~HS_FFER1;
1587
1588         spin_lock_irq(&phba->hbalock);
1589         phba->hba_flag &= ~DEFER_ERATT;
1590         spin_unlock_irq(&phba->hbalock);
1591         phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1592         phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1593 }
1594
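/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT/LPFC_EVENT_PORTINTERR vendor event
 * to the management application through the fc_host vendor event interface.
 **/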
1595 static void
1596 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1597 {
1598         struct lpfc_board_event_header board_event;
1599         struct Scsi_Host *shost;
1600
1601         board_event.event_type = FC_REG_BOARD_EVENT;
1602         board_event.subcategory = LPFC_EVENT_PORTINTERR;
1603         shost = lpfc_shost_from_vport(phba->pport);
1604         fc_host_post_vendor_event(shost, fc_get_event_number(),
1605                                   sizeof(board_event),
1606                                   (char *) &board_event,
1607                                   LPFC_NL_VENDOR_ID);
1608 }
1609
1610 /**
1611  * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1612  * @phba: pointer to lpfc hba data structure.
1613  *
1614  * This routine is invoked to handle the following HBA hardware error
1615  * conditions:
1616  * 1 - HBA error attention interrupt
1617  * 2 - DMA ring index out of range
1618  * 3 - Mailbox command came back as unknown
1619  **/
1620 static void
1621 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1622 {
1623         struct lpfc_vport *vport = phba->pport;
1624         struct lpfc_sli   *psli = &phba->sli;
1625         uint32_t event_data;
1626         unsigned long temperature;
1627         struct temp_event temp_event_data;
1628         struct Scsi_Host  *shost;
1629
1630         /* If the pci channel is offline, ignore possible errors,
1631          * since we cannot communicate with the pci card anyway.
1632          */
1633         if (pci_channel_offline(phba->pcidev)) {
1634                 spin_lock_irq(&phba->hbalock);
1635                 phba->hba_flag &= ~DEFER_ERATT;
1636                 spin_unlock_irq(&phba->hbalock);
1637                 return;
1638         }
1639
1640         /* If resets are disabled then leave the HBA alone and return */
1641         if (!phba->cfg_enable_hba_reset)
1642                 return;
1643
1644         /* Send an internal error event to mgmt application */
1645         lpfc_board_errevt_to_mgmt(phba);
1646
1647         if (phba->hba_flag & DEFER_ERATT)
1648                 lpfc_handle_deferred_eratt(phba);
1649
1650         if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1651                 if (phba->work_hs & HS_FFER6)
1652                         /* Re-establishing Link */
1653                         lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1654                                         "1301 Re-establishing Link "
1655                                         "Data: x%x x%x x%x\n",
1656                                         phba->work_hs, phba->work_status[0],
1657                                         phba->work_status[1]);
1658                 if (phba->work_hs & HS_FFER8)
1659                         /* Device Zeroization */
1660                         lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1661                                         "2861 Host Authentication device "
1662                                         "zeroization Data:x%x x%x x%x\n",
1663                                         phba->work_hs, phba->work_status[0],
1664                                         phba->work_status[1]);
1665
1666                 spin_lock_irq(&phba->hbalock);
1667                 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1668                 spin_unlock_irq(&phba->hbalock);
1669
1670                 /*
1671                  * The firmware stops when it triggers an error attention
1672                  * with HS_FFER6, which can cause I/Os to be dropped by the
1673                  * firmware. Error out the iocbs on the txcmplq and let the
1674                  * SCSI layer retry them after the link is re-established.
1675                  */
1676                 lpfc_sli_abort_fcp_rings(phba);
1677
1678                 /*
1679                  * There was a firmware error.  Take the hba offline and then
1680                  * attempt to restart it.
1681                  */
1682                 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1683                 lpfc_offline(phba);
1684                 lpfc_sli_brdrestart(phba);
1685                 if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
1686                         lpfc_unblock_mgmt_io(phba);
1687                         return;
1688                 }
1689                 lpfc_unblock_mgmt_io(phba);
1690         } else if (phba->work_hs & HS_CRIT_TEMP) {
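                /*
                 * The adapter latches its temperature reading (assumed to be
                 * in degrees Celsius) in SLIM at TEMPERATURE_OFFSET.
                 */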
1691                 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1692                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1693                 temp_event_data.event_code = LPFC_CRIT_TEMP;
1694                 temp_event_data.data = (uint32_t)temperature;
1695
1696                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1697                                 "0406 Adapter maximum temperature exceeded "
1698                                 "(%ld), taking this port offline "
1699                                 "Data: x%x x%x x%x\n",
1700                                 temperature, phba->work_hs,
1701                                 phba->work_status[0], phba->work_status[1]);
1702
1703                 shost = lpfc_shost_from_vport(phba->pport);
1704                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1705                                           sizeof(temp_event_data),
1706                                           (char *) &temp_event_data,
1707                                           SCSI_NL_VID_TYPE_PCI
1708                                           | PCI_VENDOR_ID_EMULEX);
1709
1710                 spin_lock_irq(&phba->hbalock);
1711                 phba->over_temp_state = HBA_OVER_TEMP;
1712                 spin_unlock_irq(&phba->hbalock);
1713                 lpfc_offline_eratt(phba);
1714
1715         } else {
1716                 /* The if clause above forces this code path when the status
1717                  * failure is a value other than FFER6. Do not call the offline
1718                  * twice. This is the adapter hardware error path.
1719                  */
1720                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1721                                 "0457 Adapter Hardware Error "
1722                                 "Data: x%x x%x x%x\n",
1723                                 phba->work_hs,
1724                                 phba->work_status[0], phba->work_status[1]);
1725
1726                 event_data = FC_REG_DUMP_EVENT;
1727                 shost = lpfc_shost_from_vport(vport);
1728                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1729                                 sizeof(event_data), (char *) &event_data,
1730                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1731
1732                 lpfc_offline_eratt(phba);
1733         }
1734         return;
1735 }
1736
1737 /**
1738  * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1739  * @phba: pointer to lpfc hba data structure.
1740  * @mbx_action: flag for mailbox shutdown action.
1741  * @en_rn_msg: flag to log the "Reset Needed" port recovery message.
1742  *
1743  * This routine performs an SLI4 port PCI function reset in response to a
1744  * port status register polling attention. It waits for the port status
1745  * register (ERR, RDY, RN) bits before proceeding; interrupt vectors are
1746  * freed and later re-requested to handle a possible port resource change.
1747  **/
1748 static int
1749 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1750                             bool en_rn_msg)
1751 {
1752         int rc;
1753         uint32_t intr_mode;
1754
1755         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1756             LPFC_SLI_INTF_IF_TYPE_2) {
1757                 /*
1758                  * On error status condition, driver need to wait for port
1759                  * ready before performing reset.
1760                  */
1761                 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1762                 if (rc)
1763                         return rc;
1764         }
1765
1766         /* Reset needed: attempt port recovery */
1767         if (en_rn_msg)
1768                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1769                                 "2887 Reset Needed: Attempting Port "
1770                                 "Recovery...\n");
1771         lpfc_offline_prep(phba, mbx_action);
1772         lpfc_sli_flush_io_rings(phba);
1773         lpfc_offline(phba);
1774         /* release interrupt for possible resource change */
1775         lpfc_sli4_disable_intr(phba);
1776         rc = lpfc_sli_brdrestart(phba);
1777         if (rc) {
1778                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1779                                 "6309 Failed to restart board\n");
1780                 return rc;
1781         }
1782         /* request and enable interrupt */
1783         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1784         if (intr_mode == LPFC_INTR_ERROR) {
1785                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1786                                 "3175 Failed to enable interrupt\n");
1787                 return -EIO;
1788         }
1789         phba->intr_mode = intr_mode;
1790         rc = lpfc_online(phba);
1791         if (rc == 0)
1792                 lpfc_unblock_mgmt_io(phba);
1793
1794         return rc;
1795 }
1796
1797 /**
1798  * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1799  * @phba: pointer to lpfc hba data structure.
1800  *
1801  * This routine is invoked to handle the SLI4 HBA hardware error attention
1802  * conditions.
1803  **/
1804 static void
1805 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1806 {
1807         struct lpfc_vport *vport = phba->pport;
1808         uint32_t event_data;
1809         struct Scsi_Host *shost;
1810         uint32_t if_type;
1811         struct lpfc_register portstat_reg = {0};
1812         uint32_t reg_err1, reg_err2;
1813         uint32_t uerrlo_reg, uemasklo_reg;
1814         uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1815         bool en_rn_msg = true;
1816         struct temp_event temp_event_data;
1817         struct lpfc_register portsmphr_reg;
1818         int rc, i;
1819
1820         /* If the pci channel is offline, ignore possible errors, since
1821          * we cannot communicate with the pci card anyway.
1822          */
1823         if (pci_channel_offline(phba->pcidev)) {
1824                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1825                                 "3166 pci channel is offline\n");
1826                 lpfc_sli4_offline_eratt(phba);
1827                 return;
1828         }
1829
1830         memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
1831         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1832         switch (if_type) {
1833         case LPFC_SLI_INTF_IF_TYPE_0:
1834                 pci_rd_rc1 = lpfc_readl(
1835                                 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1836                                 &uerrlo_reg);
1837                 pci_rd_rc2 = lpfc_readl(
1838                                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1839                                 &uemasklo_reg);
1840                 /* consider PCI bus read error as pci_channel_offline */
1841                 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1842                         return;
1843                 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1844                         lpfc_sli4_offline_eratt(phba);
1845                         return;
1846                 }
1847                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1848                                 "7623 Checking UE recoverable");
1849
1850                 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1851                         if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1852                                        &portsmphr_reg.word0))
1853                                 continue;
1854
1855                         smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1856                                                    &portsmphr_reg);
1857                         if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1858                             LPFC_PORT_SEM_UE_RECOVERABLE)
1859                                 break;
1860                         /* Sleep for 1 sec before checking the semaphore */
1861                         msleep(1000);
1862                 }
1863
1864                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1865                                 "4827 smphr_port_status x%x: waited %d sec",
1866                                 smphr_port_status, i);
1867
1868                 /* Recoverable UE, reset the HBA device */
1869                 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1870                     LPFC_PORT_SEM_UE_RECOVERABLE) {
1871                         for (i = 0; i < 20; i++) {
1872                                 msleep(1000);
1873                                 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1874                                     &portsmphr_reg.word0) &&
1875                                     (LPFC_POST_STAGE_PORT_READY ==
1876                                      bf_get(lpfc_port_smphr_port_status,
1877                                      &portsmphr_reg))) {
1878                                         rc = lpfc_sli4_port_sta_fn_reset(phba,
1879                                                 LPFC_MBX_NO_WAIT, en_rn_msg);
1880                                         if (rc == 0)
1881                                                 return;
1882                                         lpfc_printf_log(phba,
1883                                                 KERN_ERR, LOG_INIT,
1884                                                 "4215 Failed to recover UE");
1885                                         break;
1886                                 }
1887                         }
1888                 }
1889                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1890                                 "7624 Firmware not ready: Failing UE recovery,"
1891                                 " waited %d sec", i);
1892                 phba->link_state = LPFC_HBA_ERROR;
1893                 break;
1894
1895         case LPFC_SLI_INTF_IF_TYPE_2:
1896         case LPFC_SLI_INTF_IF_TYPE_6:
1897                 pci_rd_rc1 = lpfc_readl(
1898                                 phba->sli4_hba.u.if_type2.STATUSregaddr,
1899                                 &portstat_reg.word0);
1900                 /* consider PCI bus read error as pci_channel_offline */
1901                 if (pci_rd_rc1 == -EIO) {
1902                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1903                                 "3151 PCI bus read access failure: x%x\n",
1904                                 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
1905                         lpfc_sli4_offline_eratt(phba);
1906                         return;
1907                 }
1908                 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1909                 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1910                 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1911                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1912                                 "2889 Port Overtemperature event, "
1913                                 "taking port offline Data: x%x x%x\n",
1914                                 reg_err1, reg_err2);
1915
1916                         phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
1917                         temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1918                         temp_event_data.event_code = LPFC_CRIT_TEMP;
1919                         temp_event_data.data = 0xFFFFFFFF;
1920
1921                         shost = lpfc_shost_from_vport(phba->pport);
1922                         fc_host_post_vendor_event(shost, fc_get_event_number(),
1923                                                   sizeof(temp_event_data),
1924                                                   (char *)&temp_event_data,
1925                                                   SCSI_NL_VID_TYPE_PCI
1926                                                   | PCI_VENDOR_ID_EMULEX);
1927
1928                         spin_lock_irq(&phba->hbalock);
1929                         phba->over_temp_state = HBA_OVER_TEMP;
1930                         spin_unlock_irq(&phba->hbalock);
1931                         lpfc_sli4_offline_eratt(phba);
1932                         return;
1933                 }
1934                 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1935                     reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
1936                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1937                                         "3143 Port Down: Firmware Update "
1938                                         "Detected\n");
1939                         en_rn_msg = false;
1940                 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1941                          reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1942                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1943                                         "3144 Port Down: Debug Dump\n");
1944                 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1945                          reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1946                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1947                                         "3145 Port Down: Provisioning\n");
1948
1949                 /* If resets are disabled then leave the HBA alone and return */
1950                 if (!phba->cfg_enable_hba_reset)
1951                         return;
1952
1953                 /* Check port status register for function reset */
1954                 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1955                                 en_rn_msg);
1956                 if (rc == 0) {
1957                         /* don't report event on forced debug dump */
1958                         if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1959                             reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1960                                 return;
1961                         else
1962                                 break;
1963                 }
1964                 /* fall through when unable to recover */
1965                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1966                                 "3152 Unrecoverable error\n");
1967                 phba->link_state = LPFC_HBA_ERROR;
1968                 break;
1969         case LPFC_SLI_INTF_IF_TYPE_1:
1970         default:
1971                 break;
1972         }
1973         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1974                         "3123 Report dump event to upper layer\n");
1975         /* Send an internal error event to mgmt application */
1976         lpfc_board_errevt_to_mgmt(phba);
1977
1978         event_data = FC_REG_DUMP_EVENT;
1979         shost = lpfc_shost_from_vport(vport);
1980         fc_host_post_vendor_event(shost, fc_get_event_number(),
1981                                   sizeof(event_data), (char *) &event_data,
1982                                   SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1983 }
1984
1985 /**
1986  * lpfc_handle_eratt - Wrapper func for handling hba error attention
1987  * @phba: pointer to lpfc HBA data structure.
1988  *
1989  * This routine dispatches to the actual SLI3 or SLI4 HBA error attention
1990  * handling routine through the API jump table function pointer in the
1991  * lpfc_hba struct.
1992  **/
1996 void
1997 lpfc_handle_eratt(struct lpfc_hba *phba)
1998 {
1999         (*phba->lpfc_handle_eratt)(phba);
2000 }
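
/*
 * The jump table entry is assumed to be assigned during driver setup, along
 * the lines of this sketch:
 *
 *   phba->lpfc_handle_eratt = (phba->sli_rev == LPFC_SLI_REV4) ?
 *                             lpfc_handle_eratt_s4 : lpfc_handle_eratt_s3;
 */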
2001
2002 /**
2003  * lpfc_handle_latt - The HBA link event handler
2004  * @phba: pointer to lpfc hba data structure.
2005  *
2006  * This routine is invoked from the worker thread to handle an HBA host
2007  * attention link event. SLI3 only.
2008  **/
2009 void
2010 lpfc_handle_latt(struct lpfc_hba *phba)
2011 {
2012         struct lpfc_vport *vport = phba->pport;
2013         struct lpfc_sli   *psli = &phba->sli;
2014         LPFC_MBOXQ_t *pmb;
2015         volatile uint32_t control;
2016         struct lpfc_dmabuf *mp;
2017         int rc = 0;
2018
2019         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2020         if (!pmb) {
2021                 rc = 1;
2022                 goto lpfc_handle_latt_err_exit;
2023         }
2024
2025         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2026         if (!mp) {
2027                 rc = 2;
2028                 goto lpfc_handle_latt_free_pmb;
2029         }
2030
2031         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2032         if (!mp->virt) {
2033                 rc = 3;
2034                 goto lpfc_handle_latt_free_mp;
2035         }
2036
2037         /* Cleanup any outstanding ELS commands */
2038         lpfc_els_flush_all_cmd(phba);
2039
2040         psli->slistat.link_event++;
2041         lpfc_read_topology(phba, pmb, mp);
2042         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2043         pmb->vport = vport;
2044         /* Block ELS IOCBs until we have processed this mbox command */
2045         phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2046         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2047         if (rc == MBX_NOT_FINISHED) {
2048                 rc = 4;
2049                 goto lpfc_handle_latt_free_mbuf;
2050         }
2051
2052         /* Clear Link Attention in HA REG */
2053         spin_lock_irq(&phba->hbalock);
2054         writel(HA_LATT, phba->HAregaddr);
2055         readl(phba->HAregaddr); /* flush */
2056         spin_unlock_irq(&phba->hbalock);
2057
2058         return;
2059
2060 lpfc_handle_latt_free_mbuf:
2061         phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2062         lpfc_mbuf_free(phba, mp->virt, mp->phys);
2063 lpfc_handle_latt_free_mp:
2064         kfree(mp);
2065 lpfc_handle_latt_free_pmb:
2066         mempool_free(pmb, phba->mbox_mem_pool);
2067 lpfc_handle_latt_err_exit:
2068         /* Enable Link attention interrupts */
2069         spin_lock_irq(&phba->hbalock);
2070         psli->sli_flag |= LPFC_PROCESS_LA;
2071         control = readl(phba->HCregaddr);
2072         control |= HC_LAINT_ENA;
2073         writel(control, phba->HCregaddr);
2074         readl(phba->HCregaddr); /* flush */
2075
2076         /* Clear Link Attention in HA REG */
2077         writel(HA_LATT, phba->HAregaddr);
2078         readl(phba->HAregaddr); /* flush */
2079         spin_unlock_irq(&phba->hbalock);
2080         lpfc_linkdown(phba);
2081         phba->link_state = LPFC_HBA_ERROR;
2082
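        /*
         * rc identifies the failure point above: 1 = mailbox alloc failed,
         * 2 = dmabuf alloc failed, 3 = mbuf alloc failed, 4 = the
         * READ_TOPOLOGY mailbox command could not be issued.
         */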
2083         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2084                      "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2085
2086         return;
2087 }
2088
2089 /**
2090  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2091  * @phba: pointer to lpfc hba data structure.
2092  * @vpd: pointer to the vital product data.
2093  * @len: length of the vital product data in bytes.
2094  *
2095  * This routine parses the Vital Product Data (VPD). The VPD is treated as
2096  * an array of characters. Fields of the phba data structure, such as
2097  * ModelName, ProgramType, and ModelDesc, are populated from it.
2098  *
2099  * Return codes
2100  *   0 - pointer to the VPD passed in is NULL
2101  *   1 - success
2102  **/
2103 int
2104 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2105 {
2106         uint8_t lenlo, lenhi;
2107         int Length;
2108         int i, j;
2109         int finished = 0;
2110         int index = 0;
2111
2112         if (!vpd)
2113                 return 0;
2114
2115         /* Vital Product */
2116         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2117                         "0455 Vital Product Data: x%x x%x x%x x%x\n",
2118                         (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2119                         (uint32_t) vpd[3]);
2120         while (!finished && (index < (len - 4))) {
2121                 switch (vpd[index]) {
2122                 case 0x82:
2123                 case 0x91:
2124                         index += 1;
2125                         lenlo = vpd[index];
2126                         index += 1;
2127                         lenhi = vpd[index];
2128                         index += 1;
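                        /* 16-bit descriptor length, little-endian: low byte first */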
2129                         i = ((((unsigned short)lenhi) << 8) + lenlo);
2130                         index += i;
2131                         break;
2132                 case 0x90:
2133                         index += 1;
2134                         lenlo = vpd[index];
2135                         index += 1;
2136                         lenhi = vpd[index];
2137                         index += 1;
2138                         Length = ((((unsigned short)lenhi) << 8) + lenlo);
2139                         if (Length > len - index)
2140                                 Length = len - index;
2141                         while (Length > 0) {
2142                                 /* Look for Serial Number */
2143                                 if ((vpd[index] == 'S') && (vpd[index + 1] == 'N')) {
2144                                         index += 2;
2145                                         i = vpd[index];
2146                                         index += 1;
2147                                         j = 0;
2148                                         Length -= (3 + i);
2149                                         while (i--) {
2150                                                 phba->SerialNumber[j++] = vpd[index++];
2151                                                 if (j == 31)
2152                                                         break;
2153                                         }
2154                                         phba->SerialNumber[j] = 0;
2155                                         continue;
2156                                 }
2157                                 else if ((vpd[index] == 'V') && (vpd[index + 1] == '1')) {
2158                                         phba->vpd_flag |= VPD_MODEL_DESC;
2159                                         index += 2;
2160                                         i = vpd[index];
2161                                         index += 1;
2162                                         j = 0;
2163                                         Length -= (3 + i);
2164                                         while (i--) {
2165                                                 phba->ModelDesc[j++] = vpd[index++];
2166                                                 if (j == 255)
2167                                                         break;
2168                                         }
2169                                         phba->ModelDesc[j] = 0;
2170                                         continue;
2171                                 }
2172                                 else if ((vpd[index] == 'V') && (vpd[index + 1] == '2')) {
2173                                         phba->vpd_flag |= VPD_MODEL_NAME;
2174                                         index += 2;
2175                                         i = vpd[index];
2176                                         index += 1;
2177                                         j = 0;
2178                                         Length -= (3 + i);
2179                                         while (i--) {
2180                                                 phba->ModelName[j++] = vpd[index++];
2181                                                 if (j == 79)
2182                                                         break;
2183                                         }
2184                                         phba->ModelName[j] = 0;
2185                                         continue;
2186                                 }
2187                                 else if ((vpd[index] == 'V') && (vpd[index + 1] == '3')) {
2188                                         phba->vpd_flag |= VPD_PROGRAM_TYPE;
2189                                         index += 2;
2190                                         i = vpd[index];
2191                                         index += 1;
2192                                         j = 0;
2193                                         Length -= (3 + i);
2194                                         while (i--) {
2195                                                 phba->ProgramType[j++] = vpd[index++];
2196                                                 if (j == 255)
2197                                                         break;
2198                                         }
2199                                         phba->ProgramType[j] = 0;
2200                                         continue;
2201                                 }
2202                                 else if ((vpd[index] == 'V') && (vpd[index + 1] == '4')) {
2203                                         phba->vpd_flag |= VPD_PORT;
2204                                         index += 2;
2205                                         i = vpd[index];
2206                                         index += 1;
2207                                         j = 0;
2208                                         Length -= (3 + i);
2209                                         while (i--) {
2210                                                 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2211                                                     (phba->sli4_hba.pport_name_sta ==
2212                                                      LPFC_SLI4_PPNAME_GET)) {
2213                                                         j++;
2214                                                         index++;
2215                                                 } else
2216                                                         phba->Port[j++] = vpd[index++];
2217                                                 if (j == 19)
2218                                                         break;
2219                                         }
2220                                         if ((phba->sli_rev != LPFC_SLI_REV4) ||
2221                                             (phba->sli4_hba.pport_name_sta ==
2222                                              LPFC_SLI4_PPNAME_NON))
2223                                                 phba->Port[j] = 0;
2224                                         continue;
2225                                 }
2226                                 else {
2227                                         index += 2;
2228                                         i = vpd[index];
2229                                         index += 1;
2230                                         index += i;
2231                                         Length -= (3 + i);
2232                                 }
2233                         }
2234                         finished = 0;
2235                         break;
2236                 case 0x78:
2237                         finished = 1;
2238                         break;
2239                 default:
2240                         index++;
2241                         break;
2242                 }
2243         }
2244
2245         return 1;
2246 }
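
/*
 * Example of the VPD layout consumed above (an illustrative sketch of the
 * standard PCI VPD encoding, not data from a real adapter):
 *
 *   0x90 0x0b 0x00              VPD-R large resource tag, length 0x000b
 *   'S' 'N' 0x08 "AB123456"     keyword "SN": 8 bytes of serial number
 *   0x78                        end tag, terminates parsing
 *
 * Each keyword descriptor is keyword(2) + length(1) + data(length), which is
 * why the parser advances by 3 + i bytes per field.
 */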
2247
2248 /**
2249  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2250  * @phba: pointer to lpfc hba data structure.
2251  * @mdp: pointer to the data structure to hold the derived model name.
2252  * @descp: pointer to the data structure to hold the derived description.
2253  *
2254  * This routine retrieves the HBA's description based on its registered PCI
2255  * device ID. The @descp passed in points to an array of 256 chars. It
2256  * shall be returned with the model name, maximum speed, and the host bus type.
2257  * The @mdp passed into this function points to an array of 80 chars. When the
2258  * function returns, the @mdp will be filled with the model name.
2259  **/
2260 static void
2261 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2262 {
2263         lpfc_vpd_t *vp;
2264         uint16_t dev_id = phba->pcidev->device;
2265         int max_speed;
2266         int GE = 0;
2267         int oneConnect = 0; /* default is not a oneConnect */
2268         struct {
2269                 char *name;
2270                 char *bus;
2271                 char *function;
2272         } m = {"<Unknown>", "", ""};
2273
2274         if (mdp && mdp[0] != '\0' &&
2275             descp && descp[0] != '\0')
2276                 return;
2277
2278         if (phba->lmt & LMT_64Gb)
2279                 max_speed = 64;
2280         else if (phba->lmt & LMT_32Gb)
2281                 max_speed = 32;
2282         else if (phba->lmt & LMT_16Gb)
2283                 max_speed = 16;
2284         else if (phba->lmt & LMT_10Gb)
2285                 max_speed = 10;
2286         else if (phba->lmt & LMT_8Gb)
2287                 max_speed = 8;
2288         else if (phba->lmt & LMT_4Gb)
2289                 max_speed = 4;
2290         else if (phba->lmt & LMT_2Gb)
2291                 max_speed = 2;
2292         else if (phba->lmt & LMT_1Gb)
2293                 max_speed = 1;
2294         else
2295                 max_speed = 0;
2296
2297         vp = &phba->vpd;
2298
2299         switch (dev_id) {
2300         case PCI_DEVICE_ID_FIREFLY:
2301                 m = (typeof(m)){"LP6000", "PCI",
2302                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2303                 break;
2304         case PCI_DEVICE_ID_SUPERFLY:
2305                 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2306                         m = (typeof(m)){"LP7000", "PCI", ""};
2307                 else
2308                         m = (typeof(m)){"LP7000E", "PCI", ""};
2309                 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2310                 break;
2311         case PCI_DEVICE_ID_DRAGONFLY:
2312                 m = (typeof(m)){"LP8000", "PCI",
2313                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2314                 break;
2315         case PCI_DEVICE_ID_CENTAUR:
2316                 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2317                         m = (typeof(m)){"LP9002", "PCI", ""};
2318                 else
2319                         m = (typeof(m)){"LP9000", "PCI", ""};
2320                 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2321                 break;
2322         case PCI_DEVICE_ID_RFLY:
2323                 m = (typeof(m)){"LP952", "PCI",
2324                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2325                 break;
2326         case PCI_DEVICE_ID_PEGASUS:
2327                 m = (typeof(m)){"LP9802", "PCI-X",
2328                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2329                 break;
2330         case PCI_DEVICE_ID_THOR:
2331                 m = (typeof(m)){"LP10000", "PCI-X",
2332                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2333                 break;
2334         case PCI_DEVICE_ID_VIPER:
2335                 m = (typeof(m)){"LPX1000",  "PCI-X",
2336                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2337                 break;
2338         case PCI_DEVICE_ID_PFLY:
2339                 m = (typeof(m)){"LP982", "PCI-X",
2340                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2341                 break;
2342         case PCI_DEVICE_ID_TFLY:
2343                 m = (typeof(m)){"LP1050", "PCI-X",
2344                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2345                 break;
2346         case PCI_DEVICE_ID_HELIOS:
2347                 m = (typeof(m)){"LP11000", "PCI-X2",
2348                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2349                 break;
2350         case PCI_DEVICE_ID_HELIOS_SCSP:
2351                 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2352                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2353                 break;
2354         case PCI_DEVICE_ID_HELIOS_DCSP:
2355                 m = (typeof(m)){"LP11002-SP",  "PCI-X2",
2356                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2357                 break;
2358         case PCI_DEVICE_ID_NEPTUNE:
2359                 m = (typeof(m)){"LPe1000", "PCIe",
2360                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2361                 break;
2362         case PCI_DEVICE_ID_NEPTUNE_SCSP:
2363                 m = (typeof(m)){"LPe1000-SP", "PCIe",
2364                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2365                 break;
2366         case PCI_DEVICE_ID_NEPTUNE_DCSP:
2367                 m = (typeof(m)){"LPe1002-SP", "PCIe",
2368                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2369                 break;
2370         case PCI_DEVICE_ID_BMID:
2371                 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2372                 break;
2373         case PCI_DEVICE_ID_BSMB:
2374                 m = (typeof(m)){"LP111", "PCI-X2",
2375                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2376                 break;
2377         case PCI_DEVICE_ID_ZEPHYR:
2378                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2379                 break;
2380         case PCI_DEVICE_ID_ZEPHYR_SCSP:
2381                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2382                 break;
2383         case PCI_DEVICE_ID_ZEPHYR_DCSP:
2384                 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2385                 GE = 1;
2386                 break;
2387         case PCI_DEVICE_ID_ZMID:
2388                 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2389                 break;
2390         case PCI_DEVICE_ID_ZSMB:
2391                 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2392                 break;
2393         case PCI_DEVICE_ID_LP101:
2394                 m = (typeof(m)){"LP101", "PCI-X",
2395                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2396                 break;
2397         case PCI_DEVICE_ID_LP10000S:
2398                 m = (typeof(m)){"LP10000-S", "PCI",
2399                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2400                 break;
2401         case PCI_DEVICE_ID_LP11000S:
2402                 m = (typeof(m)){"LP11000-S", "PCI-X2",
2403                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2404                 break;
2405         case PCI_DEVICE_ID_LPE11000S:
2406                 m = (typeof(m)){"LPe11000-S", "PCIe",
2407                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2408                 break;
2409         case PCI_DEVICE_ID_SAT:
2410                 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2411                 break;
2412         case PCI_DEVICE_ID_SAT_MID:
2413                 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2414                 break;
2415         case PCI_DEVICE_ID_SAT_SMB:
2416                 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2417                 break;
2418         case PCI_DEVICE_ID_SAT_DCSP:
2419                 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2420                 break;
2421         case PCI_DEVICE_ID_SAT_SCSP:
2422                 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2423                 break;
2424         case PCI_DEVICE_ID_SAT_S:
2425                 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2426                 break;
2427         case PCI_DEVICE_ID_HORNET:
2428                 m = (typeof(m)){"LP21000", "PCIe",
2429                                 "Obsolete, Unsupported FCoE Adapter"};
2430                 GE = 1;
2431                 break;
2432         case PCI_DEVICE_ID_PROTEUS_VF:
2433                 m = (typeof(m)){"LPev12000", "PCIe IOV",
2434                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2435                 break;
2436         case PCI_DEVICE_ID_PROTEUS_PF:
2437                 m = (typeof(m)){"LPev12000", "PCIe IOV",
2438                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2439                 break;
2440         case PCI_DEVICE_ID_PROTEUS_S:
2441                 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2442                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2443                 break;
2444         case PCI_DEVICE_ID_TIGERSHARK:
2445                 oneConnect = 1;
2446                 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2447                 break;
2448         case PCI_DEVICE_ID_TOMCAT:
2449                 oneConnect = 1;
2450                 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2451                 break;
2452         case PCI_DEVICE_ID_FALCON:
2453                 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2454                                 "EmulexSecure Fibre"};
2455                 break;
2456         case PCI_DEVICE_ID_BALIUS:
2457                 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2458                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2459                 break;
2460         case PCI_DEVICE_ID_LANCER_FC:
2461                 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2462                 break;
2463         case PCI_DEVICE_ID_LANCER_FC_VF:
2464                 m = (typeof(m)){"LPe16000", "PCIe",
2465                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2466                 break;
2467         case PCI_DEVICE_ID_LANCER_FCOE:
2468                 oneConnect = 1;
2469                 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2470                 break;
2471         case PCI_DEVICE_ID_LANCER_FCOE_VF:
2472                 oneConnect = 1;
2473                 m = (typeof(m)){"OCe15100", "PCIe",
2474                                 "Obsolete, Unsupported FCoE"};
2475                 break;
2476         case PCI_DEVICE_ID_LANCER_G6_FC:
2477                 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2478                 break;
2479         case PCI_DEVICE_ID_LANCER_G7_FC:
2480                 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2481                 break;
2482         case PCI_DEVICE_ID_SKYHAWK:
2483         case PCI_DEVICE_ID_SKYHAWK_VF:
2484                 oneConnect = 1;
2485                 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2486                 break;
2487         default:
2488                 m = (typeof(m)){"Unknown", "", ""};
2489                 break;
2490         }
2491
2492         if (mdp && mdp[0] == '\0')
2493                 snprintf(mdp, 79, "%s", m.name);
2494         /*
2495          * OneConnect HBAs require special processing: they are all
2496          * initiators, and the port number is appended to the description.
2497          */
2498         if (descp && descp[0] == '\0') {
2499                 if (oneConnect)
2500                         snprintf(descp, 255,
2501                                 "Emulex OneConnect %s, %s Initiator %s",
2502                                 m.name, m.function,
2503                                 phba->Port);
2504                 else if (max_speed == 0)
2505                         snprintf(descp, 255,
2506                                 "Emulex %s %s %s",
2507                                 m.name, m.bus, m.function);
2508                 else
2509                         snprintf(descp, 255,
2510                                 "Emulex %s %d%s %s %s",
2511                                 m.name, max_speed, (GE) ? "GE" : "Gb",
2512                                 m.bus, m.function);
2513         }
2514 }
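
/*
 * Example result (an illustrative sketch, assuming an LPe32000 with LMT_32Gb
 * set and empty mdp/descp buffers on entry):
 *
 *   mdp:   "LPe32000"
 *   descp: "Emulex LPe32000 32Gb PCIe Fibre Channel Adapter"
 */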
2515
2516 /**
2517  * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2518  * @phba: pointer to lpfc hba data structure.
2519  * @pring: pointer to an IOCB ring.
2520  * @cnt: the number of IOCBs to be posted to the IOCB ring.
2521  *
2522  * This routine posts a given number of IOCBs with the associated DMA buffer
2523  * descriptors specified by the cnt argument to the given IOCB ring.
2524  *
2525  * Return codes
2526  *   The number of IOCBs NOT able to be posted to the IOCB ring.
2527  **/
2528 int
2529 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2530 {
2531         IOCB_t *icmd;
2532         struct lpfc_iocbq *iocb;
2533         struct lpfc_dmabuf *mp1, *mp2;
2534
2535         cnt += pring->missbufcnt;
2536
2537         /* While there are buffers to post */
2538         while (cnt > 0) {
2539                 /* Allocate buffer for command iocb */
2540                 iocb = lpfc_sli_get_iocbq(phba);
2541                 if (!iocb) {
2542                         pring->missbufcnt = cnt;
2543                         return cnt;
2544                 }
2545                 icmd = &iocb->iocb;
2546
2547                 /* 2 buffers can be posted per command */
2548                 /* Allocate buffer to post */
2549                 mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2550                 if (mp1)
2551                         mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2552                 if (!mp1 || !mp1->virt) {
2553                         kfree(mp1);
2554                         lpfc_sli_release_iocbq(phba, iocb);
2555                         pring->missbufcnt = cnt;
2556                         return cnt;
2557                 }
2558
2559                 INIT_LIST_HEAD(&mp1->list);
2560                 /* Allocate buffer to post */
2561                 if (cnt > 1) {
2562                         mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2563                         if (mp2)
2564                                 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2565                                                             &mp2->phys);
2566                         if (!mp2 || !mp2->virt) {
2567                                 kfree(mp2);
2568                                 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2569                                 kfree(mp1);
2570                                 lpfc_sli_release_iocbq(phba, iocb);
2571                                 pring->missbufcnt = cnt;
2572                                 return cnt;
2573                         }
2574
2575                         INIT_LIST_HEAD(&mp2->list);
2576                 } else {
2577                         mp2 = NULL;
2578                 }
2579
2580                 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2581                 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2582                 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2583                 icmd->ulpBdeCount = 1;
2584                 cnt--;
2585                 if (mp2) {
2586                         icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2587                         icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2588                         icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2589                         cnt--;
2590                         icmd->ulpBdeCount = 2;
2591                 }
2592
2593                 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2594                 icmd->ulpLe = 1;
2595
2596                 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2597                     IOCB_ERROR) {
2598                         lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2599                         kfree(mp1);
2600                         cnt++;
2601                         if (mp2) {
2602                                 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2603                                 kfree(mp2);
2604                                 cnt++;
2605                         }
2606                         lpfc_sli_release_iocbq(phba, iocb);
2607                         pring->missbufcnt = cnt;
2608                         return cnt;
2609                 }
2610                 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2611                 if (mp2)
2612                         lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2613         }
2614         pring->missbufcnt = 0;
2615         return 0;
2616 }
2617
2618 /**
2619  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2620  * @phba: pointer to lpfc hba data structure.
2621  *
2622  * This routine posts the initial receive IOCB buffers to the ELS ring. The
2623  * number of initial IOCB buffers is specified by LPFC_BUF_RING0, currently
2624  * set to 64 IOCBs. SLI3 only.
2625  *
2626  * Return codes
2627  *   0 - success (currently always success)
2628  **/
2629 static int
2630 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2631 {
2632         struct lpfc_sli *psli = &phba->sli;
2633
2634         /* Ring 0, ELS / CT buffers */
2635         lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2636         /* Ring 2 - FCP no buffers needed */
2637
2638         return 0;
2639 }
2640
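/* S(N, V): rotate the 32-bit value V left by N bits (SHA-1 circular shift) */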
2641 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2642
2643 /**
2644  * lpfc_sha_init - Set up initial array of hash table entries
2645  * @HashResultPointer: pointer to an array as hash table.
2646  *
2647  * This routine sets up the initial values in the array of hash table entries
2648  * for the LC HBAs.
2649  **/
2650 static void
2651 lpfc_sha_init(uint32_t * HashResultPointer)
2652 {
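        /* Standard SHA-1 initial hash values H0-H4 (FIPS 180-1) */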
2653         HashResultPointer[0] = 0x67452301;
2654         HashResultPointer[1] = 0xEFCDAB89;
2655         HashResultPointer[2] = 0x98BADCFE;
2656         HashResultPointer[3] = 0x10325476;
2657         HashResultPointer[4] = 0xC3D2E1F0;
2658 }
2659
2660 /**
2661  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2662  * @HashResultPointer: pointer to an initial/result hash table.
2663  * @HashWorkingPointer: pointer to a working hash table.
2664  *
2665  * This routine iterates an initial hash table pointed to by @HashResultPointer
2666  * with the values from the working hash table pointed to by @HashWorkingPointer.
2667  * The results are put back into the initial hash table, returned through
2668  * @HashResultPointer as the result hash table.
2669  **/
2670 static void
2671 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2672 {
2673         int t;
2674         uint32_t TEMP;
2675         uint32_t A, B, C, D, E;
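        /* Expand the message schedule: W[t] = S(1, W[t-3] ^ W[t-8] ^
         * W[t-14] ^ W[t-16]) for t = 16..79
         */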
2676         t = 16;
2677         do {
2678                 HashWorkingPointer[t] =
2679                     S(1,
2680                       HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
2681                       HashWorkingPointer[t - 14] ^
2682                       HashWorkingPointer[t - 16]);
2683         } while (++t <= 79);
2684         t = 0;
2685         A = HashResultPointer[0];
2686         B = HashResultPointer[1];
2687         C = HashResultPointer[2];
2688         D = HashResultPointer[3];
2689         E = HashResultPointer[4];
2690
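        /* Run the 80 SHA-1 rounds with the standard round functions and
         * round constants
         */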
2691         do {
2692                 if (t < 20) {
2693                         TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2694                 } else if (t < 40) {
2695                         TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2696                 } else if (t < 60) {
2697                         TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2698                 } else {
2699                         TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2700                 }
2701                 TEMP += S(5, A) + E + HashWorkingPointer[t];
2702                 E = D;
2703                 D = C;
2704                 C = S(30, B);
2705                 B = A;
2706                 A = TEMP;
2707         } while (++t <= 79);
2708
2709         HashResultPointer[0] += A;
2710         HashResultPointer[1] += B;
2711         HashResultPointer[2] += C;
2712         HashResultPointer[3] += D;
2713         HashResultPointer[4] += E;
2715 }
2716
2717 /**
2718  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2719  * @RandomChallenge: pointer to the entry of host challenge random number array.
2720  * @HashWorking: pointer to the entry of the working hash array.
2721  *
2722  * This routine calculates the working hash array referred to by @HashWorking
2723  * from the challenge random numbers associated with the host, referred to by
2724  * @RandomChallenge. The result is put into the entry of the working hash
2725  * array and returned by reference through @HashWorking.
2726  **/
2727 static void
2728 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2729 {
2730         *HashWorking = (*RandomChallenge ^ *HashWorking);
2731 }
2732
2733 /**
2734  * lpfc_hba_init - Perform special handling for LC HBA initialization
2735  * @phba: pointer to lpfc hba data structure.
2736  * @hbainit: pointer to an array of unsigned 32-bit integers.
2737  *
2738  * This routine performs the special handling for LC HBA initialization.
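 *
 * Illustrative call (a sketch, not an existing call site): @hbainit must
 * hold at least five uint32_t words and receives the 160-bit challenge
 * hash on return:
 *
 *   uint32_t hbainit[5];
 *   lpfc_hba_init(phba, hbainit);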
2739  **/
2740 void
2741 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2742 {
2743         int t;
2744         uint32_t *HashWorking;
2745         uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2746
2747         HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2748         if (!HashWorking)
2749                 return;
2750
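        /* Seed entries 0/78 and 1/79 of the working array with the WWNN words */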
2751         HashWorking[0] = HashWorking[78] = *pwwnn++;
2752         HashWorking[1] = HashWorking[79] = *pwwnn;
2753
2754         for (t = 0; t < 7; t++)
2755                 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2756
2757         lpfc_sha_init(hbainit);
2758         lpfc_sha_iterate(hbainit, HashWorking);
2759         kfree(HashWorking);
2760 }
2761
2762 /**
2763  * lpfc_cleanup - Performs vport cleanups before deleting a vport
2764  * @vport: pointer to a virtual N_Port data structure.
2765  *
2766  * This routine performs the necessary cleanups before deleting the @vport.
2767  * It invokes the discovery state machine to perform necessary state
2768  * transitions and to release the ndlps associated with the @vport. Note,
2769  * the physical port is treated as @vport 0.
2770  **/
2771 void
2772 lpfc_cleanup(struct lpfc_vport *vport)
2773 {
2774         struct lpfc_hba   *phba = vport->phba;
2775         struct lpfc_nodelist *ndlp, *next_ndlp;
2776         int i = 0;
2777
2778         if (phba->link_state > LPFC_LINK_DOWN)
2779                 lpfc_port_link_failure(vport);
2780
2781         list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2782                 if (!NLP_CHK_NODE_ACT(ndlp)) {
2783                         ndlp = lpfc_enable_node(vport, ndlp,
2784                                                 NLP_STE_UNUSED_NODE);
2785                         if (!ndlp)
2786                                 continue;
2787                         spin_lock_irq(&phba->ndlp_lock);
2788                         NLP_SET_FREE_REQ(ndlp);
2789                         spin_unlock_irq(&phba->ndlp_lock);
2790                         /* Trigger the release of the ndlp memory */
2791                         lpfc_nlp_put(ndlp);
2792                         continue;
2793                 }
2794                 spin_lock_irq(&phba->ndlp_lock);
2795                 if (NLP_CHK_FREE_REQ(ndlp)) {
2796                         /* The ndlp is already in memory free mode; skip it */
2797                         spin_unlock_irq(&phba->ndlp_lock);
2798                         continue;
2799                 } else
2800                         /* Indicate request for freeing ndlp memory */
2801                         NLP_SET_FREE_REQ(ndlp);
2802                 spin_unlock_irq(&phba->ndlp_lock);
2803
2804                 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2805                     ndlp->nlp_DID == Fabric_DID) {
2806                         /* Just free up ndlp with Fabric_DID for vports */
2807                         lpfc_nlp_put(ndlp);
2808                         continue;
2809                 }
2810
2811                 /* Take care of nodes in the unused state before the state
2812                  * machine takes action.
2813                  */
2814                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2815                         lpfc_nlp_put(ndlp);
2816                         continue;
2817                 }
2818
2819                 if (ndlp->nlp_type & NLP_FABRIC)
2820                         lpfc_disc_state_machine(vport, ndlp, NULL,
2821                                         NLP_EVT_DEVICE_RECOVERY);
2822
2823                 lpfc_disc_state_machine(vport, ndlp, NULL,
2824                                              NLP_EVT_DEVICE_RM);
2825         }
2826
2827         /* At this point, ALL ndlp's should be gone
2828          * because of the previous NLP_EVT_DEVICE_RM.
2829          * Let's wait for this to happen, if needed.
2830          */
2831         while (!list_empty(&vport->fc_nodes)) {
2832                 if (i++ > 3000) {
2833                         lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2834                                 "0233 Nodelist not empty\n");
2835                         list_for_each_entry_safe(ndlp, next_ndlp,
2836                                                 &vport->fc_nodes, nlp_listp) {
2837                                 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2838                                                 LOG_NODE,
2839                                                 "0282 did:x%x ndlp:x%px "
2840                                                 "usgmap:x%x refcnt:%d\n",
2841                                                 ndlp->nlp_DID, (void *)ndlp,
2842                                                 ndlp->nlp_usg_map,
2843                                                 kref_read(&ndlp->kref));
2844                         }
2845                         break;
2846                 }
2847
2848                 /* Wait for any activity on ndlps to settle */
2849                 msleep(10);
2850         }
2851         lpfc_cleanup_vports_rrqs(vport, NULL);
2852 }
2853
2854 /**
2855  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2856  * @vport: pointer to a virtual N_Port data structure.
2857  *
2858  * This routine stops all the timers associated with a @vport. This function
2859  * is invoked before disabling or deleting a @vport. Note that the physical
2860  * port is treated as @vport 0.
2861  **/
2862 void
2863 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2864 {
2865         del_timer_sync(&vport->els_tmofunc);
2866         del_timer_sync(&vport->delayed_disc_tmo);
2867         lpfc_can_disctmo(vport);
2868         return;
2869 }
2870
2871 /**
2872  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2873  * @phba: pointer to lpfc hba data structure.
2874  *
2875  * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2876  * caller of this routine should already hold the hbalock.
2877  **/
2878 void
2879 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2880 {
2881         /* Clear pending FCF rediscovery wait flag */
2882         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2883
2884         /* Now, try to stop the timer */
2885         del_timer(&phba->fcf.redisc_wait);
2886 }
2887
2888 /**
2889  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2890  * @phba: pointer to lpfc hba data structure.
2891  *
2892  * This routine stops the SLI4 FCF rediscovery wait timer if it's on. It
2893  * checks, with the hbalock held, whether the FCF rediscovery wait timer
2894  * is pending before proceeding to disable the timer and clear the wait
2895  * timer pending flag.
2896  **/
2897 void
2898 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2899 {
2900         spin_lock_irq(&phba->hbalock);
2901         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2902                 /* FCF rediscovery timer already fired or stopped */
2903                 spin_unlock_irq(&phba->hbalock);
2904                 return;
2905         }
2906         __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2907         /* Clear failover in progress flags */
2908         phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2909         spin_unlock_irq(&phba->hbalock);
2910 }
2911
2912 /**
2913  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2914  * @phba: pointer to lpfc hba data structure.
2915  *
2916  * This routine stops all the timers associated with an HBA. This function is
2917  * invoked before either putting an HBA offline or unloading the driver.
2918  **/
2919 void
2920 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2921 {
2922         if (phba->pport)
2923                 lpfc_stop_vport_timers(phba->pport);
2924         cancel_delayed_work_sync(&phba->eq_delay_work);
2925         del_timer_sync(&phba->sli.mbox_tmo);
2926         del_timer_sync(&phba->fabric_block_timer);
2927         del_timer_sync(&phba->eratt_poll);
2928         del_timer_sync(&phba->hb_tmofunc);
2929         if (phba->sli_rev == LPFC_SLI_REV4) {
2930                 del_timer_sync(&phba->rrq_tmr);
2931                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2932         }
2933         phba->hb_outstanding = 0;
2934
2935         switch (phba->pci_dev_grp) {
2936         case LPFC_PCI_DEV_LP:
2937                 /* Stop any LightPulse device specific driver timers */
2938                 del_timer_sync(&phba->fcp_poll_timer);
2939                 break;
2940         case LPFC_PCI_DEV_OC:
2941                 /* Stop any OneConnect device specific driver timers */
2942                 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2943                 break;
2944         default:
2945                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2946                                 "0297 Invalid device group (x%x)\n",
2947                                 phba->pci_dev_grp);
2948                 break;
2949         }
2950         return;
2951 }
2952
2953 /**
2954  * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2955  * @phba: pointer to lpfc hba data structure.
2956  * @mbx_action: flag for mailbox shutdown action.
2957  *
2958  * This routine marks an HBA's management interface as blocked. Once it is
2959  * marked as blocked, all user space access to the HBA, whether from the
2960  * sysfs or the libdfc interface, is blocked. The HBA blocks the management
2961  * interface while the driver prepares the HBA for online or offline.
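 *
 * Typical pairing, as in lpfc_online():
 *
 *   lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
 *   ... bring the HBA interface up or down ...
 *   lpfc_unblock_mgmt_io(phba);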
2962  **/
2963 static void
2964 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2965 {
2966         unsigned long iflag;
2967         uint8_t actcmd = MBX_HEARTBEAT;
2968         unsigned long timeout;
2969
2970         spin_lock_irqsave(&phba->hbalock, iflag);
2971         phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2972         spin_unlock_irqrestore(&phba->hbalock, iflag);
2973         if (mbx_action == LPFC_MBX_NO_WAIT)
2974                 return;
2975         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2976         spin_lock_irqsave(&phba->hbalock, iflag);
2977         if (phba->sli.mbox_active) {
2978                 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2979                 /* Determine how long we might wait for the active mailbox
2980                  * command to be gracefully completed by firmware.
2981                  */
2982                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2983                                 phba->sli.mbox_active) * 1000) + jiffies;
2984         }
2985         spin_unlock_irqrestore(&phba->hbalock, iflag);
2986
2987         /* Wait for the outstanding mailbox command to complete */
2988         while (phba->sli.mbox_active) {
2989                 /* Check active mailbox complete status every 2ms */
2990                 msleep(2);
2991                 if (time_after(jiffies, timeout)) {
2992                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2993                                 "2813 Mgmt IO is Blocked %x "
2994                                 "- mbox cmd %x still active\n",
2995                                 phba->sli.sli_flag, actcmd);
2996                         break;
2997                 }
2998         }
2999 }
3000
3001 /**
3002  * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3003  * @phba: pointer to lpfc hba data structure.
3004  *
3005  * Allocate RPIs for all active remote nodes. This is needed whenever
3006  * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3007  * is to fix up the temporary rpi assignments.
3008  **/
3009 void
3010 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3011 {
3012         struct lpfc_nodelist  *ndlp, *next_ndlp;
3013         struct lpfc_vport **vports;
3014         int i, rpi;
3015         unsigned long flags;
3016
3017         if (phba->sli_rev != LPFC_SLI_REV4)
3018                 return;
3019
3020         vports = lpfc_create_vport_work_array(phba);
3021         if (vports == NULL)
3022                 return;
3023
3024         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3025                 if (vports[i]->load_flag & FC_UNLOADING)
3026                         continue;
3027
3028                 list_for_each_entry_safe(ndlp, next_ndlp,
3029                                          &vports[i]->fc_nodes,
3030                                          nlp_listp) {
3031                         if (!NLP_CHK_NODE_ACT(ndlp))
3032                                 continue;
3033                         rpi = lpfc_sli4_alloc_rpi(phba);
3034                         if (rpi == LPFC_RPI_ALLOC_ERROR) {
3035                                 spin_lock_irqsave(&phba->ndlp_lock, flags);
3036                                 NLP_CLR_NODE_ACT(ndlp);
3037                                 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3038                                 continue;
3039                         }
3040                         ndlp->nlp_rpi = rpi;
3041                         lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3042                                          LOG_NODE | LOG_DISCOVERY,
3043                                          "0009 Assign RPI x%x to ndlp x%px "
3044                                          "DID:x%06x flg:x%x map:x%x\n",
3045                                          ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3046                                          ndlp->nlp_flag, ndlp->nlp_usg_map);
3047                 }
3048         }
3049         lpfc_destroy_vport_work_array(phba, vports);
3050 }
3051
3052 /**
3053  * lpfc_create_expedite_pool - create expedite pool
3054  * @phba: pointer to lpfc hba data structure.
3055  *
3056  * This routine moves a batch of XRIs from the lpfc_io_buf_list_put list of
3057  * HWQ 0 to the expedite pool and marks them as expedite.
3058  **/
3059 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3060 {
3061         struct lpfc_sli4_hdw_queue *qp;
3062         struct lpfc_io_buf *lpfc_ncmd;
3063         struct lpfc_io_buf *lpfc_ncmd_next;
3064         struct lpfc_epd_pool *epd_pool;
3065         unsigned long iflag;
3066
3067         epd_pool = &phba->epd_pool;
3068         qp = &phba->sli4_hba.hdwq[0];
3069
3070         spin_lock_init(&epd_pool->lock);
3071         spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3072         spin_lock(&epd_pool->lock);
3073         INIT_LIST_HEAD(&epd_pool->list);
3074         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3075                                  &qp->lpfc_io_buf_list_put, list) {
3076                 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3077                 lpfc_ncmd->expedite = true;
3078                 qp->put_io_bufs--;
3079                 epd_pool->count++;
3080                 if (epd_pool->count >= XRI_BATCH)
3081                         break;
3082         }
3083         spin_unlock(&epd_pool->lock);
3084         spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3085 }
3086
3087 /**
3088  * lpfc_destroy_expedite_pool - destroy expedite pool
3089  * @phba: pointer to lpfc hba data structure.
3090  *
3091  * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3092  * of HWQ 0 and clears the expedite mark.
3093  **/
3094 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3095 {
3096         struct lpfc_sli4_hdw_queue *qp;
3097         struct lpfc_io_buf *lpfc_ncmd;
3098         struct lpfc_io_buf *lpfc_ncmd_next;
3099         struct lpfc_epd_pool *epd_pool;
3100         unsigned long iflag;
3101
3102         epd_pool = &phba->epd_pool;
3103         qp = &phba->sli4_hba.hdwq[0];
3104
3105         spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3106         spin_lock(&epd_pool->lock);
3107         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3108                                  &epd_pool->list, list) {
3109                 list_move_tail(&lpfc_ncmd->list,
3110                                &qp->lpfc_io_buf_list_put);
3111                 lpfc_ncmd->expedite = false;
3112                 qp->put_io_bufs++;
3113                 epd_pool->count--;
3114         }
3115         spin_unlock(&epd_pool->lock);
3116         spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3117 }
3118
3119 /**
3120  * lpfc_create_multixri_pools - create multi-XRI pools
3121  * @phba: pointer to lpfc hba data structure.
3122  *
3123  * This routine initializes the public and private XRI pools for each HWQ,
3124  * then moves XRIs from lpfc_io_buf_list_put to the public pool. High and
3125  * low watermarks are also initialized.
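 *
 * Resulting layout per HWQ (sketch):
 *
 *   hdwq[i].p_multixri_pool
 *       ->pbl_pool  public free XRIs, seeded from lpfc_io_buf_list_put
 *       ->pvt_pool  private free XRIs, empty at creation (low watermark
 *                   XRI_BATCH, high watermark xri_limit / 2)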
3126  **/
3127 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3128 {
3129         u32 i, j;
3130         u32 hwq_count;
3131         u32 count_per_hwq;
3132         struct lpfc_io_buf *lpfc_ncmd;
3133         struct lpfc_io_buf *lpfc_ncmd_next;
3134         unsigned long iflag;
3135         struct lpfc_sli4_hdw_queue *qp;
3136         struct lpfc_multixri_pool *multixri_pool;
3137         struct lpfc_pbl_pool *pbl_pool;
3138         struct lpfc_pvt_pool *pvt_pool;
3139
3140         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3141                         "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3142                         phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3143                         phba->sli4_hba.io_xri_cnt);
3144
3145         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3146                 lpfc_create_expedite_pool(phba);
3147
3148         hwq_count = phba->cfg_hdw_queue;
3149         count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3150
3151         for (i = 0; i < hwq_count; i++) {
3152                 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3153
3154                 if (!multixri_pool) {
3155                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3156                                         "1238 Failed to allocate memory for "
3157                                         "multixri_pool\n");
3158
3159                         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3160                                 lpfc_destroy_expedite_pool(phba);
3161
3162                         j = 0;
3163                         while (j < i) {
3164                                 qp = &phba->sli4_hba.hdwq[j];
3165                                 kfree(qp->p_multixri_pool);
3166                                 j++;
3167                         }
3168                         phba->cfg_xri_rebalancing = 0;
3169                         return;
3170                 }
3171
3172                 qp = &phba->sli4_hba.hdwq[i];
3173                 qp->p_multixri_pool = multixri_pool;
3174
3175                 multixri_pool->xri_limit = count_per_hwq;
3176                 multixri_pool->rrb_next_hwqid = i;
3177
3178                 /* Deal with public free xri pool */
3179                 pbl_pool = &multixri_pool->pbl_pool;
3180                 spin_lock_init(&pbl_pool->lock);
3181                 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3182                 spin_lock(&pbl_pool->lock);
3183                 INIT_LIST_HEAD(&pbl_pool->list);
3184                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3185                                          &qp->lpfc_io_buf_list_put, list) {
3186                         list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3187                         qp->put_io_bufs--;
3188                         pbl_pool->count++;
3189                 }
3190                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3191                                 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3192                                 pbl_pool->count, i);
3193                 spin_unlock(&pbl_pool->lock);
3194                 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3195
3196                 /* Deal with private free xri pool */
3197                 pvt_pool = &multixri_pool->pvt_pool;
3198                 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3199                 pvt_pool->low_watermark = XRI_BATCH;
3200                 spin_lock_init(&pvt_pool->lock);
3201                 spin_lock_irqsave(&pvt_pool->lock, iflag);
3202                 INIT_LIST_HEAD(&pvt_pool->list);
3203                 pvt_pool->count = 0;
3204                 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3205         }
3206 }
3207
3208 /**
3209  * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3210  * @phba: pointer to lpfc hba data structure.
3211  *
3212  * This routine returns all public and private pool XRIs to lpfc_io_buf_list_put.
3213  **/
3214 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3215 {
3216         u32 i;
3217         u32 hwq_count;
3218         struct lpfc_io_buf *lpfc_ncmd;
3219         struct lpfc_io_buf *lpfc_ncmd_next;
3220         unsigned long iflag;
3221         struct lpfc_sli4_hdw_queue *qp;
3222         struct lpfc_multixri_pool *multixri_pool;
3223         struct lpfc_pbl_pool *pbl_pool;
3224         struct lpfc_pvt_pool *pvt_pool;
3225
3226         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3227                 lpfc_destroy_expedite_pool(phba);
3228
3229         if (!(phba->pport->load_flag & FC_UNLOADING))
3230                 lpfc_sli_flush_io_rings(phba);
3231
3232         hwq_count = phba->cfg_hdw_queue;
3233
3234         for (i = 0; i < hwq_count; i++) {
3235                 qp = &phba->sli4_hba.hdwq[i];
3236                 multixri_pool = qp->p_multixri_pool;
3237                 if (!multixri_pool)
3238                         continue;
3239
3240                 qp->p_multixri_pool = NULL;
3241
3242                 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3243
3244                 /* Deal with public free xri pool */
3245                 pbl_pool = &multixri_pool->pbl_pool;
3246                 spin_lock(&pbl_pool->lock);
3247
3248                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3249                                 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3250                                 pbl_pool->count, i);
3251
3252                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3253                                          &pbl_pool->list, list) {
3254                         list_move_tail(&lpfc_ncmd->list,
3255                                        &qp->lpfc_io_buf_list_put);
3256                         qp->put_io_bufs++;
3257                         pbl_pool->count--;
3258                 }
3259
3260                 INIT_LIST_HEAD(&pbl_pool->list);
3261                 pbl_pool->count = 0;
3262
3263                 spin_unlock(&pbl_pool->lock);
3264
3265                 /* Deal with private free xri pool */
3266                 pvt_pool = &multixri_pool->pvt_pool;
3267                 spin_lock(&pvt_pool->lock);
3268
3269                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3270                                 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3271                                 pvt_pool->count, i);
3272
3273                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3274                                          &pvt_pool->list, list) {
3275                         list_move_tail(&lpfc_ncmd->list,
3276                                        &qp->lpfc_io_buf_list_put);
3277                         qp->put_io_bufs++;
3278                         pvt_pool->count--;
3279                 }
3280
3281                 INIT_LIST_HEAD(&pvt_pool->list);
3282                 pvt_pool->count = 0;
3283
3284                 spin_unlock(&pvt_pool->lock);
3285                 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3286
3287                 kfree(multixri_pool);
3288         }
3289 }
3290
3291 /**
3292  * lpfc_online - Initialize and bring an HBA online
3293  * @phba: pointer to lpfc hba data structure.
3294  *
3295  * This routine initializes an HBA and brings it online. During this
3296  * process, the management interface is blocked to prevent user space access
3297  * to the HBA from interfering with the driver initialization.
3298  *
3299  * Return codes
3300  *   0 - successful
3301  *   1 - failed
3302  **/
3303 int
3304 lpfc_online(struct lpfc_hba *phba)
3305 {
3306         struct lpfc_vport *vport;
3307         struct lpfc_vport **vports;
3308         int i, error = 0;
3309         bool vpis_cleared = false;
3310
3311         if (!phba)
3312                 return 0;
3313         vport = phba->pport;
3314
3315         if (!(vport->fc_flag & FC_OFFLINE_MODE))
3316                 return 0;
3317
3318         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3319                         "0458 Bring Adapter online\n");
3320
3321         lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3322
3323         if (phba->sli_rev == LPFC_SLI_REV4) {
3324                 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3325                         lpfc_unblock_mgmt_io(phba);
3326                         return 1;
3327                 }
3328                 spin_lock_irq(&phba->hbalock);
3329                 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3330                         vpis_cleared = true;
3331                 spin_unlock_irq(&phba->hbalock);
3332
3333                 /* Reestablish the local initiator port.
3334                  * The offline process destroyed the previous lport.
3335                  */
3336                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3337                                 !phba->nvmet_support) {
3338                         error = lpfc_nvme_create_localport(phba->pport);
3339                         if (error)
3340                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3341                                         "6132 NVME restore reg failed "
3342                                         "on nvmei error x%x\n", error);
3343                 }
3344         } else {
3345                 lpfc_sli_queue_init(phba);
3346                 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3347                         lpfc_unblock_mgmt_io(phba);
3348                         return 1;
3349                 }
3350         }
3351
3352         vports = lpfc_create_vport_work_array(phba);
3353         if (vports != NULL) {
3354                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3355                         struct Scsi_Host *shost;
3356                         shost = lpfc_shost_from_vport(vports[i]);
3357                         spin_lock_irq(shost->host_lock);
3358                         vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3359                         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3360                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3361                         if (phba->sli_rev == LPFC_SLI_REV4) {
3362                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3363                                 if ((vpis_cleared) &&
3364                                     (vports[i]->port_type !=
3365                                         LPFC_PHYSICAL_PORT))
3366                                         vports[i]->vpi = 0;
3367                         }
3368                         spin_unlock_irq(shost->host_lock);
3369                 }
3370         }
3371         lpfc_destroy_vport_work_array(phba, vports);
3372
3373         if (phba->cfg_xri_rebalancing)
3374                 lpfc_create_multixri_pools(phba);
3375
3376         lpfc_cpuhp_add(phba);
3377
3378         lpfc_unblock_mgmt_io(phba);
3379         return 0;
3380 }
3381
3382 /**
3383  * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
3384  * @phba: pointer to lpfc hba data structure.
3385  *
3386  * This routine marks an HBA's management interface as not blocked. Once
3387  * the HBA's management interface is marked as not blocked, all user space
3388  * access to the HBA, whether from the sysfs interface or the libdfc
3389  * interface, is allowed. The HBA is set to block the management interface
3390  * when the driver prepares the HBA interface for online or offline, and
3391  * is then set to unblock the management interface afterwards.
3392  **/
3393 void
3394 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3395 {
3396         unsigned long iflag;
3397
3398         spin_lock_irqsave(&phba->hbalock, iflag);
3399         phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3400         spin_unlock_irqrestore(&phba->hbalock, iflag);
3401 }
3402
3403 /**
3404  * lpfc_offline_prep - Prepare an HBA to be brought offline
3405  * @phba: pointer to lpfc hba data structure.
3406  * @mbx_action: flag for mailbox shutdown action.
3407  *
3408  * This routine is invoked to prepare an HBA to be brought offline. It issues
3409  * an unreg_login to all nodes on all vports and flushes the mailbox queue.
3410  **/
3411 void
3412 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3413 {
3414         struct lpfc_vport *vport = phba->pport;
3415         struct lpfc_nodelist  *ndlp, *next_ndlp;
3416         struct lpfc_vport **vports;
3417         struct Scsi_Host *shost;
3418         int i;
3419
3420         if (vport->fc_flag & FC_OFFLINE_MODE)
3421                 return;
3422
3423         lpfc_block_mgmt_io(phba, mbx_action);
3424
3425         lpfc_linkdown(phba);
3426
3427         /* Issue an unreg_login to all nodes on all vports */
3428         vports = lpfc_create_vport_work_array(phba);
3429         if (vports != NULL) {
3430                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3431                         if (vports[i]->load_flag & FC_UNLOADING)
3432                                 continue;
3433                         shost = lpfc_shost_from_vport(vports[i]);
3434                         spin_lock_irq(shost->host_lock);
3435                         vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3436                         vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3437                         vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3438                         spin_unlock_irq(shost->host_lock);
3439
3440                         shost = lpfc_shost_from_vport(vports[i]);
3441                         list_for_each_entry_safe(ndlp, next_ndlp,
3442                                                  &vports[i]->fc_nodes,
3443                                                  nlp_listp) {
3444                                 if ((!NLP_CHK_NODE_ACT(ndlp)) ||
3445                                     ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3446                                         /* Driver must assume RPI is invalid for
3447                                          * any unused or inactive node.
3448                                          */
3449                                         ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3450                                         continue;
3451                                 }
3452
3453                                 if (ndlp->nlp_type & NLP_FABRIC) {
3454                                         lpfc_disc_state_machine(vports[i], ndlp,
3455                                                 NULL, NLP_EVT_DEVICE_RECOVERY);
3456                                         lpfc_disc_state_machine(vports[i], ndlp,
3457                                                 NULL, NLP_EVT_DEVICE_RM);
3458                                 }
3459                                 spin_lock_irq(shost->host_lock);
3460                                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3461                                 spin_unlock_irq(shost->host_lock);
3462                                 /*
3463                                  * Whenever an SLI4 port goes offline, free the
3464                                  * RPI. Get a new RPI when the adapter port
3465                                  * comes back online.
3466                                  */
3467                                 if (phba->sli_rev == LPFC_SLI_REV4) {
3468                                         lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3469                                                  LOG_NODE | LOG_DISCOVERY,
3470                                                  "0011 Free RPI x%x on "
3471                                                  "ndlp:x%px did x%x "
3472                                                  "usgmap:x%x\n",
3473                                                  ndlp->nlp_rpi, ndlp,
3474                                                  ndlp->nlp_DID,
3475                                                  ndlp->nlp_usg_map);
3476                                         lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3477                                         ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3478                                 }
3479                                 lpfc_unreg_rpi(vports[i], ndlp);
3480                         }
3481                 }
3482         }
3483         lpfc_destroy_vport_work_array(phba, vports);
3484
3485         lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3486
3487         if (phba->wq)
3488                 flush_workqueue(phba->wq);
3489 }
3490
3491 /**
3492  * lpfc_offline - Bring an HBA offline
3493  * @phba: pointer to lpfc hba data structure.
3494  *
3495  * This routine actually brings an HBA offline. It stops all the timers
3496  * associated with the HBA, brings down the SLI layer, and eventually
3497  * marks the HBA as in offline state for the upper layer protocol.
3498  **/
3499 void
3500 lpfc_offline(struct lpfc_hba *phba)
3501 {
3502         struct Scsi_Host  *shost;
3503         struct lpfc_vport **vports;
3504         int i;
3505
3506         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3507                 return;
3508
3509         /* stop port and all timers associated with this hba */
3510         lpfc_stop_port(phba);
3511
3512         /* Tear down the local and target port registrations.  The
3513          * nvme transports need to clean up.
3514          */
3515         lpfc_nvmet_destroy_targetport(phba);
3516         lpfc_nvme_destroy_localport(phba->pport);
3517
3518         vports = lpfc_create_vport_work_array(phba);
3519         if (vports != NULL)
3520                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3521                         lpfc_stop_vport_timers(vports[i]);
3522         lpfc_destroy_vport_work_array(phba, vports);
3523         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3524                         "0460 Bring Adapter offline\n");
3525         /* Bring down the SLI Layer and clean up. The HBA is offline now. */
3527         lpfc_sli_hba_down(phba);
3528         spin_lock_irq(&phba->hbalock);
3529         phba->work_ha = 0;
3530         spin_unlock_irq(&phba->hbalock);
3531         vports = lpfc_create_vport_work_array(phba);
3532         if (vports != NULL)
3533                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3534                         shost = lpfc_shost_from_vport(vports[i]);
3535                         spin_lock_irq(shost->host_lock);
3536                         vports[i]->work_port_events = 0;
3537                         vports[i]->fc_flag |= FC_OFFLINE_MODE;
3538                         spin_unlock_irq(shost->host_lock);
3539                 }
3540         lpfc_destroy_vport_work_array(phba, vports);
3541         __lpfc_cpuhp_remove(phba);
3542
3543         if (phba->cfg_xri_rebalancing)
3544                 lpfc_destroy_multixri_pools(phba);
3545 }
3546
3547 /**
3548  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3549  * @phba: pointer to lpfc hba data structure.
3550  *
3551  * This routine is to free all the SCSI buffers and IOCBs from the driver
3552  * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3553  * the internal resources before the device is removed from the system.
3554  **/
3555 static void
3556 lpfc_scsi_free(struct lpfc_hba *phba)
3557 {
3558         struct lpfc_io_buf *sb, *sb_next;
3559
3560         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3561                 return;
3562
3563         spin_lock_irq(&phba->hbalock);
3564
3565         /* Release all the lpfc_scsi_bufs maintained by this host. */
3566
3567         spin_lock(&phba->scsi_buf_list_put_lock);
3568         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3569                                  list) {
3570                 list_del(&sb->list);
3571                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3572                               sb->dma_handle);
3573                 kfree(sb);
3574                 phba->total_scsi_bufs--;
3575         }
3576         spin_unlock(&phba->scsi_buf_list_put_lock);
3577
3578         spin_lock(&phba->scsi_buf_list_get_lock);
3579         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3580                                  list) {
3581                 list_del(&sb->list);
3582                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3583                               sb->dma_handle);
3584                 kfree(sb);
3585                 phba->total_scsi_bufs--;
3586         }
3587         spin_unlock(&phba->scsi_buf_list_get_lock);
3588         spin_unlock_irq(&phba->hbalock);
3589 }
3590
3591 /**
3592  * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3593  * @phba: pointer to lpfc hba data structure.
3594  *
3595  * This routine is to free all the IO buffers and IOCBs from the driver
3596  * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3597  * the internal resources before the device is removed from the system.
3598  **/
3599 void
3600 lpfc_io_free(struct lpfc_hba *phba)
3601 {
3602         struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3603         struct lpfc_sli4_hdw_queue *qp;
3604         int idx;
3605
3606         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3607                 qp = &phba->sli4_hba.hdwq[idx];
3608                 /* Release all the lpfc_io_bufs maintained by this host. */
3609                 spin_lock(&qp->io_buf_list_put_lock);
3610                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3611                                          &qp->lpfc_io_buf_list_put,
3612                                          list) {
3613                         list_del(&lpfc_ncmd->list);
3614                         qp->put_io_bufs--;
3615                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3616                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3617                         if (phba->cfg_xpsgl && !phba->nvmet_support)
3618                                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3619                         lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3620                         kfree(lpfc_ncmd);
3621                         qp->total_io_bufs--;
3622                 }
3623                 spin_unlock(&qp->io_buf_list_put_lock);
3624
3625                 spin_lock(&qp->io_buf_list_get_lock);
3626                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3627                                          &qp->lpfc_io_buf_list_get,
3628                                          list) {
3629                         list_del(&lpfc_ncmd->list);
3630                         qp->get_io_bufs--;
3631                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3632                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3633                         if (phba->cfg_xpsgl && !phba->nvmet_support)
3634                                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3635                         lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3636                         kfree(lpfc_ncmd);
3637                         qp->total_io_bufs--;
3638                 }
3639                 spin_unlock(&qp->io_buf_list_get_lock);
3640         }
3641 }
3642
3643 /**
3644  * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3645  * @phba: pointer to lpfc hba data structure.
3646  *
3647  * This routine first calculates the sizes of the current els and allocated
3648  * scsi sgl lists, and then goes through all sgls to update the physical
3649  * XRIs assigned due to port function reset. During port initialization, the
3650  * current els and allocated scsi sgl list counts are 0.
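 *
 * Three cases are handled: the els xri-sgl list grew (additional sgls are
 * allocated), shrank (the excess sgls are freed), or stayed the same; in
 * every case each remaining sgl is then assigned a fresh physical XRI.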
3651  *
3652  * Return codes
3653  *   0 - successful, -ENOMEM - failed to allocate memory
3654  **/
3655 int
3656 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3657 {
3658         struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3659         uint16_t i, lxri, xri_cnt, els_xri_cnt;
3660         LIST_HEAD(els_sgl_list);
3661         int rc;
3662
3663         /*
3664          * update on pci function's els xri-sgl list
3665          */
3666         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3667
3668         if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3669                 /* els xri-sgl expanded */
3670                 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3671                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3672                                 "3157 ELS xri-sgl count increased from "
3673                                 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3674                                 els_xri_cnt);
3675                 /* allocate the additional els sgls */
3676                 for (i = 0; i < xri_cnt; i++) {
3677                         sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3678                                              GFP_KERNEL);
3679                         if (sglq_entry == NULL) {
3680                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3681                                                 "2562 Failure to allocate an "
3682                                                 "ELS sgl entry:%d\n", i);
3683                                 rc = -ENOMEM;
3684                                 goto out_free_mem;
3685                         }
3686                         sglq_entry->buff_type = GEN_BUFF_TYPE;
3687                         sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3688                                                            &sglq_entry->phys);
3689                         if (sglq_entry->virt == NULL) {
3690                                 kfree(sglq_entry);
3691                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3692                                                 "2563 Failure to allocate an "
3693                                                 "ELS mbuf:%d\n", i);
3694                                 rc = -ENOMEM;
3695                                 goto out_free_mem;
3696                         }
3697                         sglq_entry->sgl = sglq_entry->virt;
3698                         memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3699                         sglq_entry->state = SGL_FREED;
3700                         list_add_tail(&sglq_entry->list, &els_sgl_list);
3701                 }
3702                 spin_lock_irq(&phba->hbalock);
3703                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3704                 list_splice_init(&els_sgl_list,
3705                                  &phba->sli4_hba.lpfc_els_sgl_list);
3706                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3707                 spin_unlock_irq(&phba->hbalock);
3708         } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3709                 /* els xri-sgl shrunk */
3710                 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3711                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3712                                 "3158 ELS xri-sgl count decreased from "
3713                                 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3714                                 els_xri_cnt);
3715                 spin_lock_irq(&phba->hbalock);
3716                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3717                 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3718                                  &els_sgl_list);
3719                 /* release extra els sgls from list */
3720                 for (i = 0; i < xri_cnt; i++) {
3721                         list_remove_head(&els_sgl_list,
3722                                          sglq_entry, struct lpfc_sglq, list);
3723                         if (sglq_entry) {
3724                                 __lpfc_mbuf_free(phba, sglq_entry->virt,
3725                                                  sglq_entry->phys);
3726                                 kfree(sglq_entry);
3727                         }
3728                 }
3729                 list_splice_init(&els_sgl_list,
3730                                  &phba->sli4_hba.lpfc_els_sgl_list);
3731                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3732                 spin_unlock_irq(&phba->hbalock);
3733         } else
3734                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3735                                 "3163 ELS xri-sgl count unchanged: %d\n",
3736                                 els_xri_cnt);
3737         phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3738
3739         /* update xris to els sgls on the list */
3740         sglq_entry = NULL;
3741         sglq_entry_next = NULL;
3742         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3743                                  &phba->sli4_hba.lpfc_els_sgl_list, list) {
3744                 lxri = lpfc_sli4_next_xritag(phba);
3745                 if (lxri == NO_XRI) {
3746                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3747                                         "2400 Failed to allocate xri for "
3748                                         "ELS sgl\n");
3749                         rc = -ENOMEM;
3750                         goto out_free_mem;
3751                 }
3752                 sglq_entry->sli4_lxritag = lxri;
3753                 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3754         }
3755         return 0;
3756
3757 out_free_mem:
3758         lpfc_free_els_sgl_list(phba);
3759         return rc;
3760 }
3761
3762 /**
3763  * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3764  * @phba: pointer to lpfc hba data structure.
3765  *
3766  * This routine first calculates the number of XRIs dedicated to nvmet (all
3767  * XRIs remaining after the els allocation), then goes through all nvmet
3768  * sgls to update the physical XRIs assigned due to port function reset.
3769  * During port initialization, the current nvmet sgl list count is 0.
3770  *
3771  * Return codes
3772  *   0 - successful, -ENOMEM - failed to allocate memory
3773  **/
3774 int
3775 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3776 {
3777         struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3778         uint16_t i, lxri, xri_cnt, els_xri_cnt;
3779         uint16_t nvmet_xri_cnt;
3780         LIST_HEAD(nvmet_sgl_list);
3781         int rc;
3782
3783         /*
3784          * update on pci function's nvmet xri-sgl list
3785          */
3786         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3787
3788         /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3789         nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3790         if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3791                 /* nvmet xri-sgl expanded */
3792                 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3793                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3794                                 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3795                                 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3796                 /* allocate the additional nvmet sgls */
3797                 for (i = 0; i < xri_cnt; i++) {
3798                         sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3799                                              GFP_KERNEL);
3800                         if (sglq_entry == NULL) {
3801                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3802                                                 "6303 Failure to allocate an "
3803                                                 "NVMET sgl entry:%d\n", i);
3804                                 rc = -ENOMEM;
3805                                 goto out_free_mem;
3806                         }
3807                         sglq_entry->buff_type = NVMET_BUFF_TYPE;
3808                         sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3809                                                            &sglq_entry->phys);
3810                         if (sglq_entry->virt == NULL) {
3811                                 kfree(sglq_entry);
3812                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3813                                                 "6304 Failure to allocate an "
3814                                                 "NVMET buf:%d\n", i);
3815                                 rc = -ENOMEM;
3816                                 goto out_free_mem;
3817                         }
3818                         sglq_entry->sgl = sglq_entry->virt;
3819                         memset(sglq_entry->sgl, 0,
3820                                phba->cfg_sg_dma_buf_size);
3821                         sglq_entry->state = SGL_FREED;
3822                         list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3823                 }
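                     /* Splice under both hbalock and sgl_list_lock, in the
                      * same nesting order used by the shrink path below.
                      */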
3824                 spin_lock_irq(&phba->hbalock);
3825                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3826                 list_splice_init(&nvmet_sgl_list,
3827                                  &phba->sli4_hba.lpfc_nvmet_sgl_list);
3828                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3829                 spin_unlock_irq(&phba->hbalock);
3830         } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3831                 /* nvmet xri-sgl shrunk */
3832                 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3833                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3834                                 "6305 NVMET xri-sgl count decreased from "
3835                                 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3836                                 nvmet_xri_cnt);
3837                 spin_lock_irq(&phba->hbalock);
3838                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3839                 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3840                                  &nvmet_sgl_list);
3841                 /* release extra nvmet sgls from list */
3842                 for (i = 0; i < xri_cnt; i++) {
3843                         list_remove_head(&nvmet_sgl_list,
3844                                          sglq_entry, struct lpfc_sglq, list);
3845                         if (sglq_entry) {
3846                                 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3847                                                     sglq_entry->phys);
3848                                 kfree(sglq_entry);
3849                         }
3850                 }
3851                 list_splice_init(&nvmet_sgl_list,
3852                                  &phba->sli4_hba.lpfc_nvmet_sgl_list);
3853                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3854                 spin_unlock_irq(&phba->hbalock);
3855         } else
3856                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3857                                 "6306 NVMET xri-sgl count unchanged: %d\n",
3858                                 nvmet_xri_cnt);
3859         phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3860
3861         /* update xris to nvmet sgls on the list */
3862         sglq_entry = NULL;
3863         sglq_entry_next = NULL;
3864         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3865                                  &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3866                 lxri = lpfc_sli4_next_xritag(phba);
3867                 if (lxri == NO_XRI) {
3868                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3869                                         "6307 Failed to allocate xri for "
3870                                         "NVMET sgl\n");
3871                         rc = -ENOMEM;
3872                         goto out_free_mem;
3873                 }
3874                 sglq_entry->sli4_lxritag = lxri;
3875                 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3876         }
3877         return 0;
3878
3879 out_free_mem:
3880         lpfc_free_nvmet_sgl_list(phba);
3881         return rc;
3882 }
3883
3884 int
3885 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3886 {
3887         LIST_HEAD(blist);
3888         struct lpfc_sli4_hdw_queue *qp;
3889         struct lpfc_io_buf *lpfc_cmd;
3890         struct lpfc_io_buf *iobufp, *prev_iobufp;
3891         int idx, cnt, xri, inserted;
3892
3893         cnt = 0;
3894         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3895                 qp = &phba->sli4_hba.hdwq[idx];
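                     /* Nested lock order: the get-list lock is taken before
                      * the put-list lock so both lists drain together.
                      */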
3896                 spin_lock_irq(&qp->io_buf_list_get_lock);
3897                 spin_lock(&qp->io_buf_list_put_lock);
3898
3899                 /* Take everything off the get and put lists */
3900                 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
3901                 list_splice(&qp->lpfc_io_buf_list_put, &blist);
3902                 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
3903                 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
3904                 cnt += qp->get_io_bufs + qp->put_io_bufs;
3905                 qp->get_io_bufs = 0;
3906                 qp->put_io_bufs = 0;
3907                 qp->total_io_bufs = 0;
3908                 spin_unlock(&qp->io_buf_list_put_lock);
3909                 spin_unlock_irq(&qp->io_buf_list_get_lock);
3910         }
3911
3912         /*
3913          * Take IO buffers off blist and put on cbuf sorted by XRI.
3914          * This is because POST_SGL takes a sequential range of XRIs
3915          * to post to the firmware.
3916          */
3917         for (idx = 0; idx < cnt; idx++) {
3918                 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
3919                 if (!lpfc_cmd)
3920                         return cnt;
3921                 if (idx == 0) {
3922                         list_add_tail(&lpfc_cmd->list, cbuf);
3923                         continue;
3924                 }
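                     /* Insertion sort: insert before the first cbuf entry
                      * with a larger XRI so cbuf stays in ascending order.
                      */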
3925                 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
3926                 inserted = 0;
3927                 prev_iobufp = NULL;
3928                 list_for_each_entry(iobufp, cbuf, list) {
3929                         if (xri < iobufp->cur_iocbq.sli4_xritag) {
3930                                 if (prev_iobufp)
3931                                         list_add(&lpfc_cmd->list,
3932                                                  &prev_iobufp->list);
3933                                 else
3934                                         list_add(&lpfc_cmd->list, cbuf);
3935                                 inserted = 1;
3936                                 break;
3937                         }
3938                         prev_iobufp = iobufp;
3939                 }
3940                 if (!inserted)
3941                         list_add_tail(&lpfc_cmd->list, cbuf);
3942         }
3943         return cnt;
3944 }
3945
3946 int
3947 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
3948 {
3949         struct lpfc_sli4_hdw_queue *qp;
3950         struct lpfc_io_buf *lpfc_cmd;
3951         int idx, cnt;
3952
3953         qp = phba->sli4_hba.hdwq;
3954         cnt = 0;
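             /* Hand buffers back one per hardware queue, round-robin, so
              * the per-queue put lists stay evenly balanced.
              */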
3955         while (!list_empty(cbuf)) {
3956                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3957                         list_remove_head(cbuf, lpfc_cmd,
3958                                          struct lpfc_io_buf, list);
3959                         if (!lpfc_cmd)
3960                                 return cnt;
3961                         cnt++;
3962                         qp = &phba->sli4_hba.hdwq[idx];
3963                         lpfc_cmd->hdwq_no = idx;
3964                         lpfc_cmd->hdwq = qp;
3965                         lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
3966                         lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
3967                         spin_lock(&qp->io_buf_list_put_lock);
3968                         list_add_tail(&lpfc_cmd->list,
3969                                       &qp->lpfc_io_buf_list_put);
3970                         qp->put_io_bufs++;
3971                         qp->total_io_bufs++;
3972                         spin_unlock(&qp->io_buf_list_put_lock);
3973                 }
3974         }
3975         return cnt;
3976 }
3977
3978 /**
3979  * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
3980  * @phba: pointer to lpfc hba data structure.
3981  *
3982  * This routine first calculates the size of the current IO xri-sgl
3983  * list, then walks the list to update the physical XRIs assigned after a
3984  * port function reset. During port initialization, the IO xri-sgl
3985  * list is empty.
3986  *
3987  * Return codes
3988  *   0 - successful, -ENOMEM - memory allocation failure
3989  **/
3990 int
3991 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
3992 {
3993         struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
3994         uint16_t i, lxri, els_xri_cnt;
3995         uint16_t io_xri_cnt, io_xri_max;
3996         LIST_HEAD(io_sgl_list);
3997         int rc, cnt;
3998
3999         /*
4000          * update on pci function's allocated nvme xri-sgl list
4001          */
4002
4003         /* maximum number of xris available for nvme buffers */
4004         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4005         io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4006         phba->sli4_hba.io_xri_max = io_xri_max;
4007
4008         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4009                         "6074 Current allocated XRI sgl count:%d, "
4010                         "maximum XRI count:%d\n",
4011                         phba->sli4_hba.io_xri_cnt,
4012                         phba->sli4_hba.io_xri_max);
4013
4014         cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4015
4016         if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4017                 /* max nvme xri shrunk below the allocated nvme buffers */
4018                 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4019                                         phba->sli4_hba.io_xri_max;
4020                 /* release the extra allocated nvme buffers */
4021                 for (i = 0; i < io_xri_cnt; i++) {
4022                         list_remove_head(&io_sgl_list, lpfc_ncmd,
4023                                          struct lpfc_io_buf, list);
4024                         if (lpfc_ncmd) {
4025                                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4026                                               lpfc_ncmd->data,
4027                                               lpfc_ncmd->dma_handle);
4028                                 kfree(lpfc_ncmd);
4029                         }
4030                 }
4031                 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4032         }
4033
4034         /* update xris associated to remaining allocated nvme buffers */
4035         lpfc_ncmd = NULL;
4036         lpfc_ncmd_next = NULL;
4037         phba->sli4_hba.io_xri_cnt = cnt;
4038         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4039                                  &io_sgl_list, list) {
4040                 lxri = lpfc_sli4_next_xritag(phba);
4041                 if (lxri == NO_XRI) {
4042                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4043                                         "6075 Failed to allocate xri for "
4044                                         "nvme buffer\n");
4045                         rc = -ENOMEM;
4046                         goto out_free_mem;
4047                 }
4048                 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4049                 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4050         }
4051         cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4052         return 0;
4053
4054 out_free_mem:
4055         lpfc_io_free(phba);
4056         return rc;
4057 }
4058
4059 /**
4060  * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4061  * @phba: pointer to lpfc hba data structure.
4062  * @num_to_alloc: The requested number of buffers to allocate.
4063  *
4064  * This routine allocates IO buffers for a device with the SLI-4 interface
4065  * spec. Each buffer contains all the information needed to initiate an
4066  * I/O. After allocating up to @num_to_alloc buffers and putting them on a
4067  * list, it posts them to the port using an SGL block post.
4068  *
4069  * Return codes:
4070  *   int - number of IO buffers that were allocated and posted.
4071  *   0 = failure; fewer than num_to_alloc indicates a partial failure.
4072  **/
4073 int
4074 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4075 {
4076         struct lpfc_io_buf *lpfc_ncmd;
4077         struct lpfc_iocbq *pwqeq;
4078         uint16_t iotag, lxri = 0;
4079         int bcnt, num_posted;
4080         LIST_HEAD(prep_nblist);
4081         LIST_HEAD(post_nblist);
4082         LIST_HEAD(nvme_nblist);
4083
4084         phba->sli4_hba.io_xri_cnt = 0;
4085         for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4086                 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4087                 if (!lpfc_ncmd)
4088                         break;
4089                 /*
4090                  * Get memory from the pci pool to map the virt space to
4091                  * pci bus space for an I/O. The DMA buffer includes the
4092                  * number of SGEs necessary to support the sg_tablesize.
4093                  */
4094                 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4095                                                   GFP_KERNEL,
4096                                                   &lpfc_ncmd->dma_handle);
4097                 if (!lpfc_ncmd->data) {
4098                         kfree(lpfc_ncmd);
4099                         break;
4100                 }
4101
4102                 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4103                         INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4104                 } else {
4105                         /*
4106                          * 4K Page alignment is CRITICAL to BlockGuard, double
4107                          * check to be sure.
4108                          */
4109                         if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4110                             (((unsigned long)(lpfc_ncmd->data) &
4111                             (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4112                                 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4113                                                 "3369 Memory alignment err: "
4114                                                 "addr=%lx\n",
4115                                                 (unsigned long)lpfc_ncmd->data);
4116                                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4117                                               lpfc_ncmd->data,
4118                                               lpfc_ncmd->dma_handle);
4119                                 kfree(lpfc_ncmd);
4120                                 break;
4121                         }
4122                 }
4123
4124                 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4125
4126                 lxri = lpfc_sli4_next_xritag(phba);
4127                 if (lxri == NO_XRI) {
4128                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4129                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4130                         kfree(lpfc_ncmd);
4131                         break;
4132                 }
4133                 pwqeq = &lpfc_ncmd->cur_iocbq;
4134
4135                 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4136                 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4137                 if (iotag == 0) {
4138                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4139                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4140                         kfree(lpfc_ncmd);
4141                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
4142                                         "6121 Failed to allocate IOTAG for"
4143                                         " XRI:0x%x\n", lxri);
4144                         lpfc_sli4_free_xri(phba, lxri);
4145                         break;
4146                 }
4147                 pwqeq->sli4_lxritag = lxri;
4148                 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
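                     /* pwqeq aliases lpfc_ncmd->cur_iocbq; stash the owning
                      * io_buf in context1 so it can be recovered from the
                      * WQE later.
                      */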
4149                 pwqeq->context1 = lpfc_ncmd;
4150
4151                 /* Initialize local short-hand pointers. */
4152                 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4153                 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4154                 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4155                 spin_lock_init(&lpfc_ncmd->buf_lock);
4156
4157                 /* add the nvme buffer to a post list */
4158                 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4159                 phba->sli4_hba.io_xri_cnt++;
4160         }
4161         lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4162                         "6114 Allocate %d out of %d requested new NVME "
4163                         "buffers\n", bcnt, num_to_alloc);
4164
4165         /* post the list of nvme buffer sgls to port if available */
4166         if (!list_empty(&post_nblist))
4167                 num_posted = lpfc_sli4_post_io_sgl_list(
4168                                 phba, &post_nblist, bcnt);
4169         else
4170                 num_posted = 0;
4171
4172         return num_posted;
4173 }
4174
4175 static uint64_t
4176 lpfc_get_wwpn(struct lpfc_hba *phba)
4177 {
4178         uint64_t wwn;
4179         int rc;
4180         LPFC_MBOXQ_t *mboxq;
4181         MAILBOX_t *mb;
4182
4183         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4184                                                 GFP_KERNEL);
4185         if (!mboxq)
4186                 return (uint64_t)-1;
4187
4188         /* First get WWN of HBA instance */
4189         lpfc_read_nv(phba, mboxq);
4190         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4191         if (rc != MBX_SUCCESS) {
4192                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4193                                 "6019 Mailbox failed, mbxCmd x%x "
4194                                 "READ_NV, mbxStatus x%x\n",
4195                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4196                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4197                 mempool_free(mboxq, phba->mbox_mem_pool);
4198                 return (uint64_t) -1;
4199         }
4200         mb = &mboxq->u.mb;
4201         memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4202         /* wwn is WWPN of HBA instance */
4203         mempool_free(mboxq, phba->mbox_mem_pool);
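             /* SLI-4 returns the WWPN big-endian; on SLI-3 the 32-bit
              * rotate swaps the two word halves into the expected order.
              */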
4204         if (phba->sli_rev == LPFC_SLI_REV4)
4205                 return be64_to_cpu(wwn);
4206         else
4207                 return rol64(wwn, 32);
4208 }
4209
4210 /**
4211  * lpfc_create_port - Create an FC port
4212  * @phba: pointer to lpfc hba data structure.
4213  * @instance: a unique integer ID for this FC port.
4214  * @dev: pointer to the device data structure.
4215  *
4216  * This routine creates an FC port for the upper layer protocol. The FC port
4217  * can be created on top of either a physical port or a virtual port provided
4218  * by the HBA. This routine also allocates a SCSI host data structure (shost)
4219  * and associates it with the FC port before adding the shost to the SCSI
4220  * midlayer.
4221  *
4222  * Return codes
4223  *   @vport - pointer to the virtual N_Port data structure.
4224  *   NULL - port create failed.
4225  **/
4226 struct lpfc_vport *
4227 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4228 {
4229         struct lpfc_vport *vport;
4230         struct Scsi_Host  *shost = NULL;
4231         struct scsi_host_template *template;
4232         int error = 0;
4233         int i;
4234         uint64_t wwn;
4235         bool use_no_reset_hba = false;
4236         int rc;
4237
4238         if (lpfc_no_hba_reset_cnt) {
4239                 if (phba->sli_rev < LPFC_SLI_REV4 &&
4240                     dev == &phba->pcidev->dev) {
4241                         /* Reset the port first */
4242                         lpfc_sli_brdrestart(phba);
4243                         rc = lpfc_sli_chipset_init(phba);
4244                         if (rc)
4245                                 return NULL;
4246                 }
4247                 wwn = lpfc_get_wwpn(phba);
4248         }
4249
4250         for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4251                 if (wwn == lpfc_no_hba_reset[i]) {
4252                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4253                                         "6020 Setting use_no_reset port=%llx\n",
4254                                         wwn);
4255                         use_no_reset_hba = true;
4256                         break;
4257                 }
4258         }
4259
4260         /* Seed template for SCSI host registration */
4261         if (dev == &phba->pcidev->dev) {
4262                 template = &phba->port_template;
4263
4264                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4265                         /* Seed physical port template */
4266                         memcpy(template, &lpfc_template, sizeof(*template));
4267
4268                         if (use_no_reset_hba) {
4269                                 /* template is for a no reset SCSI Host */
4270                                 template->max_sectors = 0xffff;
4271                                 template->eh_host_reset_handler = NULL;
4272                         }
4273
4274                         /* Template for all vports this physical port creates */
4275                         memcpy(&phba->vport_template, &lpfc_template,
4276                                sizeof(*template));
4277                         phba->vport_template.max_sectors = 0xffff;
4278                         phba->vport_template.shost_attrs = lpfc_vport_attrs;
4279                         phba->vport_template.eh_bus_reset_handler = NULL;
4280                         phba->vport_template.eh_host_reset_handler = NULL;
4281                         phba->vport_template.vendor_id = 0;
4282
4283                         /* Initialize the host templates with updated value */
4284                         if (phba->sli_rev == LPFC_SLI_REV4) {
4285                                 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4286                                 phba->vport_template.sg_tablesize =
4287                                         phba->cfg_scsi_seg_cnt;
4288                         } else {
4289                                 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4290                                 phba->vport_template.sg_tablesize =
4291                                         phba->cfg_sg_seg_cnt;
4292                         }
4293
4294                 } else {
4295                         /* NVMET is for physical port only */
4296                         memcpy(template, &lpfc_template_nvme,
4297                                sizeof(*template));
4298                 }
4299         } else {
4300                 template = &phba->vport_template;
4301         }
4302
4303         shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4304         if (!shost)
4305                 goto out;
4306
4307         vport = (struct lpfc_vport *) shost->hostdata;
4308         vport->phba = phba;
4309         vport->load_flag |= FC_LOADING;
4310         vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4311         vport->fc_rscn_flush = 0;
4312         lpfc_get_vport_cfgparam(vport);
4313
4314         /* Adjust value in vport */
4315         vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4316
4317         shost->unique_id = instance;
4318         shost->max_id = LPFC_MAX_TARGET;
4319         shost->max_lun = vport->cfg_max_luns;
4320         shost->this_id = -1;
4321         shost->max_cmd_len = 16;
4322
4323         if (phba->sli_rev == LPFC_SLI_REV4) {
4324                 if (!phba->cfg_fcp_mq_threshold ||
4325                     phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4326                         phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4327
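                     /* Cap nr_hw_queues at the FCP MQ threshold and at
                      * twice the number of possible NUMA nodes.
                      */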
4328                 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4329                                             phba->cfg_fcp_mq_threshold);
4330
4331                 shost->dma_boundary =
4332                         phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4333
4334                 if (phba->cfg_xpsgl && !phba->nvmet_support)
4335                         shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4336                 else
4337                         shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4338         } else
4339                 /* SLI-3 has a limited number of hardware queues (3),
4340                  * thus there is only one for FCP processing.
4341                  */
4342                 shost->nr_hw_queues = 1;
4343
4344         /*
4345          * Set initial can_queue value since 0 is no longer supported and
4346          * scsi_add_host will fail. This will be adjusted later based on the
4347          * max xri value determined in hba setup.
4348          */
4349         shost->can_queue = phba->cfg_hba_queue_depth - 10;
4350         if (dev != &phba->pcidev->dev) {
4351                 shost->transportt = lpfc_vport_transport_template;
4352                 vport->port_type = LPFC_NPIV_PORT;
4353         } else {
4354                 shost->transportt = lpfc_transport_template;
4355                 vport->port_type = LPFC_PHYSICAL_PORT;
4356         }
4357
4358         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4359                         "9081 CreatePort TMPLATE type %x TBLsize %d "
4360                         "SEGcnt %d/%d\n",
4361                         vport->port_type, shost->sg_tablesize,
4362                         phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4363
4364         /* Initialize all internally managed lists. */
4365         INIT_LIST_HEAD(&vport->fc_nodes);
4366         INIT_LIST_HEAD(&vport->rcv_buffer_list);
4367         spin_lock_init(&vport->work_port_lock);
4368
4369         timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4370
4371         timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4372
4373         timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4374
4375         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4376                 lpfc_setup_bg(phba, shost);
4377
4378         error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4379         if (error)
4380                 goto out_put_shost;
4381
4382         spin_lock_irq(&phba->port_list_lock);
4383         list_add_tail(&vport->listentry, &phba->port_list);
4384         spin_unlock_irq(&phba->port_list_lock);
4385         return vport;
4386
4387 out_put_shost:
4388         scsi_host_put(shost);
4389 out:
4390         return NULL;
4391 }
4392
4393 /**
4394  * destroy_port -  destroy an FC port
4395  * @vport: pointer to an lpfc virtual N_Port data structure.
4396  *
4397  * This routine destroys an FC port from the upper layer protocol. All the
4398  * resources associated with the port are released.
4399  **/
4400 void
4401 destroy_port(struct lpfc_vport *vport)
4402 {
4403         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4404         struct lpfc_hba  *phba = vport->phba;
4405
4406         lpfc_debugfs_terminate(vport);
4407         fc_remove_host(shost);
4408         scsi_remove_host(shost);
4409
4410         spin_lock_irq(&phba->port_list_lock);
4411         list_del_init(&vport->listentry);
4412         spin_unlock_irq(&phba->port_list_lock);
4413
4414         lpfc_cleanup(vport);
4415         return;
4416 }
4417
4418 /**
4419  * lpfc_get_instance - Get a unique integer ID
4420  *
4421  * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4422  * uses the kernel idr facility to perform the task.
4423  *
4424  * Return codes:
4425  *   instance - a unique integer ID allocated as the new instance.
4426  *   -1 - lpfc get instance failed.
4427  **/
4428 int
4429 lpfc_get_instance(void)
4430 {
4431         int ret;
4432
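             /* end == 0 lets idr_alloc() use the maximum ID range */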
4433         ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4434         return ret < 0 ? -1 : ret;
4435 }
4436
4437 /**
4438  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4439  * @shost: pointer to SCSI host data structure.
4440  * @time: elapsed time of the scan in jiffies.
4441  *
4442  * This routine is called by the SCSI layer with a SCSI host to determine
4443  * whether the host scan has finished.
4444  *
4445  * Note: there is no scan_start function as adapter initialization will have
4446  * asynchronously kicked off the link initialization.
4447  *
4448  * Return codes
4449  *   0 - SCSI host scan is not over yet.
4450  *   1 - SCSI host scan is over.
4451  **/
4452 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4453 {
4454         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4455         struct lpfc_hba   *phba = vport->phba;
4456         int stat = 0;
4457
4458         spin_lock_irq(shost->host_lock);
4459
4460         if (vport->load_flag & FC_UNLOADING) {
4461                 stat = 1;
4462                 goto finished;
4463         }
4464         if (time >= msecs_to_jiffies(30 * 1000)) {
4465                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4466                                 "0461 Scanning longer than 30 "
4467                                 "seconds.  Continuing initialization\n");
4468                 stat = 1;
4469                 goto finished;
4470         }
4471         if (time >= msecs_to_jiffies(15 * 1000) &&
4472             phba->link_state <= LPFC_LINK_DOWN) {
4473                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4474                                 "0465 Link down longer than 15 "
4475                                 "seconds.  Continuing initialization\n");
4476                 stat = 1;
4477                 goto finished;
4478         }
4479
4480         if (vport->port_state != LPFC_VPORT_READY)
4481                 goto finished;
4482         if (vport->num_disc_nodes || vport->fc_prli_sent)
4483                 goto finished;
4484         if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4485                 goto finished;
4486         if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4487                 goto finished;
4488
4489         stat = 1;
4490
4491 finished:
4492         spin_unlock_irq(shost->host_lock);
4493         return stat;
4494 }
4495
4496 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4497 {
4498         struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4499         struct lpfc_hba   *phba = vport->phba;
4500
4501         fc_host_supported_speeds(shost) = 0;
4502         if (phba->lmt & LMT_128Gb)
4503                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4504         if (phba->lmt & LMT_64Gb)
4505                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4506         if (phba->lmt & LMT_32Gb)
4507                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4508         if (phba->lmt & LMT_16Gb)
4509                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4510         if (phba->lmt & LMT_10Gb)
4511                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4512         if (phba->lmt & LMT_8Gb)
4513                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4514         if (phba->lmt & LMT_4Gb)
4515                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4516         if (phba->lmt & LMT_2Gb)
4517                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4518         if (phba->lmt & LMT_1Gb)
4519                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4520 }
4521
4522 /**
4523  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4524  * @shost: pointer to SCSI host data structure.
4525  *
4526  * This routine initializes a given SCSI host attributes on a FC port. The
4527  * SCSI host can be either on top of a physical port or a virtual port.
4528  **/
4529 void lpfc_host_attrib_init(struct Scsi_Host *shost)
4530 {
4531         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4532         struct lpfc_hba   *phba = vport->phba;
4533         /*
4534          * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
4535          */
4536
4537         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4538         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4539         fc_host_supported_classes(shost) = FC_COS_CLASS3;
4540
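             /* fc_host_supported_fc4s() is a 32-byte (256-bit) bitmap,
              * one bit per FC-4 TYPE code.
              */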
4541         memset(fc_host_supported_fc4s(shost), 0,
4542                sizeof(fc_host_supported_fc4s(shost)));
4543         fc_host_supported_fc4s(shost)[2] = 1;
4544         fc_host_supported_fc4s(shost)[7] = 1;
4545
4546         lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4547                                  sizeof fc_host_symbolic_name(shost));
4548
4549         lpfc_host_supported_speeds_set(shost);
4550
4551         fc_host_maxframe_size(shost) =
4552                 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4553                 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4554
4555         fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4556
4557         /* This value is also unchanging */
4558         memset(fc_host_active_fc4s(shost), 0,
4559                sizeof(fc_host_active_fc4s(shost)));
4560         fc_host_active_fc4s(shost)[2] = 1;
4561         fc_host_active_fc4s(shost)[7] = 1;
4562
4563         fc_host_max_npiv_vports(shost) = phba->max_vpi;
4564         spin_lock_irq(shost->host_lock);
4565         vport->load_flag &= ~FC_LOADING;
4566         spin_unlock_irq(shost->host_lock);
4567 }
4568
4569 /**
4570  * lpfc_stop_port_s3 - Stop SLI3 device port
4571  * @phba: pointer to lpfc hba data structure.
4572  *
4573  * This routine is invoked to stop an SLI3 device port. It stops the device
4574  * from generating interrupts and stops the device driver's timers for the
4575  * device.
4576  **/
4577 static void
4578 lpfc_stop_port_s3(struct lpfc_hba *phba)
4579 {
4580         /* Clear all interrupt enable conditions */
4581         writel(0, phba->HCregaddr);
4582         readl(phba->HCregaddr); /* flush */
4583         /* Clear all pending interrupts */
4584         writel(0xffffffff, phba->HAregaddr);
4585         readl(phba->HAregaddr); /* flush */
4586
4587         /* Reset some HBA SLI setup states */
4588         lpfc_stop_hba_timers(phba);
4589         phba->pport->work_port_events = 0;
4590 }
4591
4592 /**
4593  * lpfc_stop_port_s4 - Stop SLI4 device port
4594  * @phba: pointer to lpfc hba data structure.
4595  *
4596  * This routine is invoked to stop an SLI4 device port. It stops the device
4597  * from generating interrupts and stops the device driver's timers for the
4598  * device.
4599  **/
4600 static void
4601 lpfc_stop_port_s4(struct lpfc_hba *phba)
4602 {
4603         /* Reset some HBA SLI4 setup states */
4604         lpfc_stop_hba_timers(phba);
4605         if (phba->pport)
4606                 phba->pport->work_port_events = 0;
4607         phba->sli4_hba.intr_enable = 0;
4608 }
4609
4610 /**
4611  * lpfc_stop_port - Wrapper function for stopping hba port
4612  * @phba: Pointer to HBA context object.
4613  *
4614  * This routine wraps the actual SLI3 or SLI4 hba stop port routine via
4615  * the API jump table function pointer in the lpfc_hba struct.
4616  **/
4617 void
4618 lpfc_stop_port(struct lpfc_hba *phba)
4619 {
4620         phba->lpfc_stop_port(phba);
4621
4622         if (phba->wq)
4623                 flush_workqueue(phba->wq);
4624 }
4625
4626 /**
4627  * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4628  * @phba: Pointer to hba for which this call is being executed.
4629  *
4630  * This routine starts the timer waiting for the FCF rediscovery to complete.
4631  **/
4632 void
4633 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4634 {
4635         unsigned long fcf_redisc_wait_tmo =
4636                 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4637         /* Start fcf rediscovery wait period timer */
4638         mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4639         spin_lock_irq(&phba->hbalock);
4640         /* Allow action to new fcf asynchronous event */
4641         phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4642         /* Mark the FCF rediscovery pending state */
4643         phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4644         spin_unlock_irq(&phba->hbalock);
4645 }
4646
4647 /**
4648  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4649  * @t: pointer to the timer_list embedded in the lpfc hba data structure.
4650  *
4651  * This routine is invoked when the wait for FCF table rediscovery times
4652  * out. If new FCF record(s) have been discovered during the wait
4653  * period, a new FCF event is added to the FCoE async event list, and
4654  * the worker thread is then woken up to process it from the worker
4655  * thread context.
4656  **/
4657 static void
4658 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4659 {
4660         struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4661
4662         /* Don't send FCF rediscovery event if timer cancelled */
4663         spin_lock_irq(&phba->hbalock);
4664         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4665                 spin_unlock_irq(&phba->hbalock);
4666                 return;
4667         }
4668         /* Clear FCF rediscovery timer pending flag */
4669         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4670         /* FCF rediscovery event to worker thread */
4671         phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4672         spin_unlock_irq(&phba->hbalock);
4673         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4674                         "2776 FCF rediscover quiescent timer expired\n");
4675         /* wake up worker thread */
4676         lpfc_worker_wake_up(phba);
4677 }
4678
4679 /**
4680  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
4681  * @phba: pointer to lpfc hba data structure.
4682  * @acqe_link: pointer to the async link completion queue entry.
4683  *
4684  * This routine is to parse the SLI4 link-attention link fault code.
4685  **/
4686 static void
4687 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4688                            struct lpfc_acqe_link *acqe_link)
4689 {
4690         switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4691         case LPFC_ASYNC_LINK_FAULT_NONE:
4692         case LPFC_ASYNC_LINK_FAULT_LOCAL:
4693         case LPFC_ASYNC_LINK_FAULT_REMOTE:
4694         case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4695                 break;
4696         default:
4697                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4698                                 "0398 Unknown link fault code: x%x\n",
4699                                 bf_get(lpfc_acqe_link_fault, acqe_link));
4700                 break;
4701         }
4702 }
4703
4704 /**
4705  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
4706  * @phba: pointer to lpfc hba data structure.
4707  * @acqe_link: pointer to the async link completion queue entry.
4708  *
4709  * This routine is to parse the SLI4 link attention type and translate it
4710  * into the base driver's link attention type coding.
4711  *
4712  * Return: Link attention type in terms of base driver's coding.
4713  **/
4714 static uint8_t
4715 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4716                           struct lpfc_acqe_link *acqe_link)
4717 {
4718         uint8_t att_type;
4719
4720         switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4721         case LPFC_ASYNC_LINK_STATUS_DOWN:
4722         case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
4723                 att_type = LPFC_ATT_LINK_DOWN;
4724                 break;
4725         case LPFC_ASYNC_LINK_STATUS_UP:
4726                 /* Ignore physical link up events - wait for logical link up */
4727                 att_type = LPFC_ATT_RESERVED;
4728                 break;
4729         case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
4730                 att_type = LPFC_ATT_LINK_UP;
4731                 break;
4732         default:
4733                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4734                                 "0399 Invalid link attention type: x%x\n",
4735                                 bf_get(lpfc_acqe_link_status, acqe_link));
4736                 att_type = LPFC_ATT_RESERVED;
4737                 break;
4738         }
4739         return att_type;
4740 }
4741
4742 /**
4743  * lpfc_sli_port_speed_get - Get the port's current link speed
4744  * @phba: pointer to lpfc hba data structure.
4745  *
4746  * This routine returns the FC port's current link speed in Mbps.
4747  *
4748  * Return: link speed in terms of Mbps.
4749  **/
4750 uint32_t
4751 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4752 {
4753         uint32_t link_speed;
4754
4755         if (!lpfc_is_link_up(phba))
4756                 return 0;
4757
4758         if (phba->sli_rev <= LPFC_SLI_REV3) {
4759                 switch (phba->fc_linkspeed) {
4760                 case LPFC_LINK_SPEED_1GHZ:
4761                         link_speed = 1000;
4762                         break;
4763                 case LPFC_LINK_SPEED_2GHZ:
4764                         link_speed = 2000;
4765                         break;
4766                 case LPFC_LINK_SPEED_4GHZ:
4767                         link_speed = 4000;
4768                         break;
4769                 case LPFC_LINK_SPEED_8GHZ:
4770                         link_speed = 8000;
4771                         break;
4772                 case LPFC_LINK_SPEED_10GHZ:
4773                         link_speed = 10000;
4774                         break;
4775                 case LPFC_LINK_SPEED_16GHZ:
4776                         link_speed = 16000;
4777                         break;
4778                 default:
4779                         link_speed = 0;
4780                 }
4781         } else {
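                     /* SLI-4: prefer the logical link speed when the port
                      * reports one; otherwise use the physical link speed.
                      */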
4782                 if (phba->sli4_hba.link_state.logical_speed)
4783                         link_speed =
4784                               phba->sli4_hba.link_state.logical_speed;
4785                 else
4786                         link_speed = phba->sli4_hba.link_state.speed;
4787         }
4788         return link_speed;
4789 }
4790
4791 /**
4792  * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
4793  * @phba: pointer to lpfc hba data structure.
4794  * @evt_code: asynchronous event code.
4795  * @speed_code: asynchronous event link speed code.
4796  *
4797  * This routine parses the given SLI4 async event link speed code into a
4798  * link speed value in Mbps.
4799  *
4800  * Return: link speed in terms of Mbps.
4801  **/
4802 static uint32_t
4803 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4804                            uint8_t speed_code)
4805 {
4806         uint32_t port_speed;
4807
4808         switch (evt_code) {
4809         case LPFC_TRAILER_CODE_LINK:
4810                 switch (speed_code) {
4811                 case LPFC_ASYNC_LINK_SPEED_ZERO:
4812                         port_speed = 0;
4813                         break;
4814                 case LPFC_ASYNC_LINK_SPEED_10MBPS:
4815                         port_speed = 10;
4816                         break;
4817                 case LPFC_ASYNC_LINK_SPEED_100MBPS:
4818                         port_speed = 100;
4819                         break;
4820                 case LPFC_ASYNC_LINK_SPEED_1GBPS:
4821                         port_speed = 1000;
4822                         break;
4823                 case LPFC_ASYNC_LINK_SPEED_10GBPS:
4824                         port_speed = 10000;
4825                         break;
4826                 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4827                         port_speed = 20000;
4828                         break;
4829                 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4830                         port_speed = 25000;
4831                         break;
4832                 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4833                         port_speed = 40000;
4834                         break;
4835                 default:
4836                         port_speed = 0;
4837                 }
4838                 break;
4839         case LPFC_TRAILER_CODE_FC:
4840                 switch (speed_code) {
4841                 case LPFC_FC_LA_SPEED_UNKNOWN:
4842                         port_speed = 0;
4843                         break;
4844                 case LPFC_FC_LA_SPEED_1G:
4845                         port_speed = 1000;
4846                         break;
4847                 case LPFC_FC_LA_SPEED_2G:
4848                         port_speed = 2000;
4849                         break;
4850                 case LPFC_FC_LA_SPEED_4G:
4851                         port_speed = 4000;
4852                         break;
4853                 case LPFC_FC_LA_SPEED_8G:
4854                         port_speed = 8000;
4855                         break;
4856                 case LPFC_FC_LA_SPEED_10G:
4857                         port_speed = 10000;
4858                         break;
4859                 case LPFC_FC_LA_SPEED_16G:
4860                         port_speed = 16000;
4861                         break;
4862                 case LPFC_FC_LA_SPEED_32G:
4863                         port_speed = 32000;
4864                         break;
4865                 case LPFC_FC_LA_SPEED_64G:
4866                         port_speed = 64000;
4867                         break;
4868                 case LPFC_FC_LA_SPEED_128G:
4869                         port_speed = 128000;
4870                         break;
4871                 default:
4872                         port_speed = 0;
4873                 }
4874                 break;
4875         default:
4876                 port_speed = 0;
4877         }
4878         return port_speed;
4879 }
4880
4881 /**
4882  * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
4883  * @phba: pointer to lpfc hba data structure.
4884  * @acqe_link: pointer to the async link completion queue entry.
4885  *
4886  * This routine is to handle the SLI4 asynchronous FCoE link event.
4887  **/
4888 static void
4889 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4890                          struct lpfc_acqe_link *acqe_link)
4891 {
4892         struct lpfc_dmabuf *mp;
4893         LPFC_MBOXQ_t *pmb;
4894         MAILBOX_t *mb;
4895         struct lpfc_mbx_read_top *la;
4896         uint8_t att_type;
4897         int rc;
4898
4899         att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
4900         if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
4901                 return;
4902         phba->fcoe_eventtag = acqe_link->event_tag;
4903         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4904         if (!pmb) {
4905                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4906                                 "0395 The mboxq allocation failed\n");
4907                 return;
4908         }
4909         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4910         if (!mp) {
4911                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4912                                 "0396 The lpfc_dmabuf allocation failed\n");
4913                 goto out_free_pmb;
4914         }
4915         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4916         if (!mp->virt) {
4917                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4918                                 "0397 The mbuf allocation failed\n");
4919                 goto out_free_dmabuf;
4920         }
4921
4922         /* Cleanup any outstanding ELS commands */
4923         lpfc_els_flush_all_cmd(phba);
4924
4925         /* Block ELS IOCBs until we are done processing the link event */
4926         phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
4927
4928         /* Update link event statistics */
4929         phba->sli.slistat.link_event++;
4930
4931         /* Create lpfc_handle_latt mailbox command from link ACQE */
4932         lpfc_read_topology(phba, pmb, mp);
4933         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4934         pmb->vport = phba->pport;
4935
4936         /* Keep the link status for extra SLI4 state machine reference */
4937         phba->sli4_hba.link_state.speed =
4938                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
4939                                 bf_get(lpfc_acqe_link_speed, acqe_link));
4940         phba->sli4_hba.link_state.duplex =
4941                                 bf_get(lpfc_acqe_link_duplex, acqe_link);
4942         phba->sli4_hba.link_state.status =
4943                                 bf_get(lpfc_acqe_link_status, acqe_link);
4944         phba->sli4_hba.link_state.type =
4945                                 bf_get(lpfc_acqe_link_type, acqe_link);
4946         phba->sli4_hba.link_state.number =
4947                                 bf_get(lpfc_acqe_link_number, acqe_link);
4948         phba->sli4_hba.link_state.fault =
4949                                 bf_get(lpfc_acqe_link_fault, acqe_link);
4950         phba->sli4_hba.link_state.logical_speed =
4951                         bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
4952
4953         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4954                         "2900 Async FC/FCoE Link event - Speed:%dGBit "
4955                         "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
4956                         "Logical speed:%dMbps Fault:%d\n",
4957                         phba->sli4_hba.link_state.speed,
4958                         phba->sli4_hba.link_state.duplex,
4959                         phba->sli4_hba.link_state.status,
4960                         phba->sli4_hba.link_state.type,
4961                         phba->sli4_hba.link_state.number,
4962                         phba->sli4_hba.link_state.logical_speed,
4963                         phba->sli4_hba.link_state.fault);
4964         /*
4965          * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
4966          * topology info. Note: Optional for non FC-AL ports.
4967          */
4968         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
4969                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4970                 if (rc == MBX_NOT_FINISHED)
4971                         goto out_free_dmabuf;
4972                 return;
4973         }
4974         /*
4975          * For FCoE Mode: fill in all the topology information we need and call
4976          * the READ_TOPOLOGY completion routine to continue without actually
4977          * sending the READ_TOPOLOGY mailbox command to the port.
4978          */
4979         /* Initialize completion status */
4980         mb = &pmb->u.mb;
4981         mb->mbxStatus = MBX_SUCCESS;
4982
4983         /* Parse port fault information field */
4984         lpfc_sli4_parse_latt_fault(phba, acqe_link);
4985
4986         /* Parse and translate link attention fields */
4987         la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4988         la->eventTag = acqe_link->event_tag;
4989         bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4990         bf_set(lpfc_mbx_read_top_link_spd, la,
4991                (bf_get(lpfc_acqe_link_speed, acqe_link)));
4992
4993         /* Fake the following irrelevant fields */
4994         bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4995         bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4996         bf_set(lpfc_mbx_read_top_il, la, 0);
4997         bf_set(lpfc_mbx_read_top_pb, la, 0);
4998         bf_set(lpfc_mbx_read_top_fa, la, 0);
4999         bf_set(lpfc_mbx_read_top_mm, la, 0);
5000
5001         /* Invoke the lpfc_handle_latt mailbox command callback function */
5002         lpfc_mbx_cmpl_read_topology(phba, pmb);
5003
5004         return;
5005
5006 out_free_dmabuf:
5007         kfree(mp);
5008 out_free_pmb:
5009         mempool_free(pmb, phba->mbox_mem_pool);
5010 }
5011
5012 /**
5013  * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5014  * topology.
5015  * @phba: pointer to lpfc hba data structure.
5016  * @speed_code: asynchronous event link speed code.
5017  *
5018  * This routine parses the given SLI4 async event link speed code into
5019  * the corresponding READ_TOPOLOGY link speed encoding used by the base
5020  * driver.
5021  *
5022  * Return: link speed in terms of the READ_TOPOLOGY encoding.
5023  **/
5024 static uint8_t
5025 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5026 {
5027         uint8_t port_speed;
5028
5029         switch (speed_code) {
5030         case LPFC_FC_LA_SPEED_1G:
5031                 port_speed = LPFC_LINK_SPEED_1GHZ;
5032                 break;
5033         case LPFC_FC_LA_SPEED_2G:
5034                 port_speed = LPFC_LINK_SPEED_2GHZ;
5035                 break;
5036         case LPFC_FC_LA_SPEED_4G:
5037                 port_speed = LPFC_LINK_SPEED_4GHZ;
5038                 break;
5039         case LPFC_FC_LA_SPEED_8G:
5040                 port_speed = LPFC_LINK_SPEED_8GHZ;
5041                 break;
5042         case LPFC_FC_LA_SPEED_16G:
5043                 port_speed = LPFC_LINK_SPEED_16GHZ;
5044                 break;
5045         case LPFC_FC_LA_SPEED_32G:
5046                 port_speed = LPFC_LINK_SPEED_32GHZ;
5047                 break;
5048         case LPFC_FC_LA_SPEED_64G:
5049                 port_speed = LPFC_LINK_SPEED_64GHZ;
5050                 break;
5051         case LPFC_FC_LA_SPEED_128G:
5052                 port_speed = LPFC_LINK_SPEED_128GHZ;
5053                 break;
5054         case LPFC_FC_LA_SPEED_256G:
5055                 port_speed = LPFC_LINK_SPEED_256GHZ;
5056                 break;
5057         default:
5058                 port_speed = 0;
5059                 break;
5060         }
5061
5062         return port_speed;
5063 }
5064
5065 #define trunk_link_status(__idx)\
5066         bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5067                ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5068                 "Link up" : "Link down") : "NA"
5069 /* Did port __idx report an error? */
5070 #define trunk_port_fault(__idx)\
5071         bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5072                (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
5073
5074 static void
5075 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
5076                               struct lpfc_acqe_fc_la *acqe_fc)
5077 {
5078         uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
5079         uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
5080
5081         phba->sli4_hba.link_state.speed =
5082                 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5083                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5084
5085         phba->sli4_hba.link_state.logical_speed =
5086                                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5087         /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
5088         phba->fc_linkspeed =
5089                  lpfc_async_link_speed_to_read_top(
5090                                 phba,
5091                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5092
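             /* For each port configured into the trunk, record its link
              * state and latch the fault code if its bit is set in the
              * fault mask.
              */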
5093         if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
5094                 phba->trunk_link.link0.state =
5095                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
5096                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5097                 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
5098         }
5099         if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
5100                 phba->trunk_link.link1.state =
5101                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
5102                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5103                 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
5104         }
5105         if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
5106                 phba->trunk_link.link2.state =
5107                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
5108                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5109                 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
5110         }
5111         if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
5112                 phba->trunk_link.link3.state =
5113                         bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
5114                         ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5115                 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
5116         }
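             /*
              * Annotation: port_fault is a 4-bit mask, one bit per trunked
              * port (bit 0 = port 0 ... bit 3 = port 3). For example, a
              * hypothetical port_fault of 0x5 would record the fault code
              * err on links 0 and 2 while links 1 and 3 stay fault-free.
              */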
5117
5118         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5119                         "2910 Async FC Trunking Event - Speed:%d\n"
5120                         "\tLogical speed:%d "
5121                         "port0: %s port1: %s port2: %s port3: %s\n",
5122                         phba->sli4_hba.link_state.speed,
5123                         phba->sli4_hba.link_state.logical_speed,
5124                         trunk_link_status(0), trunk_link_status(1),
5125                         trunk_link_status(2), trunk_link_status(3));
5126
5127         if (port_fault)
5128                 /*
5129                  * SLI-4: Only error codes up to 0xA are defined as of
5130                  * now; print an appropriate message in case the driver
5131                  * needs to be updated for a newer code.
5132                  */
5133                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5134                                 "3202 trunk error:0x%x (%s) seen on port0:%s "
5135                                 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
5136                                 "UNDEFINED. update driver." : trunk_errmsg[err],
5137                                 trunk_port_fault(0), trunk_port_fault(1),
5138                                 trunk_port_fault(2), trunk_port_fault(3));
5139 }
5140
5141
5142 /**
5143  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
5144  * @phba: pointer to lpfc hba data structure.
5145  * @acqe_fc: pointer to the async fc completion queue entry.
5146  *
5147  * This routine is to handle the SLI4 asynchronous FC event. It will simply log
5148  * that the event was received and then issue a read_topology mailbox command so
5149  * that the rest of the driver will treat it the same as SLI3.
5150  **/
5151 static void
5152 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
5153 {
5154         struct lpfc_dmabuf *mp;
5155         LPFC_MBOXQ_t *pmb;
5156         MAILBOX_t *mb;
5157         struct lpfc_mbx_read_top *la;
5158         int rc;
5159
5160         if (bf_get(lpfc_trailer_type, acqe_fc) !=
5161             LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
5162                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5163                                 "2895 Non FC link Event detected.(%d)\n",
5164                                 bf_get(lpfc_trailer_type, acqe_fc));
5165                 return;
5166         }
5167
5168         if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5169             LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
5170                 lpfc_update_trunk_link_status(phba, acqe_fc);
5171                 return;
5172         }
5173
5174         /* Keep the link status for extra SLI4 state machine reference */
5175         phba->sli4_hba.link_state.speed =
5176                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5177                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5178         phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
5179         phba->sli4_hba.link_state.topology =
5180                                 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
5181         phba->sli4_hba.link_state.status =
5182                                 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
5183         phba->sli4_hba.link_state.type =
5184                                 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
5185         phba->sli4_hba.link_state.number =
5186                                 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
5187         phba->sli4_hba.link_state.fault =
5188                                 bf_get(lpfc_acqe_link_fault, acqe_fc);
5189
5190         if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5191             LPFC_FC_LA_TYPE_LINK_DOWN)
5192                 phba->sli4_hba.link_state.logical_speed = 0;
5193         else if (!phba->sli4_hba.conf_trunk)
5194                 phba->sli4_hba.link_state.logical_speed =
5195                                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
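             /*
              * Annotation: firmware reports the logical link speed in units
              * of 10 Mbps (see lpfc_sli4_async_grp5_evt below), so the
              * multiply-by-10 stores Mbps; e.g. a reported 1000 becomes
              * 10000 Mbps (10 Gbps).
              */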
5196
5197         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5198                         "2896 Async FC event - Speed:%dGBaud Topology:x%x "
5199                         "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
5200                         "%dMbps Fault:%d\n",
5201                         phba->sli4_hba.link_state.speed,
5202                         phba->sli4_hba.link_state.topology,
5203                         phba->sli4_hba.link_state.status,
5204                         phba->sli4_hba.link_state.type,
5205                         phba->sli4_hba.link_state.number,
5206                         phba->sli4_hba.link_state.logical_speed,
5207                         phba->sli4_hba.link_state.fault);
5208         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5209         if (!pmb) {
5210                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5211                                 "2897 The mboxq allocation failed\n");
5212                 return;
5213         }
5214         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5215         if (!mp) {
5216                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5217                                 "2898 The lpfc_dmabuf allocation failed\n");
5218                 goto out_free_pmb;
5219         }
5220         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5221         if (!mp->virt) {
5222                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5223                                 "2899 The mbuf allocation failed\n");
5224                 goto out_free_dmabuf;
5225         }
5226
5227         /* Cleanup any outstanding ELS commands */
5228         lpfc_els_flush_all_cmd(phba);
5229
5230         /* Block ELS IOCBs until we have done process link event */
5231         phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5232
5233         /* Update link event statistics */
5234         phba->sli.slistat.link_event++;
5235
5236         /* Create lpfc_handle_latt mailbox command from link ACQE */
5237         lpfc_read_topology(phba, pmb, mp);
5238         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5239         pmb->vport = phba->pport;
5240
5241         if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
5242                 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
5243
5244                 switch (phba->sli4_hba.link_state.status) {
5245                 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
5246                         phba->link_flag |= LS_MDS_LINK_DOWN;
5247                         break;
5248                 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
5249                         phba->link_flag |= LS_MDS_LOOPBACK;
5250                         break;
5251                 default:
5252                         break;
5253                 }
5254
5255                 /* Initialize completion status */
5256                 mb = &pmb->u.mb;
5257                 mb->mbxStatus = MBX_SUCCESS;
5258
5259                 /* Parse port fault information field */
5260                 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
5261
5262                 /* Parse and translate link attention fields */
5263                 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
5264                 la->eventTag = acqe_fc->event_tag;
5265
5266                 if (phba->sli4_hba.link_state.status ==
5267                     LPFC_FC_LA_TYPE_UNEXP_WWPN) {
5268                         bf_set(lpfc_mbx_read_top_att_type, la,
5269                                LPFC_FC_LA_TYPE_UNEXP_WWPN);
5270                 } else {
5271                         bf_set(lpfc_mbx_read_top_att_type, la,
5272                                LPFC_FC_LA_TYPE_LINK_DOWN);
5273                 }
5274                 /* Invoke the mailbox command callback function */
5275                 lpfc_mbx_cmpl_read_topology(phba, pmb);
5276
5277                 return;
5278         }
5279
5280         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5281         if (rc != MBX_NOT_FINISHED)
5282                 return;
5283         /* Issue failed: free the mapped mbuf too before falling through */
5284         lpfc_mbuf_free(phba, mp->virt, mp->phys);
5285 out_free_dmabuf:
5286         kfree(mp);
5287 out_free_pmb:
5288         mempool_free(pmb, phba->mbox_mem_pool);
5289 }
5290
5291 /**
5292  * lpfc_sli4_async_sli_evt - Process an asynchronous SLI event
5293  * @phba: pointer to lpfc hba data structure.
5294  * @acqe_sli: pointer to the async SLI completion queue entry.
5295  *
5296  * This routine is to handle the SLI4 asynchronous SLI events.
5297  **/
5298 static void
5299 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
5300 {
5301         char port_name;
5302         char message[128];
5303         uint8_t status;
5304         uint8_t evt_type;
5305         uint8_t operational = 0;
5306         struct temp_event temp_event_data;
5307         struct lpfc_acqe_misconfigured_event *misconfigured;
5308         struct Scsi_Host  *shost;
5309         struct lpfc_vport **vports;
5310         int rc, i;
5311
5312         evt_type = bf_get(lpfc_trailer_type, acqe_sli);
5313
5314         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5315                         "2901 Async SLI event - Type:%d, Event Data: x%08x "
5316                         "x%08x x%08x x%08x\n", evt_type,
5317                         acqe_sli->event_data1, acqe_sli->event_data2,
5318                         acqe_sli->reserved, acqe_sli->trailer);
5319
5320         port_name = phba->Port[0];
5321         if (port_name == 0x00)
5322                 port_name = '?'; /* port name is empty */
5323
5324         switch (evt_type) {
5325         case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
5326                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5327                 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
5328                 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5329
5330                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5331                                 "3190 Over Temperature:%d Celsius- Port Name %c\n",
5332                                 acqe_sli->event_data1, port_name);
5333
5334                 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
5335                 shost = lpfc_shost_from_vport(phba->pport);
5336                 fc_host_post_vendor_event(shost, fc_get_event_number(),
5337                                           sizeof(temp_event_data),
5338                                           (char *)&temp_event_data,
5339                                           SCSI_NL_VID_TYPE_PCI
5340                                           | PCI_VENDOR_ID_EMULEX);
5341                 break;
5342         case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
5343                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5344                 temp_event_data.event_code = LPFC_NORMAL_TEMP;
5345                 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5346
5347                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5348                                 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
5349                                 acqe_sli->event_data1, port_name);
5350
5351                 shost = lpfc_shost_from_vport(phba->pport);
5352                 fc_host_post_vendor_event(shost, fc_get_event_number(),
5353                                           sizeof(temp_event_data),
5354                                           (char *)&temp_event_data,
5355                                           SCSI_NL_VID_TYPE_PCI
5356                                           | PCI_VENDOR_ID_EMULEX);
5357                 break;
5358         case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
5359                 misconfigured = (struct lpfc_acqe_misconfigured_event *)
5360                                         &acqe_sli->event_data1;
5361
5362                 /* fetch the status for this port */
5363                 switch (phba->sli4_hba.lnk_info.lnk_no) {
5364                 case LPFC_LINK_NUMBER_0:
5365                         status = bf_get(lpfc_sli_misconfigured_port0_state,
5366                                         &misconfigured->theEvent);
5367                         operational = bf_get(lpfc_sli_misconfigured_port0_op,
5368                                         &misconfigured->theEvent);
5369                         break;
5370                 case LPFC_LINK_NUMBER_1:
5371                         status = bf_get(lpfc_sli_misconfigured_port1_state,
5372                                         &misconfigured->theEvent);
5373                         operational = bf_get(lpfc_sli_misconfigured_port1_op,
5374                                         &misconfigured->theEvent);
5375                         break;
5376                 case LPFC_LINK_NUMBER_2:
5377                         status = bf_get(lpfc_sli_misconfigured_port2_state,
5378                                         &misconfigured->theEvent);
5379                         operational = bf_get(lpfc_sli_misconfigured_port2_op,
5380                                         &misconfigured->theEvent);
5381                         break;
5382                 case LPFC_LINK_NUMBER_3:
5383                         status = bf_get(lpfc_sli_misconfigured_port3_state,
5384                                         &misconfigured->theEvent);
5385                         operational = bf_get(lpfc_sli_misconfigured_port3_op,
5386                                         &misconfigured->theEvent);
5387                         break;
5388                 default:
5389                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5390                                         "3296 "
5391                                         "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
5392                                         "event: Invalid link %d",
5393                                         phba->sli4_hba.lnk_info.lnk_no);
5394                         return;
5395                 }
5396
5397                 /* Skip if optic state unchanged */
5398                 if (phba->sli4_hba.lnk_info.optic_state == status)
5399                         return;
5400
5401                 switch (status) {
5402                 case LPFC_SLI_EVENT_STATUS_VALID:
5403                         sprintf(message, "Physical Link is functional");
5404                         break;
5405                 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
5406                         sprintf(message, "Optics faulted/incorrectly "
5407                                 "installed/not installed - Reseat optics, "
5408                                 "if issue not resolved, replace.");
5409                         break;
5410                 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
5411                         sprintf(message,
5412                                 "Optics of two types installed - Remove one "
5413                                 "optic or install matching pair of optics.");
5414                         break;
5415                 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
5416                         sprintf(message, "Incompatible optics - Replace with "
5417                                 "compatible optics for card to function.");
5418                         break;
5419                 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
5420                         sprintf(message, "Unqualified optics - Replace with "
5421                                 "Avago optics for Warranty and Technical "
5422                                 "Support - Link is%s operational",
5423                                 (operational) ? " not" : "");
5424                         break;
5425                 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
5426                         sprintf(message, "Uncertified optics - Replace with "
5427                                 "Avago-certified optics to enable link "
5428                                 "operation - Link is%s operational",
5429                                 (operational) ? " not" : "");
5430                         break;
5431                 default:
5432                         /* firmware is reporting a status we don't know about */
5433                         sprintf(message, "Unknown event status x%02x", status);
5434                         break;
5435                 }
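             /*
              * Annotation: each status string above is a fixed literal (or
              * a literal plus the short " not" insert) well under the
              * 128-byte message[] buffer, so the sprintf() calls here
              * cannot overflow it.
              */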
5436
5437                 /* Issue READ_CONFIG mbox command to refresh supported speeds */
5438                 rc = lpfc_sli4_read_config(phba);
5439                 if (rc) {
5440                         phba->lmt = 0;
5441                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5442                                         "3194 Unable to retrieve supported "
5443                                         "speeds, rc = 0x%x\n", rc);
5444                 }
5445                 vports = lpfc_create_vport_work_array(phba);
5446                 if (vports != NULL) {
5447                         for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5448                                         i++) {
5449                                 shost = lpfc_shost_from_vport(vports[i]);
5450                                 lpfc_host_supported_speeds_set(shost);
5451                         }
5452                 }
5453                 lpfc_destroy_vport_work_array(phba, vports);
5454
5455                 phba->sli4_hba.lnk_info.optic_state = status;
5456                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5457                                 "3176 Port Name %c %s\n", port_name, message);
5458                 break;
5459         case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
5460                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5461                                 "3192 Remote DPort Test Initiated - "
5462                                 "Event Data1:x%08x Event Data2: x%08x\n",
5463                                 acqe_sli->event_data1, acqe_sli->event_data2);
5464                 break;
5465         case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
5466                 /* Misconfigured WWN. Reports that the SLI Port is configured
5467          * to use FA-WWN, but the attached device doesn't support it.
5468                  * No driver action is required.
5469                  * Event Data1 - N.A, Event Data2 - N.A
5470                  */
5471                 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
5472                              "2699 Misconfigured FA-WWN - Attached device does "
5473                              "not support FA-WWN\n");
5474                 break;
5475         case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
5476                 /* EEPROM failure. No driver action is required */
5477                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5478                              "2518 EEPROM failure - "
5479                              "Event Data1: x%08x Event Data2: x%08x\n",
5480                              acqe_sli->event_data1, acqe_sli->event_data2);
5481                 break;
5482         default:
5483                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5484                                 "3193 Unrecognized SLI event, type: 0x%x",
5485                                 evt_type);
5486                 break;
5487         }
5488 }
5489
5490 /**
5491  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
5492  * @vport: pointer to vport data structure.
5493  *
5494  * This routine is to perform Clear Virtual Link (CVL) on a vport in
5495  * response to a CVL event.
5496  *
5497  * Return the pointer to the ndlp with the vport if successful, otherwise
5498  * return NULL.
5499  **/
5500 static struct lpfc_nodelist *
5501 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
5502 {
5503         struct lpfc_nodelist *ndlp;
5504         struct Scsi_Host *shost;
5505         struct lpfc_hba *phba;
5506
5507         if (!vport)
5508                 return NULL;
5509         phba = vport->phba;
5510         if (!phba)
5511                 return NULL;
5512         ndlp = lpfc_findnode_did(vport, Fabric_DID);
5513         if (!ndlp) {
5514                 /* Cannot find existing Fabric ndlp, so allocate a new one */
5515                 ndlp = lpfc_nlp_init(vport, Fabric_DID);
5516                 if (!ndlp)
5517                         return NULL;
5518                 /* Set the node type */
5519                 ndlp->nlp_type |= NLP_FABRIC;
5520                 /* Put ndlp onto node list */
5521                 lpfc_enqueue_node(vport, ndlp);
5522         } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5523                 /* re-setup ndlp without removing from node list */
5524                 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
5525                 if (!ndlp)
5526                         return NULL;
5527         }
5528         if ((phba->pport->port_state < LPFC_FLOGI) &&
5529                 (phba->pport->port_state != LPFC_VPORT_FAILED))
5530                 return NULL;
5531         /* If virtual link is not yet instantiated ignore CVL */
5532         if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
5533                 && (vport->port_state != LPFC_VPORT_FAILED))
5534                 return NULL;
5535         shost = lpfc_shost_from_vport(vport);
5536         if (!shost)
5537                 return NULL;
5538         lpfc_linkdown_port(vport);
5539         lpfc_cleanup_pending_mbox(vport);
5540         spin_lock_irq(shost->host_lock);
5541         vport->fc_flag |= FC_VPORT_CVL_RCVD;
5542         spin_unlock_irq(shost->host_lock);
5543
5544         return ndlp;
5545 }
5546
5547 /**
5548  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
5549  * @phba: pointer to lpfc hba data structure.
5550  *
5551  * This routine is to perform Clear Virtual Link (CVL) on all vports in
5552  * response to a FCF dead event.
5553  **/
5554 static void
5555 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
5556 {
5557         struct lpfc_vport **vports;
5558         int i;
5559
5560         vports = lpfc_create_vport_work_array(phba);
5561         if (vports)
5562                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
5563                         lpfc_sli4_perform_vport_cvl(vports[i]);
5564         lpfc_destroy_vport_work_array(phba, vports);
5565 }
5566
5567 /**
5568  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
5569  * @phba: pointer to lpfc hba data structure.
5570  * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
5571  *
5572  * This routine is to handle the SLI4 asynchronous fcoe event.
5573  **/
5574 static void
5575 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
5576                         struct lpfc_acqe_fip *acqe_fip)
5577 {
5578         uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
5579         int rc;
5580         struct lpfc_vport *vport;
5581         struct lpfc_nodelist *ndlp;
5582         struct Scsi_Host  *shost;
5583         int active_vlink_present;
5584         struct lpfc_vport **vports;
5585         int i;
5586
5587         phba->fc_eventTag = acqe_fip->event_tag;
5588         phba->fcoe_eventtag = acqe_fip->event_tag;
5589         switch (event_type) {
5590         case LPFC_FIP_EVENT_TYPE_NEW_FCF:
5591         case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
5592                 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
5593                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5594                                         LOG_DISCOVERY,
5595                                         "2546 New FCF event, evt_tag:x%x, "
5596                                         "index:x%x\n",
5597                                         acqe_fip->event_tag,
5598                                         acqe_fip->index);
5599                 else
5600                         lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
5601                                         LOG_DISCOVERY,
5602                                         "2788 FCF param modified event, "
5603                                         "evt_tag:x%x, index:x%x\n",
5604                                         acqe_fip->event_tag,
5605                                         acqe_fip->index);
5606                 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5607                         /*
5608                          * During period of FCF discovery, read the FCF
5609                          * table record indexed by the event to update
5610                          * FCF roundrobin failover eligible FCF bmask.
5611                          */
5612                         lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5613                                         LOG_DISCOVERY,
5614                                         "2779 Read FCF (x%x) for updating "
5615                                         "roundrobin FCF failover bmask\n",
5616                                         acqe_fip->index);
5617                         rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
5618                 }
5619
5620                 /* If the FCF discovery is in progress, do nothing. */
5621                 spin_lock_irq(&phba->hbalock);
5622                 if (phba->hba_flag & FCF_TS_INPROG) {
5623                         spin_unlock_irq(&phba->hbalock);
5624                         break;
5625                 }
5626                 /* If fast FCF failover rescan event is pending, do nothing */
5627                 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
5628                         spin_unlock_irq(&phba->hbalock);
5629                         break;
5630                 }
5631
5632                 /* If the FCF has been in discovered state, do nothing. */
5633                 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
5634                         spin_unlock_irq(&phba->hbalock);
5635                         break;
5636                 }
5637                 spin_unlock_irq(&phba->hbalock);
5638
5639                 /* Otherwise, scan the entire FCF table and re-discover SAN */
5640                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5641                                 "2770 Start FCF table scan per async FCF "
5642                                 "event, evt_tag:x%x, index:x%x\n",
5643                                 acqe_fip->event_tag, acqe_fip->index);
5644                 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
5645                                                      LPFC_FCOE_FCF_GET_FIRST);
5646                 if (rc)
5647                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5648                                         "2547 Issue FCF scan read FCF mailbox "
5649                                         "command failed (x%x)\n", rc);
5650                 break;
5651
5652         case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
5653                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5654                         "2548 FCF Table full count 0x%x tag 0x%x\n",
5655                         bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5656                         acqe_fip->event_tag);
5657                 break;
5658
5659         case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
5660                 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5661                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5662                         "2549 FCF (x%x) disconnected from network, "
5663                         "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
5664                 /*
5665                  * If we are in the middle of FCF failover process, clear
5666                  * the corresponding FCF bit in the roundrobin bitmap.
5667                  */
5668                 spin_lock_irq(&phba->hbalock);
5669                 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
5670                     (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
5671                         spin_unlock_irq(&phba->hbalock);
5672                         /* Update FLOGI FCF failover eligible FCF bmask */
5673                         lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
5674                         break;
5675                 }
5676                 spin_unlock_irq(&phba->hbalock);
5677
5678                 /* If the event is not for currently used fcf do nothing */
5679                 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
5680                         break;
5681
5682                 /*
5683                  * Otherwise, request the port to rediscover the entire FCF
5684                  * table for a fast recovery in case the current FCF is no
5685                  * longer valid, as we are not in the middle of an FCF
5686                  * failover process already.
5687                  */
5688                 spin_lock_irq(&phba->hbalock);
5689                 /* Mark the fast failover process in progress */
5690                 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
5691                 spin_unlock_irq(&phba->hbalock);
5692
5693                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5694                                 "2771 Start FCF fast failover process due to "
5695                                 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
5696                                 "\n", acqe_fip->event_tag, acqe_fip->index);
5697                 rc = lpfc_sli4_redisc_fcf_table(phba);
5698                 if (rc) {
5699                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5700                                         LOG_DISCOVERY,
5701                                         "2772 Issue FCF rediscover mailbox "
5702                                         "command failed, fail through to FCF "
5703                                         "dead event\n");
5704                         spin_lock_irq(&phba->hbalock);
5705                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
5706                         spin_unlock_irq(&phba->hbalock);
5707                         /*
5708                          * As a last resort, fail over by treating this
5709                          * as a link down to FCF registration.
5710                          */
5711                         lpfc_sli4_fcf_dead_failthrough(phba);
5712                 } else {
5713                         /* Reset FCF roundrobin bmask for new discovery */
5714                         lpfc_sli4_clear_fcf_rr_bmask(phba);
5715                         /*
5716                          * Handling fast FCF failover to a DEAD FCF event is
5717                          * considered equivalent to receiving CVL to all vports.
5718                          */
5719                         lpfc_sli4_perform_all_vport_cvl(phba);
5720                 }
5721                 break;
5722         case LPFC_FIP_EVENT_TYPE_CVL:
5723                 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5724                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5725                         "2718 Clear Virtual Link Received for VPI 0x%x"
5726                         " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
5727
5728                 vport = lpfc_find_vport_by_vpid(phba,
5729                                                 acqe_fip->index);
5730                 ndlp = lpfc_sli4_perform_vport_cvl(vport);
5731                 if (!ndlp)
5732                         break;
5733                 active_vlink_present = 0;
5734
5735                 vports = lpfc_create_vport_work_array(phba);
5736                 if (vports) {
5737                         for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5738                                         i++) {
5739                                 if ((!(vports[i]->fc_flag &
5740                                         FC_VPORT_CVL_RCVD)) &&
5741                                         (vports[i]->port_state > LPFC_FDISC)) {
5742                                         active_vlink_present = 1;
5743                                         break;
5744                                 }
5745                         }
5746                         lpfc_destroy_vport_work_array(phba, vports);
5747                 }
5748
5749                 /*
5750                  * Don't re-instantiate if vport is marked for deletion.
5751                  * If we are here first then vport_delete is going to wait
5752                  * for discovery to complete.
5753                  */
5754                 if (!(vport->load_flag & FC_UNLOADING) &&
5755                                         active_vlink_present) {
5756                         /*
5757                          * If there are other active VLinks present,
5758                          * re-instantiate the Vlink using FDISC.
5759                          */
5760                         mod_timer(&ndlp->nlp_delayfunc,
5761                                   jiffies + msecs_to_jiffies(1000));
5762                         shost = lpfc_shost_from_vport(vport);
5763                         spin_lock_irq(shost->host_lock);
5764                         ndlp->nlp_flag |= NLP_DELAY_TMO;
5765                         spin_unlock_irq(shost->host_lock);
5766                         ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5767                         vport->port_state = LPFC_FDISC;
5768                 } else {
5769                         /*
5770                          * Otherwise, request the port to rediscover
5771                          * the entire FCF table for a fast recovery
5772                          * in case the current FCF is no longer
5773                          * valid, provided we are not already in
5774                          * the FCF failover process.
5775                          */
5776                         spin_lock_irq(&phba->hbalock);
5777                         if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5778                                 spin_unlock_irq(&phba->hbalock);
5779                                 break;
5780                         }
5781                         /* Mark the fast failover process in progress */
5782                         phba->fcf.fcf_flag |= FCF_ACVL_DISC;
5783                         spin_unlock_irq(&phba->hbalock);
5784                         lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5785                                         LOG_DISCOVERY,
5786                                         "2773 Start FCF failover per CVL, "
5787                                         "evt_tag:x%x\n", acqe_fip->event_tag);
5788                         rc = lpfc_sli4_redisc_fcf_table(phba);
5789                         if (rc) {
5790                                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5791                                                 LOG_DISCOVERY,
5792                                                 "2774 Issue FCF rediscover "
5793                                                 "mailbox command failed, "
5794                                                 "fail through to CVL event\n");
5795                                 spin_lock_irq(&phba->hbalock);
5796                                 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
5797                                 spin_unlock_irq(&phba->hbalock);
5798                                 /*
5799                                  * Last resort will be to re-try on
5800                                  * the currently registered FCF entry.
5801                                  */
5802                                 lpfc_retry_pport_discovery(phba);
5803                         } else
5804                                 /*
5805                                  * Reset FCF roundrobin bmask for new
5806                                  * discovery.
5807                                  */
5808                                 lpfc_sli4_clear_fcf_rr_bmask(phba);
5809                 }
5810                 break;
5811         default:
5812                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5813                         "0288 Unknown FCoE event type 0x%x event tag "
5814                         "0x%x\n", event_type, acqe_fip->event_tag);
5815                 break;
5816         }
5817 }
5818
5819 /**
5820  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
5821  * @phba: pointer to lpfc hba data structure.
5822  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
5823  *
5824  * This routine is to handle the SLI4 asynchronous dcbx event.
5825  **/
5826 static void
5827 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
5828                          struct lpfc_acqe_dcbx *acqe_dcbx)
5829 {
5830         phba->fc_eventTag = acqe_dcbx->event_tag;
5831         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5832                         "0290 The SLI4 DCBX asynchronous event is not "
5833                         "handled yet\n");
5834 }
5835
5836 /**
5837  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
5838  * @phba: pointer to lpfc hba data structure.
5839  * @acqe_grp5: pointer to the async grp5 completion queue entry.
5840  *
5841  * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
5842  * is an asynchronous notification of a logical link speed change.  The Port
5843  * reports the logical link speed in units of 10Mbps.
5844  **/
5845 static void
5846 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5847                          struct lpfc_acqe_grp5 *acqe_grp5)
5848 {
5849         uint16_t prev_ll_spd;
5850
5851         phba->fc_eventTag = acqe_grp5->event_tag;
5852         phba->fcoe_eventtag = acqe_grp5->event_tag;
5853         prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5854         phba->sli4_hba.link_state.logical_speed =
5855                 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
5856         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5857                         "2789 GRP5 Async Event: Updating logical link speed "
5858                         "from %dMbps to %dMbps\n", prev_ll_spd,
5859                         phba->sli4_hba.link_state.logical_speed);
5860 }
5861
5862 /**
5863  * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
5864  * @phba: pointer to lpfc hba data structure.
5865  *
5866  * This routine is invoked by the worker thread to process all the pending
5867  * SLI4 asynchronous events.
5868  **/
5869 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5870 {
5871         struct lpfc_cq_event *cq_event;
5872
5873         /* First, declare the async event has been handled */
5874         spin_lock_irq(&phba->hbalock);
5875         phba->hba_flag &= ~ASYNC_EVENT;
5876         spin_unlock_irq(&phba->hbalock);
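             /*
              * Annotation: ASYNC_EVENT is cleared before draining the queue
              * so that an event posted while the loop below is running will
              * re-mark the flag and re-arm the worker thread rather than be
              * lost after the final list_empty() check.
              */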
5877         /* Now, handle all the async events */
5878         while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5879                 /* Get the first event from the head of the event queue */
5880                 spin_lock_irq(&phba->hbalock);
5881                 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5882                                  cq_event, struct lpfc_cq_event, list);
5883                 spin_unlock_irq(&phba->hbalock);
5884                 /* Process the asynchronous event */
5885                 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5886                 case LPFC_TRAILER_CODE_LINK:
5887                         lpfc_sli4_async_link_evt(phba,
5888                                                  &cq_event->cqe.acqe_link);
5889                         break;
5890                 case LPFC_TRAILER_CODE_FCOE:
5891                         lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
5892                         break;
5893                 case LPFC_TRAILER_CODE_DCBX:
5894                         lpfc_sli4_async_dcbx_evt(phba,
5895                                                  &cq_event->cqe.acqe_dcbx);
5896                         break;
5897                 case LPFC_TRAILER_CODE_GRP5:
5898                         lpfc_sli4_async_grp5_evt(phba,
5899                                                  &cq_event->cqe.acqe_grp5);
5900                         break;
5901                 case LPFC_TRAILER_CODE_FC:
5902                         lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5903                         break;
5904                 case LPFC_TRAILER_CODE_SLI:
5905                         lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5906                         break;
5907                 default:
5908                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5909                                         "1804 Invalid asynchronous event code: "
5910                                         "x%x\n", bf_get(lpfc_trailer_code,
5911                                         &cq_event->cqe.mcqe_cmpl));
5912                         break;
5913                 }
5914                 /* Return the processed completion event to the free pool */
5915                 lpfc_sli4_cq_event_release(phba, cq_event);
5916         }
5917 }
5918
5919 /**
5920  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
5921  * @phba: pointer to lpfc hba data structure.
5922  *
5923  * This routine is invoked by the worker thread to process FCF table
5924  * rediscovery pending completion event.
5925  **/
5926 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
5927 {
5928         int rc;
5929
5930         spin_lock_irq(&phba->hbalock);
5931         /* Clear FCF rediscovery timeout event */
5932         phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
5933         /* Clear driver fast failover FCF record flag */
5934         phba->fcf.failover_rec.flag = 0;
5935         /* Set state for FCF fast failover */
5936         phba->fcf.fcf_flag |= FCF_REDISC_FOV;
5937         spin_unlock_irq(&phba->hbalock);
5938
5939         /* Scan FCF table from the first entry to re-discover SAN */
5940         lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5941                         "2777 Start post-quiescent FCF table scan\n");
5942         rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5943         if (rc)
5944                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5945                                 "2747 Issue FCF scan read FCF mailbox "
5946                                 "command failed 0x%x\n", rc);
5947 }
5948
5949 /**
5950  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
5951  * @phba: pointer to lpfc hba data structure.
5952  * @dev_grp: The HBA PCI-Device group number.
5953  *
5954  * This routine is invoked to set up the per HBA PCI-Device group function
5955  * API jump table entries.
5956  *
5957  * Return: 0 on success, otherwise -ENODEV
5958  **/
5959 int
5960 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5961 {
5962         int rc;
5963
5964         /* Set up lpfc PCI-device group */
5965         phba->pci_dev_grp = dev_grp;
5966
5967         /* The LPFC_PCI_DEV_OC uses SLI4 */
5968         if (dev_grp == LPFC_PCI_DEV_OC)
5969                 phba->sli_rev = LPFC_SLI_REV4;
5970
5971         /* Set up device INIT API function jump table */
5972         rc = lpfc_init_api_table_setup(phba, dev_grp);
5973         if (rc)
5974                 return -ENODEV;
5975         /* Set up SCSI API function jump table */
5976         rc = lpfc_scsi_api_table_setup(phba, dev_grp);
5977         if (rc)
5978                 return -ENODEV;
5979         /* Set up SLI API function jump table */
5980         rc = lpfc_sli_api_table_setup(phba, dev_grp);
5981         if (rc)
5982                 return -ENODEV;
5983         /* Set up MBOX API function jump table */
5984         rc = lpfc_mbox_api_table_setup(phba, dev_grp);
5985         if (rc)
5986                 return -ENODEV;
5987
5988         return 0;
5989 }
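     /*
      * Usage sketch (annotation): an SLI-4 capable probe path would select
      * the device group before using any of the jump tables, e.g.
      *
      *     rc = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
      *     if (rc)
      *             return rc;   (-ENODEV: leave the device unconfigured)
      *
      * after which the init/SCSI/SLI/mbox entry points all dispatch through
      * their SLI-4 (LPFC_SLI_REV4) variants.
      */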
5990
5991 /**
5992  * lpfc_log_intr_mode - Log the active interrupt mode
5993  * @phba: pointer to lpfc hba data structure.
5994  * @intr_mode: active interrupt mode adopted.
5995  *
5996  * This routine is invoked to log the currently used active interrupt mode
5997  * of the device.
5998  **/
5999 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
6000 {
6001         switch (intr_mode) {
6002         case 0:
6003                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6004                                 "0470 Enable INTx interrupt mode.\n");
6005                 break;
6006         case 1:
6007                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6008                                 "0481 Enabled MSI interrupt mode.\n");
6009                 break;
6010         case 2:
6011                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6012                                 "0480 Enabled MSI-X interrupt mode.\n");
6013                 break;
6014         default:
6015                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6016                                 "0482 Illegal interrupt mode.\n");
6017                 break;
6018         }
6019         return;
6020 }
6021
6022 /**
6023  * lpfc_enable_pci_dev - Enable a generic PCI device.
6024  * @phba: pointer to lpfc hba data structure.
6025  *
6026  * This routine is invoked to enable the PCI device that is common to all
6027  * PCI devices.
6028  *
6029  * Return codes
6030  *      0 - successful
6031  *      other values - error
6032  **/
6033 static int
6034 lpfc_enable_pci_dev(struct lpfc_hba *phba)
6035 {
6036         struct pci_dev *pdev;
6037
6038         /* Obtain PCI device reference */
6039         if (!phba->pcidev)
6040                 goto out_error;
6041         else
6042                 pdev = phba->pcidev;
6043         /* Enable PCI device */
6044         if (pci_enable_device_mem(pdev))
6045                 goto out_error;
6046         /* Request PCI resource for the device */
6047         if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
6048                 goto out_disable_device;
6049         /* Set up device as PCI master and save state for EEH */
6050         pci_set_master(pdev);
6051         pci_try_set_mwi(pdev);
6052         pci_save_state(pdev);
6053
6054         /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6055         if (pci_is_pcie(pdev))
6056                 pdev->needs_freset = 1;
6057
6058         return 0;
6059
6060 out_disable_device:
6061         pci_disable_device(pdev);
6062 out_error:
6063         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6064                         "1401 Failed to enable pci device\n");
6065         return -ENODEV;
6066 }
6067
6068 /**
6069  * lpfc_disable_pci_dev - Disable a generic PCI device.
6070  * @phba: pointer to lpfc hba data structure.
6071  *
6072  * This routine is invoked to disable the PCI device that is common to all
6073  * PCI devices.
6074  **/
6075 static void
6076 lpfc_disable_pci_dev(struct lpfc_hba *phba)
6077 {
6078         struct pci_dev *pdev;
6079
6080         /* Obtain PCI device reference */
6081         if (!phba->pcidev)
6082                 return;
6083         else
6084                 pdev = phba->pcidev;
6085         /* Release PCI resource and disable PCI device */
6086         pci_release_mem_regions(pdev);
6087         pci_disable_device(pdev);
6088
6089         return;
6090 }
6091
6092 /**
6093  * lpfc_reset_hba - Reset a hba
6094  * @phba: pointer to lpfc hba data structure.
6095  *
6096  * This routine is invoked to reset a hba device. It brings the HBA
6097  * offline, performs a board restart, and then brings the board back
6098  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
6099  * on outstanding mailbox commands.
6100  **/
6101 void
6102 lpfc_reset_hba(struct lpfc_hba *phba)
6103 {
6104         /* If resets are disabled then set error state and return. */
6105         if (!phba->cfg_enable_hba_reset) {
6106                 phba->link_state = LPFC_HBA_ERROR;
6107                 return;
6108         }
6109         if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
6110                 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6111         else
6112                 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
6113         lpfc_offline(phba);
6114         lpfc_sli_brdrestart(phba);
6115         lpfc_online(phba);
6116         lpfc_unblock_mgmt_io(phba);
6117 }
6118
6119 /**
6120  * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
6121  * @phba: pointer to lpfc hba data structure.
6122  *
6123  * This function reads the PCI SR-IOV extended capability to determine the
6124  * maximum number of virtual functions (TotalVFs) that the physical
6125  * function supports.
6126  *
6127  * Return: 0 if the device does not support SR-IOV, otherwise TotalVFs.
6128  **/
6129 uint16_t
6130 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
6131 {
6132         struct pci_dev *pdev = phba->pcidev;
6133         uint16_t nr_virtfn;
6134         int pos;
6135
6136         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6137         if (pos == 0)
6138                 return 0;
6139
6140         pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
6141         return nr_virtfn;
6142 }
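     /*
      * Annotation: the TotalVFs value read above is the device-reported
      * ceiling; lpfc_sli_probe_sriov_nr_virtfn() below checks a requested
      * VF count against it, e.g. asking for 8 VFs fails with -EINVAL when
      * the device only advertises 4.
      */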
6143
6144 /**
6145  * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
6146  * @phba: pointer to lpfc hba data structure.
6147  * @nr_vfn: number of virtual functions to be enabled.
6148  *
6149  * This function enables PCI SR-IOV virtual functions on a physical
6150  * function. It invokes the PCI SR-IOV API with the @nr_vfn provided to
6151  * enable that many virtual functions on the physical function. As
6152  * not all devices support SR-IOV, a failure from the pci_enable_sriov()
6153  * API call is not considered an error condition for most devices.
6154  **/
6155 int
6156 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
6157 {
6158         struct pci_dev *pdev = phba->pcidev;
6159         uint16_t max_nr_vfn;
6160         int rc;
6161
6162         max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6163         if (nr_vfn > max_nr_vfn) {
6164                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6165                                 "3057 Requested vfs (%d) greater than "
6166                                 "supported vfs (%d)", nr_vfn, max_nr_vfn);
6167                 return -EINVAL;
6168         }
6169
6170         rc = pci_enable_sriov(pdev, nr_vfn);
6171         if (rc) {
6172                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6173                                 "2806 Failed to enable sriov on this device "
6174                                 "with vfn number nr_vf:%d, rc:%d\n",
6175                                 nr_vfn, rc);
6176         } else
6177                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6178                                 "2807 Successful enable sriov on this device "
6179                                 "with vfn number nr_vf:%d\n", nr_vfn);
6180         return rc;
6181 }
6182
6183 /**
6184  * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
6185  * @phba: pointer to lpfc hba data structure.
6186  *
6187  * This routine is invoked to set up the driver internal resources before the
6188  * device specific resource setup to support the HBA device it is attached to.
6189  *
6190  * Return codes
6191  *      0 - successful
6192  *      other values - error
6193  **/
6194 static int
6195 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
6196 {
6197         struct lpfc_sli *psli = &phba->sli;
6198
6199         /*
6200          * Driver resources common to all SLI revisions
6201          */
6202         atomic_set(&phba->fast_event_count, 0);
6203         spin_lock_init(&phba->hbalock);
6204
6205         /* Initialize ndlp management spinlock */
6206         spin_lock_init(&phba->ndlp_lock);
6207
6208         /* Initialize port_list spinlock */
6209         spin_lock_init(&phba->port_list_lock);
6210         INIT_LIST_HEAD(&phba->port_list);
6211
6212         INIT_LIST_HEAD(&phba->work_list);
6213         init_waitqueue_head(&phba->wait_4_mlo_m_q);
6214
6215         /* Initialize the wait queue head for the kernel thread */
6216         init_waitqueue_head(&phba->work_waitq);
6217
6218         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6219                         "1403 Protocols supported %s %s %s\n",
6220                         ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
6221                                 "SCSI" : " "),
6222                         ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
6223                                 "NVME" : " "),
6224                         (phba->nvmet_support ? "NVMET" : " "));
6225
6226         /* Initialize the IO buffer list used by driver for SLI3 SCSI */
6227         spin_lock_init(&phba->scsi_buf_list_get_lock);
6228         INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
6229         spin_lock_init(&phba->scsi_buf_list_put_lock);
6230         INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
6231
6232         /* Initialize the fabric iocb list */
6233         INIT_LIST_HEAD(&phba->fabric_iocb_list);
6234
6235         /* Initialize list to save ELS buffers */
6236         INIT_LIST_HEAD(&phba->elsbuf);
6237
6238         /* Initialize FCF connection rec list */
6239         INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
6240
6241         /* Initialize OAS configuration list */
6242         spin_lock_init(&phba->devicelock);
6243         INIT_LIST_HEAD(&phba->luns);
6244
6245         /* MBOX heartbeat timer */
6246         timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
6247         /* Fabric block timer */
6248         timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
6249         /* EA polling mode timer */
6250         timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
6251         /* Heartbeat timer */
6252         timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
6253
6254         INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
6255
6256         return 0;
6257 }
6258
6259 /**
6260  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
6261  * @phba: pointer to lpfc hba data structure.
6262  *
6263  * This routine is invoked to set up the driver internal resources specific to
6264  * support the SLI-3 HBA device it is attached to.
6265  *
6266  * Return codes
6267  * 0 - successful
6268  * other values - error
6269  **/
6270 static int
6271 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
6272 {
6273         int rc, entry_sz;
6274
6275         /*
6276          * Initialize timers used by driver
6277          */
6278
6279         /* FCP polling mode timer */
6280         timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
6281
6282         /* Host attention work mask setup */
6283         phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
6284         phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
6285
6286         /* Get all the module params for configuring this host */
6287         lpfc_get_cfgparam(phba);
6288         /* Set up phase-1 common device driver resources */
6289
6290         rc = lpfc_setup_driver_resource_phase1(phba);
6291         if (rc)
6292                 return -ENODEV;
6293
6294         if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
6295                 phba->menlo_flag |= HBA_MENLO_SUPPORT;
6296                 /* check for menlo minimum sg count */
6297                 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
6298                         phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
6299         }
6300
6301         if (!phba->sli.sli3_ring)
6302                 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
6303                                               sizeof(struct lpfc_sli_ring),
6304                                               GFP_KERNEL);
6305         if (!phba->sli.sli3_ring)
6306                 return -ENOMEM;
6307
6308         /*
6309          * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
6310          * used to create the sg_dma_buf_pool must be dynamically calculated.
6311          */
6312
6313         if (phba->sli_rev == LPFC_SLI_REV4)
6314                 entry_sz = sizeof(struct sli4_sge);
6315         else
6316                 entry_sz = sizeof(struct ulp_bde64);
6317
6318         /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
6319         if (phba->cfg_enable_bg) {
6320                 /*
6321                  * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
6322                  * the FCP rsp, and a BDE for each. Since we have no control
6323                  * over how many protection data segments the SCSI Layer
6324                  * will hand us (i.e. there could be one for every block
6325                  * in the IO), we just allocate enough BDEs to accommodate
6326                  * our max amount and we need to limit lpfc_sg_seg_cnt to
6327                  * minimize the risk of running out.
6328                  */
6329                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6330                         sizeof(struct fcp_rsp) +
6331                         (LPFC_MAX_SG_SEG_CNT * entry_sz);
6332
6333                 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
6334                         phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
6335
6336                 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
6337                 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
6338         } else {
6339                 /*
6340                  * The scsi_buf for a regular I/O will hold the FCP cmnd,
6341                  * the FCP rsp, a BDE for each, and a BDE for up to
6342                  * cfg_sg_seg_cnt data segments.
6343                  */
6344                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6345                         sizeof(struct fcp_rsp) +
6346                         ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
6347
6348                 /* Total BDEs in BPL for scsi_sg_list */
6349                 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
6350         }
6351
6352         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6353                         "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
6354                         phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6355                         phba->cfg_total_seg_cnt);
6356
6357         phba->max_vpi = LPFC_MAX_VPI;
6358         /* This will be set to correct value after config_port mbox */
6359         phba->max_vports = 0;
6360
6361         /*
6362          * Initialize the SLI Layer to run with lpfc HBAs.
6363          */
6364         lpfc_sli_setup(phba);
6365         lpfc_sli_queue_init(phba);
6366
6367         /* Allocate device driver memory */
6368         if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
6369                 return -ENOMEM;
6370
6371         phba->lpfc_sg_dma_buf_pool =
6372                 dma_pool_create("lpfc_sg_dma_buf_pool",
6373                                 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
6374                                 BPL_ALIGN_SZ, 0);
6375
6376         if (!phba->lpfc_sg_dma_buf_pool)
6377                 goto fail_free_mem;
6378
6379         phba->lpfc_cmd_rsp_buf_pool =
6380                         dma_pool_create("lpfc_cmd_rsp_buf_pool",
6381                                         &phba->pcidev->dev,
6382                                         sizeof(struct fcp_cmnd) +
6383                                         sizeof(struct fcp_rsp),
6384                                         BPL_ALIGN_SZ, 0);
6385
6386         if (!phba->lpfc_cmd_rsp_buf_pool)
6387                 goto fail_free_dma_buf_pool;
6388
6389         /*
6390          * Enable sr-iov virtual functions if supported and configured
6391          * through the module parameter.
6392          */
6393         if (phba->cfg_sriov_nr_virtfn > 0) {
6394                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6395                                                  phba->cfg_sriov_nr_virtfn);
6396                 if (rc) {
6397                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6398                                         "2808 Requested number of SR-IOV "
6399                                         "virtual functions (%d) is not "
6400                                         "supported\n",
6401                                         phba->cfg_sriov_nr_virtfn);
6402                         phba->cfg_sriov_nr_virtfn = 0;
6403                 }
6404         }
6405
6406         return 0;
6407
6408 fail_free_dma_buf_pool:
6409         dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6410         phba->lpfc_sg_dma_buf_pool = NULL;
6411 fail_free_mem:
6412         lpfc_mem_free(phba);
6413         return -ENOMEM;
6414 }
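
/*
 * Sizing sketch (illustrative helper, not driver code): the SLI-3 DMA
 * buffer length computed above, restated on its own.  The "+ 2" is the
 * pair of reserved BDEs for the FCP cmnd and FCP rsp.
 */
static size_t __maybe_unused
lpfc_example_sli3_buf_size(uint32_t sg_seg_cnt)
{
        return sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
               ((sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
}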
6415
6416 /**
6417  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
6418  * @phba: pointer to lpfc hba data structure.
6419  *
6420  * This routine is invoked to unset the driver internal resources set up
6421  * specifically for supporting the SLI-3 HBA device it is attached to.
6422  **/
6423 static void
6424 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
6425 {
6426         /* Free device driver memory allocated */
6427         lpfc_mem_free_all(phba);
6428
6429         return;
6430 }
6431
6432 /**
6433  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
6434  * @phba: pointer to lpfc hba data structure.
6435  *
6436  * This routine is invoked to set up the driver internal resources specific to
6437  * supporting the SLI-4 HBA device it is attached to.
6438  *
6439  * Return codes
6440  *      0 - successful
6441  *      other values - error
6442  **/
6443 static int
6444 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6445 {
6446         LPFC_MBOXQ_t *mboxq;
6447         MAILBOX_t *mb;
6448         int rc, i, max_buf_size;
6449         uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
6450         struct lpfc_mqe *mqe;
6451         int longs;
6452         int extra;
6453         uint64_t wwn;
6454         u32 if_type;
6455         u32 if_fam;
6456
6457         phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6458         phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
6459         phba->sli4_hba.curr_disp_cpu = 0;
6460
6461         /* Get all the module params for configuring this host */
6462         lpfc_get_cfgparam(phba);
6463
6464         /* Set up phase-1 common device driver resources */
6465         rc = lpfc_setup_driver_resource_phase1(phba);
6466         if (rc)
6467                 return -ENODEV;
6468
6469         /* Before proceeding, wait for POST done and device ready */
6470         rc = lpfc_sli4_post_status_check(phba);
6471         if (rc)
6472                 return -ENODEV;
6473
6474         /* Allocate all driver workqueues here */
6475
6476         /* The lpfc_wq workqueue for deferred irq use */
6477         phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
             if (!phba->wq)
                     return -ENOMEM;
6478
6479         /*
6480          * Initialize timers used by driver
6481          */
6482
6483         timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
6484
6485         /* FCF rediscover timer */
6486         timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
6487
6488         /*
6489          * Control structure for handling external multi-buffer mailbox
6490          * command pass-through.
6491          */
6492         memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
6493                 sizeof(struct lpfc_mbox_ext_buf_ctx));
6494         INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
6495
6496         phba->max_vpi = LPFC_MAX_VPI;
6497
6498         /* This will be set to correct value after the read_config mbox */
6499         phba->max_vports = 0;
6500
6501         /* Program the default value of vlan_id and fc_map */
6502         phba->valid_vlan = 0;
6503         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
6504         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
6505         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
6506
6507         /*
6508          * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
6509          * we will associate a new ring with each EQ/CQ/WQ tuple.
6510          * The WQ create will allocate the ring.
6511          */
6512
6513         /* Initialize buffer queue management fields */
6514         INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
6515         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6516         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
6517
6518         /*
6519          * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
6520          */
6521         /* Initialize the Abort buffer list used by driver */
6522         spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
6523         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
6524
6525         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6526                 /* Initialize the Abort nvme buffer list used by driver */
6527                 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
6528                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6529                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6530                 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6531                 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
6532         }
6533
6534         /* This abort list used by worker thread */
6535         spin_lock_init(&phba->sli4_hba.sgl_list_lock);
6536         spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
6537
6538         /*
6539          * Initialize driver internal slow-path work queues
6540          */
6541
6542         /* Driver internal slow-path CQ Event pool */
6543         INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6544         /* Response IOCB work queue list */
6545         INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
6546         /* Asynchronous event CQ Event work queue list */
6547         INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6548         /* Fast-path XRI aborted CQ Event work queue list */
6549         INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
6550         /* Slow-path XRI aborted CQ Event work queue list */
6551         INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6552         /* Receive queue CQ Event work queue list */
6553         INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6554
6555         /* Initialize extent block lists. */
6556         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6557         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6558         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6559         INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6560
6561         /* Initialize mboxq lists. If the early init routines fail,
6562          * these lists must already be correctly initialized.
6563          */
6564         INIT_LIST_HEAD(&phba->sli.mboxq);
6565         INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6566
6567         /* initialize optic_state to 0xFF */
6568         phba->sli4_hba.lnk_info.optic_state = 0xff;
6569
6570         /* Allocate device driver memory */
6571         rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6572         if (rc)
6573                 return -ENOMEM;
6574
6575         /* IF Type 2 ports get initialized now. */
6576         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
6577             LPFC_SLI_INTF_IF_TYPE_2) {
6578                 rc = lpfc_pci_function_reset(phba);
6579                 if (unlikely(rc)) {
6580                         rc = -ENODEV;
6581                         goto out_free_mem;
6582                 }
6583                 phba->temp_sensor_support = 1;
6584         }
6585
6586         /* Create the bootstrap mailbox command */
6587         rc = lpfc_create_bootstrap_mbox(phba);
6588         if (unlikely(rc))
6589                 goto out_free_mem;
6590
6591         /* Set up the host's endian order with the device. */
6592         rc = lpfc_setup_endian_order(phba);
6593         if (unlikely(rc))
6594                 goto out_free_bsmbx;
6595
6596         /* Set up the hba's configuration parameters. */
6597         rc = lpfc_sli4_read_config(phba);
6598         if (unlikely(rc))
6599                 goto out_free_bsmbx;
6600         rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
6601         if (unlikely(rc))
6602                 goto out_free_bsmbx;
6603
6604         /* IF Type 0 ports get initialized now. */
6605         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6606             LPFC_SLI_INTF_IF_TYPE_0) {
6607                 rc = lpfc_pci_function_reset(phba);
6608                 if (unlikely(rc))
6609                         goto out_free_bsmbx;
6610         }
6611
6612         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6613                                                        GFP_KERNEL);
6614         if (!mboxq) {
6615                 rc = -ENOMEM;
6616                 goto out_free_bsmbx;
6617         }
6618
6619         /* Check for NVMET being configured */
6620         phba->nvmet_support = 0;
6621         if (lpfc_enable_nvmet_cnt) {
6622
6623                 /* First get WWN of HBA instance */
6624                 lpfc_read_nv(phba, mboxq);
6625                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6626                 if (rc != MBX_SUCCESS) {
6627                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6628                                         "6016 Mailbox failed, mbxCmd x%x "
6629                                         "READ_NV, mbxStatus x%x\n",
6630                                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6631                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6632                         mempool_free(mboxq, phba->mbox_mem_pool);
6633                         rc = -EIO;
6634                         goto out_free_bsmbx;
6635                 }
6636                 mb = &mboxq->u.mb;
6637                 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6638                        sizeof(uint64_t));
6639                 wwn = cpu_to_be64(wwn);
6640                 phba->sli4_hba.wwnn.u.name = wwn;
6641                 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6642                        sizeof(uint64_t));
6643                 /* wwn is WWPN of HBA instance */
6644                 wwn = cpu_to_be64(wwn);
6645                 phba->sli4_hba.wwpn.u.name = wwn;
6646
6647                 /* Check to see if it matches any module parameter */
6648                 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6649                         if (wwn == lpfc_enable_nvmet[i]) {
6650 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
6651                                 if (lpfc_nvmet_mem_alloc(phba))
6652                                         break;
6653
6654                                 phba->nvmet_support = 1; /* a match */
6655
6656                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6657                                                 "6017 NVME Target %016llx\n",
6658                                                 wwn);
6659 #else
6660                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6661                                                 "6021 Can't enable NVME Target."
6662                                                 " NVME_TARGET_FC infrastructure"
6663                                                 " is not in kernel\n");
6664 #endif
6665                                 /* Not supported for NVMET */
6666                                 phba->cfg_xri_rebalancing = 0;
6667                                 if (phba->irq_chann_mode == NHT_MODE) {
6668                                         phba->cfg_irq_chann =
6669                                                 phba->sli4_hba.num_present_cpu;
6670                                         phba->cfg_hdw_queue =
6671                                                 phba->sli4_hba.num_present_cpu;
6672                                         phba->irq_chann_mode = NORMAL_MODE;
6673                                 }
6674                                 break;
6675                         }
6676                 }
6677         }
6678
6679         lpfc_nvme_mod_param_dep(phba);
6680
6681         /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
6682         lpfc_supported_pages(mboxq);
6683         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6684         if (!rc) {
6685                 mqe = &mboxq->u.mqe;
6686                 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6687                        LPFC_MAX_SUPPORTED_PAGES);
6688                 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6689                         switch (pn_page[i]) {
6690                         case LPFC_SLI4_PARAMETERS:
6691                                 phba->sli4_hba.pc_sli4_params.supported = 1;
6692                                 break;
6693                         default:
6694                                 break;
6695                         }
6696                 }
6697                 /* Read the port's SLI4 Parameters capabilities if supported. */
6698                 if (phba->sli4_hba.pc_sli4_params.supported)
6699                         rc = lpfc_pc_sli4_params_get(phba, mboxq);
6700                 if (rc) {
6701                         mempool_free(mboxq, phba->mbox_mem_pool);
6702                         rc = -EIO;
6703                         goto out_free_bsmbx;
6704                 }
6705         }
6706
6707         /*
6708          * Get sli4 parameters that override parameters from Port capabilities.
6709          * If this call fails, it isn't critical unless the SLI4 parameters come
6710          * back in conflict.
6711          */
6712         rc = lpfc_get_sli4_parameters(phba, mboxq);
6713         if (rc) {
6714                 if_type = bf_get(lpfc_sli_intf_if_type,
6715                                  &phba->sli4_hba.sli_intf);
6716                 if_fam = bf_get(lpfc_sli_intf_sli_family,
6717                                 &phba->sli4_hba.sli_intf);
6718                 if (phba->sli4_hba.extents_in_use &&
6719                     phba->sli4_hba.rpi_hdrs_in_use) {
6720                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6721                                 "2999 Unsupported SLI4 Parameters "
6722                                 "Extents and RPI headers enabled.\n");
6723                         if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6724                             if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
6725                                 mempool_free(mboxq, phba->mbox_mem_pool);
6726                                 rc = -EIO;
6727                                 goto out_free_bsmbx;
6728                         }
6729                 }
6730                 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6731                       if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6732                         mempool_free(mboxq, phba->mbox_mem_pool);
6733                         rc = -EIO;
6734                         goto out_free_bsmbx;
6735                 }
6736         }
6737
6738         /*
6739          * 1 for cmd, 1 for rsp, NVME adds an extra one
6740          * for boundary conditions in its max_sgl_segment template.
6741          */
6742         extra = 2;
6743         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6744                 extra++;
6745
6746         /*
6747          * No matter what family our adapter is in, we are limited
6748          * to 2 pages (512 SGEs) for our SGL.
6749          * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
6750          */
6751         max_buf_size = (2 * SLI4_PAGE_SIZE);
6752
6753         /*
6754          * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
6755          * used to create the sg_dma_buf_pool must be calculated.
6756          */
6757         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6758                 /* Both cfg_enable_bg and cfg_external_dif code paths */
6759
6760                 /*
6761                  * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
6762                  * the FCP rsp, and a SGE for each. Since we have no control
6763                  * over how many protection segments the SCSI Layer
6764                  * will hand us (i.e. there could be one for every block
6765                  * in the IO), just allocate enough SGEs to accommodate
6766                  * our max amount and we need to limit lpfc_sg_seg_cnt
6767                  * to minimize the risk of running out.
6768                  */
6769                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6770                                 sizeof(struct fcp_rsp) + max_buf_size;
6771
6772                 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
6773                 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6774
6775                 /*
6776                  * If supporting DIF, reduce the seg count for scsi to
6777                  * allow room for the DIF sges.
6778                  */
6779                 if (phba->cfg_enable_bg &&
6780                     phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6781                         phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6782                 else
6783                         phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6784
6785         } else {
6786                 /*
6787                  * The scsi_buf for a regular I/O holds the FCP cmnd,
6788                  * the FCP rsp, a SGE for each, and a SGE for up to
6789                  * cfg_sg_seg_cnt data segments.
6790                  */
6791                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6792                                 sizeof(struct fcp_rsp) +
6793                                 ((phba->cfg_sg_seg_cnt + extra) *
6794                                 sizeof(struct sli4_sge));
6795
6796                 /* Total SGEs for scsi_sg_list */
6797                 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6798                 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6799
6800                 /*
6801                  * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
6802                  * need to post 1 page for the SGL.
6803                  */
6804         }
6805
6806         if (phba->cfg_xpsgl && !phba->nvmet_support)
6807                 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
6808         else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
6809                 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6810         else
6811                 phba->cfg_sg_dma_buf_size =
6812                                 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6813
6814         phba->border_sge_num = phba->cfg_sg_dma_buf_size /
6815                                sizeof(struct sli4_sge);
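
        /*
         * Worked example (illustrative): with a page-aligned 4KB
         * (SLI4_PAGE_SIZE) buffer and 16-byte struct sli4_sge entries,
         * border_sge_num comes out to 4096 / 16 = 256.
         */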
6816
6817         /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
6818         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6819                 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6820                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6821                                         "6300 Reducing NVME sg segment "
6822                                         "cnt to %d\n",
6823                                         LPFC_MAX_NVME_SEG_CNT);
6824                         phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
6825                 } else {
6826                         phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
                }
6827         }
6828
6829         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6830                         "9087 sg_seg_cnt:%d dmabuf_size:%d "
6831                         "total:%d scsi:%d nvme:%d\n",
6832                         phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6833                         phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
6834                         phba->cfg_nvme_seg_cnt);
6835
6836         if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
6837                 i = phba->cfg_sg_dma_buf_size;
6838         else
6839                 i = SLI4_PAGE_SIZE;
6840
6841         phba->lpfc_sg_dma_buf_pool =
6842                         dma_pool_create("lpfc_sg_dma_buf_pool",
6843                                         &phba->pcidev->dev,
6844                                         phba->cfg_sg_dma_buf_size,
6845                                         i, 0);
6846         if (!phba->lpfc_sg_dma_buf_pool) {
                     /* set a real error and don't leak mboxq on failure */
                     rc = -ENOMEM;
                     mempool_free(mboxq, phba->mbox_mem_pool);
                     goto out_free_bsmbx;
             }
6848
6849         phba->lpfc_cmd_rsp_buf_pool =
6850                         dma_pool_create("lpfc_cmd_rsp_buf_pool",
6851                                         &phba->pcidev->dev,
6852                                         sizeof(struct fcp_cmnd) +
6853                                         sizeof(struct fcp_rsp),
6854                                         i, 0);
6855         if (!phba->lpfc_cmd_rsp_buf_pool) {
                     rc = -ENOMEM;
                     mempool_free(mboxq, phba->mbox_mem_pool);
                     goto out_free_sg_dma_buf;
             }
6857
6858         mempool_free(mboxq, phba->mbox_mem_pool);
6859
6860         /* Verify OAS is supported */
6861         lpfc_sli4_oas_verify(phba);
6862
6863         /* Verify RAS support on adapter */
6864         lpfc_sli4_ras_init(phba);
6865
6866         /* Verify all the SLI4 queues */
6867         rc = lpfc_sli4_queue_verify(phba);
6868         if (rc)
6869                 goto out_free_cmd_rsp_buf;
6870
6871         /* Create driver internal CQE event pool */
6872         rc = lpfc_sli4_cq_event_pool_create(phba);
6873         if (rc)
6874                 goto out_free_cmd_rsp_buf;
6875
6876         /* Initialize sgl lists per host */
6877         lpfc_init_sgl_list(phba);
6878
6879         /* Allocate and initialize active sgl array */
6880         rc = lpfc_init_active_sgl_array(phba);
6881         if (rc) {
6882                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6883                                 "1430 Failed to initialize sgl list.\n");
6884                 goto out_destroy_cq_event_pool;
6885         }
6886         rc = lpfc_sli4_init_rpi_hdrs(phba);
6887         if (rc) {
6888                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6889                                 "1432 Failed to initialize rpi headers.\n");
6890                 goto out_free_active_sgl;
6891         }
6892
6893         /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
6894         longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
6895         phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
6896                                          GFP_KERNEL);
6897         if (!phba->fcf.fcf_rr_bmask) {
6898                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6899                                 "2759 Failed allocate memory for FCF round "
6900                                 "robin failover bmask\n");
6901                 rc = -ENOMEM;
6902                 goto out_remove_rpi_hdrs;
6903         }
6904
6905         phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
6906                                             sizeof(struct lpfc_hba_eq_hdl),
6907                                             GFP_KERNEL);
6908         if (!phba->sli4_hba.hba_eq_hdl) {
6909                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6910                                 "2572 Failed allocate memory for "
6911                                 "fast-path per-EQ handle array\n");
6912                 rc = -ENOMEM;
6913                 goto out_free_fcf_rr_bmask;
6914         }
6915
6916         phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
6917                                         sizeof(struct lpfc_vector_map_info),
6918                                         GFP_KERNEL);
6919         if (!phba->sli4_hba.cpu_map) {
6920                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6921                                 "3327 Failed allocate memory for msi-x "
6922                                 "interrupt vector mapping\n");
6923                 rc = -ENOMEM;
6924                 goto out_free_hba_eq_hdl;
6925         }
6926
6927         phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
6928         if (!phba->sli4_hba.eq_info) {
6929                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6930                                 "3321 Failed allocation for per_cpu stats\n");
6931                 rc = -ENOMEM;
6932                 goto out_free_hba_cpu_map;
6933         }
6934
6935 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
6936         phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
6937         if (!phba->sli4_hba.c_stat) {
6938                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6939                                 "3332 Failed allocating per cpu hdwq stats\n");
6940                 rc = -ENOMEM;
6941                 goto out_free_hba_eq_info;
6942         }
6943 #endif
6944
6945         /*
6946          * Enable sr-iov virtual functions if supported and configured
6947          * through the module parameter.
6948          */
6949         if (phba->cfg_sriov_nr_virtfn > 0) {
6950                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6951                                                  phba->cfg_sriov_nr_virtfn);
6952                 if (rc) {
6953                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6954                                         "3020 Requested number of SR-IOV "
6955                                         "virtual functions (%d) is not "
6956                                         "supported\n",
6957                                         phba->cfg_sriov_nr_virtfn);
6958                         phba->cfg_sriov_nr_virtfn = 0;
6959                 }
6960         }
6961
6962         return 0;
6963
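
/*
 * Error unwinding: each label below releases only what was allocated
 * after the previous label, so teardown runs in reverse setup order.
 */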
6964 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
6965 out_free_hba_eq_info:
6966         free_percpu(phba->sli4_hba.eq_info);
6967 #endif
6968 out_free_hba_cpu_map:
6969         kfree(phba->sli4_hba.cpu_map);
6970 out_free_hba_eq_hdl:
6971         kfree(phba->sli4_hba.hba_eq_hdl);
6972 out_free_fcf_rr_bmask:
6973         kfree(phba->fcf.fcf_rr_bmask);
6974 out_remove_rpi_hdrs:
6975         lpfc_sli4_remove_rpi_hdrs(phba);
6976 out_free_active_sgl:
6977         lpfc_free_active_sgl(phba);
6978 out_destroy_cq_event_pool:
6979         lpfc_sli4_cq_event_pool_destroy(phba);
6980 out_free_cmd_rsp_buf:
6981         dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
6982         phba->lpfc_cmd_rsp_buf_pool = NULL;
6983 out_free_sg_dma_buf:
6984         dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6985         phba->lpfc_sg_dma_buf_pool = NULL;
6986 out_free_bsmbx:
6987         lpfc_destroy_bootstrap_mbox(phba);
6988 out_free_mem:
6989         lpfc_mem_free(phba);
6990         return rc;
6991 }
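
/*
 * Sizing sketch (illustrative helper, not driver code): the
 * regular-I/O SLI-4 DMA buffer length from above, where "extra" is 2
 * for the reserved cmnd/rsp SGEs plus 1 more when NVME is enabled.
 */
static size_t __maybe_unused
lpfc_example_sli4_buf_size(uint32_t sg_seg_cnt, int extra)
{
        return sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
               ((sg_seg_cnt + extra) * sizeof(struct sli4_sge));
}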
6992
6993 /**
6994  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
6995  * @phba: pointer to lpfc hba data structure.
6996  *
6997  * This routine is invoked to unset the driver internal resources set up
6998  * specifically for supporting the SLI-4 HBA device it is attached to.
6999  **/
7000 static void
7001 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
7002 {
7003         struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
7004
7005         free_percpu(phba->sli4_hba.eq_info);
7006 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7007         free_percpu(phba->sli4_hba.c_stat);
7008 #endif
7009
7010         /* Free memory allocated for msi-x interrupt vector to CPU mapping */
7011         kfree(phba->sli4_hba.cpu_map);
7012         phba->sli4_hba.num_possible_cpu = 0;
7013         phba->sli4_hba.num_present_cpu = 0;
7014         phba->sli4_hba.curr_disp_cpu = 0;
7015         cpumask_clear(&phba->sli4_hba.irq_aff_mask);
7016
7017         /* Free memory allocated for fast-path work queue handles */
7018         kfree(phba->sli4_hba.hba_eq_hdl);
7019
7020         /* Free the allocated rpi headers. */
7021         lpfc_sli4_remove_rpi_hdrs(phba);
7022         lpfc_sli4_remove_rpis(phba);
7023
7024         /* Free eligible FCF index bmask */
7025         kfree(phba->fcf.fcf_rr_bmask);
7026
7027         /* Free the ELS sgl list */
7028         lpfc_free_active_sgl(phba);
7029         lpfc_free_els_sgl_list(phba);
7030         lpfc_free_nvmet_sgl_list(phba);
7031
7032         /* Free the completion queue EQ event pool */
7033         lpfc_sli4_cq_event_release_all(phba);
7034         lpfc_sli4_cq_event_pool_destroy(phba);
7035
7036         /* Release resource identifiers. */
7037         lpfc_sli4_dealloc_resource_identifiers(phba);
7038
7039         /* Free the bsmbx region. */
7040         lpfc_destroy_bootstrap_mbox(phba);
7041
7042         /* Free the SLI Layer memory with SLI4 HBAs */
7043         lpfc_mem_free_all(phba);
7044
7045         /* Free the current connect table */
7046         list_for_each_entry_safe(conn_entry, next_conn_entry,
7047                 &phba->fcf_conn_rec_list, list) {
7048                 list_del_init(&conn_entry->list);
7049                 kfree(conn_entry);
7050         }
7051
7052         return;
7053 }
7054
7055 /**
7056  * lpfc_init_api_table_setup - Set up init api function jump table
7057  * @phba: The hba struct for which this call is being executed.
7058  * @dev_grp: The HBA PCI-Device group number.
7059  *
7060  * This routine sets up the device INIT interface API function jump table
7061  * in @phba struct.
7062  *
7063  * Returns: 0 - success, -ENODEV - failure.
7064  **/
7065 int
7066 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7067 {
7068         phba->lpfc_hba_init_link = lpfc_hba_init_link;
7069         phba->lpfc_hba_down_link = lpfc_hba_down_link;
7070         phba->lpfc_selective_reset = lpfc_selective_reset;
7071         switch (dev_grp) {
7072         case LPFC_PCI_DEV_LP:
7073                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
7074                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
7075                 phba->lpfc_stop_port = lpfc_stop_port_s3;
7076                 break;
7077         case LPFC_PCI_DEV_OC:
7078                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
7079                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
7080                 phba->lpfc_stop_port = lpfc_stop_port_s4;
7081                 break;
7082         default:
7083                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7084                                 "1431 Invalid HBA PCI-device group: 0x%x\n",
7085                                 dev_grp);
7086                 return -ENODEV;
7088         }
7089         return 0;
7090 }
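
/*
 * Dispatch sketch (illustrative, not driver code): once the jump table
 * is populated, SLI-variant routines are reached indirectly, e.g. the
 * call below resolves to lpfc_stop_port_s3() or lpfc_stop_port_s4()
 * depending on dev_grp.
 */
static void __maybe_unused lpfc_example_stop(struct lpfc_hba *phba)
{
        phba->lpfc_stop_port(phba);
}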
7091
7092 /**
7093  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
7094  * @phba: pointer to lpfc hba data structure.
7095  *
7096  * This routine is invoked to set up the driver internal resources after the
7097  * device specific resource setup to support the HBA device it is attached to.
7098  *
7099  * Return codes
7100  *      0 - successful
7101  *      other values - error
7102  **/
7103 static int
7104 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
7105 {
7106         int error;
7107
7108         /* Startup the kernel thread for this host adapter. */
7109         phba->worker_thread = kthread_run(lpfc_do_work, phba,
7110                                           "lpfc_worker_%d", phba->brd_no);
7111         if (IS_ERR(phba->worker_thread)) {
7112                 error = PTR_ERR(phba->worker_thread);
7113                 return error;
7114         }
7115
7116         return 0;
7117 }
7118
7119 /**
7120  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
7121  * @phba: pointer to lpfc hba data structure.
7122  *
7123  * This routine is invoked to unset the driver internal resources set up after
7124  * the device specific resource setup for supporting the HBA device it
7125  * is attached to.
7126  **/
7127 static void
7128 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
7129 {
7130         if (phba->wq) {
7131                 flush_workqueue(phba->wq);
7132                 destroy_workqueue(phba->wq);
7133                 phba->wq = NULL;
7134         }
7135
7136         /* Stop kernel worker thread */
7137         if (phba->worker_thread)
7138                 kthread_stop(phba->worker_thread);
7139 }
7140
7141 /**
7142  * lpfc_free_iocb_list - Free iocb list.
7143  * @phba: pointer to lpfc hba data structure.
7144  *
7145  * This routine is invoked to free the driver's IOCB list and memory.
7146  **/
7147 void
7148 lpfc_free_iocb_list(struct lpfc_hba *phba)
7149 {
7150         struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7151
7152         spin_lock_irq(&phba->hbalock);
7153         list_for_each_entry_safe(iocbq_entry, iocbq_next,
7154                                  &phba->lpfc_iocb_list, list) {
7155                 list_del(&iocbq_entry->list);
7156                 kfree(iocbq_entry);
7157                 phba->total_iocbq_bufs--;
7158         }
7159         spin_unlock_irq(&phba->hbalock);
7160
7161         return;
7162 }
7163
7164 /**
7165  * lpfc_init_iocb_list - Allocate and initialize iocb list.
7166  * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCB entries to allocate for the list.
7167  *
7168  * This routine is invoked to allocate and initialize the driver's IOCB
7169  * list and set up the IOCB tag array accordingly.
7170  *
7171  * Return codes
7172  *      0 - successful
7173  *      other values - error
7174  **/
7175 int
7176 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7177 {
7178         struct lpfc_iocbq *iocbq_entry = NULL;
7179         uint16_t iotag;
7180         int i;
7181
7182         /* Initialize and populate the iocb list per host.  */
7183         INIT_LIST_HEAD(&phba->lpfc_iocb_list);
7184         for (i = 0; i < iocb_count; i++) {
7185                 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
7186                 if (iocbq_entry == NULL) {
7187                         printk(KERN_ERR "%s: only allocated %d iocbs of "
7188                                 "expected %d count. Unloading driver.\n",
7189                                 __func__, i, iocb_count);
7190                         goto out_free_iocbq;
7191                 }
7192
7193                 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7194                 if (iotag == 0) {
7195                         kfree(iocbq_entry);
7196                         printk(KERN_ERR "%s: failed to allocate IOTAG. "
7197                                 "Unloading driver.\n", __func__);
7198                         goto out_free_iocbq;
7199                 }
7200                 iocbq_entry->sli4_lxritag = NO_XRI;
7201                 iocbq_entry->sli4_xritag = NO_XRI;
7202
7203                 spin_lock_irq(&phba->hbalock);
7204                 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7205                 phba->total_iocbq_bufs++;
7206                 spin_unlock_irq(&phba->hbalock);
7207         }
7208
7209         return 0;
7210
7211 out_free_iocbq:
7212         lpfc_free_iocb_list(phba);
7213
7214         return -ENOMEM;
7215 }
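
/*
 * Usage sketch (illustrative, not driver code): the IOCB list is sized
 * by the caller and torn down with lpfc_free_iocb_list().  The count
 * of 2250 mirrors the driver's usual LPFC_IOCB_LIST_CNT default and is
 * an assumption here.
 */
static int __maybe_unused lpfc_example_iocb_lifecycle(struct lpfc_hba *phba)
{
        int rc = lpfc_init_iocb_list(phba, 2250);

        if (rc)
                return rc;
        /* ... I/O path uses the pre-allocated iocbqs ... */
        lpfc_free_iocb_list(phba);
        return 0;
}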
7216
7217 /**
7218  * lpfc_free_sgl_list - Free a given sgl list.
7219  * @phba: pointer to lpfc hba data structure.
7220  * @sglq_list: pointer to the head of sgl list.
7221  *
7222  * This routine is invoked to free a given sgl list and memory.
7223  **/
7224 void
7225 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
7226 {
7227         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7228
7229         list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7230                 list_del(&sglq_entry->list);
7231                 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7232                 kfree(sglq_entry);
7233         }
7234 }
7235
7236 /**
7237  * lpfc_free_els_sgl_list - Free els sgl list.
7238  * @phba: pointer to lpfc hba data structure.
7239  *
7240  * This routine is invoked to free the driver's els sgl list and memory.
7241  **/
7242 static void
7243 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7244 {
7245         LIST_HEAD(sglq_list);
7246
7247         /* Retrieve all els sgls from driver list */
7248         spin_lock_irq(&phba->hbalock);
7249         spin_lock(&phba->sli4_hba.sgl_list_lock);
7250         list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
7251         spin_unlock(&phba->sli4_hba.sgl_list_lock);
7252         spin_unlock_irq(&phba->hbalock);
7253
7254         /* Now free the sgl list */
7255         lpfc_free_sgl_list(phba, &sglq_list);
7256 }
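
/*
 * Locking sketch (illustrative, not driver code): the splice pattern
 * above detaches the whole list to a private head under hbalock and
 * sgl_list_lock, so the entries can then be freed lock-free.
 */
static void __maybe_unused
lpfc_example_drain_sgl_list(struct lpfc_hba *phba, struct list_head *src)
{
        LIST_HEAD(tmp);

        spin_lock_irq(&phba->hbalock);
        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_splice_init(src, &tmp);
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        spin_unlock_irq(&phba->hbalock);
        /* free entries from tmp here without either lock held */
}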
7257
7258 /**
7259  * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
7260  * @phba: pointer to lpfc hba data structure.
7261  *
7262  * This routine is invoked to free the driver's nvmet sgl list and memory.
7263  **/
7264 static void
7265 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7266 {
7267         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7268         LIST_HEAD(sglq_list);
7269
7270         /* Retrieve all nvmet sgls from driver list */
7271         spin_lock_irq(&phba->hbalock);
7272         spin_lock(&phba->sli4_hba.sgl_list_lock);
7273         list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7274         spin_unlock(&phba->sli4_hba.sgl_list_lock);
7275         spin_unlock_irq(&phba->hbalock);
7276
7277         /* Now free the sgl list */
7278         list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7279                 list_del(&sglq_entry->list);
7280                 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
7281                 kfree(sglq_entry);
7282         }
7283
7284         /* Update the nvmet_xri_cnt to reflect no current sgls.
7285          * The next initialization cycle sets the count and allocates
7286          * the sgls over again.
7287          */
7288         phba->sli4_hba.nvmet_xri_cnt = 0;
7289 }
7290
7291 /**
7292  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
7293  * @phba: pointer to lpfc hba data structure.
7294  *
7295  * This routine is invoked to allocate the driver's active sgl memory.
7296  * This array will hold the sglq_entry's for active IOs.
7297  **/
7298 static int
7299 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7300 {
7301         int size;
7302         size = sizeof(struct lpfc_sglq *);
7303         size *= phba->sli4_hba.max_cfg_param.max_xri;
7304
7305         phba->sli4_hba.lpfc_sglq_active_list =
7306                 kzalloc(size, GFP_KERNEL);
7307         if (!phba->sli4_hba.lpfc_sglq_active_list)
7308                 return -ENOMEM;
7309         return 0;
7310 }
7311
7312 /**
7313  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
7314  * @phba: pointer to lpfc hba data structure.
7315  *
7316  * This routine is invoked to walk through the array of active sglq entries
7317  * and free all of the resources.
7318  * This is just a placeholder for now.
7319  **/
7320 static void
7321 lpfc_free_active_sgl(struct lpfc_hba *phba)
7322 {
7323         kfree(phba->sli4_hba.lpfc_sglq_active_list);
7324 }
7325
7326 /**
7327  * lpfc_init_sgl_list - Allocate and initialize sgl list.
7328  * @phba: pointer to lpfc hba data structure.
7329  *
7330  * This routine is invoked to allocate and initialize the driver's sgl
7331  * list and set up the sgl xritag array accordingly.
7332  *
7333  **/
7334 static void
7335 lpfc_init_sgl_list(struct lpfc_hba *phba)
7336 {
7337         /* Initialize and populate the sglq list per host/VF. */
7338         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
7339         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7340         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
7341         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7342
7343         /* els xri-sgl book keeping */
7344         phba->sli4_hba.els_xri_cnt = 0;
7345
7346         /* nvme xri-buffer book keeping */
7347         phba->sli4_hba.io_xri_cnt = 0;
7348 }
7349
7350 /**
7351  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
7352  * @phba: pointer to lpfc hba data structure.
7353  *
7354  * This routine is invoked to post rpi header templates to the
7355  * port for those SLI4 ports that do not support extents.  This routine
7356  * posts a PAGE_SIZE memory region to the port to hold up to
7357  * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
7358  * and should be called only when interrupts are disabled.
7359  *
7360  * Return codes
7361  *      0 - successful
7362  *      -ERROR - otherwise.
7363  **/
7364 int
7365 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7366 {
7367         int rc = 0;
7368         struct lpfc_rpi_hdr *rpi_hdr;
7369
7370         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
7371         if (!phba->sli4_hba.rpi_hdrs_in_use)
7372                 return rc;
7373         if (phba->sli4_hba.extents_in_use)
7374                 return -EIO;
7375
7376         rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
7377         if (!rpi_hdr) {
7378                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7379                                 "0391 Error during rpi post operation\n");
7380                 lpfc_sli4_remove_rpis(phba);
7381                 rc = -ENODEV;
7382         }
7383
7384         return rc;
7385 }
7386
7387 /**
7388  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
7389  * @phba: pointer to lpfc hba data structure.
7390  *
7391  * This routine is invoked to allocate a single 4KB memory region to
7392  * support rpis and stores them in the phba.  This single region
7393  * provides support for up to 64 rpis.  The region is used globally
7394  * by the device.
7395  *
7396  * Returns:
7397  *   A valid rpi hdr on success.
7398  *   A NULL pointer on any failure.
7399  **/
7400 struct lpfc_rpi_hdr *
7401 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
7402 {
7403         uint16_t rpi_limit, curr_rpi_range;
7404         struct lpfc_dmabuf *dmabuf;
7405         struct lpfc_rpi_hdr *rpi_hdr;
7406
7407         /*
7408          * If the SLI4 port supports extents, posting the rpi header isn't
7409          * required.  Set the expected maximum count and let the actual value
7410          * get set when extents are fully allocated.
7411          */
7412         if (!phba->sli4_hba.rpi_hdrs_in_use)
7413                 return NULL;
7414         if (phba->sli4_hba.extents_in_use)
7415                 return NULL;
7416
7417         /* The limit on the logical index is just the max_rpi count. */
7418         rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
7419
7420         spin_lock_irq(&phba->hbalock);
7421         /*
7422          * Establish the starting RPI in this header block.  The starting
7423          * rpi is normalized to a zero base because the physical rpi is
7424          * port based.
7425          */
7426         curr_rpi_range = phba->sli4_hba.next_rpi;
7427         spin_unlock_irq(&phba->hbalock);
7428
7429         /* Reached full RPI range */
7430         if (curr_rpi_range == rpi_limit)
7431                 return NULL;
7432
7433         /*
7434          * First allocate the protocol header region for the port.  The
7435          * port expects a 4KB DMA-mapped memory region that is 4K aligned.
7436          */
7437         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7438         if (!dmabuf)
7439                 return NULL;
7440
7441         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
7442                                           LPFC_HDR_TEMPLATE_SIZE,
7443                                           &dmabuf->phys, GFP_KERNEL);
7444         if (!dmabuf->virt) {
7445                 rpi_hdr = NULL;
7446                 goto err_free_dmabuf;
7447         }
7448
7449         if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
7450                 rpi_hdr = NULL;
7451                 goto err_free_coherent;
7452         }
7453
7454         /* Save the rpi header data for cleanup later. */
7455         rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
7456         if (!rpi_hdr)
7457                 goto err_free_coherent;
7458
7459         rpi_hdr->dmabuf = dmabuf;
7460         rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
7461         rpi_hdr->page_count = 1;
7462         spin_lock_irq(&phba->hbalock);
7463
7464         /* The rpi_hdr stores the logical index only. */
7465         rpi_hdr->start_rpi = curr_rpi_range;
7466         rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
7467         list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
7468
7469         spin_unlock_irq(&phba->hbalock);
7470         return rpi_hdr;
7471
7472  err_free_coherent:
7473         dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
7474                           dmabuf->virt, dmabuf->phys);
7475  err_free_dmabuf:
7476         kfree(dmabuf);
7477         return NULL;
7478 }
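
/*
 * Sizing note (illustrative): one 4KB LPFC_HDR_TEMPLATE_SIZE region
 * with 64-byte rpi context headers covers LPFC_RPI_HDR_COUNT (64)
 * rpis, which is why next_rpi advances by LPFC_RPI_HDR_COUNT for each
 * header block created.
 */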
7479
7480 /**
7481  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
7482  * @phba: pointer to lpfc hba data structure.
7483  *
7484  * This routine is invoked to remove all memory resources allocated
7485  * to support rpis for SLI4 ports not supporting extents. This routine
7486  * presumes the caller has released all rpis consumed by fabric or port
7487  * logins and is prepared to have the header pages removed.
7488  **/
7489 void
7490 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7491 {
7492         struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7493
7494         if (!phba->sli4_hba.rpi_hdrs_in_use)
7495                 goto exit;
7496
7497         list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7498                                  &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7499                 list_del(&rpi_hdr->list);
7500                 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7501                                   rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7502                 kfree(rpi_hdr->dmabuf);
7503                 kfree(rpi_hdr);
7504         }
7505  exit:
7506         /* There are no rpis available to the port now. */
7507         phba->sli4_hba.next_rpi = 0;
7508 }
7509
7510 /**
7511  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
7512  * @pdev: pointer to pci device data structure.
7513  *
7514  * This routine is invoked to allocate the driver hba data structure for an
7515  * HBA device. If the allocation is successful, the phba reference to the
7516  * PCI device data structure is set.
7517  *
7518  * Return codes
7519  *      pointer to @phba - successful
7520  *      NULL - error
7521  **/
7522 static struct lpfc_hba *
7523 lpfc_hba_alloc(struct pci_dev *pdev)
7524 {
7525         struct lpfc_hba *phba;
7526
7527         /* Allocate memory for HBA structure */
7528         phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
7529         if (!phba) {
7530                 dev_err(&pdev->dev, "failed to allocate hba struct\n");
7531                 return NULL;
7532         }
7533
7534         /* Set reference to PCI device in HBA structure */
7535         phba->pcidev = pdev;
7536
7537         /* Assign an unused board number */
7538         phba->brd_no = lpfc_get_instance();
7539         if (phba->brd_no < 0) {
7540                 kfree(phba);
7541                 return NULL;
7542         }
7543         phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
7544
7545         spin_lock_init(&phba->ct_ev_lock);
7546         INIT_LIST_HEAD(&phba->ct_ev_waiters);
7547
7548         return phba;
7549 }
7550
7551 /**
7552  * lpfc_hba_free - Free driver hba data structure with a device.
7553  * @phba: pointer to lpfc hba data structure.
7554  *
7555  * This routine is invoked to free the driver hba data structure with an
7556  * HBA device.
7557  **/
7558 static void
7559 lpfc_hba_free(struct lpfc_hba *phba)
7560 {
7561         if (phba->sli_rev == LPFC_SLI_REV4)
7562                 kfree(phba->sli4_hba.hdwq);
7563
7564         /* Release the driver assigned board number */
7565         idr_remove(&lpfc_hba_index, phba->brd_no);
7566
7567         /* Free memory allocated with sli3 rings */
7568         kfree(phba->sli.sli3_ring);
7569         phba->sli.sli3_ring = NULL;
7570
7571         kfree(phba);
7572         return;
7573 }
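
/*
 * Probe-time sketch (illustrative, not driver code): lpfc_hba_alloc()
 * pairs with lpfc_hba_free() across the PCI probe/remove paths.
 */
static int __maybe_unused lpfc_example_hba_lifecycle(struct pci_dev *pdev)
{
        struct lpfc_hba *phba = lpfc_hba_alloc(pdev);

        if (!phba)
                return -ENOMEM;
        /* ... later, on setup failure or device removal ... */
        lpfc_hba_free(phba);
        return 0;
}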
7574
7575 /**
7576  * lpfc_create_shost - Create hba physical port with associated scsi host.
7577  * @phba: pointer to lpfc hba data structure.
7578  *
7579  * This routine is invoked to create HBA physical port and associate a SCSI
7580  * host with it.
7581  *
7582  * Return codes
7583  *      0 - successful
7584  *      other values - error
7585  **/
7586 static int
7587 lpfc_create_shost(struct lpfc_hba *phba)
7588 {
7589         struct lpfc_vport *vport;
7590         struct Scsi_Host  *shost;
7591
7592         /* Initialize HBA FC structure */
7593         phba->fc_edtov = FF_DEF_EDTOV;
7594         phba->fc_ratov = FF_DEF_RATOV;
7595         phba->fc_altov = FF_DEF_ALTOV;
7596         phba->fc_arbtov = FF_DEF_ARBTOV;
7597
7598         atomic_set(&phba->sdev_cnt, 0);
7599         vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
7600         if (!vport)
7601                 return -ENODEV;
7602
7603         shost = lpfc_shost_from_vport(vport);
7604         phba->pport = vport;
7605
7606         if (phba->nvmet_support) {
7607                 /* Only 1 vport (pport) will support NVME target */
7608                 phba->targetport = NULL;
7609                 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
7610                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
7611                                 "6076 NVME Target Found\n");
7612         }
7613
7614         lpfc_debugfs_initialize(vport);
7615         /* Put reference to SCSI host to driver's device private data */
7616         pci_set_drvdata(phba->pcidev, shost);
7617
7618         /*
7619          * At this point we are fully registered with PSA. In addition,
7620          * any initial discovery should be completed.
7621          */
7622         vport->load_flag |= FC_ALLOW_FDMI;
7623         if (phba->cfg_enable_SmartSAN ||
7624             (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
7625
7626                 /* Setup appropriate attribute masks */
7627                 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
7628                 if (phba->cfg_enable_SmartSAN)
7629                         vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
7630                 else
7631                         vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
7632         }
7633         return 0;
7634 }
7635
7636 /**
7637  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
7638  * @phba: pointer to lpfc hba data structure.
7639  *
7640  * This routine is invoked to destroy HBA physical port and the associated
7641  * SCSI host.
7642  **/
7643 static void
7644 lpfc_destroy_shost(struct lpfc_hba *phba)
7645 {
7646         struct lpfc_vport *vport = phba->pport;
7647
7648         /* Destroy physical port that associated with the SCSI host */
7649         destroy_port(vport);
7650
7651         return;
7652 }
7653
7654 /**
7655  * lpfc_setup_bg - Setup Block guard structures and debug areas.
7656  * @phba: pointer to lpfc hba data structure.
7657  * @shost: the shost to be used to detect Block guard settings.
7658  *
7659  * This routine sets up the local Block guard protocol settings for @shost.
7660  * This routine also allocates memory for debugging bg buffers.
7661  **/
7662 static void
7663 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7664 {
7665         uint32_t old_mask;
7666         uint32_t old_guard;
7667
7668         if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7669                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7670                                 "1478 Registering BlockGuard with the "
7671                                 "SCSI layer\n");
7672
7673                 old_mask = phba->cfg_prot_mask;
7674                 old_guard = phba->cfg_prot_guard;
7675
7676                 /* Only allow supported values */
7677                 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7678                         SHOST_DIX_TYPE0_PROTECTION |
7679                         SHOST_DIX_TYPE1_PROTECTION);
7680                 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7681                                          SHOST_DIX_GUARD_CRC);
7682
7683                 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
7684                 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7685                         phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7686
7687                 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7688                         if ((old_mask != phba->cfg_prot_mask) ||
7689                                 (old_guard != phba->cfg_prot_guard))
7690                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7691                                         "1475 Registering BlockGuard with the "
7692                                         "SCSI layer: mask %d  guard %d\n",
7693                                         phba->cfg_prot_mask,
7694                                         phba->cfg_prot_guard);
7695
7696                         scsi_host_set_prot(shost, phba->cfg_prot_mask);
7697                         scsi_host_set_guard(shost, phba->cfg_prot_guard);
7698                 } else
7699                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7700                                 "1479 Not Registering BlockGuard with the SCSI "
7701                                 "layer, Bad protection parameters: %d %d\n",
7702                                 old_mask, old_guard);
7703         }
7704 }
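
/*
 * Illustrative sketch, not driver code: the filtering above keeps only the
 * DIF/DIX combinations lpfc supports and promotes a DIX-Type1-only request
 * to end-to-end protection.  A minimal, hypothetical helper with the same
 * logic, using the SHOST_* capability flags from <scsi/scsi_host.h>:
 */
#if 0
static uint32_t example_filter_prot_mask(uint32_t requested)
{
        /* Keep only the protection types this driver can register */
        requested &= (SHOST_DIF_TYPE1_PROTECTION |
                      SHOST_DIX_TYPE0_PROTECTION |
                      SHOST_DIX_TYPE1_PROTECTION);

        /* DIX Type 1 alone implies end-to-end, so add DIF Type 1 */
        if (requested == SHOST_DIX_TYPE1_PROTECTION)
                requested |= SHOST_DIF_TYPE1_PROTECTION;

        return requested;
}
#endif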
7705
7706 /**
7707  * lpfc_post_init_setup - Perform necessary device post initialization setup.
7708  * @phba: pointer to lpfc hba data structure.
7709  *
7710  * This routine is invoked to perform all the necessary post initialization
7711  * setup for the device.
7712  **/
7713 static void
7714 lpfc_post_init_setup(struct lpfc_hba *phba)
7715 {
7716         struct Scsi_Host  *shost;
7717         struct lpfc_adapter_event_header adapter_event;
7718
7719         /* Get the default values for Model Name and Description */
7720         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7721
7722         /*
7723          * hba setup may have changed the hba_queue_depth so we need to
7724          * adjust the value of can_queue.
7725          */
7726         shost = pci_get_drvdata(phba->pcidev);
7727         shost->can_queue = phba->cfg_hba_queue_depth - 10;
7728
7729         lpfc_host_attrib_init(shost);
7730
7731         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7732                 spin_lock_irq(shost->host_lock);
7733                 lpfc_poll_start_timer(phba);
7734                 spin_unlock_irq(shost->host_lock);
7735         }
7736
7737         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7738                         "0428 Perform SCSI scan\n");
7739         /* Send board arrival event to upper layer */
7740         adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7741         adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7742         fc_host_post_vendor_event(shost, fc_get_event_number(),
7743                                   sizeof(adapter_event),
7744                                   (char *) &adapter_event,
7745                                   LPFC_NL_VENDOR_ID);
7746         return;
7747 }
7748
7749 /**
7750  * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
7751  * @phba: pointer to lpfc hba data structure.
7752  *
7753  * This routine is invoked to set up the PCI device memory space for device
7754  * with SLI-3 interface spec.
7755  *
7756  * Return codes
7757  *      0 - successful
7758  *      other values - error
7759  **/
7760 static int
7761 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7762 {
7763         struct pci_dev *pdev = phba->pcidev;
7764         unsigned long bar0map_len, bar2map_len;
7765         int i, hbq_count;
7766         void *ptr;
7767         int error;
7768
7769         if (!pdev)
7770                 return -ENODEV;
7771
7772         /* Set the device DMA mask size */
7773         error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7774         if (error)
7775                 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7776         if (error)
7777                 return error;
7778         error = -ENODEV;
7779
7780         /* Get the bus address of Bar0 and Bar2 and the number of bytes
7781          * required by each mapping.
7782          */
7783         phba->pci_bar0_map = pci_resource_start(pdev, 0);
7784         bar0map_len = pci_resource_len(pdev, 0);
7785
7786         phba->pci_bar2_map = pci_resource_start(pdev, 2);
7787         bar2map_len = pci_resource_len(pdev, 2);
7788
7789         /* Map HBA SLIM to a kernel virtual address. */
7790         phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
7791         if (!phba->slim_memmap_p) {
7792                 dev_printk(KERN_ERR, &pdev->dev,
7793                            "ioremap failed for SLIM memory.\n");
7794                 goto out;
7795         }
7796
7797         /* Map HBA Control Registers to a kernel virtual address. */
7798         phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
7799         if (!phba->ctrl_regs_memmap_p) {
7800                 dev_printk(KERN_ERR, &pdev->dev,
7801                            "ioremap failed for HBA control registers.\n");
7802                 goto out_iounmap_slim;
7803         }
7804
7805         /* Allocate memory for SLI-2 structures */
7806         phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7807                                                &phba->slim2p.phys, GFP_KERNEL);
7808         if (!phba->slim2p.virt)
7809                 goto out_iounmap;
7810
7811         phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7812         phba->mbox_ext = (phba->slim2p.virt +
7813                 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
7814         phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
7815         phba->IOCBs = (phba->slim2p.virt +
7816                        offsetof(struct lpfc_sli2_slim, IOCBs));
7817
7818         phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
7819                                                  lpfc_sli_hbq_size(),
7820                                                  &phba->hbqslimp.phys,
7821                                                  GFP_KERNEL);
7822         if (!phba->hbqslimp.virt)
7823                 goto out_free_slim;
7824
7825         hbq_count = lpfc_sli_hbq_count();
7826         ptr = phba->hbqslimp.virt;
7827         for (i = 0; i < hbq_count; ++i) {
7828                 phba->hbqs[i].hbq_virt = ptr;
7829                 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
7830                 ptr += (lpfc_hbq_defs[i]->entry_count *
7831                         sizeof(struct lpfc_hbq_entry));
7832         }
7833         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
7834         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
7835
7836         memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
7837
7838         phba->MBslimaddr = phba->slim_memmap_p;
7839         phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
7840         phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
7841         phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
7842         phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
7843
7844         return 0;
7845
7846 out_free_slim:
7847         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7848                           phba->slim2p.virt, phba->slim2p.phys);
7849 out_iounmap:
7850         iounmap(phba->ctrl_regs_memmap_p);
7851 out_iounmap_slim:
7852         iounmap(phba->slim_memmap_p);
7853 out:
7854         return error;
7855 }
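
/*
 * Note: the routine above combines two common kernel idioms -- a DMA mask
 * fallback (try 64-bit addressing, then fall back to 32-bit) and goto-based
 * error unwinding, where each label releases only the resources acquired
 * before the failing step, in reverse order.  A condensed, hypothetical
 * sketch of the unwind shape (example_setup() is not a real function):
 */
#if 0
static int example_setup(struct pci_dev *pdev, void **out)
{
        void __iomem *regs;
        dma_addr_t phys;
        void *buf;

        regs = ioremap(pci_resource_start(pdev, 0), PAGE_SIZE); /* step 1 */
        if (!regs)
                return -ENODEV;

        buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &phys, GFP_KERNEL);
        if (!buf)
                goto out_iounmap;       /* undo step 1 only */

        *out = buf;
        return 0;

out_iounmap:
        iounmap(regs);
        return -ENOMEM;
}
#endif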
7856
7857 /**
7858  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
7859  * @phba: pointer to lpfc hba data structure.
7860  *
7861  * This routine is invoked to unset the PCI device memory space for device
7862  * with SLI-3 interface spec.
7863  **/
7864 static void
7865 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7866 {
7867         struct pci_dev *pdev;
7868
7869         /* Obtain PCI device reference */
7870         if (!phba->pcidev)
7871                 return;
7872         else
7873                 pdev = phba->pcidev;
7874
7875         /* Free coherent DMA memory allocated */
7876         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7877                           phba->hbqslimp.virt, phba->hbqslimp.phys);
7878         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7879                           phba->slim2p.virt, phba->slim2p.phys);
7880
7881         /* I/O memory unmap */
7882         iounmap(phba->ctrl_regs_memmap_p);
7883         iounmap(phba->slim_memmap_p);
7884
7885         return;
7886 }
7887
7888 /**
7889  * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
7890  * @phba: pointer to lpfc hba data structure.
7891  *
7892  * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
7893  * done and check status.
7894  *
7895  * Return 0 if successful, otherwise -ENODEV.
7896  **/
7897 int
7898 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
7899 {
7900         struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7901         struct lpfc_register reg_data;
7902         int i, port_error = 0;
7903         uint32_t if_type;
7904
7905         memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
7906         memset(&reg_data, 0, sizeof(reg_data));
7907         if (!phba->sli4_hba.PSMPHRregaddr)
7908                 return -ENODEV;
7909
7910         /* Wait up to 30 seconds for the SLI Port POST done and ready */
7911         for (i = 0; i < 3000; i++) {
7912                 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7913                         &portsmphr_reg.word0) ||
7914                         (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
7915                         /* Port has a fatal POST error, break out */
7916                         port_error = -ENODEV;
7917                         break;
7918                 }
7919                 if (LPFC_POST_STAGE_PORT_READY ==
7920                     bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
7921                         break;
7922                 msleep(10);
7923         }
7924
7925         /*
7926          * If there was a port error during POST, then don't proceed with
7927          * other register reads as the data may not be valid.  Just exit.
7928          */
7929         if (port_error) {
7930                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7931                         "1408 Port Failed POST - portsmphr=0x%x, "
7932                         "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7933                         "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7934                         portsmphr_reg.word0,
7935                         bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7936                         bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7937                         bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7938                         bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7939                         bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7940                         bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7941                         bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
7942                         bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
7943         } else {
7944                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7945                                 "2534 Device Info: SLIFamily=0x%x, "
7946                                 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7947                                 "SLIHint_2=0x%x, FT=0x%x\n",
7948                                 bf_get(lpfc_sli_intf_sli_family,
7949                                        &phba->sli4_hba.sli_intf),
7950                                 bf_get(lpfc_sli_intf_slirev,
7951                                        &phba->sli4_hba.sli_intf),
7952                                 bf_get(lpfc_sli_intf_if_type,
7953                                        &phba->sli4_hba.sli_intf),
7954                                 bf_get(lpfc_sli_intf_sli_hint1,
7955                                        &phba->sli4_hba.sli_intf),
7956                                 bf_get(lpfc_sli_intf_sli_hint2,
7957                                        &phba->sli4_hba.sli_intf),
7958                                 bf_get(lpfc_sli_intf_func_type,
7959                                        &phba->sli4_hba.sli_intf));
7960                 /*
7961                  * Check for other Port errors during the initialization
7962                  * process.  Fail the load if the port did not come up
7963                  * correctly.
7964                  */
7965                 if_type = bf_get(lpfc_sli_intf_if_type,
7966                                  &phba->sli4_hba.sli_intf);
7967                 switch (if_type) {
7968                 case LPFC_SLI_INTF_IF_TYPE_0:
7969                         phba->sli4_hba.ue_mask_lo =
7970                               readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7971                         phba->sli4_hba.ue_mask_hi =
7972                               readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
7973                         uerrlo_reg.word0 =
7974                               readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
7975                         uerrhi_reg.word0 =
7976                                 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7977                         if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7978                             (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7979                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7980                                                 "1422 Unrecoverable Error "
7981                                                 "Detected during POST "
7982                                                 "uerr_lo_reg=0x%x, "
7983                                                 "uerr_hi_reg=0x%x, "
7984                                                 "ue_mask_lo_reg=0x%x, "
7985                                                 "ue_mask_hi_reg=0x%x\n",
7986                                                 uerrlo_reg.word0,
7987                                                 uerrhi_reg.word0,
7988                                                 phba->sli4_hba.ue_mask_lo,
7989                                                 phba->sli4_hba.ue_mask_hi);
7990                                 port_error = -ENODEV;
7991                         }
7992                         break;
7993                 case LPFC_SLI_INTF_IF_TYPE_2:
7994                 case LPFC_SLI_INTF_IF_TYPE_6:
7995                         /* Final checks.  The port status should be clean. */
7996                         if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7997                                 &reg_data.word0) ||
7998                                 (bf_get(lpfc_sliport_status_err, &reg_data) &&
7999                                  !bf_get(lpfc_sliport_status_rn, &reg_data))) {
8000                                 phba->work_status[0] =
8001                                         readl(phba->sli4_hba.u.if_type2.
8002                                               ERR1regaddr);
8003                                 phba->work_status[1] =
8004                                         readl(phba->sli4_hba.u.if_type2.
8005                                               ERR2regaddr);
8006                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8007                                         "2888 Unrecoverable port error "
8008                                         "following POST: port status reg "
8009                                         "0x%x, port_smphr reg 0x%x, "
8010                                         "error 1=0x%x, error 2=0x%x\n",
8011                                         reg_data.word0,
8012                                         portsmphr_reg.word0,
8013                                         phba->work_status[0],
8014                                         phba->work_status[1]);
8015                                 port_error = -ENODEV;
8016                         }
8017                         break;
8018                 case LPFC_SLI_INTF_IF_TYPE_1:
8019                 default:
8020                         break;
8021                 }
8022         }
8023         return port_error;
8024 }
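
/*
 * Note: the POST wait above is a bounded register poll -- 3000 iterations
 * of msleep(10) gives the 30-second budget named in the comment.  A
 * hypothetical helper with the same shape:
 */
#if 0
static int example_poll_port_ready(void __iomem *reg, uint32_t ready_val)
{
        uint32_t val;
        int i;

        for (i = 0; i < 3000; i++) {    /* 3000 * 10ms = 30s */
                if (lpfc_readl(reg, &val))
                        return -EIO;    /* readback failed */
                if (val == ready_val)
                        return 0;       /* port is ready */
                msleep(10);
        }
        return -ETIMEDOUT;              /* POST never completed */
}
#endif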
8025
8026 /**
8027  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
8028  * @phba: pointer to lpfc hba data structure.
8029  * @if_type:  The SLI4 interface type getting configured.
8030  *
8031  * This routine is invoked to set up SLI4 BAR0 PCI config space register
8032  * memory map.
8033  **/
8034 static void
8035 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8036 {
8037         switch (if_type) {
8038         case LPFC_SLI_INTF_IF_TYPE_0:
8039                 phba->sli4_hba.u.if_type0.UERRLOregaddr =
8040                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
8041                 phba->sli4_hba.u.if_type0.UERRHIregaddr =
8042                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
8043                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
8044                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
8045                 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
8046                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
8047                 phba->sli4_hba.SLIINTFregaddr =
8048                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8049                 break;
8050         case LPFC_SLI_INTF_IF_TYPE_2:
8051                 phba->sli4_hba.u.if_type2.EQDregaddr =
8052                         phba->sli4_hba.conf_regs_memmap_p +
8053                                                 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8054                 phba->sli4_hba.u.if_type2.ERR1regaddr =
8055                         phba->sli4_hba.conf_regs_memmap_p +
8056                                                 LPFC_CTL_PORT_ER1_OFFSET;
8057                 phba->sli4_hba.u.if_type2.ERR2regaddr =
8058                         phba->sli4_hba.conf_regs_memmap_p +
8059                                                 LPFC_CTL_PORT_ER2_OFFSET;
8060                 phba->sli4_hba.u.if_type2.CTRLregaddr =
8061                         phba->sli4_hba.conf_regs_memmap_p +
8062                                                 LPFC_CTL_PORT_CTL_OFFSET;
8063                 phba->sli4_hba.u.if_type2.STATUSregaddr =
8064                         phba->sli4_hba.conf_regs_memmap_p +
8065                                                 LPFC_CTL_PORT_STA_OFFSET;
8066                 phba->sli4_hba.SLIINTFregaddr =
8067                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8068                 phba->sli4_hba.PSMPHRregaddr =
8069                         phba->sli4_hba.conf_regs_memmap_p +
8070                                                 LPFC_CTL_PORT_SEM_OFFSET;
8071                 phba->sli4_hba.RQDBregaddr =
8072                         phba->sli4_hba.conf_regs_memmap_p +
8073                                                 LPFC_ULP0_RQ_DOORBELL;
8074                 phba->sli4_hba.WQDBregaddr =
8075                         phba->sli4_hba.conf_regs_memmap_p +
8076                                                 LPFC_ULP0_WQ_DOORBELL;
8077                 phba->sli4_hba.CQDBregaddr =
8078                         phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
8079                 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8080                 phba->sli4_hba.MQDBregaddr =
8081                         phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8082                 phba->sli4_hba.BMBXregaddr =
8083                         phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8084                 break;
8085         case LPFC_SLI_INTF_IF_TYPE_6:
8086                 phba->sli4_hba.u.if_type2.EQDregaddr =
8087                         phba->sli4_hba.conf_regs_memmap_p +
8088                                                 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8089                 phba->sli4_hba.u.if_type2.ERR1regaddr =
8090                         phba->sli4_hba.conf_regs_memmap_p +
8091                                                 LPFC_CTL_PORT_ER1_OFFSET;
8092                 phba->sli4_hba.u.if_type2.ERR2regaddr =
8093                         phba->sli4_hba.conf_regs_memmap_p +
8094                                                 LPFC_CTL_PORT_ER2_OFFSET;
8095                 phba->sli4_hba.u.if_type2.CTRLregaddr =
8096                         phba->sli4_hba.conf_regs_memmap_p +
8097                                                 LPFC_CTL_PORT_CTL_OFFSET;
8098                 phba->sli4_hba.u.if_type2.STATUSregaddr =
8099                         phba->sli4_hba.conf_regs_memmap_p +
8100                                                 LPFC_CTL_PORT_STA_OFFSET;
8101                 phba->sli4_hba.PSMPHRregaddr =
8102                         phba->sli4_hba.conf_regs_memmap_p +
8103                                                 LPFC_CTL_PORT_SEM_OFFSET;
8104                 phba->sli4_hba.BMBXregaddr =
8105                         phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8106                 break;
8107         case LPFC_SLI_INTF_IF_TYPE_1:
8108         default:
8109                 dev_printk(KERN_ERR, &phba->pcidev->dev,
8110                            "FATAL - unsupported SLI4 interface type - %d\n",
8111                            if_type);
8112                 break;
8113         }
8114 }
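
/*
 * Note: unlike the if_type 2 case, the if_type 6 case above sets no
 * doorbell registers from BAR0.  On those ports the RQ/WQ/CQ/EQ/MQ
 * doorbells live in the BAR1 doorbell region and are mapped in
 * lpfc_sli4_bar1_register_memmap() below.
 */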
8115
8116 /**
8117  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
8118  * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
8119  *
8120  * This routine is invoked to set up SLI4 BAR1 register memory map.
8121  **/
8122 static void
8123 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8124 {
8125         switch (if_type) {
8126         case LPFC_SLI_INTF_IF_TYPE_0:
8127                 phba->sli4_hba.PSMPHRregaddr =
8128                         phba->sli4_hba.ctrl_regs_memmap_p +
8129                         LPFC_SLIPORT_IF0_SMPHR;
8130                 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8131                         LPFC_HST_ISR0;
8132                 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8133                         LPFC_HST_IMR0;
8134                 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8135                         LPFC_HST_ISCR0;
8136                 break;
8137         case LPFC_SLI_INTF_IF_TYPE_6:
8138                 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8139                         LPFC_IF6_RQ_DOORBELL;
8140                 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8141                         LPFC_IF6_WQ_DOORBELL;
8142                 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8143                         LPFC_IF6_CQ_DOORBELL;
8144                 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8145                         LPFC_IF6_EQ_DOORBELL;
8146                 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8147                         LPFC_IF6_MQ_DOORBELL;
8148                 break;
8149         case LPFC_SLI_INTF_IF_TYPE_2:
8150         case LPFC_SLI_INTF_IF_TYPE_1:
8151         default:
8152                 dev_err(&phba->pcidev->dev,
8153                            "FATAL - unsupported SLI4 interface type - %d\n",
8154                            if_type);
8155                 break;
8156         }
8157 }
8158
8159 /**
8160  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
8161  * @phba: pointer to lpfc hba data structure.
8162  * @vf: virtual function number
8163  *
8164  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
8165  * based on the given virtual function number, @vf.
8166  *
8167  * Return 0 if successful, otherwise -ENODEV.
8168  **/
8169 static int
8170 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8171 {
8172         if (vf > LPFC_VIR_FUNC_MAX)
8173                 return -ENODEV;
8174
8175         phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8176                                 vf * LPFC_VFR_PAGE_SIZE +
8177                                         LPFC_ULP0_RQ_DOORBELL);
8178         phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8179                                 vf * LPFC_VFR_PAGE_SIZE +
8180                                         LPFC_ULP0_WQ_DOORBELL);
8181         phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8182                                 vf * LPFC_VFR_PAGE_SIZE +
8183                                         LPFC_EQCQ_DOORBELL);
8184         phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8185         phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8186                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8187         phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8188                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
8189         return 0;
8190 }
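
/*
 * Note: BAR2 is carved into one doorbell page per virtual function, so
 * every address computed above follows the same pattern.  A hypothetical
 * helper capturing that layout:
 */
#if 0
static void __iomem *example_vf_doorbell(void __iomem *bar2, uint32_t vf,
                                         uint32_t reg_offset)
{
        /* Each VF owns an LPFC_VFR_PAGE_SIZE-sized window of doorbells */
        return bar2 + vf * LPFC_VFR_PAGE_SIZE + reg_offset;
}
#endif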
8191
8192 /**
8193  * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
8194  * @phba: pointer to lpfc hba data structure.
8195  *
8196  * This routine is invoked to create the bootstrap mailbox
8197  * region consistent with the SLI-4 interface spec.  This
8198  * routine allocates all memory necessary to communicate
8199  * mailbox commands to the port and sets up all alignment
8200  * needs.  No locks are expected to be held when calling
8201  * this routine.
8202  *
8203  * Return codes
8204  *      0 - successful
8205  *      -ENOMEM - could not allocate memory.
8206  **/
8207 static int
8208 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
8209 {
8210         uint32_t bmbx_size;
8211         struct lpfc_dmabuf *dmabuf;
8212         struct dma_address *dma_address;
8213         uint32_t pa_addr;
8214         uint64_t phys_addr;
8215
8216         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8217         if (!dmabuf)
8218                 return -ENOMEM;
8219
8220         /*
8221          * The bootstrap mailbox region consists of two parts,
8222          * plus extra bytes to satisfy its 16-byte alignment restriction.
8223          */
8224         bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
8225         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8226                                           &dmabuf->phys, GFP_KERNEL);
8227         if (!dmabuf->virt) {
8228                 kfree(dmabuf);
8229                 return -ENOMEM;
8230         }
8231
8232         /*
8233          * Initialize the bootstrap mailbox pointers now so that the register
8234          * operations are simple later.  The mailbox dma address is required
8235          * to be 16-byte aligned.  Also align the virtual memory as each
8236          * mailbox is copied into the bmbx mailbox region before issuing the
8237          * command to the port.
8238          */
8239         phba->sli4_hba.bmbx.dmabuf = dmabuf;
8240         phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
8241
8242         phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
8243                                               LPFC_ALIGN_16_BYTE);
8244         phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
8245                                               LPFC_ALIGN_16_BYTE);
8246
8247         /*
8248          * Set the high and low physical addresses now.  The SLI4 alignment
8249          * requirement is 16 bytes and the mailbox is posted to the port
8250          * as two 30-bit addresses.  The other data is a bit marking whether
8251          * the 30-bit address is the high or low address.
8252          * Upcast bmbx aphys to 64bits so shift instruction compiles
8253          * clean on 32 bit machines.
8254          */
8255         dma_address = &phba->sli4_hba.bmbx.dma_address;
8256         phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
8257         pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
8258         dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
8259                                            LPFC_BMBX_BIT1_ADDR_HI);
8260
8261         pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
8262         dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
8263                                            LPFC_BMBX_BIT1_ADDR_LO);
8264         return 0;
8265 }
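
/*
 * Two details above are easy to miss.  First, the allocation reserves
 * LPFC_ALIGN_16_BYTE - 1 extra bytes so the PTR_ALIGN()/ALIGN() results
 * are guaranteed to land inside the buffer.  Second, the aligned physical
 * address is posted as two 30-bit halves: bits 63:34 go to addr_hi and
 * bits 33:4 to addr_lo, each shifted left by 2 with bit 1 flagging which
 * half it is.  A hypothetical restatement of that split:
 */
#if 0
static void example_bmbx_split(uint64_t aligned_phys,
                               uint32_t *hi, uint32_t *lo)
{
        *hi = (uint32_t)(((aligned_phys >> 34) & 0x3fffffff) << 2) |
              LPFC_BMBX_BIT1_ADDR_HI;
        *lo = (uint32_t)(((aligned_phys >> 4) & 0x3fffffff) << 2) |
              LPFC_BMBX_BIT1_ADDR_LO;
}
#endif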
8266
8267 /**
8268  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
8269  * @phba: pointer to lpfc hba data structure.
8270  *
8271  * This routine is invoked to teardown the bootstrap mailbox
8272  * region and release all host resources. This routine requires
8273  * the caller to ensure all mailbox commands have been recovered, that no
8274  * additional mailbox commands are sent, and that interrupts are disabled
8275  * before calling this routine.
8276  *
8277  **/
8278 static void
8279 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8280 {
8281         dma_free_coherent(&phba->pcidev->dev,
8282                           phba->sli4_hba.bmbx.bmbx_size,
8283                           phba->sli4_hba.bmbx.dmabuf->virt,
8284                           phba->sli4_hba.bmbx.dmabuf->phys);
8285
8286         kfree(phba->sli4_hba.bmbx.dmabuf);
8287         memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8288 }
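
/*
 * Note: the teardown above frees dmabuf->virt/dmabuf->phys -- the original,
 * possibly unaligned addresses returned by dma_alloc_coherent() -- rather
 * than the aligned avirt/aphys copies, which only point into that same
 * allocation.
 */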
8289
8290 static const char * const lpfc_topo_to_str[] = {
8291         "Loop then P2P",
8292         "Loopback",
8293         "P2P Only",
8294         "Unsupported",
8295         "Loop Only",
8296         "Unsupported",
8297         "P2P then Loop",
8298 };
8299
8300 /**
8301  * lpfc_map_topology - Map the topology read from READ_CONFIG
8302  * @phba: pointer to lpfc hba data structure.
8303  * @rd_config: pointer to read config data
8304  *
8305  * This routine is invoked to map the topology values as read
8306  * from the read config mailbox command. If the persistent
8307  * topology feature is supported, the firmware will provide the
8308  * saved topology information to be used in INIT_LINK.
8309  *
8310  **/
8311 #define LINK_FLAGS_DEF  0x0
8312 #define LINK_FLAGS_P2P  0x1
8313 #define LINK_FLAGS_LOOP 0x2
8314 static void
8315 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
8316 {
8317         u8 ptv, tf, pt;
8318
8319         ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
8320         tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
8321         pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
8322
8323         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8324                         "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
8325                          ptv, tf, pt);
8326         if (!ptv) {
8327                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8328                                 "2019 FW does not support persistent topology "
8329                                 "Using driver parameter defined value [%s]",
8330                                 lpfc_topo_to_str[phba->cfg_topology]);
8331                 return;
8332         }
8333         /* FW supports persistent topology - override module parameter value */
8334         phba->hba_flag |= HBA_PERSISTENT_TOPO;
8335         switch (phba->pcidev->device) {
8336         case PCI_DEVICE_ID_LANCER_G7_FC:
8337         case PCI_DEVICE_ID_LANCER_G6_FC:
8338                 if (!tf) {
8339                         phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
8340                                         ? FLAGS_TOPOLOGY_MODE_LOOP
8341                                         : FLAGS_TOPOLOGY_MODE_PT_PT);
8342                 } else {
8343                         phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
8344                 }
8345                 break;
8346         default:        /* G5 */
8347                 if (tf) {
8348                         /* If topology failover set - pt is '0' or '1' */
8349                         phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
8350                                               FLAGS_TOPOLOGY_MODE_LOOP_PT);
8351                 } else {
8352                         phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
8353                                         ? FLAGS_TOPOLOGY_MODE_PT_PT
8354                                         : FLAGS_TOPOLOGY_MODE_LOOP);
8355                 }
8356                 break;
8357         }
8358         if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
8359                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8360                                 "2020 Using persistent topology value [%s]",
8361                                 lpfc_topo_to_str[phba->cfg_topology]);
8362         } else {
8363                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8364                                 "2021 Invalid topology values from FW "
8365                                 "Using driver parameter defined value [%s]",
8366                                 lpfc_topo_to_str[phba->cfg_topology]);
8367         }
8368 }
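
/*
 * The decode above reduces to a small table.  ptv is "persistent topology
 * valid", tf is "topology failover", pt is the persistent topology value;
 * G7/G6 parts do not support loop failover, so tf set there invalidates
 * the persistent value:
 *
 *      ptv  tf  pt     G7/G6 result            G5 (default) result
 *       0   -   -      driver parameter        driver parameter
 *       1   0   P2P    P2P Only                P2P Only
 *       1   0   LOOP   Loop Only               Loop Only
 *       1   1   0      driver parameter        Loop then P2P
 *       1   1   1      driver parameter        P2P then Loop
 */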
8369
8370 /**
8371  * lpfc_sli4_read_config - Get the config parameters.
8372  * @phba: pointer to lpfc hba data structure.
8373  *
8374  * This routine is invoked to read the configuration parameters from the HBA.
8375  * The configuration parameters are used to set the base and maximum values
8376  * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
8377  * allocation for the port.
8378  *
8379  * Return codes
8380  *      0 - successful
8381  *      -ENOMEM - No available memory
8382  *      -EIO - The mailbox failed to complete successfully.
8383  **/
8384 int
8385 lpfc_sli4_read_config(struct lpfc_hba *phba)
8386 {
8387         LPFC_MBOXQ_t *pmb;
8388         struct lpfc_mbx_read_config *rd_config;
8389         union  lpfc_sli4_cfg_shdr *shdr;
8390         uint32_t shdr_status, shdr_add_status;
8391         struct lpfc_mbx_get_func_cfg *get_func_cfg;
8392         struct lpfc_rsrc_desc_fcfcoe *desc;
8393         char *pdesc_0;
8394         uint16_t forced_link_speed;
8395         uint32_t if_type, qmin;
8396         int length, i, rc = 0, rc2;
8397
8398         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8399         if (!pmb) {
8400                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8401                                 "2011 Unable to allocate memory for issuing "
8402                                 "SLI_CONFIG_SPECIAL mailbox command\n");
8403                 return -ENOMEM;
8404         }
8405
8406         lpfc_read_config(phba, pmb);
8407
8408         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8409         if (rc != MBX_SUCCESS) {
8410                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8411                         "2012 Mailbox failed , mbxCmd x%x "
8412                         "READ_CONFIG, mbxStatus x%x\n",
8413                         bf_get(lpfc_mqe_command, &pmb->u.mqe),
8414                         bf_get(lpfc_mqe_status, &pmb->u.mqe));
8415                 rc = -EIO;
8416         } else {
8417                 rd_config = &pmb->u.mqe.un.rd_config;
8418                 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
8419                         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
8420                         phba->sli4_hba.lnk_info.lnk_tp =
8421                                 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
8422                         phba->sli4_hba.lnk_info.lnk_no =
8423                                 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
8424                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8425                                         "3081 lnk_type:%d, lnk_numb:%d\n",
8426                                         phba->sli4_hba.lnk_info.lnk_tp,
8427                                         phba->sli4_hba.lnk_info.lnk_no);
8428                 } else
8429                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8430                                         "3082 Mailbox (x%x) returned ldv:x0\n",
8431                                         bf_get(lpfc_mqe_command, &pmb->u.mqe));
8432                 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
8433                         phba->bbcredit_support = 1;
8434                         phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
8435                 }
8436
8437                 phba->sli4_hba.conf_trunk =
8438                         bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
8439                 phba->sli4_hba.extents_in_use =
8440                         bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
8441                 phba->sli4_hba.max_cfg_param.max_xri =
8442                         bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
8443                 /* Reduce resource usage in kdump environment */
8444                 if (is_kdump_kernel() &&
8445                     phba->sli4_hba.max_cfg_param.max_xri > 512)
8446                         phba->sli4_hba.max_cfg_param.max_xri = 512;
8447                 phba->sli4_hba.max_cfg_param.xri_base =
8448                         bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
8449                 phba->sli4_hba.max_cfg_param.max_vpi =
8450                         bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
8451                 /* Limit the max we support */
8452                 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
8453                         phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
8454                 phba->sli4_hba.max_cfg_param.vpi_base =
8455                         bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
8456                 phba->sli4_hba.max_cfg_param.max_rpi =
8457                         bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
8458                 phba->sli4_hba.max_cfg_param.rpi_base =
8459                         bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
8460                 phba->sli4_hba.max_cfg_param.max_vfi =
8461                         bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
8462                 phba->sli4_hba.max_cfg_param.vfi_base =
8463                         bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
8464                 phba->sli4_hba.max_cfg_param.max_fcfi =
8465                         bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
8466                 phba->sli4_hba.max_cfg_param.max_eq =
8467                         bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
8468                 phba->sli4_hba.max_cfg_param.max_rq =
8469                         bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
8470                 phba->sli4_hba.max_cfg_param.max_wq =
8471                         bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
8472                 phba->sli4_hba.max_cfg_param.max_cq =
8473                         bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
8474                 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
8475                 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
8476                 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
8477                 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
8478                 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
8479                                 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
8480                 phba->max_vports = phba->max_vpi;
8481                 lpfc_map_topology(phba, rd_config);
8482                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8483                                 "2003 cfg params Extents? %d "
8484                                 "XRI(B:%d M:%d), "
8485                                 "VPI(B:%d M:%d) "
8486                                 "VFI(B:%d M:%d) "
8487                                 "RPI(B:%d M:%d) "
8488                                 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
8489                                 phba->sli4_hba.extents_in_use,
8490                                 phba->sli4_hba.max_cfg_param.xri_base,
8491                                 phba->sli4_hba.max_cfg_param.max_xri,
8492                                 phba->sli4_hba.max_cfg_param.vpi_base,
8493                                 phba->sli4_hba.max_cfg_param.max_vpi,
8494                                 phba->sli4_hba.max_cfg_param.vfi_base,
8495                                 phba->sli4_hba.max_cfg_param.max_vfi,
8496                                 phba->sli4_hba.max_cfg_param.rpi_base,
8497                                 phba->sli4_hba.max_cfg_param.max_rpi,
8498                                 phba->sli4_hba.max_cfg_param.max_fcfi,
8499                                 phba->sli4_hba.max_cfg_param.max_eq,
8500                                 phba->sli4_hba.max_cfg_param.max_cq,
8501                                 phba->sli4_hba.max_cfg_param.max_wq,
8502                                 phba->sli4_hba.max_cfg_param.max_rq);
8503
8504                 /*
8505                  * Calculate queue resources based on how
8506                  * many WQ/CQ/EQs are available.
8507                  */
8508                 qmin = phba->sli4_hba.max_cfg_param.max_wq;
8509                 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
8510                         qmin = phba->sli4_hba.max_cfg_param.max_cq;
8511                 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
8512                         qmin = phba->sli4_hba.max_cfg_param.max_eq;
8513                 /*
8514                  * What's left after this can go toward NVME / FCP.
8515                  * The minus 4 accounts for ELS, NVME LS, MBOX
8516                  * plus one extra. When configured for
8517                  * NVMET, FCP io channel WQs are not created.
8518                  */
8519                 qmin -= 4;
8520
8521                 /* Check to see if there is enough for NVME */
8522                 if ((phba->cfg_irq_chann > qmin) ||
8523                     (phba->cfg_hdw_queue > qmin)) {
8524                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8525                                         "2005 Reducing Queues: "
8526                                         "WQ %d CQ %d EQ %d: min %d: "
8527                                         "IRQ %d HDWQ %d\n",
8528                                         phba->sli4_hba.max_cfg_param.max_wq,
8529                                         phba->sli4_hba.max_cfg_param.max_cq,
8530                                         phba->sli4_hba.max_cfg_param.max_eq,
8531                                         qmin, phba->cfg_irq_chann,
8532                                         phba->cfg_hdw_queue);
8533
8534                         if (phba->cfg_irq_chann > qmin)
8535                                 phba->cfg_irq_chann = qmin;
8536                         if (phba->cfg_hdw_queue > qmin)
8537                                 phba->cfg_hdw_queue = qmin;
8538                 }
8539         }
8540
8541         if (rc)
8542                 goto read_cfg_out;
8543
8544         /* Update link speed if forced link speed is supported */
8545         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8546         if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
8547                 forced_link_speed =
8548                         bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
8549                 if (forced_link_speed) {
8550                         phba->hba_flag |= HBA_FORCED_LINK_SPEED;
8551
8552                         switch (forced_link_speed) {
8553                         case LINK_SPEED_1G:
8554                                 phba->cfg_link_speed =
8555                                         LPFC_USER_LINK_SPEED_1G;
8556                                 break;
8557                         case LINK_SPEED_2G:
8558                                 phba->cfg_link_speed =
8559                                         LPFC_USER_LINK_SPEED_2G;
8560                                 break;
8561                         case LINK_SPEED_4G:
8562                                 phba->cfg_link_speed =
8563                                         LPFC_USER_LINK_SPEED_4G;
8564                                 break;
8565                         case LINK_SPEED_8G:
8566                                 phba->cfg_link_speed =
8567                                         LPFC_USER_LINK_SPEED_8G;
8568                                 break;
8569                         case LINK_SPEED_10G:
8570                                 phba->cfg_link_speed =
8571                                         LPFC_USER_LINK_SPEED_10G;
8572                                 break;
8573                         case LINK_SPEED_16G:
8574                                 phba->cfg_link_speed =
8575                                         LPFC_USER_LINK_SPEED_16G;
8576                                 break;
8577                         case LINK_SPEED_32G:
8578                                 phba->cfg_link_speed =
8579                                         LPFC_USER_LINK_SPEED_32G;
8580                                 break;
8581                         case LINK_SPEED_64G:
8582                                 phba->cfg_link_speed =
8583                                         LPFC_USER_LINK_SPEED_64G;
8584                                 break;
8585                         case 0xffff:
8586                                 phba->cfg_link_speed =
8587                                         LPFC_USER_LINK_SPEED_AUTO;
8588                                 break;
8589                         default:
8590                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8591                                                 "0047 Unrecognized link "
8592                                                 "speed : %d\n",
8593                                                 forced_link_speed);
8594                                 phba->cfg_link_speed =
8595                                         LPFC_USER_LINK_SPEED_AUTO;
8596                         }
8597                 }
8598         }
8599
8600         /* Reset the DFT_HBA_Q_DEPTH to the max xri */
8601         length = phba->sli4_hba.max_cfg_param.max_xri -
8602                         lpfc_sli4_get_els_iocb_cnt(phba);
8603         if (phba->cfg_hba_queue_depth > length) {
8604                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8605                                 "3361 HBA queue depth changed from %d to %d\n",
8606                                 phba->cfg_hba_queue_depth, length);
8607                 phba->cfg_hba_queue_depth = length;
8608         }
8609
8610         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
8611             LPFC_SLI_INTF_IF_TYPE_2)
8612                 goto read_cfg_out;
8613
8614         /* get the pf# and vf# for SLI4 if_type 2 port */
8615         length = (sizeof(struct lpfc_mbx_get_func_cfg) -
8616                   sizeof(struct lpfc_sli4_cfg_mhdr));
8617         lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
8618                          LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
8619                          length, LPFC_SLI4_MBX_EMBED);
8620
8621         rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8622         shdr = (union lpfc_sli4_cfg_shdr *)
8623                                 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
8624         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8625         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8626         if (rc2 || shdr_status || shdr_add_status) {
8627                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8628                                 "3026 Mailbox failed , mbxCmd x%x "
8629                                 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
8630                                 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8631                                 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8632                 goto read_cfg_out;
8633         }
8634
8635         /* Search for the fc_fcoe resource descriptor */
8636         get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
8637
8638         pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
8639         desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
8640         length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
8641         if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
8642                 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
8643         else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
8644                 goto read_cfg_out;
8645
8646         for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
8647                 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
8648                 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
8649                     bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
8650                         phba->sli4_hba.iov.pf_number =
8651                                 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
8652                         phba->sli4_hba.iov.vf_number =
8653                                 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
8654                         break;
8655                 }
8656         }
8657
8658         if (i < LPFC_RSRC_DESC_MAX_NUM)
8659                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8660                                 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
8661                                 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
8662                                 phba->sli4_hba.iov.vf_number);
8663         else
8664                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8665                                 "3028 GET_FUNCTION_CONFIG: failed to find "
8666                                 "Resource Descriptor:x%x\n",
8667                                 LPFC_RSRC_DESC_TYPE_FCFCOE);
8668
8669 read_cfg_out:
8670         mempool_free(pmb, phba->mbox_mem_pool);
8671         return rc;
8672 }
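
/*
 * The queue clamping in the routine above boils down to
 *
 *      qmin = min3(max_wq, max_cq, max_eq) - 4;
 *
 * with the 4 reserved for the ELS, NVME LS and MBOX queues plus one spare,
 * after which cfg_irq_chann and cfg_hdw_queue are capped at qmin.  For
 * example (hypothetical numbers), max_wq=128, max_cq=160 and max_eq=64
 * give qmin=60, so both values are limited to 60.
 */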
8673
8674 /**
8675  * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
8676  * @phba: pointer to lpfc hba data structure.
8677  *
8678  * This routine is invoked to setup the port-side endian order when
8679  * the port if_type is 0.  This routine has no function for other
8680  * if_types.
8681  *
8682  * Return codes
8683  *      0 - successful
8684  *      -ENOMEM - No available memory
8685  *      -EIO - The mailbox failed to complete successfully.
8686  **/
8687 static int
8688 lpfc_setup_endian_order(struct lpfc_hba *phba)
8689 {
8690         LPFC_MBOXQ_t *mboxq;
8691         uint32_t if_type, rc = 0;
8692         uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8693                                       HOST_ENDIAN_HIGH_WORD1};
8694
8695         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8696         switch (if_type) {
8697         case LPFC_SLI_INTF_IF_TYPE_0:
8698                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8699                                                        GFP_KERNEL);
8700                 if (!mboxq) {
8701                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8702                                         "0492 Unable to allocate memory for "
8703                                         "issuing SLI_CONFIG_SPECIAL mailbox "
8704                                         "command\n");
8705                         return -ENOMEM;
8706                 }
8707
8708                 /*
8709                  * The SLI4_CONFIG_SPECIAL mailbox command requires the first
8710                  * two words to contain special data values and no other data.
8711                  */
8712                 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8713                 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8714                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8715                 if (rc != MBX_SUCCESS) {
8716                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8717                                         "0493 SLI_CONFIG_SPECIAL mailbox "
8718                                         "failed with status x%x\n",
8719                                         rc);
8720                         rc = -EIO;
8721                 }
8722                 mempool_free(mboxq, phba->mbox_mem_pool);
8723                 break;
8724         case LPFC_SLI_INTF_IF_TYPE_6:
8725         case LPFC_SLI_INTF_IF_TYPE_2:
8726         case LPFC_SLI_INTF_IF_TYPE_1:
8727         default:
8728                 break;
8729         }
8730         return rc;
8731 }
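
/*
 * Note: HOST_ENDIAN_LOW_WORD0/HOST_ENDIAN_HIGH_WORD1 are fixed byte
 * patterns; an if_type 0 port infers the host's byte order from how they
 * arrive, which is why this mailbox must carry exactly those two words
 * and no other data.
 */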
8732
8733 /**
8734  * lpfc_sli4_queue_verify - Verify and update EQ counts
8735  * @phba: pointer to lpfc hba data structure.
8736  *
8737  * This routine is invoked to check the user settable queue counts for EQs.
8738  * After this routine is called the counts will be set to valid values that
8739  * adhere to the constraints of the system's interrupt vectors and the port's
8740  * queue resources.
8741  *
8742  * Return codes
8743  *      0 - successful
8744  *      -ENOMEM - No available memory
8745  **/
8746 static int
8747 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8748 {
8749         /*
8750          * Sanity check for configured queue parameters against the run-time
8751          * device parameters
8752          */
8753
8754         if (phba->nvmet_support) {
8755                 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
8756                         phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
8757                 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8758                         phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8759         }
8760
8761         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8762                         "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8763                         phba->cfg_hdw_queue, phba->cfg_irq_chann,
8764                         phba->cfg_nvmet_mrq);
8765
8766         /* Get EQ depth from module parameter, fake the default for now */
8767         phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8768         phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8769
8770         /* Get CQ depth from module parameter, fake the default for now */
8771         phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8772         phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8773         return 0;
8774 }
8775
8776 static int
8777 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
8778 {
8779         struct lpfc_queue *qdesc;
8780         u32 wqesize;
8781         int cpu;
8782
8783         cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
8784         /* Create Fast Path IO CQs */
8785         if (phba->enab_exp_wqcq_pages)
8786                 /* Increase the CQ size when WQEs contain an embedded cdb */
8787                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8788                                               phba->sli4_hba.cq_esize,
8789                                               LPFC_CQE_EXP_COUNT, cpu);
8791         else
8792                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8793                                               phba->sli4_hba.cq_esize,
8794                                               phba->sli4_hba.cq_ecount, cpu);
8795         if (!qdesc) {
8796                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8797                         "0499 Failed allocate fast-path IO CQ (%d)\n", idx);
8798                 return 1;
8799         }
8800         qdesc->qe_valid = 1;
8801         qdesc->hdwq = idx;
8802         qdesc->chann = cpu;
8803         phba->sli4_hba.hdwq[idx].io_cq = qdesc;
8804
8805         /* Create Fast Path IO WQs */
8806         if (phba->enab_exp_wqcq_pages) {
8807                 /* Increase the WQ size when WQEs contain an embedded cdb */
8808                 wqesize = (phba->fcp_embed_io) ?
8809                         LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
8810                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8811                                               wqesize,
8812                                               LPFC_WQE_EXP_COUNT, cpu);
8813         } else
8814                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8815                                               phba->sli4_hba.wq_esize,
8816                                               phba->sli4_hba.wq_ecount, cpu);
8817
8818         if (!qdesc) {
8819                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8820                                 "0503 Failed allocate fast-path IO WQ (%d)\n",
8821                                 idx);
8822                 return 1;
8823         }
8824         qdesc->hdwq = idx;
8825         qdesc->chann = cpu;
8826         phba->sli4_hba.hdwq[idx].io_wq = qdesc;
8827         list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8828         return 0;
8829 }
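/*
 * Usage sketch: lpfc_alloc_io_wq_cq() is driven once per hardware queue
 * index, as in the loop inside lpfc_sli4_queue_create() below; a nonzero
 * return sends the caller to its error path, which tears down any partial
 * allocations:
 *
 *	for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
 *		if (lpfc_alloc_io_wq_cq(phba, idx))
 *			goto out_error;
 */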
8830
8831 /**
8832  * lpfc_sli4_queue_create - Create all the SLI4 queues
8833  * @phba: pointer to lpfc hba data structure.
8834  *
8835  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
8836  * operation. For each SLI4 queue type, the parameters such as queue entry
8837  * count (queue depth) shall be taken from the module parameter. For now,
8838  * we just use some constant number as place holder.
8839  *
8840  * Return codes
8841  *      0 - successful
8842  *      -ENOMEM - No available memory
8843  *      -EIO - The mailbox failed to complete successfully.
8844  **/
8845 int
8846 lpfc_sli4_queue_create(struct lpfc_hba *phba)
8847 {
8848         struct lpfc_queue *qdesc;
8849         int idx, cpu, eqcpu;
8850         struct lpfc_sli4_hdw_queue *qp;
8851         struct lpfc_vector_map_info *cpup;
8852         struct lpfc_vector_map_info *eqcpup;
8853         struct lpfc_eq_intr_info *eqi;
8854
8855         /*
8856          * Create HBA Record arrays.
8857          * Both NVME and FCP will share the same vectors / EQs
8858          */
8859         phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
8860         phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
8861         phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
8862         phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
8863         phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
8864         phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
8865         phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8866         phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8867         phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8868         phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8869
8870         if (!phba->sli4_hba.hdwq) {
8871                 phba->sli4_hba.hdwq = kcalloc(
8872                         phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
8873                         GFP_KERNEL);
8874                 if (!phba->sli4_hba.hdwq) {
8875                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8876                                         "6427 Failed allocate memory for "
8877                                         "fast-path Hardware Queue array\n");
8878                         goto out_error;
8879                 }
8880                 /* Prepare hardware queues to take IO buffers */
8881                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8882                         qp = &phba->sli4_hba.hdwq[idx];
8883                         spin_lock_init(&qp->io_buf_list_get_lock);
8884                         spin_lock_init(&qp->io_buf_list_put_lock);
8885                         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
8886                         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
8887                         qp->get_io_bufs = 0;
8888                         qp->put_io_bufs = 0;
8889                         qp->total_io_bufs = 0;
8890                         spin_lock_init(&qp->abts_io_buf_list_lock);
8891                         INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
8892                         qp->abts_scsi_io_bufs = 0;
8893                         qp->abts_nvme_io_bufs = 0;
8894                         INIT_LIST_HEAD(&qp->sgl_list);
8895                         INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
8896                         spin_lock_init(&qp->hdwq_lock);
8897                 }
8898         }
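        /*
         * The get/put list pair initialized above forms a two-list buffer
         * cache: the fast path allocates from the "get" list and frees to
         * the "put" list, and the lists are swapped under both locks when
         * "get" runs dry, keeping lock contention on the hot allocation
         * path low.
         */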
8899
8900         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8901                 if (phba->nvmet_support) {
8902                         phba->sli4_hba.nvmet_cqset = kcalloc(
8903                                         phba->cfg_nvmet_mrq,
8904                                         sizeof(struct lpfc_queue *),
8905                                         GFP_KERNEL);
8906                         if (!phba->sli4_hba.nvmet_cqset) {
8907                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8908                                         "3121 Fail allocate memory for "
8909                                         "fast-path CQ set array\n");
8910                                 goto out_error;
8911                         }
8912                         phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
8913                                         phba->cfg_nvmet_mrq,
8914                                         sizeof(struct lpfc_queue *),
8915                                         GFP_KERNEL);
8916                         if (!phba->sli4_hba.nvmet_mrq_hdr) {
8917                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8918                                         "3122 Fail allocate memory for "
8919                                         "fast-path RQ set hdr array\n");
8920                                 goto out_error;
8921                         }
8922                         phba->sli4_hba.nvmet_mrq_data = kcalloc(
8923                                         phba->cfg_nvmet_mrq,
8924                                         sizeof(struct lpfc_queue *),
8925                                         GFP_KERNEL);
8926                         if (!phba->sli4_hba.nvmet_mrq_data) {
8927                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8928                                         "3124 Fail allocate memory for "
8929                                         "fast-path RQ set data array\n");
8930                                 goto out_error;
8931                         }
8932                 }
8933         }
8934
8935         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
8936
8937         /* Create HBA Event Queues (EQs) */
8938         for_each_present_cpu(cpu) {
8939                 /* We only want to create 1 EQ per vector, even though
8940                  * multiple CPUs might be using that vector, so only
8941                  * select the CPUs that are marked LPFC_CPU_FIRST_IRQ.
8942                  */
8943                 cpup = &phba->sli4_hba.cpu_map[cpu];
8944                 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
8945                         continue;
8946
8947                 /* Get a ptr to the Hardware Queue associated with this CPU */
8948                 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
8949
8950                 /* Allocate an EQ */
8951                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8952                                               phba->sli4_hba.eq_esize,
8953                                               phba->sli4_hba.eq_ecount, cpu);
8954                 if (!qdesc) {
8955                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8956                                         "0497 Failed allocate EQ (%d)\n",
8957                                         cpup->hdwq);
8958                         goto out_error;
8959                 }
8960                 qdesc->qe_valid = 1;
8961                 qdesc->hdwq = cpup->hdwq;
8962                 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
8963                 qdesc->last_cpu = qdesc->chann;
8964
8965                 /* Save the allocated EQ in the Hardware Queue */
8966                 qp->hba_eq = qdesc;
8967
8968                 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
8969                 list_add(&qdesc->cpu_list, &eqi->list);
8970         }
8971
8972         /* Now we need to populate the other Hardware Queues, that share
8973          * an IRQ vector, with the associated EQ ptr.
8974          */
8975         for_each_present_cpu(cpu) {
8976                 cpup = &phba->sli4_hba.cpu_map[cpu];
8977
8978                 /* Check for EQ already allocated in previous loop */
8979                 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
8980                         continue;
8981
8982                 /* Check for multiple CPUs per hdwq */
8983                 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
8984                 if (qp->hba_eq)
8985                         continue;
8986
8987                 /* We need to share an EQ for this hdwq */
8988                 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
8989                 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
8990                 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
8991         }
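        /*
         * Worked example (hypothetical topology): on a 4-CPU system with two
         * IRQ vectors, CPUs 0 and 1 carry LPFC_CPU_FIRST_IRQ and get their
         * own EQs in the first loop; CPUs 2 and 3 share those vectors, so
         * the loop above simply points their hardware queues at the EQ
         * already allocated for each vector's first CPU.
         */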
8992
8993         /* Allocate IO Path SLI4 CQ/WQs */
8994         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8995                 if (lpfc_alloc_io_wq_cq(phba, idx))
8996                         goto out_error;
8997         }
8998
8999         if (phba->nvmet_support) {
9000                 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9001                         cpu = lpfc_find_cpu_handle(phba, idx,
9002                                                    LPFC_FIND_BY_HDWQ);
9003                         qdesc = lpfc_sli4_queue_alloc(phba,
9004                                                       LPFC_DEFAULT_PAGE_SIZE,
9005                                                       phba->sli4_hba.cq_esize,
9006                                                       phba->sli4_hba.cq_ecount,
9007                                                       cpu);
9008                         if (!qdesc) {
9009                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9010                                                 "3142 Failed allocate NVME "
9011                                                 "CQ Set (%d)\n", idx);
9012                                 goto out_error;
9013                         }
9014                         qdesc->qe_valid = 1;
9015                         qdesc->hdwq = idx;
9016                         qdesc->chann = cpu;
9017                         phba->sli4_hba.nvmet_cqset[idx] = qdesc;
9018                 }
9019         }
9020
9021         /*
9022          * Create Slow Path Completion Queues (CQs)
9023          */
9024
9025         cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
9026         /* Create slow-path Mailbox Command Complete Queue */
9027         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9028                                       phba->sli4_hba.cq_esize,
9029                                       phba->sli4_hba.cq_ecount, cpu);
9030         if (!qdesc) {
9031                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9032                                 "0500 Failed allocate slow-path mailbox CQ\n");
9033                 goto out_error;
9034         }
9035         qdesc->qe_valid = 1;
9036         phba->sli4_hba.mbx_cq = qdesc;
9037
9038         /* Create slow-path ELS Complete Queue */
9039         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9040                                       phba->sli4_hba.cq_esize,
9041                                       phba->sli4_hba.cq_ecount, cpu);
9042         if (!qdesc) {
9043                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9044                                 "0501 Failed allocate slow-path ELS CQ\n");
9045                 goto out_error;
9046         }
9047         qdesc->qe_valid = 1;
9048         qdesc->chann = cpu;
9049         phba->sli4_hba.els_cq = qdesc;
9050
9052         /*
9053          * Create Slow Path Work Queues (WQs)
9054          */
9055
9056         /* Create Mailbox Command Queue */
9057
9058         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9059                                       phba->sli4_hba.mq_esize,
9060                                       phba->sli4_hba.mq_ecount, cpu);
9061         if (!qdesc) {
9062                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9063                                 "0505 Failed allocate slow-path MQ\n");
9064                 goto out_error;
9065         }
9066         qdesc->chann = cpu;
9067         phba->sli4_hba.mbx_wq = qdesc;
9068
9069         /*
9070          * Create ELS Work Queues
9071          */
9072
9073         /* Create slow-path ELS Work Queue */
9074         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9075                                       phba->sli4_hba.wq_esize,
9076                                       phba->sli4_hba.wq_ecount, cpu);
9077         if (!qdesc) {
9078                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9079                                 "0504 Failed allocate slow-path ELS WQ\n");
9080                 goto out_error;
9081         }
9082         qdesc->chann = cpu;
9083         phba->sli4_hba.els_wq = qdesc;
9084         list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9085
9086         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9087                 /* Create NVME LS Complete Queue */
9088                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9089                                               phba->sli4_hba.cq_esize,
9090                                               phba->sli4_hba.cq_ecount, cpu);
9091                 if (!qdesc) {
9092                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9093                                         "6079 Failed allocate NVME LS CQ\n");
9094                         goto out_error;
9095                 }
9096                 qdesc->chann = cpu;
9097                 qdesc->qe_valid = 1;
9098                 phba->sli4_hba.nvmels_cq = qdesc;
9099
9100                 /* Create NVME LS Work Queue */
9101                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9102                                               phba->sli4_hba.wq_esize,
9103                                               phba->sli4_hba.wq_ecount, cpu);
9104                 if (!qdesc) {
9105                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9106                                         "6080 Failed allocate NVME LS WQ\n");
9107                         goto out_error;
9108                 }
9109                 qdesc->chann = cpu;
9110                 phba->sli4_hba.nvmels_wq = qdesc;
9111                 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9112         }
9113
9114         /*
9115          * Create Receive Queue (RQ)
9116          */
9117
9118         /* Create Receive Queue for header */
9119         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9120                                       phba->sli4_hba.rq_esize,
9121                                       phba->sli4_hba.rq_ecount, cpu);
9122         if (!qdesc) {
9123                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9124                                 "0506 Failed allocate receive HRQ\n");
9125                 goto out_error;
9126         }
9127         phba->sli4_hba.hdr_rq = qdesc;
9128
9129         /* Create Receive Queue for data */
9130         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9131                                       phba->sli4_hba.rq_esize,
9132                                       phba->sli4_hba.rq_ecount, cpu);
9133         if (!qdesc) {
9134                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9135                                 "0507 Failed allocate receive DRQ\n");
9136                 goto out_error;
9137         }
9138         phba->sli4_hba.dat_rq = qdesc;
9139
9140         if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9141             phba->nvmet_support) {
9142                 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9143                         cpu = lpfc_find_cpu_handle(phba, idx,
9144                                                    LPFC_FIND_BY_HDWQ);
9145                         /* Create NVMET Receive Queue for header */
9146                         qdesc = lpfc_sli4_queue_alloc(phba,
9147                                                       LPFC_DEFAULT_PAGE_SIZE,
9148                                                       phba->sli4_hba.rq_esize,
9149                                                       LPFC_NVMET_RQE_DEF_COUNT,
9150                                                       cpu);
9151                         if (!qdesc) {
9152                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9153                                                 "3146 Failed allocate "
9154                                                 "receive HRQ\n");
9155                                 goto out_error;
9156                         }
9157                         qdesc->hdwq = idx;
9158                         phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
9159
9160                         /* Only needed for header of RQ pair */
9161                         qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9162                                                    GFP_KERNEL,
9163                                                    cpu_to_node(cpu));
9164                         if (!qdesc->rqbp) {
9165                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9166                                                 "6131 Failed allocate "
9167                                                 "Header RQBP\n");
9168                                 goto out_error;
9169                         }
9170
9171                         /* Put list in known state in case driver load fails. */
9172                         INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
9173
9174                         /* Create NVMET Receive Queue for data */
9175                         qdesc = lpfc_sli4_queue_alloc(phba,
9176                                                       LPFC_DEFAULT_PAGE_SIZE,
9177                                                       phba->sli4_hba.rq_esize,
9178                                                       LPFC_NVMET_RQE_DEF_COUNT,
9179                                                       cpu);
9180                         if (!qdesc) {
9181                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9182                                                 "3156 Failed allocate "
9183                                                 "receive DRQ\n");
9184                                 goto out_error;
9185                         }
9186                         qdesc->hdwq = idx;
9187                         phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
9188                 }
9189         }
9190
9191         /* Clear NVME stats */
9192         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9193                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9194                         memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9195                                sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9196                 }
9197         }
9198
9199         /* Clear SCSI stats */
9200         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9201                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9202                         memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9203                                sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9204                 }
9205         }
9206
9207         return 0;
9208
9209 out_error:
9210         lpfc_sli4_queue_destroy(phba);
9211         return -ENOMEM;
9212 }
9213
9214 static inline void
9215 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
9216 {
9217         if (*qp) {
9218                 lpfc_sli4_queue_free(*qp);
9219                 *qp = NULL;
9220         }
9221 }
9222
9223 static inline void
9224 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9225 {
9226         int idx;
9227
9228         if (*qs == NULL)
9229                 return;
9230
9231         for (idx = 0; idx < max; idx++)
9232                 __lpfc_sli4_release_queue(&(*qs)[idx]);
9233
9234         kfree(*qs);
9235         *qs = NULL;
9236 }
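/*
 * Usage sketch: releasing an array of queue pointers frees every element and
 * then the array itself, e.g. for the NVMET CQ set (as done in
 * lpfc_sli4_queue_destroy() below):
 *
 *	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
 *				 phba->cfg_nvmet_mrq);
 */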
9237
9238 static inline void
9239 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
9240 {
9241         struct lpfc_sli4_hdw_queue *hdwq;
9242         struct lpfc_queue *eq;
9243         uint32_t idx;
9244
9245         hdwq = phba->sli4_hba.hdwq;
9246
9247         /* Loop thru all Hardware Queues */
9248         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9249                 /* Free the CQ/WQ corresponding to the Hardware Queue */
9250                 lpfc_sli4_queue_free(hdwq[idx].io_cq);
9251                 lpfc_sli4_queue_free(hdwq[idx].io_wq);
9252                 hdwq[idx].hba_eq = NULL;
9253                 hdwq[idx].io_cq = NULL;
9254                 hdwq[idx].io_wq = NULL;
9255                 if (phba->cfg_xpsgl && !phba->nvmet_support)
9256                         lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
9257                 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
9258         }
9259         /* Loop thru all IRQ vectors */
9260         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9261                 /* Free the EQ corresponding to the IRQ vector */
9262                 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9263                 lpfc_sli4_queue_free(eq);
9264                 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9265         }
9266 }
9267
9268 /**
9269  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
9270  * @phba: pointer to lpfc hba data structure.
9271  *
9272  * This routine is invoked to release all the SLI4 queues with the FCoE HBA
9273  * operation.
9279  **/
9280 void
9281 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
9282 {
9283         /*
9284          * Set FREE_INIT before beginning to free the queues.
9285          * Wait until any in-flight users of the queues acknowledge
9286          * the release by clearing FREE_WAIT.
9287          */
9288         spin_lock_irq(&phba->hbalock);
9289         phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9290         while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9291                 spin_unlock_irq(&phba->hbalock);
9292                 msleep(20);
9293                 spin_lock_irq(&phba->hbalock);
9294         }
9295         spin_unlock_irq(&phba->hbalock);
9296
9297         lpfc_sli4_cleanup_poll_list(phba);
9298
9299         /* Release HBA eqs */
9300         if (phba->sli4_hba.hdwq)
9301                 lpfc_sli4_release_hdwq(phba);
9302
9303         if (phba->nvmet_support) {
9304                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
9305                                          phba->cfg_nvmet_mrq);
9306
9307                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
9308                                          phba->cfg_nvmet_mrq);
9309                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
9310                                          phba->cfg_nvmet_mrq);
9311         }
9312
9313         /* Release mailbox command work queue */
9314         __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
9315
9316         /* Release ELS work queue */
9317         __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
9318
9319         /* Release ELS work queue */
9320         __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
9321
9322         /* Release unsolicited receive queue */
9323         __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
9324         __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
9325
9326         /* Release ELS complete queue */
9327         __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
9328
9329         /* Release NVME LS complete queue */
9330         __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
9331
9332         /* Release mailbox command complete queue */
9333         __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
9334
9335         /* Everything on this list has been freed */
9336         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9337
9338         /* Done with freeing the queues */
9339         spin_lock_irq(&phba->hbalock);
9340         phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9341         spin_unlock_irq(&phba->hbalock);
9342 }
9343
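/**
 * lpfc_free_rq_buffer - Free all buffers posted to a receive queue
 * @phba: pointer to lpfc hba data structure.
 * @rq: pointer to the receive queue whose buffer list is drained.
 *
 * Walks rq->rqbp->rqb_buffer_list, handing each entry back to the queue's
 * rqb_free_buffer callback and decrementing buffer_count as it goes.
 * Always returns 1.
 **/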
9344 int
9345 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9346 {
9347         struct lpfc_rqb *rqbp;
9348         struct lpfc_dmabuf *h_buf;
9349         struct rqb_dmabuf *rqb_buffer;
9350
9351         rqbp = rq->rqbp;
9352         while (!list_empty(&rqbp->rqb_buffer_list)) {
9353                 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9354                                  struct lpfc_dmabuf, list);
9355
9356                 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9357                 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9358                 rqbp->buffer_count--;
9359         }
9360         return 1;
9361 }
9362
9363 static int
9364 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9365         struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9366         int qidx, uint32_t qtype)
9367 {
9368         struct lpfc_sli_ring *pring;
9369         int rc;
9370
9371         if (!eq || !cq || !wq) {
9372                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9373                         "6085 Fast-path %s (%d) not allocated\n",
9374                         ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9375                 return -ENOMEM;
9376         }
9377
9378         /* create the CQ first */
9379         rc = lpfc_cq_create(phba, cq, eq,
9380                         (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9381         if (rc) {
9382                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9383                         "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9384                         qidx, (uint32_t)rc);
9385                 return rc;
9386         }
9387
9388         if (qtype != LPFC_MBOX) {
9389                 /* Setup cq_map for fast lookup */
9390                 if (cq_map)
9391                         *cq_map = cq->queue_id;
9392
9393                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9394                         "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9395                         qidx, cq->queue_id, qidx, eq->queue_id);
9396
9397                 /* create the wq */
9398                 rc = lpfc_wq_create(phba, wq, cq, qtype);
9399                 if (rc) {
9400                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9401                                 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
9402                                 qidx, (uint32_t)rc);
9403                         /* no need to tear down cq - caller will do so */
9404                         return rc;
9405                 }
9406
9407                 /* Bind this CQ/WQ to the NVME ring */
9408                 pring = wq->pring;
9409                 pring->sli.sli4.wqp = (void *)wq;
9410                 cq->pring = pring;
9411
9412                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9413                         "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9414                         qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9415         } else {
9416                 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9417                 if (rc) {
9418                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9419                                 "0539 Failed setup of slow-path MQ: "
9420                                 "rc = 0x%x\n", rc);
9421                         /* no need to tear down cq - caller will do so */
9422                         return rc;
9423                 }
9424
9425                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9426                         "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9427                         phba->sli4_hba.mbx_wq->queue_id,
9428                         phba->sli4_hba.mbx_cq->queue_id);
9429         }
9430
9431         return 0;
9432 }
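/*
 * Usage sketch: callers hand lpfc_create_wq_cq() the parent EQ, the CQ/WQ
 * pair, an optional cq_map slot used for fast completion routing, and the
 * queue subtype, as in the fast-path loop in lpfc_sli4_queue_setup() below:
 *
 *	rc = lpfc_create_wq_cq(phba, qp[qidx].hba_eq, qp[qidx].io_cq,
 *			       qp[qidx].io_wq,
 *			       &phba->sli4_hba.hdwq[qidx].io_cq_map,
 *			       qidx, LPFC_IO);
 */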
9433
9434 /**
9435  * lpfc_setup_cq_lookup - Setup the CQ lookup table
9436  * @phba: pointer to lpfc hba data structure.
9437  *
9438  * This routine populates the cq_lookup table with all
9439  * available CQ queue_id's.
9440  **/
9441 static void
9442 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9443 {
9444         struct lpfc_queue *eq, *childq;
9445         int qidx;
9446
9447         memset(phba->sli4_hba.cq_lookup, 0,
9448                (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9449         /* Loop thru all IRQ vectors */
9450         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9451                 /* Get the EQ corresponding to the IRQ vector */
9452                 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9453                 if (!eq)
9454                         continue;
9455                 /* Loop through all CQs associated with that EQ */
9456                 list_for_each_entry(childq, &eq->child_list, list) {
9457                         if (childq->queue_id > phba->sli4_hba.cq_max)
9458                                 continue;
9459                         if (childq->subtype == LPFC_IO)
9460                                 phba->sli4_hba.cq_lookup[childq->queue_id] =
9461                                         childq;
9462                 }
9463         }
9464 }
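/*
 * With the table populated, interrupt handling can translate a CQ id pulled
 * from an event queue entry straight to its queue structure in O(1), along
 * the lines of this sketch:
 *
 *	cq = phba->sli4_hba.cq_lookup[cqid];
 */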
9465
9466 /**
9467  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
9468  * @phba: pointer to lpfc hba data structure.
9469  *
9470  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
9471  * operation.
9472  *
9473  * Return codes
9474  *      0 - successful
9475  *      -ENOMEM - No available memory
9476  *      -EIO - The mailbox failed to complete successfully.
9477  **/
9478 int
9479 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9480 {
9481         uint32_t shdr_status, shdr_add_status;
9482         union lpfc_sli4_cfg_shdr *shdr;
9483         struct lpfc_vector_map_info *cpup;
9484         struct lpfc_sli4_hdw_queue *qp;
9485         LPFC_MBOXQ_t *mboxq;
9486         int qidx, cpu;
9487         uint32_t length, usdelay;
9488         int rc = -ENOMEM;
9489
9490         /* Check for dual-ULP support */
9491         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9492         if (!mboxq) {
9493                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9494                                 "3249 Unable to allocate memory for "
9495                                 "QUERY_FW_CFG mailbox command\n");
9496                 return -ENOMEM;
9497         }
9498         length = (sizeof(struct lpfc_mbx_query_fw_config) -
9499                   sizeof(struct lpfc_sli4_cfg_mhdr));
9500         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9501                          LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9502                          length, LPFC_SLI4_MBX_EMBED);
9503
9504         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9505
9506         shdr = (union lpfc_sli4_cfg_shdr *)
9507                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9508         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9509         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9510         if (shdr_status || shdr_add_status || rc) {
9511                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9512                                 "3250 QUERY_FW_CFG mailbox failed with status "
9513                                 "x%x add_status x%x, mbx status x%x\n",
9514                                 shdr_status, shdr_add_status, rc);
9515                 if (rc != MBX_TIMEOUT)
9516                         mempool_free(mboxq, phba->mbox_mem_pool);
9517                 rc = -ENXIO;
9518                 goto out_error;
9519         }
9520
9521         phba->sli4_hba.fw_func_mode =
9522                         mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9523         phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9524         phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
9525         phba->sli4_hba.physical_port =
9526                         mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
9527         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9528                         "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9529                         "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9530                         phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9531
9532         if (rc != MBX_TIMEOUT)
9533                 mempool_free(mboxq, phba->mbox_mem_pool);
9534
9535         /*
9536          * Set up HBA Event Queues (EQs)
9537          */
9538         qp = phba->sli4_hba.hdwq;
9539
9540         /* Set up HBA event queue */
9541         if (!qp) {
9542                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9543                                 "3147 Fast-path EQs not allocated\n");
9544                 rc = -ENOMEM;
9545                 goto out_error;
9546         }
9547
9548         /* Loop thru all IRQ vectors */
9549         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9550                 /* Create HBA Event Queues (EQs) in order */
9551                 for_each_present_cpu(cpu) {
9552                         cpup = &phba->sli4_hba.cpu_map[cpu];
9553
9554                         /* Look for the CPU that's using that vector with
9555                          * LPFC_CPU_FIRST_IRQ set.
9556                          */
9557                         if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9558                                 continue;
9559                         if (qidx != cpup->eq)
9560                                 continue;
9561
9562                         /* Create an EQ for that vector */
9563                         rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9564                                             phba->cfg_fcp_imax);
9565                         if (rc) {
9566                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9567                                                 "0523 Failed setup of fast-path"
9568                                                 " EQ (%d), rc = 0x%x\n",
9569                                                 cpup->eq, (uint32_t)rc);
9570                                 goto out_destroy;
9571                         }
9572
9573                         /* Save the EQ for that vector in the hba_eq_hdl */
9574                         phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9575                                 qp[cpup->hdwq].hba_eq;
9576
9577                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9578                                         "2584 HBA EQ setup: queue[%d]-id=%d\n",
9579                                         cpup->eq,
9580                                         qp[cpup->hdwq].hba_eq->queue_id);
9581                 }
9582         }
9583
9584         /* Loop thru all Hardware Queues */
9585         for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9586                 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9587                 cpup = &phba->sli4_hba.cpu_map[cpu];
9588
9589                 /* Create the CQ/WQ corresponding to the Hardware Queue */
9590                 rc = lpfc_create_wq_cq(phba,
9591                                        phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9592                                        qp[qidx].io_cq,
9593                                        qp[qidx].io_wq,
9594                                        &phba->sli4_hba.hdwq[qidx].io_cq_map,
9595                                        qidx,
9596                                        LPFC_IO);
9597                 if (rc) {
9598                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9599                                         "0535 Failed to setup fastpath "
9600                                         "IO WQ/CQ (%d), rc = 0x%x\n",
9601                                         qidx, (uint32_t)rc);
9602                         goto out_destroy;
9603                 }
9604         }
9605
9606         /*
9607          * Set up Slow Path Complete Queues (CQs)
9608          */
9609
9610         /* Set up slow-path MBOX CQ/MQ */
9611
9612         if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
9613                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9614                                 "0528 %s not allocated\n",
9615                                 phba->sli4_hba.mbx_cq ?
9616                                 "Mailbox WQ" : "Mailbox CQ");
9617                 rc = -ENOMEM;
9618                 goto out_destroy;
9619         }
9620
9621         rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9622                                phba->sli4_hba.mbx_cq,
9623                                phba->sli4_hba.mbx_wq,
9624                                NULL, 0, LPFC_MBOX);
9625         if (rc) {
9626                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9627                         "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9628                         (uint32_t)rc);
9629                 goto out_destroy;
9630         }
9631         if (phba->nvmet_support) {
9632                 if (!phba->sli4_hba.nvmet_cqset) {
9633                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9634                                         "3165 Fast-path NVME CQ Set "
9635                                         "array not allocated\n");
9636                         rc = -ENOMEM;
9637                         goto out_destroy;
9638                 }
9639                 if (phba->cfg_nvmet_mrq > 1) {
9640                         rc = lpfc_cq_create_set(phba,
9641                                         phba->sli4_hba.nvmet_cqset,
9642                                         qp,
9643                                         LPFC_WCQ, LPFC_NVMET);
9644                         if (rc) {
9645                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9646                                                 "3164 Failed setup of NVME CQ "
9647                                                 "Set, rc = 0x%x\n",
9648                                                 (uint32_t)rc);
9649                                 goto out_destroy;
9650                         }
9651                 } else {
9652                         /* Set up NVMET Receive Complete Queue */
9653                         rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
9654                                             qp[0].hba_eq,
9655                                             LPFC_WCQ, LPFC_NVMET);
9656                         if (rc) {
9657                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9658                                                 "6089 Failed setup NVMET CQ: "
9659                                                 "rc = 0x%x\n", (uint32_t)rc);
9660                                 goto out_destroy;
9661                         }
9662                         phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9663
9664                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9665                                         "6090 NVMET CQ setup: cq-id=%d, "
9666                                         "parent eq-id=%d\n",
9667                                         phba->sli4_hba.nvmet_cqset[0]->queue_id,
9668                                         qp[0].hba_eq->queue_id);
9669                 }
9670         }
9671
9672         /* Set up slow-path ELS WQ/CQ */
9673         if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
9674                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9675                                 "0530 ELS %s not allocated\n",
9676                                 phba->sli4_hba.els_cq ? "WQ" : "CQ");
9677                 rc = -ENOMEM;
9678                 goto out_destroy;
9679         }
9680         rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9681                                phba->sli4_hba.els_cq,
9682                                phba->sli4_hba.els_wq,
9683                                NULL, 0, LPFC_ELS);
9684         if (rc) {
9685                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9686                                 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9687                                 (uint32_t)rc);
9688                 goto out_destroy;
9689         }
9690         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9691                         "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9692                         phba->sli4_hba.els_wq->queue_id,
9693                         phba->sli4_hba.els_cq->queue_id);
9694
9695         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9696                 /* Set up NVME LS Complete Queue */
9697                 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
9698                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9699                                         "6091 LS %s not allocated\n",
9700                                         phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9701                         rc = -ENOMEM;
9702                         goto out_destroy;
9703                 }
9704                 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9705                                        phba->sli4_hba.nvmels_cq,
9706                                        phba->sli4_hba.nvmels_wq,
9707                                        NULL, 0, LPFC_NVME_LS);
9708                 if (rc) {
9709                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9710                                         "0526 Failed setup of NVME LS WQ/CQ: "
9711                                         "rc = 0x%x\n", (uint32_t)rc);
9712                         goto out_destroy;
9713                 }
9714
9715                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9716                                 "6096 NVME LS WQ setup: wq-id=%d, "
9717                                 "parent cq-id=%d\n",
9718                                 phba->sli4_hba.nvmels_wq->queue_id,
9719                                 phba->sli4_hba.nvmels_cq->queue_id);
9720         }
9721
9722         /*
9723          * Create NVMET Receive Queue (RQ)
9724          */
9725         if (phba->nvmet_support) {
9726                 if ((!phba->sli4_hba.nvmet_cqset) ||
9727                     (!phba->sli4_hba.nvmet_mrq_hdr) ||
9728                     (!phba->sli4_hba.nvmet_mrq_data)) {
9729                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9730                                         "6130 MRQ CQ Queues not "
9731                                         "allocated\n");
9732                         rc = -ENOMEM;
9733                         goto out_destroy;
9734                 }
9735                 if (phba->cfg_nvmet_mrq > 1) {
9736                         rc = lpfc_mrq_create(phba,
9737                                              phba->sli4_hba.nvmet_mrq_hdr,
9738                                              phba->sli4_hba.nvmet_mrq_data,
9739                                              phba->sli4_hba.nvmet_cqset,
9740                                              LPFC_NVMET);
9741                         if (rc) {
9742                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9743                                                 "6098 Failed setup of NVMET "
9744                                                 "MRQ: rc = 0x%x\n",
9745                                                 (uint32_t)rc);
9746                                 goto out_destroy;
9747                         }
9748
9750                         rc = lpfc_rq_create(phba,
9751                                             phba->sli4_hba.nvmet_mrq_hdr[0],
9752                                             phba->sli4_hba.nvmet_mrq_data[0],
9753                                             phba->sli4_hba.nvmet_cqset[0],
9754                                             LPFC_NVMET);
9755                         if (rc) {
9756                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9757                                                 "6057 Failed setup of NVMET "
9758                                                 "Receive Queue: rc = 0x%x\n",
9759                                                 (uint32_t)rc);
9760                                 goto out_destroy;
9761                         }
9762
9763                         lpfc_printf_log(
9764                                 phba, KERN_INFO, LOG_INIT,
9765                                 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9766                                 "dat-rq-id=%d parent cq-id=%d\n",
9767                                 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9768                                 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9769                                 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9771                 }
9772         }
9773
9774         if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9775                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9776                                 "0540 Receive Queue not allocated\n");
9777                 rc = -ENOMEM;
9778                 goto out_destroy;
9779         }
9780
9781         rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
9782                             phba->sli4_hba.els_cq, LPFC_USOL);
9783         if (rc) {
9784                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9785                                 "0541 Failed setup of Receive Queue: "
9786                                 "rc = 0x%x\n", (uint32_t)rc);
9787                 goto out_destroy;
9788         }
9789
9790         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9791                         "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9792                         "parent cq-id=%d\n",
9793                         phba->sli4_hba.hdr_rq->queue_id,
9794                         phba->sli4_hba.dat_rq->queue_id,
9795                         phba->sli4_hba.els_cq->queue_id);
9796
9797         if (phba->cfg_fcp_imax)
9798                 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
9799         else
9800                 usdelay = 0;
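        /*
         * Example: cfg_fcp_imax caps interrupts per second, so with
         * cfg_fcp_imax = 50000 the coalescing delay becomes
         * LPFC_SEC_TO_USEC / 50000 = 1000000 / 50000 = 20 usec;
         * zero disables the delay entirely.
         */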
9801
9802         for (qidx = 0; qidx < phba->cfg_irq_chann;
9803              qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9804                 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9805                                          usdelay);
9806
9807         if (phba->sli4_hba.cq_max) {
9808                 kfree(phba->sli4_hba.cq_lookup);
9809                 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
9810                         sizeof(struct lpfc_queue *), GFP_KERNEL);
9811                 if (!phba->sli4_hba.cq_lookup) {
9812                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9813                                         "0549 Failed setup of CQ Lookup table: "
9814                                         "size 0x%x\n", phba->sli4_hba.cq_max);
9815                         rc = -ENOMEM;
9816                         goto out_destroy;
9817                 }
9818                 lpfc_setup_cq_lookup(phba);
9819         }
9820         return 0;
9821
9822 out_destroy:
9823         lpfc_sli4_queue_unset(phba);
9824 out_error:
9825         return rc;
9826 }
9827
9828 /**
9829  * lpfc_sli4_queue_unset - Unset all the SLI4 queues
9830  * @phba: pointer to lpfc hba data structure.
9831  *
9832  * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
9833  * operation.
9839  **/
9840 void
9841 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
9842 {
9843         struct lpfc_sli4_hdw_queue *qp;
9844         struct lpfc_queue *eq;
9845         int qidx;
9846
9847         /* Unset mailbox command work queue */
9848         if (phba->sli4_hba.mbx_wq)
9849                 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
9850
9851         /* Unset NVME LS work queue */
9852         if (phba->sli4_hba.nvmels_wq)
9853                 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
9854
9855         /* Unset ELS work queue */
9856         if (phba->sli4_hba.els_wq)
9857                 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
9858
9859         /* Unset unsolicited receive queue */
9860         if (phba->sli4_hba.hdr_rq)
9861                 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
9862                                 phba->sli4_hba.dat_rq);
9863
9864         /* Unset mailbox command complete queue */
9865         if (phba->sli4_hba.mbx_cq)
9866                 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
9867
9868         /* Unset ELS complete queue */
9869         if (phba->sli4_hba.els_cq)
9870                 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
9871
9872         /* Unset NVME LS complete queue */
9873         if (phba->sli4_hba.nvmels_cq)
9874                 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
9875
9876         if (phba->nvmet_support) {
9877                 /* Unset NVMET MRQ queue */
9878                 if (phba->sli4_hba.nvmet_mrq_hdr) {
9879                         for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9880                                 lpfc_rq_destroy(
9881                                         phba,
9882                                         phba->sli4_hba.nvmet_mrq_hdr[qidx],
9883                                         phba->sli4_hba.nvmet_mrq_data[qidx]);
9884                 }
9885
9886                 /* Unset NVMET CQ Set complete queue */
9887                 if (phba->sli4_hba.nvmet_cqset) {
9888                         for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9889                                 lpfc_cq_destroy(
9890                                         phba, phba->sli4_hba.nvmet_cqset[qidx]);
9891                 }
9892         }
9893
9894         /* Unset fast-path SLI4 queues */
9895         if (phba->sli4_hba.hdwq) {
9896                 /* Loop thru all Hardware Queues */
9897                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9898                         /* Destroy the CQ/WQ corresponding to Hardware Queue */
9899                         qp = &phba->sli4_hba.hdwq[qidx];
9900                         lpfc_wq_destroy(phba, qp->io_wq);
9901                         lpfc_cq_destroy(phba, qp->io_cq);
9902                 }
9903                 /* Loop thru all IRQ vectors */
9904                 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9905                         /* Destroy the EQ corresponding to the IRQ vector */
9906                         eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9907                         lpfc_eq_destroy(phba, eq);
9908                 }
9909         }
9910
9911         kfree(phba->sli4_hba.cq_lookup);
9912         phba->sli4_hba.cq_lookup = NULL;
9913         phba->sli4_hba.cq_max = 0;
9914 }
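/*
 * Note the unwind order above: work and receive queues are destroyed before
 * their parent CQs, and CQs before their parent EQs, reversing setup order
 * so that no child queue outlives its parent.
 */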
9915
9916 /**
9917  * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
9918  * @phba: pointer to lpfc hba data structure.
9919  *
9920  * This routine is invoked to allocate and set up a pool of completion queue
9921  * events. The body of the completion queue event is a completion queue entry
9922  * CQE. For now, this pool is used for the interrupt service routine to queue
9923  * the following HBA completion queue events for the worker thread to process:
9924  *   - Mailbox asynchronous events
9925  *   - Receive queue completion unsolicited events
9926  * Later, this can be used for all the slow-path events.
9927  *
9928  * Return codes
9929  *      0 - successful
9930  *      -ENOMEM - No available memory
9931  **/
9932 static int
9933 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
9934 {
9935         struct lpfc_cq_event *cq_event;
9936         int i;
9937
9938         for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
9939                 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
9940                 if (!cq_event)
9941                         goto out_pool_create_fail;
9942                 list_add_tail(&cq_event->list,
9943                               &phba->sli4_hba.sp_cqe_event_pool);
9944         }
9945         return 0;
9946
9947 out_pool_create_fail:
9948         lpfc_sli4_cq_event_pool_destroy(phba);
9949         return -ENOMEM;
9950 }
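/*
 * Sizing example (hypothetical count): the pool above holds 4 * cq_ecount
 * events; with a default cq_ecount of, say, 1024 entries, that pre-allocates
 * 4096 lpfc_cq_event structures - several full completion queues' worth of
 * slow-path events.
 */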
9951
9952 /**
9953  * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
9954  * @phba: pointer to lpfc hba data structure.
9955  *
9956  * This routine is invoked to free the pool of completion queue events at
9957  * driver unload time. Note that, it is the responsibility of the driver
9958  * cleanup routine to free all the outstanding completion-queue events
9959  * allocated from this pool back into the pool before invoking this routine
9960  * to destroy the pool.
9961  **/
9962 static void
9963 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
9964 {
9965         struct lpfc_cq_event *cq_event, *next_cq_event;
9966
9967         list_for_each_entry_safe(cq_event, next_cq_event,
9968                                  &phba->sli4_hba.sp_cqe_event_pool, list) {
9969                 list_del(&cq_event->list);
9970                 kfree(cq_event);
9971         }
9972 }
9973
9974 /**
9975  * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9976  * @phba: pointer to lpfc hba data structure.
9977  *
9978  * This routine is the lock free version of the API invoked to allocate a
9979  * completion-queue event from the free pool.
9980  *
9981  * Return: Pointer to the newly allocated completion-queue event if successful
9982  *         NULL otherwise.
9983  **/
9984 struct lpfc_cq_event *
9985 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9986 {
9987         struct lpfc_cq_event *cq_event = NULL;
9988
9989         list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
9990                          struct lpfc_cq_event, list);
9991         return cq_event;
9992 }
9993
9994 /**
9995  * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9996  * @phba: pointer to lpfc hba data structure.
9997  *
9998  * This routine is the lock version of the API invoked to allocate a
9999  * completion-queue event from the free pool.
10000  *
10001  * Return: Pointer to the newly allocated completion-queue event if successful
10002  *         NULL otherwise.
10003  **/
10004 struct lpfc_cq_event *
10005 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10006 {
10007         struct lpfc_cq_event *cq_event;
10008         unsigned long iflags;
10009
10010         spin_lock_irqsave(&phba->hbalock, iflags);
10011         cq_event = __lpfc_sli4_cq_event_alloc(phba);
10012         spin_unlock_irqrestore(&phba->hbalock, iflags);
10013         return cq_event;
10014 }
10015
10016 /**
10017  * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10018  * @phba: pointer to lpfc hba data structure.
10019  * @cq_event: pointer to the completion queue event to be freed.
10020  *
10021  * This routine is the lock free version of the API invoked to release a
10022  * completion-queue event back into the free pool.
10023  **/
10024 void
10025 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10026                              struct lpfc_cq_event *cq_event)
10027 {
10028         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
10029 }
10030
10031 /**
10032  * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10033  * @phba: pointer to lpfc hba data structure.
10034  * @cq_event: pointer to the completion queue event to be freed.
10035  *
10036  * This routine is the lock version of the API invoked to release a
10037  * completion-queue event back into the free pool.
10038  **/
10039 void
10040 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10041                            struct lpfc_cq_event *cq_event)
10042 {
10043         unsigned long iflags;
10044         spin_lock_irqsave(&phba->hbalock, iflags);
10045         __lpfc_sli4_cq_event_release(phba, cq_event);
10046         spin_unlock_irqrestore(&phba->hbalock, iflags);
10047 }
10048
10049 /**
10050  * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
10051  * @phba: pointer to lpfc hba data structure.
10052  *
10053  * This routine frees all the pending completion-queue events back
10054  * into the free pool for device reset.
10055  **/
10056 static void
10057 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10058 {
10059         LIST_HEAD(cqelist);
10060         struct lpfc_cq_event *cqe;
10061         unsigned long iflags;
10062
10063         /* Retrieve all the pending WCQEs from pending WCQE lists */
10064         spin_lock_irqsave(&phba->hbalock, iflags);
10065         /* Pending FCP XRI abort events */
10066         list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10067                          &cqelist);
10068         /* Pending ELS XRI abort events */
10069         list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10070                          &cqelist);
10071         /* Pending async events */
10072         list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10073                          &cqelist);
10074         spin_unlock_irqrestore(&phba->hbalock, iflags);
10075
10076         while (!list_empty(&cqelist)) {
10077                 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
10078                 lpfc_sli4_cq_event_release(phba, cqe);
10079         }
10080 }
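/*
 * The drain above uses the usual pattern for emptying shared lists:
 * splice every pending list onto a private list under a single lock
 * acquisition, then walk the private list with the lock dropped. A
 * generic sketch of the pattern (the names here are illustrative, not
 * driver code):
 *
 *	LIST_HEAD(tmp);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	list_splice_init(&pending, &tmp);
 *	spin_unlock_irqrestore(&lock, flags);
 *	while (!list_empty(&tmp)) {
 *		... remove an entry from tmp and free it ...
 *	}
 */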
10081
10082 /**
10083  * lpfc_pci_function_reset - Reset pci function.
10084  * @phba: pointer to lpfc hba data structure.
10085  *
10086  * This routine is invoked to request a PCI function reset. It destroys
10087  * all resources assigned to the PCI function that originated the request.
10088  *
10089  * Return codes
10090  *      0 - successful
10091  *      -ENOMEM - No available memory
10092  *      -EIO - The mailbox failed to complete successfully.
10093  **/
10094 int
10095 lpfc_pci_function_reset(struct lpfc_hba *phba)
10096 {
10097         LPFC_MBOXQ_t *mboxq;
10098         uint32_t rc = 0, if_type;
10099         uint32_t shdr_status, shdr_add_status;
10100         uint32_t rdy_chk;
10101         uint32_t port_reset = 0;
10102         union lpfc_sli4_cfg_shdr *shdr;
10103         struct lpfc_register reg_data;
10104         uint16_t devid;
10105
10106         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10107         switch (if_type) {
10108         case LPFC_SLI_INTF_IF_TYPE_0:
10109                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10110                                                        GFP_KERNEL);
10111                 if (!mboxq) {
10112                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10113                                         "0494 Unable to allocate memory for "
10114                                         "issuing SLI_FUNCTION_RESET mailbox "
10115                                         "command\n");
10116                         return -ENOMEM;
10117                 }
10118
10119                 /* Setup PCI function reset mailbox-ioctl command */
10120                 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10121                                  LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10122                                  LPFC_SLI4_MBX_EMBED);
10123                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10124                 shdr = (union lpfc_sli4_cfg_shdr *)
10125                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10126                 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10127                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10128                                          &shdr->response);
10129                 if (rc != MBX_TIMEOUT)
10130                         mempool_free(mboxq, phba->mbox_mem_pool);
10131                 if (shdr_status || shdr_add_status || rc) {
10132                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10133                                         "0495 SLI_FUNCTION_RESET mailbox "
10134                                         "failed with status x%x add_status x%x,"
10135                                         " mbx status x%x\n",
10136                                         shdr_status, shdr_add_status, rc);
10137                         rc = -ENXIO;
10138                 }
10139                 break;
10140         case LPFC_SLI_INTF_IF_TYPE_2:
10141         case LPFC_SLI_INTF_IF_TYPE_6:
10142 wait:
10143                 /*
10144                  * Poll the Port Status Register and wait for RDY for
10145                  * up to 30 seconds (1500 polls with a 20 ms sleep each).
10146                  * If the port doesn't respond, treat it as an error.
10147                  */
10148                 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
10149                         if (lpfc_readl(phba->sli4_hba.u.if_type2.
10150                                 STATUSregaddr, &reg_data.word0)) {
10151                                 rc = -ENODEV;
10152                                 goto out;
10153                         }
10154                         if (bf_get(lpfc_sliport_status_rdy, &reg_data))
10155                                 break;
10156                         msleep(20);
10157                 }
10158
10159                 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
10160                         phba->work_status[0] = readl(
10161                                 phba->sli4_hba.u.if_type2.ERR1regaddr);
10162                         phba->work_status[1] = readl(
10163                                 phba->sli4_hba.u.if_type2.ERR2regaddr);
10164                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10165                                         "2890 Port not ready, port status reg "
10166                                         "0x%x error 1=0x%x, error 2=0x%x\n",
10167                                         reg_data.word0,
10168                                         phba->work_status[0],
10169                                         phba->work_status[1]);
10170                         rc = -ENODEV;
10171                         goto out;
10172                 }
10173
10174                 if (!port_reset) {
10175                         /*
10176                          * Reset the port now
10177                          */
10178                         reg_data.word0 = 0;
10179                         bf_set(lpfc_sliport_ctrl_end, &reg_data,
10180                                LPFC_SLIPORT_LITTLE_ENDIAN);
10181                         bf_set(lpfc_sliport_ctrl_ip, &reg_data,
10182                                LPFC_SLIPORT_INIT_PORT);
10183                         writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10184                                CTRLregaddr);
10185                         /* flush */
10186                         pci_read_config_word(phba->pcidev,
10187                                              PCI_DEVICE_ID, &devid);
10188
10189                         port_reset = 1;
10190                         msleep(20);
10191                         goto wait;
10192                 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
10193                         rc = -ENODEV;
10194                         goto out;
10195                 }
10196                 break;
10197
10198         case LPFC_SLI_INTF_IF_TYPE_1:
10199         default:
10200                 break;
10201         }
10202
10203 out:
10204         /* Catch the not-ready port failure after a port reset. */
10205         if (rc) {
10206                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10207                                 "3317 HBA not functional: IP Reset Failed "
10208                                 "try: echo fw_reset > board_mode\n");
10209                 rc = -ENODEV;
10210         }
10211
10212         return rc;
10213 }
10214
10215 /**
10216  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
10217  * @phba: pointer to lpfc hba data structure.
10218  *
10219  * This routine is invoked to set up the PCI device memory space for device
10220  * with SLI-4 interface spec.
10221  *
10222  * Return codes
10223  *      0 - successful
10224  *      other values - error
10225  **/
10226 static int
10227 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10228 {
10229         struct pci_dev *pdev = phba->pcidev;
10230         unsigned long bar0map_len, bar1map_len, bar2map_len;
10231         int error;
10232         uint32_t if_type;
10233
10234         if (!pdev)
10235                 return -ENODEV;
10236
10237         /* Set the device DMA mask size */
10238         error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10239         if (error)
10240                 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10241         if (error)
10242                 return error;
10243
10244         /*
10245          * The BARs and register set definitions and offset locations are
10246          * dependent on the if_type.
10247          */
10248         if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10249                                   &phba->sli4_hba.sli_intf.word0)) {
10250                 return -ENODEV;
10251         }
10252
10253         /* There is no SLI3 failback for SLI4 devices. */
10254         if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10255             LPFC_SLI_INTF_VALID) {
10256                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10257                                 "2894 SLI_INTF reg contents invalid "
10258                                 "sli_intf reg 0x%x\n",
10259                                 phba->sli4_hba.sli_intf.word0);
10260                 return -ENODEV;
10261         }
10262
10263         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10264         /*
10265          * Get the bus address of SLI4 device Bar regions and the
10266          * number of bytes required by each mapping. The mapping of the
10267          * particular PCI BARs regions is dependent on the type of
10268          * SLI4 device.
10269          */
10270         if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10271                 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10272                 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10273
10274                 /*
10275                  * Map SLI4 PCI Config Space Register base to a kernel virtual
10276                  * addr
10277                  */
10278                 phba->sli4_hba.conf_regs_memmap_p =
10279                         ioremap(phba->pci_bar0_map, bar0map_len);
10280                 if (!phba->sli4_hba.conf_regs_memmap_p) {
10281                         dev_printk(KERN_ERR, &pdev->dev,
10282                                    "ioremap failed for SLI4 PCI config "
10283                                    "registers.\n");
10284                         return -ENODEV;
10285                 }
10286                 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10287                 /* Set up BAR0 PCI config space register memory map */
10288                 lpfc_sli4_bar0_register_memmap(phba, if_type);
10289         } else {
10290                 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10291                 bar0map_len = pci_resource_len(pdev, 1);
10292                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10293                         dev_printk(KERN_ERR, &pdev->dev,
10294                            "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
10295                         return -ENODEV;
10296                 }
10297                 phba->sli4_hba.conf_regs_memmap_p =
10298                                 ioremap(phba->pci_bar0_map, bar0map_len);
10299                 if (!phba->sli4_hba.conf_regs_memmap_p) {
10300                         dev_printk(KERN_ERR, &pdev->dev,
10301                                 "ioremap failed for SLI4 PCI config "
10302                                 "registers.\n");
10303                         return -ENODEV;
10304                 }
10305                 lpfc_sli4_bar0_register_memmap(phba, if_type);
10306         }
10307
10308         if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10309                 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10310                         /*
10311                          * Map SLI4 if type 0 HBA Control Register base to a
10312                          * kernel virtual address and setup the registers.
10313                          */
10314                         phba->pci_bar1_map = pci_resource_start(pdev,
10315                                                                 PCI_64BIT_BAR2);
10316                         bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10317                         phba->sli4_hba.ctrl_regs_memmap_p =
10318                                         ioremap(phba->pci_bar1_map,
10319                                                 bar1map_len);
10320                         if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10321                                 dev_err(&pdev->dev,
10322                                            "ioremap failed for SLI4 HBA "
10323                                             "control registers.\n");
10324                                 error = -ENOMEM;
10325                                 goto out_iounmap_conf;
10326                         }
10327                         phba->pci_bar2_memmap_p =
10328                                          phba->sli4_hba.ctrl_regs_memmap_p;
10329                         lpfc_sli4_bar1_register_memmap(phba, if_type);
10330                 } else {
10331                         error = -ENOMEM;
10332                         goto out_iounmap_conf;
10333                 }
10334         }
10335
10336         if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10337             (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10338                 /*
10339                  * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
10340                  * virtual address and setup the registers.
10341                  */
10342                 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10343                 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10344                 phba->sli4_hba.drbl_regs_memmap_p =
10345                                 ioremap(phba->pci_bar1_map, bar1map_len);
10346                 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10347                         dev_err(&pdev->dev,
10348                            "ioremap failed for SLI4 HBA doorbell registers.\n");
10349                         error = -ENOMEM;
10350                         goto out_iounmap_conf;
10351                 }
10352                 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10353                 lpfc_sli4_bar1_register_memmap(phba, if_type);
10354         }
10355
10356         if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10357                 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10358                         /*
10359                          * Map SLI4 if type 0 HBA Doorbell Register base to
10360                          * a kernel virtual address and setup the registers.
10361                          */
10362                         phba->pci_bar2_map = pci_resource_start(pdev,
10363                                                                 PCI_64BIT_BAR4);
10364                         bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10365                         phba->sli4_hba.drbl_regs_memmap_p =
10366                                         ioremap(phba->pci_bar2_map,
10367                                                 bar2map_len);
10368                         if (!phba->sli4_hba.drbl_regs_memmap_p) {
10369                                 dev_err(&pdev->dev,
10370                                            "ioremap failed for SLI4 HBA"
10371                                            " doorbell registers.\n");
10372                                 error = -ENOMEM;
10373                                 goto out_iounmap_ctrl;
10374                         }
10375                         phba->pci_bar4_memmap_p =
10376                                         phba->sli4_hba.drbl_regs_memmap_p;
10377                         error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10378                         if (error)
10379                                 goto out_iounmap_all;
10380                 } else {
10381                         error = -ENOMEM;
10382                         goto out_iounmap_all;
10383                 }
10384         }
10385
10386         if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10387             pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10388                 /*
10389                  * Map SLI4 if type 6 HBA DPP Register base to a kernel
10390                  * virtual address and setup the registers.
10391                  */
10392                 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10393                 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10394                 phba->sli4_hba.dpp_regs_memmap_p =
10395                                 ioremap(phba->pci_bar2_map, bar2map_len);
10396                 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10397                         dev_err(&pdev->dev,
10398                            "ioremap failed for SLI4 HBA dpp registers.\n");
10399                         error = -ENOMEM;
10400                         goto out_iounmap_ctrl;
10401                 }
10402                 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10403         }
10404
10405         /* Set up the EQ/CQ register handling functions now */
10406         switch (if_type) {
10407         case LPFC_SLI_INTF_IF_TYPE_0:
10408         case LPFC_SLI_INTF_IF_TYPE_2:
10409                 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10410                 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10411                 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10412                 break;
10413         case LPFC_SLI_INTF_IF_TYPE_6:
10414                 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10415                 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10416                 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10417                 break;
10418         default:
10419                 break;
10420         }
10421
10422         return 0;
10423
10424 out_iounmap_all:
10425         iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10426 out_iounmap_ctrl:
10427         iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10428 out_iounmap_conf:
10429         iounmap(phba->sli4_hba.conf_regs_memmap_p);
10430
10431         return error;
10432 }
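/*
 * Every BAR handled above follows the same map-or-fail shape. A reduced
 * sketch of that pattern, with an illustrative bar index (this fragment
 * is not driver code; it only makes the pattern explicit):
 *
 *	resource_size_t start = pci_resource_start(pdev, bar);
 *	resource_size_t len = pci_resource_len(pdev, bar);
 *	void __iomem *regs;
 *
 *	if (!start)
 *		return -ENOMEM;
 *	regs = ioremap(start, len);
 *	if (!regs)
 *		return -ENOMEM;
 *	... on any later failure, iounmap() this and all earlier mappings ...
 */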
10433
10434 /**
10435  * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
10436  * @phba: pointer to lpfc hba data structure.
10437  *
10438  * This routine is invoked to unset the PCI device memory space for device
10439  * with SLI-4 interface spec.
10440  **/
10441 static void
10442 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10443 {
10444         uint32_t if_type;
10445         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10446
10447         switch (if_type) {
10448         case LPFC_SLI_INTF_IF_TYPE_0:
10449                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10450                 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10451                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10452                 break;
10453         case LPFC_SLI_INTF_IF_TYPE_2:
10454                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10455                 break;
10456         case LPFC_SLI_INTF_IF_TYPE_6:
10457                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10458                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10459                 if (phba->sli4_hba.dpp_regs_memmap_p)
10460                         iounmap(phba->sli4_hba.dpp_regs_memmap_p);
10461                 break;
10462         case LPFC_SLI_INTF_IF_TYPE_1:
10463         default:
10464                 dev_printk(KERN_ERR, &phba->pcidev->dev,
10465                            "FATAL - unsupported SLI4 interface type - %d\n",
10466                            if_type);
10467                 break;
10468         }
10469 }
10470
10471 /**
10472  * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
10473  * @phba: pointer to lpfc hba data structure.
10474  *
10475  * This routine is invoked to enable the MSI-X interrupt vectors to device
10476  * with SLI-3 interface specs.
10477  *
10478  * Return codes
10479  *   0 - successful
10480  *   other values - error
10481  **/
10482 static int
10483 lpfc_sli_enable_msix(struct lpfc_hba *phba)
10484 {
10485         int rc;
10486         LPFC_MBOXQ_t *pmb;
10487
10488         /* Set up MSI-X multi-message vectors */
10489         rc = pci_alloc_irq_vectors(phba->pcidev,
10490                         LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10491         if (rc < 0) {
10492                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10493                                 "0420 PCI enable MSI-X failed (%d)\n", rc);
10494                 goto vec_fail_out;
10495         }
10496
10497         /*
10498          * Assign MSI-X vectors to interrupt handlers
10499          */
10500
10501         /* vector-0 is associated to slow-path handler */
10502         rc = request_irq(pci_irq_vector(phba->pcidev, 0),
10503                          &lpfc_sli_sp_intr_handler, 0,
10504                          LPFC_SP_DRIVER_HANDLER_NAME, phba);
10505         if (rc) {
10506                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10507                                 "0421 MSI-X slow-path request_irq failed "
10508                                 "(%d)\n", rc);
10509                 goto msi_fail_out;
10510         }
10511
10512         /* vector-1 is associated to fast-path handler */
10513         rc = request_irq(pci_irq_vector(phba->pcidev, 1),
10514                          &lpfc_sli_fp_intr_handler, 0,
10515                          LPFC_FP_DRIVER_HANDLER_NAME, phba);
10516
10517         if (rc) {
10518                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10519                                 "0429 MSI-X fast-path request_irq failed "
10520                                 "(%d)\n", rc);
10521                 goto irq_fail_out;
10522         }
10523
10524         /*
10525          * Configure HBA MSI-X attention conditions to messages
10526          */
10527         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10528
10529         if (!pmb) {
10530                 rc = -ENOMEM;
10531                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10532                                 "0474 Unable to allocate memory for issuing "
10533                                 "MBOX_CONFIG_MSI command\n");
10534                 goto mem_fail_out;
10535         }
10536         rc = lpfc_config_msi(phba, pmb);
10537         if (rc)
10538                 goto mbx_fail_out;
10539         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10540         if (rc != MBX_SUCCESS) {
10541                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10542                                 "0351 Config MSI mailbox command failed, "
10543                                 "mbxCmd x%x, mbxStatus x%x\n",
10544                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10545                 goto mbx_fail_out;
10546         }
10547
10548         /* Free memory allocated for mailbox command */
10549         mempool_free(pmb, phba->mbox_mem_pool);
10550         return rc;
10551
10552 mbx_fail_out:
10553         /* Free memory allocated for mailbox command */
10554         mempool_free(pmb, phba->mbox_mem_pool);
10555
10556 mem_fail_out:
10557         /* free the irq already requested */
10558         free_irq(pci_irq_vector(phba->pcidev, 1), phba);
10559
10560 irq_fail_out:
10561         /* free the irq already requested */
10562         free_irq(pci_irq_vector(phba->pcidev, 0), phba);
10563
10564 msi_fail_out:
10565         /* Unconfigure MSI-X capability structure */
10566         pci_free_irq_vectors(phba->pcidev);
10567
10568 vec_fail_out:
10569         return rc;
10570 }
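/*
 * Stripped to its essentials, the MSI-X bring-up above is the standard
 * two-step kernel pattern: allocate exactly the vectors needed, then
 * attach a handler to each. A sketch under assumed names (nvec and
 * my_handler are illustrative):
 *
 *	nvec = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
 *	if (nvec < 0)
 *		return nvec;
 *	for (i = 0; i < nvec; i++)
 *		request_irq(pci_irq_vector(pdev, i), my_handler, 0,
 *			    "my-name", ctx);
 *
 * Tear-down runs in reverse - free_irq() every vector, then
 * pci_free_irq_vectors() - exactly as the error labels above cascade.
 */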
10571
10572 /**
10573  * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
10574  * @phba: pointer to lpfc hba data structure.
10575  *
10576  * This routine is invoked to enable the MSI interrupt mode on a device
10577  * with the SLI-3 interface spec. The kernel function pci_enable_msi() is
10578  * called to enable the MSI vector. The device driver is responsible for
10579  * calling request_irq() to register the MSI vector with an interrupt
10580  * handler, which is done in this function.
10581  *
10582  * Return codes
10583  *      0 - successful
10584  *      other values - error
10585  **/
10586 static int
10587 lpfc_sli_enable_msi(struct lpfc_hba *phba)
10588 {
10589         int rc;
10590
10591         rc = pci_enable_msi(phba->pcidev);
10592         if (!rc)
10593                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10594                                 "0462 PCI enable MSI mode success.\n");
10595         else {
10596                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10597                                 "0471 PCI enable MSI mode failed (%d)\n", rc);
10598                 return rc;
10599         }
10600
10601         rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10602                          0, LPFC_DRIVER_NAME, phba);
10603         if (rc) {
10604                 pci_disable_msi(phba->pcidev);
10605                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10606                                 "0478 MSI request_irq failed (%d)\n", rc);
10607         }
10608         return rc;
10609 }
10610
10611 /**
10612  * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
10613  * @phba: pointer to lpfc hba data structure.
10614  * @cfg_mode: configured interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx).
10615  *
10616  * This routine is invoked to enable device interrupts and associate the
10617  * driver's interrupt handler(s) with interrupt vector(s) on a device
10618  * with the SLI-3 interface spec. Depending on the interrupt mode
10619  * configured for the driver, it will try to fall back from the
10620  * configured interrupt mode to an interrupt mode supported by the
10621  * platform, kernel, and device, in the order: MSI-X -> MSI -> IRQ.
10622  *
10623  * Return codes
10624  *   0 - successful
10625  *   other values - error
10626  **/
10627 static uint32_t
10628 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10629 {
10630         uint32_t intr_mode = LPFC_INTR_ERROR;
10631         int retval;
10632
10633         if (cfg_mode == 2) {
10634                 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
10635                 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10636                 if (!retval) {
10637                         /* Now, try to enable MSI-X interrupt mode */
10638                         retval = lpfc_sli_enable_msix(phba);
10639                         if (!retval) {
10640                                 /* Indicate initialization to MSI-X mode */
10641                                 phba->intr_type = MSIX;
10642                                 intr_mode = 2;
10643                         }
10644                 }
10645         }
10646
10647         /* Fallback to MSI if MSI-X initialization failed */
10648         if (cfg_mode >= 1 && phba->intr_type == NONE) {
10649                 retval = lpfc_sli_enable_msi(phba);
10650                 if (!retval) {
10651                         /* Indicate initialization to MSI mode */
10652                         phba->intr_type = MSI;
10653                         intr_mode = 1;
10654                 }
10655         }
10656
10657         /* Fallback to INTx if both MSI-X/MSI initialization failed */
10658         if (phba->intr_type == NONE) {
10659                 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10660                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10661                 if (!retval) {
10662                         /* Indicate initialization to INTx mode */
10663                         phba->intr_type = INTx;
10664                         intr_mode = 0;
10665                 }
10666         }
10667         return intr_mode;
10668 }
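/*
 * The MSI-X -> MSI -> INTx cascade above is open-coded because each step
 * needs driver-specific setup (e.g. the CONFIG_MSI mailbox before MSI-X).
 * Where no per-mode setup is needed, the same fallback can be expressed
 * in one call; a sketch, not driver code:
 *
 *	rc = pci_alloc_irq_vectors(pdev, 1, 1,
 *			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *
 * The PCI core then tries each type in that order and returns the number
 * of vectors it managed to allocate, or a negative errno.
 */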
10669
10670 /**
10671  * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
10672  * @phba: pointer to lpfc hba data structure.
10673  *
10674  * This routine is invoked to disable device interrupt and disassociate the
10675  * driver's interrupt handler(s) from interrupt vector(s) to device with
10676  * SLI-3 interface spec. Depending on the interrupt mode, the driver will
10677  * release the interrupt vector(s) for the message signaled interrupt.
10678  **/
10679 static void
10680 lpfc_sli_disable_intr(struct lpfc_hba *phba)
10681 {
10682         int nr_irqs, i;
10683
10684         if (phba->intr_type == MSIX)
10685                 nr_irqs = LPFC_MSIX_VECTORS;
10686         else
10687                 nr_irqs = 1;
10688
10689         for (i = 0; i < nr_irqs; i++)
10690                 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10691         pci_free_irq_vectors(phba->pcidev);
10692
10693         /* Reset interrupt management states */
10694         phba->intr_type = NONE;
10695         phba->sli.slistat.sli_intr = 0;
10696 }
10697
10698 /**
10699  * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
10700  * @phba: pointer to lpfc hba data structure.
10701  * @id: EQ vector index or Hardware Queue index
10702  * @match: LPFC_FIND_BY_EQ = match by EQ
10703  *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
10704  * Return the CPU that matches the selection criteria
10705  */
10706 static uint16_t
10707 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10708 {
10709         struct lpfc_vector_map_info *cpup;
10710         int cpu;
10711
10712         /* Loop through all CPUs */
10713         for_each_present_cpu(cpu) {
10714                 cpup = &phba->sli4_hba.cpu_map[cpu];
10715
10716                 /* If we are matching by EQ, there may be multiple CPUs
10717                  * using the same vector, so select the one with
10718                  * LPFC_CPU_FIRST_IRQ set.
10719                  */
10720                 if ((match == LPFC_FIND_BY_EQ) &&
10721                     (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10722                     (cpup->eq == id))
10723                         return cpu;
10724
10725                 /* If matching by HDWQ, select the first CPU that matches */
10726                 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10727                         return cpu;
10728         }
10729         return 0;
10730 }
10731
10732 #ifdef CONFIG_X86
10733 /**
10734  * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
10735  * @phba: pointer to lpfc hba data structure.
10736  * @cpu: CPU map index
10737  * @phys_id: CPU package physical id
10738  * @core_id: CPU core id
10739  */
10740 static int
10741 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10742                 uint16_t phys_id, uint16_t core_id)
10743 {
10744         struct lpfc_vector_map_info *cpup;
10745         int idx;
10746
10747         for_each_present_cpu(idx) {
10748                 cpup = &phba->sli4_hba.cpu_map[idx];
10749                 /* Does the cpup match the one we are looking for */
10750                 if ((cpup->phys_id == phys_id) &&
10751                     (cpup->core_id == core_id) &&
10752                     (cpu != idx))
10753                         return 1;
10754         }
10755         return 0;
10756 }
10757 #endif
10758
10759 /**
10760  * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
10761  * @phba: pointer to lpfc hba data structure.
10762  * @eqidx: index for eq and irq vector
10763  * @flag: flags to set for vector_map structure
10764  * @cpu: cpu used to index vector_map structure
10765  *
10766  * The routine assigns eq info into vector_map structure
10767  */
10768 static inline void
10769 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
10770                         unsigned int cpu)
10771 {
10772         struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
10773         struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
10774
10775         cpup->eq = eqidx;
10776         cpup->flag |= flag;
10777
10778         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10779                         "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
10780                         cpu, eqhdl->irq, cpup->eq, cpup->flag);
10781 }
10782
10783 /**
10784  * lpfc_cpu_map_array_init - Initialize cpu_map structure
10785  * @phba: pointer to lpfc hba data structure.
10786  *
10787  * The routine initializes the cpu_map array structure
10788  */
10789 static void
10790 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
10791 {
10792         struct lpfc_vector_map_info *cpup;
10793         struct lpfc_eq_intr_info *eqi;
10794         int cpu;
10795
10796         for_each_possible_cpu(cpu) {
10797                 cpup = &phba->sli4_hba.cpu_map[cpu];
10798                 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10799                 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10800                 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10801                 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10802                 cpup->flag = 0;
10803                 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
10804                 INIT_LIST_HEAD(&eqi->list);
10805                 eqi->icnt = 0;
10806         }
10807 }
10808
10809 /**
10810  * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
10811  * @phba: pointer to lpfc hba data structure.
10812  *
10813  * The routine initializes the hba_eq_hdl array structure
10814  */
10815 static void
10816 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
10817 {
10818         struct lpfc_hba_eq_hdl *eqhdl;
10819         int i;
10820
10821         for (i = 0; i < phba->cfg_irq_chann; i++) {
10822                 eqhdl = lpfc_get_eq_hdl(i);
10823                 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
10824                 eqhdl->phba = phba;
10825         }
10826 }
10827
10828 /**
10829  * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
10830  * @phba: pointer to lpfc hba data structure.
10831  * @vectors: number of msix vectors allocated.
10832  *
10833  * The routine will figure out the CPU affinity assignment for every
10834  * MSI-X vector allocated for the HBA.
10835  * In addition, the CPU to IO channel mapping will be calculated
10836  * and the phba->sli4_hba.cpu_map array will reflect this.
10837  */
10838 static void
10839 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10840 {
10841         int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
10842         int max_phys_id, min_phys_id;
10843         int max_core_id, min_core_id;
10844         struct lpfc_vector_map_info *cpup;
10845         struct lpfc_vector_map_info *new_cpup;
10846 #ifdef CONFIG_X86
10847         struct cpuinfo_x86 *cpuinfo;
10848 #endif
10849 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
10850         struct lpfc_hdwq_stat *c_stat;
10851 #endif
10852
10853         max_phys_id = 0;
10854         min_phys_id = LPFC_VECTOR_MAP_EMPTY;
10855         max_core_id = 0;
10856         min_core_id = LPFC_VECTOR_MAP_EMPTY;
10857
10858         /* Update CPU map with physical id and core id of each CPU */
10859         for_each_present_cpu(cpu) {
10860                 cpup = &phba->sli4_hba.cpu_map[cpu];
10861 #ifdef CONFIG_X86
10862                 cpuinfo = &cpu_data(cpu);
10863                 cpup->phys_id = cpuinfo->phys_proc_id;
10864                 cpup->core_id = cpuinfo->cpu_core_id;
10865                 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10866                         cpup->flag |= LPFC_CPU_MAP_HYPER;
10867 #else
10868                 /* No distinction between CPUs for other platforms */
10869                 cpup->phys_id = 0;
10870                 cpup->core_id = cpu;
10871 #endif
10872
10873                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10874                                 "3328 CPU %d physid %d coreid %d flag x%x\n",
10875                                 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
10876
10877                 if (cpup->phys_id > max_phys_id)
10878                         max_phys_id = cpup->phys_id;
10879                 if (cpup->phys_id < min_phys_id)
10880                         min_phys_id = cpup->phys_id;
10881
10882                 if (cpup->core_id > max_core_id)
10883                         max_core_id = cpup->core_id;
10884                 if (cpup->core_id < min_core_id)
10885                         min_core_id = cpup->core_id;
10886         }
10887
10888         /* After looking at each irq vector assigned to this pcidev, it's
10889          * possible to see that not ALL CPUs have been accounted for.
10890          * Next we will set any unassigned (unaffinitized) cpu map
10891          * entries to an IRQ on the same phys_id.
10892          */
10893         first_cpu = cpumask_first(cpu_present_mask);
10894         start_cpu = first_cpu;
10895
10896         for_each_present_cpu(cpu) {
10897                 cpup = &phba->sli4_hba.cpu_map[cpu];
10898
10899                 /* Is this CPU entry unassigned */
10900                 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10901                         /* Mark CPU as IRQ not assigned by the kernel */
10902                         cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10903
10904                         /* If so, find a new_cpup that's on the SAME
10905                          * phys_id as cpup. start_cpu will start where we
10906                          * left off so all unassigned entries don't get
10907                          * assigned the IRQ of the first entry.
10908                          */
10909                         new_cpu = start_cpu;
10910                         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10911                                 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10912                                 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10913                                     (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
10914                                     (new_cpup->phys_id == cpup->phys_id))
10915                                         goto found_same;
10916                                 new_cpu = cpumask_next(
10917                                         new_cpu, cpu_present_mask);
10918                                 if (new_cpu == nr_cpumask_bits)
10919                                         new_cpu = first_cpu;
10920                         }
10921                         /* At this point, we leave the CPU as unassigned */
10922                         continue;
10923 found_same:
10924                         /* We found a matching phys_id, so copy the IRQ info */
10925                         cpup->eq = new_cpup->eq;
10926
10927                         /* Bump start_cpu to the next slot to minimize the
10928                          * chance of having multiple unassigned CPU entries
10929                          * selecting the same IRQ.
10930                          */
10931                         start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10932                         if (start_cpu == nr_cpumask_bits)
10933                                 start_cpu = first_cpu;
10934
10935                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10936                                         "3337 Set Affinity: CPU %d "
10937                                         "eq %d from peer cpu %d same "
10938                                         "phys_id (%d)\n",
10939                                         cpu, cpup->eq, new_cpu,
10940                                         cpup->phys_id);
10941                 }
10942         }
10943
10944         /* Set any unassigned cpu map entries to an IRQ on any phys_id */
10945         start_cpu = first_cpu;
10946
10947         for_each_present_cpu(cpu) {
10948                 cpup = &phba->sli4_hba.cpu_map[cpu];
10949
10950                 /* Is this entry unassigned */
10951                 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10952                         /* Mark it as IRQ not assigned by the kernel */
10953                         cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10954
10955                         /* If so, find a new_cpup that's on ANY phys_id
10956                          * as the cpup. start_cpu will start where we
10957                          * left off so all unassigned entries don't get
10958                          * assigned the IRQ of the first entry.
10959                          */
10960                         new_cpu = start_cpu;
10961                         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10962                                 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10963                                 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10964                                     (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
10965                                         goto found_any;
10966                                 new_cpu = cpumask_next(
10967                                         new_cpu, cpu_present_mask);
10968                                 if (new_cpu == nr_cpumask_bits)
10969                                         new_cpu = first_cpu;
10970                         }
10971                         /* We should never leave an entry unassigned */
10972                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10973                                         "3339 Set Affinity: CPU %d "
10974                                         "eq %d UNASSIGNED\n",
10975                                         cpu, cpup->eq);
10976                         continue;
10977 found_any:
10978                         /* We found an available entry, copy the IRQ info */
10979                         cpup->eq = new_cpup->eq;
10980
10981                         /* Bump start_cpu to the next slot to minimize the
10982                          * chance of having multiple unassigned CPU entries
10983                          * selecting the same IRQ.
10984                          */
10985                         start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10986                         if (start_cpu == nr_cpumask_bits)
10987                                 start_cpu = first_cpu;
10988
10989                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10990                                         "3338 Set Affinity: CPU %d "
10991                                         "eq %d from peer cpu %d (%d/%d)\n",
10992                                         cpu, cpup->eq, new_cpu,
10993                                         new_cpup->phys_id, new_cpup->core_id);
10994                 }
10995         }
10996
10997         /* Assign hdwq indices that are unique across all cpus in the map
10998          * that are also FIRST_CPUs.
10999          */
11000         idx = 0;
11001         for_each_present_cpu(cpu) {
11002                 cpup = &phba->sli4_hba.cpu_map[cpu];
11003
11004                 /* Only FIRST IRQs get a hdwq index assignment. */
11005                 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11006                         continue;
11007
11008                 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
11009                 cpup->hdwq = idx;
11010                 idx++;
11011                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11012                                 "3333 Set Affinity: CPU %d (phys %d core %d): "
11013                                 "hdwq %d eq %d flg x%x\n",
11014                                 cpu, cpup->phys_id, cpup->core_id,
11015                                 cpup->hdwq, cpup->eq, cpup->flag);
11016         }
11017         /* Associate a hdwq with each cpu_map entry
11018          * This will be 1 to 1 - hdwq to cpu - unless there are fewer
11019          * hardware queues than CPUs. In that case we will round-robin
11020          * the available hardware queues as they get assigned to CPUs.
11021          * The next_idx is the idx from the FIRST_CPU loop above to account
11022          * for irq_chann < hdwq.  The idx is used for round-robin assignments
11023          * and needs to start at 0.
11024          */
11025         next_idx = idx;
11026         start_cpu = 0;
11027         idx = 0;
11028         for_each_present_cpu(cpu) {
11029                 cpup = &phba->sli4_hba.cpu_map[cpu];
11030
11031                 /* FIRST cpus are already mapped. */
11032                 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
11033                         continue;
11034
11035                 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
11036                  * of the unassigned cpus to the next idx so that all
11037                  * hdw queues are fully utilized.
11038                  */
11039                 if (next_idx < phba->cfg_hdw_queue) {
11040                         cpup->hdwq = next_idx;
11041                         next_idx++;
11042                         continue;
11043                 }
11044
11045                 /* Not a First CPU and all hdw_queues are used.  Reuse a
11046                  * Hardware Queue for another CPU, so be smart about it
11047                  * and pick one that has its IRQ/EQ mapped to the same phys_id
11048                  * (CPU package) and core_id.
11049                  */
11050                 new_cpu = start_cpu;
11051                 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11052                         new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11053                         if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11054                             new_cpup->phys_id == cpup->phys_id &&
11055                             new_cpup->core_id == cpup->core_id) {
11056                                 goto found_hdwq;
11057                         }
11058                         new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11059                         if (new_cpu == nr_cpumask_bits)
11060                                 new_cpu = first_cpu;
11061                 }
11062
11063                 /* If we can't match both phys_id and core_id,
11064                  * settle for just a phys_id match.
11065                  */
11066                 new_cpu = start_cpu;
11067                 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11068                         new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11069                         if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11070                             new_cpup->phys_id == cpup->phys_id)
11071                                 goto found_hdwq;
11072
11073                         new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11074                         if (new_cpu == nr_cpumask_bits)
11075                                 new_cpu = first_cpu;
11076                 }
11077
11078                 /* Otherwise just round robin on cfg_hdw_queue */
11079                 cpup->hdwq = idx % phba->cfg_hdw_queue;
11080                 idx++;
11081                 goto logit;
11082  found_hdwq:
11083                 /* We found an available entry, copy the IRQ info */
11084                 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11085                 if (start_cpu == nr_cpumask_bits)
11086                         start_cpu = first_cpu;
11087                 cpup->hdwq = new_cpup->hdwq;
11088  logit:
11089                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11090                                 "3335 Set Affinity: CPU %d (phys %d core %d): "
11091                                 "hdwq %d eq %d flg x%x\n",
11092                                 cpu, cpup->phys_id, cpup->core_id,
11093                                 cpup->hdwq, cpup->eq, cpup->flag);
11094         }
11095
11096         /*
11097          * Initialize the cpu_map slots for not-present cpus in case
11098          * a cpu is hot-added. Perform a simple hdwq round robin assignment.
11099          */
11100         idx = 0;
11101         for_each_possible_cpu(cpu) {
11102                 cpup = &phba->sli4_hba.cpu_map[cpu];
11103 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11104                 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
11105                 c_stat->hdwq_no = cpup->hdwq;
11106 #endif
11107                 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
11108                         continue;
11109
11110                 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
11111 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11112                 c_stat->hdwq_no = cpup->hdwq;
11113 #endif
11114                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11115                                 "3340 Set Affinity: not present "
11116                                 "CPU %d hdwq %d\n",
11117                                 cpu, cpup->hdwq);
11118         }
11119
11120         /* The cpu_map array will be used later during initialization
11121          * when EQ / CQ / WQs are allocated and configured.
11122          */
11123         return;
11124 }
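/*
 * A worked example of the assignment above, under an illustrative
 * topology (8 present cpus, cfg_hdw_queue = 4, cpus 0-3 flagged
 * LPFC_CPU_FIRST_IRQ): cpus 0-3 take hdwq 0-3 in the FIRST_IRQ pass;
 * cpus 4-7 then find next_idx == cfg_hdw_queue, fall through to the
 * matching loops, and reuse hdwq 0-3 from peers sharing their phys_id
 * and core_id, round-robining only when no peer matches.
 */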
11125
11126 /**
11127  * lpfc_cpuhp_get_eq - Collect the EQs left without an online cpu
11128  *
11129  * @phba:   pointer to lpfc hba data structure.
11130  * @cpu:    cpu going offline
11131  * @eqlist: list head onto which the affected EQs are gathered for polling
11132  */
11133 static int
11134 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
11135                   struct list_head *eqlist)
11136 {
11137         const struct cpumask *maskp;
11138         struct lpfc_queue *eq;
11139         struct cpumask *tmp;
11140         u16 idx;
11141
11142         tmp = kzalloc(cpumask_size(), GFP_KERNEL);
11143         if (!tmp)
11144                 return -ENOMEM;
11145
11146         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11147                 maskp = pci_irq_get_affinity(phba->pcidev, idx);
11148                 if (!maskp)
11149                         continue;
11150                 /*
11151                  * If the irq is not affinitized to the cpu going
11152                  * offline, we don't need to poll the eq attached
11153                  * to it.
11154                  */
11155                 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
11156                         continue;
11157                 /* Get the cpus that are online and affinitized to
11158                  * this irq vector.  If the count is more than 1,
11159                  * cpuhp is not going to shut down this vector.
11160                  * Since this cpu has not gone offline yet, we
11161                  * need >1.
11162                  */
11163                 cpumask_and(tmp, maskp, cpu_online_mask);
11164                 if (cpumask_weight(tmp) > 1)
11165                         continue;
11166
11167                 /* Now that we have an irq to shut down, get the eq
11168                  * mapped to this irq.  Note: multiple hdwq's in
11169                  * the software can share an eq, but eventually
11170                  * only one eq will be mapped to this vector.
11171                  */
11172                 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
11173                 list_add(&eq->_poll_list, eqlist);
11174         }
11175         kfree(tmp);
11176         return 0;
11177 }
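/*
 * A worked example of the weight test above, with illustrative masks:
 * if a vector is affinitized to cpus {2, 5} and cpu 5 is going offline,
 * cpumask_and() with cpu_online_mask still counts both cpus (cpu 5 has
 * not left cpu_online_mask yet), so the weight is 2 and the eq is
 * skipped - cpu 2 keeps servicing it. If the vector is affinitized to
 * {5} alone, the weight is 1, the vector will be shut down, and the eq
 * is queued on eqlist for software polling.
 */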
11178
11179 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11180 {
11181         if (phba->sli_rev != LPFC_SLI_REV4)
11182                 return;
11183
11184         cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11185                                             &phba->cpuhp);
11186         /*
11187          * unregistering the instance doesn't stop the polling
11188          * timer. Wait for the poll timer to retire.
11189          */
11190         synchronize_rcu();
11191         del_timer_sync(&phba->cpuhp_poll_timer);
11192 }
11193
11194 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11195 {
11196         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11197                 return;
11198
11199         __lpfc_cpuhp_remove(phba);
11200 }
11201
11202 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
11203 {
11204         if (phba->sli_rev != LPFC_SLI_REV4)
11205                 return;
11206
11207         rcu_read_lock();
11208
11209         if (!list_empty(&phba->poll_list))
11210                 mod_timer(&phba->cpuhp_poll_timer,
11211                           jiffies + msecs_to_jiffies(LPFC_POLL_HB));
11212
11213         rcu_read_unlock();
11214
11215         cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
11216                                          &phba->cpuhp);
11217 }
11218
11219 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11220 {
11221         if (phba->pport->load_flag & FC_UNLOADING) {
11222                 *retval = -EAGAIN;
11223                 return true;
11224         }
11225
11226         if (phba->sli_rev != LPFC_SLI_REV4) {
11227                 *retval = 0;
11228                 return true;
11229         }
11230
11231         /* proceed with the hotplug */
11232         return false;
11233 }
11234
11235 /**
11236  * lpfc_irq_set_aff - set IRQ affinity
11237  * @eqhdl: EQ handle
11238  * @cpu: cpu to set affinity
11239  *
11240  **/
11241 static inline void
11242 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
11243 {
11244         cpumask_clear(&eqhdl->aff_mask);
11245         cpumask_set_cpu(cpu, &eqhdl->aff_mask);
11246         irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11247         irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
11248 }
11249
11250 /**
11251  * lpfc_irq_clear_aff - clear IRQ affinity
11252  * @eqhdl: EQ handle
11253  *
11254  **/
11255 static inline void
11256 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
11257 {
11258         cpumask_clear(&eqhdl->aff_mask);
11259         irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11260         irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
11261 }
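/*
 * A usage sketch for the pair above (the EQ index and cpu number are
 * illustrative): pin EQ vector 0 to cpu 4, then later undo it.
 *
 *	lpfc_irq_set_aff(lpfc_get_eq_hdl(0), 4);
 *	...
 *	lpfc_irq_clear_aff(lpfc_get_eq_hdl(0));
 *
 * IRQ_NO_BALANCING keeps irqbalance from moving the vector while the
 * driver-chosen affinity is in force; clearing it hands the vector back
 * to the balancer.
 */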
11262
11263 /**
11264  * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
11265  * @phba: pointer to HBA context object.
11266  * @cpu: cpu going offline/online
11267  * @offline: true, cpu is going offline. false, cpu is coming online.
11268  *
11269  * If cpu is going offline, we'll try our best effort to find the next
11270  * online cpu on the phba's original_mask and migrate all offlining IRQ
11271  * affinities.
11272  *
11273  * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
11274  *
11275  * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
11276  *       PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
11277  *
11278  **/
11279 static void
11280 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
11281 {
11282         struct lpfc_vector_map_info *cpup;
11283         struct cpumask *aff_mask;
11284         unsigned int cpu_select, cpu_next, idx;
11285         const struct cpumask *orig_mask;
11286
11287         if (phba->irq_chann_mode == NORMAL_MODE)
11288                 return;
11289
11290         orig_mask = &phba->sli4_hba.irq_aff_mask;
11291
11292         if (!cpumask_test_cpu(cpu, orig_mask))
11293                 return;
11294
11295         cpup = &phba->sli4_hba.cpu_map[cpu];
11296
11297         if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11298                 return;
11299
11300         if (offline) {
11301                 /* Find next online CPU on original mask */
11302                 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
11303                 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
11304
11305                 /* Found a valid CPU */
11306                 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
11307                         /* Go through each eqhdl and ensure offlining
11308                          * cpu aff_mask is migrated
11309                          */
11310                         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11311                                 aff_mask = lpfc_get_aff_mask(idx);
11312
11313                                 /* Migrate affinity */
11314                                 if (cpumask_test_cpu(cpu, aff_mask))
11315                                         lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
11316                                                          cpu_select);
11317                         }
11318                 } else {
11319                         /* Rely on irqbalance if no online CPUs left on NUMA */
11320                         for (idx = 0; idx < phba->cfg_irq_chann; idx++)
11321                                 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
11322                 }
11323         } else {
11324                 /* Migrate affinity back to this CPU */
11325                 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
11326         }
11327 }
11328
11329 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
11330 {
11331         struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11332         struct lpfc_queue *eq, *next;
11333         LIST_HEAD(eqlist);
11334         int retval;
11335
11336         if (!phba) {
11337                 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11338                 return 0;
11339         }
11340
11341         if (__lpfc_cpuhp_checks(phba, &retval))
11342                 return retval;
11343
11344         lpfc_irq_rebalance(phba, cpu, true);
11345
11346         retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
11347         if (retval)
11348                 return retval;
11349
11350         /* start polling on these EQs */
11351         list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
11352                 list_del_init(&eq->_poll_list);
11353                 lpfc_sli4_start_polling(eq);
11354         }
11355
11356         return 0;
11357 }
11358
11359 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
11360 {
11361         struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11362         struct lpfc_queue *eq, *next;
11363         unsigned int n;
11364         int retval;
11365
11366         if (!phba) {
11367                 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11368                 return 0;
11369         }
11370
11371         if (__lpfc_cpuhp_checks(phba, &retval))
11372                 return retval;
11373
11374         lpfc_irq_rebalance(phba, cpu, false);
11375
11376         list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
11377                 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
11378                 if (n == cpu)
11379                         lpfc_sli4_stop_polling(eq);
11380         }
11381
11382         return 0;
11383 }
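
/*
 * Sketch of how a pair of callbacks like the two above is typically
 * registered with the multi-instance cpuhp API (the state name string
 * here is illustrative; the driver's real registration lives in its
 * cpuhp add/remove helpers): a dynamic state is set up once per module,
 * then each HBA adds its own hlist_node so the callbacks fire per
 * adapter on every online/offline transition.
 */
static int __maybe_unused toy_cpuhp_register(struct lpfc_hba *phba)
{
	int state;

	state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					"scsi/toy:online",
					lpfc_cpu_online, lpfc_cpu_offline);
	if (state < 0)
		return state;

	return cpuhp_state_add_instance_nocalls(state, &phba->cpuhp);
}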
11384
11385 /**
11386  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
11387  * @phba: pointer to lpfc hba data structure.
11388  *
11389  * This routine is invoked to enable the MSI-X interrupt vectors to device
11390  * with SLI-4 interface spec.  It also allocates MSI-X vectors and maps them
11391  * to cpus on the system.
11392  *
11393  * When cfg_irq_numa is enabled, vectors are allocated only for as many
11394  * cpus as reside on the same numa node as this adapter.  The vectors are
11395  * allocated without requesting OS affinity mapping.  A vector will be
11396  * allocated and assigned to each online and offline cpu.  If the cpu is
11397  * online, then affinity will be set to that cpu.  If the cpu is offline, then
11398  * affinity will be set to the nearest peer cpu within the numa node that is
11399  * online.  If there are no online cpus within the numa node, affinity is not
11400  * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
11401  * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
11402  * configured.
11403  *
11404  * If numa mode is not enabled and there is more than 1 vector allocated, then
11405  * the driver relies on the managed irq interface, where the OS assigns
11406  * vector-to-cpu affinity.  The driver then uses that affinity mapping to set up its
11407  * cpu mapping table.
11408  *
11409  * Return codes
11410  * 0 - successful
11411  * other values - error
11412  **/
11413 static int
11414 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11415 {
11416         int vectors, rc, index;
11417         char *name;
11418         const struct cpumask *aff_mask = NULL;
11419         unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
11420         struct lpfc_hba_eq_hdl *eqhdl;
11421         const struct cpumask *maskp;
11422         bool first;
11423         unsigned int flags = PCI_IRQ_MSIX;
11424
11425         /* Set up MSI-X multi-message vectors */
11426         vectors = phba->cfg_irq_chann;
11427
11428         if (phba->irq_chann_mode != NORMAL_MODE)
11429                 aff_mask = &phba->sli4_hba.irq_aff_mask;
11430
11431         if (aff_mask) {
11432                 cpu_cnt = cpumask_weight(aff_mask);
11433                 vectors = min(phba->cfg_irq_chann, cpu_cnt);
11434
11435                 /* cpu: iterates over aff_mask including offline or online
11436                  * cpu_select: iterates over online aff_mask to set affinity
11437                  */
11438                 cpu = cpumask_first(aff_mask);
11439                 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11440         } else {
11441                 flags |= PCI_IRQ_AFFINITY;
11442         }
11443
11444         rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
11445         if (rc < 0) {
11446                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11447                                 "0484 PCI enable MSI-X failed (%d)\n", rc);
11448                 goto vec_fail_out;
11449         }
11450         vectors = rc;
11451
11452         /* Assign MSI-X vectors to interrupt handlers */
11453         for (index = 0; index < vectors; index++) {
11454                 eqhdl = lpfc_get_eq_hdl(index);
11455                 name = eqhdl->handler_name;
11456                 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11457                 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
11458                          LPFC_DRIVER_HANDLER_NAME"%d", index);
11459
11460                 eqhdl->idx = index;
11461                 rc = request_irq(pci_irq_vector(phba->pcidev, index),
11462                          &lpfc_sli4_hba_intr_handler, 0,
11463                          name, eqhdl);
11464                 if (rc) {
11465                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11466                                         "0486 MSI-X fast-path (%d) "
11467                                         "request_irq failed (%d)\n", index, rc);
11468                         goto cfg_fail_out;
11469                 }
11470
11471                 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
11472
11473                 if (aff_mask) {
11474                         /* If found a neighboring online cpu, set affinity */
11475                         if (cpu_select < nr_cpu_ids)
11476                                 lpfc_irq_set_aff(eqhdl, cpu_select);
11477
11478                         /* Assign EQ to cpu_map */
11479                         lpfc_assign_eq_map_info(phba, index,
11480                                                 LPFC_CPU_FIRST_IRQ,
11481                                                 cpu);
11482
11483                         /* Iterate to next offline or online cpu in aff_mask */
11484                         cpu = cpumask_next(cpu, aff_mask);
11485
11486                         /* Find next online cpu in aff_mask to set affinity */
11487                         cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11488                 } else if (vectors == 1) {
11489                         cpu = cpumask_first(cpu_present_mask);
11490                         lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
11491                                                 cpu);
11492                 } else {
11493                         maskp = pci_irq_get_affinity(phba->pcidev, index);
11494
11495                         first = true;
11496                         /* Loop through all CPUs associated with vector index */
11497                         for_each_cpu_and(cpu, maskp, cpu_present_mask) {
11498                                 /* If this is the first CPU that's assigned to
11499                                  * this vector, set LPFC_CPU_FIRST_IRQ.
11500                                  */
11501                                 lpfc_assign_eq_map_info(phba, index,
11502                                                         first ?
11503                                                         LPFC_CPU_FIRST_IRQ : 0,
11504                                                         cpu);
11505                                 if (first)
11506                                         first = false;
11507                         }
11508                 }
11509         }
11510
11511         if (vectors != phba->cfg_irq_chann) {
11512                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11513                                 "3238 Reducing IO channels to match number of "
11514                                 "MSI-X vectors, requested %d got %d\n",
11515                                 phba->cfg_irq_chann, vectors);
11516                 if (phba->cfg_irq_chann > vectors)
11517                         phba->cfg_irq_chann = vectors;
11518         }
11519
11520         return rc;
11521
11522 cfg_fail_out:
11523         /* free the irq already requested */
11524         for (--index; index >= 0; index--) {
11525                 eqhdl = lpfc_get_eq_hdl(index);
11526                 lpfc_irq_clear_aff(eqhdl);
11527                 irq_set_affinity_hint(eqhdl->irq, NULL);
11528                 free_irq(eqhdl->irq, eqhdl);
11529         }
11530
11531         /* Unconfigure MSI-X capability structure */
11532         pci_free_irq_vectors(phba->pcidev);
11533
11534 vec_fail_out:
11535         return rc;
11536 }
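
/*
 * The allocation dance above, minus the lpfc affinity bookkeeping, is
 * the standard MSI-X bring-up pattern; a minimal sketch (toy_* names
 * and the "toy-msix" label are hypothetical): ask the PCI core for up
 * to @want vectors, request_irq() each one, and unwind everything
 * already acquired if any request fails, just as cfg_fail_out does.
 */
static int __maybe_unused toy_enable_msix(struct pci_dev *pdev, int want,
					  irq_handler_t handler, void *data)
{
	int got, i, rc;

	got = pci_alloc_irq_vectors(pdev, 1, want,
				    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (got < 0)
		return got;

	for (i = 0; i < got; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "toy-msix", data);
		if (rc)
			goto unwind;
	}

	return got;	/* may be fewer than @want; callers must adapt */

unwind:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), data);
	pci_free_irq_vectors(pdev);
	return rc;
}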
11537
11538 /**
11539  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
11540  * @phba: pointer to lpfc hba data structure.
11541  *
11542  * This routine is invoked to enable the MSI interrupt mode to device with
11543  * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
11544  * called to enable the MSI vector. The device driver is responsible for
11545  * calling request_irq() to register the MSI vector with an interrupt
11546  * handler, which is done in this function.
11547  *
11548  * Return codes
11549  *      0 - successful
11550  *      other values - error
11551  **/
11552 static int
11553 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11554 {
11555         int rc, index;
11556         unsigned int cpu;
11557         struct lpfc_hba_eq_hdl *eqhdl;
11558
11559         rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11560                                    PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11561         if (rc > 0)
11562                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11563                                 "0487 PCI enable MSI mode success.\n");
11564         else {
11565                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11566                                 "0488 PCI enable MSI mode failed (%d)\n", rc);
11567                 return rc ? rc : -1;
11568         }
11569
11570         rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11571                          0, LPFC_DRIVER_NAME, phba);
11572         if (rc) {
11573                 pci_free_irq_vectors(phba->pcidev);
11574                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11575                                 "0490 MSI request_irq failed (%d)\n", rc);
11576                 return rc;
11577         }
11578
11579         eqhdl = lpfc_get_eq_hdl(0);
11580         eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11581
11582         cpu = cpumask_first(cpu_present_mask);
11583         lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
11584
11585         for (index = 0; index < phba->cfg_irq_chann; index++) {
11586                 eqhdl = lpfc_get_eq_hdl(index);
11587                 eqhdl->idx = index;
11588         }
11589
11590         return 0;
11591 }
11592
11593 /**
11594  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
11595  * @phba: pointer to lpfc hba data structure.
11596  *
11597  * This routine is invoked to enable device interrupt and associate driver's
11598  * interrupt handler(s) to interrupt vector(s) to device with SLI-4
11599  * interface spec. Depends on the interrupt mode configured to the driver,
11600  * interface spec. Depending on the interrupt mode configured for the driver,
11601  * the driver will try to fall back from the configured interrupt mode to an
11602  * the order of:
11603  * MSI-X -> MSI -> IRQ.
11604  *
11605  * Return codes
11606  *      0 - successful
11607  *      other values - error
11608  **/
11609 static uint32_t
11610 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11611 {
11612         uint32_t intr_mode = LPFC_INTR_ERROR;
11613         int retval, idx;
11614
11615         if (cfg_mode == 2) {
11616                 /* Preparation before conf_msi mbox cmd */
11617                 retval = 0;
11618                 if (!retval) {
11619                         /* Now, try to enable MSI-X interrupt mode */
11620                         retval = lpfc_sli4_enable_msix(phba);
11621                         if (!retval) {
11622                                 /* Indicate initialization to MSI-X mode */
11623                                 phba->intr_type = MSIX;
11624                                 intr_mode = 2;
11625                         }
11626                 }
11627         }
11628
11629         /* Fall back to MSI if MSI-X initialization failed */
11630         if (cfg_mode >= 1 && phba->intr_type == NONE) {
11631                 retval = lpfc_sli4_enable_msi(phba);
11632                 if (!retval) {
11633                         /* Indicate initialization to MSI mode */
11634                         phba->intr_type = MSI;
11635                         intr_mode = 1;
11636                 }
11637         }
11638
11639         /* Fall back to INTx if both MSI-X and MSI initialization failed */
11640         if (phba->intr_type == NONE) {
11641                 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11642                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11643                 if (!retval) {
11644                         struct lpfc_hba_eq_hdl *eqhdl;
11645                         unsigned int cpu;
11646
11647                         /* Indicate initialization to INTx mode */
11648                         phba->intr_type = INTx;
11649                         intr_mode = 0;
11650
11651                         eqhdl = lpfc_get_eq_hdl(0);
11652                         eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11653
11654                         cpu = cpumask_first(cpu_present_mask);
11655                         lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
11656                                                 cpu);
11657                         for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11658                                 eqhdl = lpfc_get_eq_hdl(idx);
11659                                 eqhdl->idx = idx;
11660                         }
11661                 }
11662         }
11663         return intr_mode;
11664 }
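
/*
 * For drivers that do not need per-mode bookkeeping, the PCI core can
 * express the same MSI-X -> MSI -> INTx ladder in one call via
 * PCI_IRQ_ALL_TYPES. A single-vector sketch (hypothetical toy_* names;
 * lpfc keeps the explicit ladder above because each rung sets up its
 * own EQ handle and cpu mapping state):
 */
static int __maybe_unused toy_enable_intr(struct pci_dev *pdev,
					  irq_handler_t handler, void *data)
{
	int rc;

	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (rc < 0)
		return rc;

	/* IRQF_SHARED is only needed if we landed on the legacy line */
	rc = request_irq(pci_irq_vector(pdev, 0), handler,
			 (pdev->msix_enabled || pdev->msi_enabled) ?
			 0 : IRQF_SHARED,
			 "toy", data);
	if (rc)
		pci_free_irq_vectors(pdev);

	return rc;
}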
11665
11666 /**
11667  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
11668  * @phba: pointer to lpfc hba data structure.
11669  *
11670  * This routine is invoked to disable device interrupt and disassociate
11671  * the driver's interrupt handler(s) from interrupt vector(s) to device
11672  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
11673  * will release the interrupt vector(s) for the message signaled interrupt.
11674  **/
11675 static void
11676 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11677 {
11678         /* Disable the currently initialized interrupt mode */
11679         if (phba->intr_type == MSIX) {
11680                 int index;
11681                 struct lpfc_hba_eq_hdl *eqhdl;
11682
11683                 /* Free up MSI-X multi-message vectors */
11684                 for (index = 0; index < phba->cfg_irq_chann; index++) {
11685                         eqhdl = lpfc_get_eq_hdl(index);
11686                         lpfc_irq_clear_aff(eqhdl);
11687                         irq_set_affinity_hint(eqhdl->irq, NULL);
11688                         free_irq(eqhdl->irq, eqhdl);
11689                 }
11690         } else {
11691                 free_irq(phba->pcidev->irq, phba);
11692         }
11693
11694         pci_free_irq_vectors(phba->pcidev);
11695
11696         /* Reset interrupt management states */
11697         phba->intr_type = NONE;
11698         phba->sli.slistat.sli_intr = 0;
11699 }
11700
11701 /**
11702  * lpfc_unset_hba - Unset SLI3 hba device initialization
11703  * @phba: pointer to lpfc hba data structure.
11704  *
11705  * This routine is invoked to unset the HBA device initialization steps to
11706  * a device with SLI-3 interface spec.
11707  **/
11708 static void
11709 lpfc_unset_hba(struct lpfc_hba *phba)
11710 {
11711         struct lpfc_vport *vport = phba->pport;
11712         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
11713
11714         spin_lock_irq(shost->host_lock);
11715         vport->load_flag |= FC_UNLOADING;
11716         spin_unlock_irq(shost->host_lock);
11717
11718         kfree(phba->vpi_bmask);
11719         kfree(phba->vpi_ids);
11720
11721         lpfc_stop_hba_timers(phba);
11722
11723         phba->pport->work_port_events = 0;
11724
11725         lpfc_sli_hba_down(phba);
11726
11727         lpfc_sli_brdrestart(phba);
11728
11729         lpfc_sli_disable_intr(phba);
11730
11731         return;
11732 }
11733
11734 /**
11735  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
11736  * @phba: Pointer to HBA context object.
11737  *
11738  * This function is called in the SLI4 code path to wait for completion
11739  * of the device's XRI exchange-busy state. It checks for XRI exchange
11740  * busy on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds;
11741  * after that, it checks every 30 seconds, logging an error message on
11742  * each pass, and waits forever. Only when all XRI exchange-busy states
11743  * have cleared does the driver unload proceed, invoking the function
11744  * reset mailbox command to the CNA and releasing the rest of the driver
11745  * unload resources.
11746  **/
11747 static void
11748 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11749 {
11750         struct lpfc_sli4_hdw_queue *qp;
11751         int idx, ccnt;
11752         int wait_time = 0;
11753         int io_xri_cmpl = 1;
11754         int nvmet_xri_cmpl = 1;
11755         int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11756
11757         /* Driver just aborted IOs during the hba_unset process.  Pause
11758          * here to give the HBA time to complete the IO and get entries
11759          * into the abts lists.
11760          */
11761         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11762
11763         /* Wait for NVME pending IO to flush back to transport. */
11764         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11765                 lpfc_nvme_wait_for_io_drain(phba);
11766
11767         ccnt = 0;
11768         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11769                 qp = &phba->sli4_hba.hdwq[idx];
11770                 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
11771                 if (!io_xri_cmpl) /* if list is NOT empty */
11772                         ccnt++;
11773         }
11774         if (ccnt)
11775                 io_xri_cmpl = 0;
11776
11777         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11778                 nvmet_xri_cmpl =
11779                         list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11780         }
11781
11782         while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
11783                 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
11784                         if (!nvmet_xri_cmpl)
11785                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11786                                                 "6424 NVMET XRI exchange busy "
11787                                                 "wait time: %d seconds.\n",
11788                                                 wait_time/1000);
11789                         if (!io_xri_cmpl)
11790                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11791                                                 "6100 IO XRI exchange busy "
11792                                                 "wait time: %d seconds.\n",
11793                                                 wait_time/1000);
11794                         if (!els_xri_cmpl)
11795                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11796                                                 "2878 ELS XRI exchange busy "
11797                                                 "wait time: %d seconds.\n",
11798                                                 wait_time/1000);
11799                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
11800                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
11801                 } else {
11802                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
11803                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
11804                 }
11805
11806                 ccnt = 0;
11807                 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11808                         qp = &phba->sli4_hba.hdwq[idx];
11809                         io_xri_cmpl = list_empty(
11810                             &qp->lpfc_abts_io_buf_list);
11811                         if (!io_xri_cmpl) /* if list is NOT empty */
11812                                 ccnt++;
11813                 }
11814                 if (ccnt)
11815                         io_xri_cmpl = 0;
11816
11817                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11818                         nvmet_xri_cmpl = list_empty(
11819                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11820                 }
11821                 els_xri_cmpl =
11822                         list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11823
11824         }
11825 }
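
/*
 * The loop above follows a two-phase wait shape worth seeing in
 * isolation (toy_* names hypothetical, reusing the driver's T1/T2/TMO
 * constants): poll quickly until the timeout budget is exhausted, then
 * drop to a slow poll that logs every pass and never gives up.
 */
static void __maybe_unused toy_two_phase_wait(bool (*done)(void *arg),
					      void *arg)
{
	int waited_ms = 0;

	while (!done(arg)) {
		if (waited_ms > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			pr_err("toy: still busy after %d seconds\n",
			       waited_ms / 1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			waited_ms += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			waited_ms += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
	}
}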
11826
11827 /**
11828  * lpfc_sli4_hba_unset - Unset the fcoe hba
11829  * @phba: Pointer to HBA context object.
11830  *
11831  * This function is called in the SLI4 code path to reset the HBA's FCoE
11832  * function. The caller is not required to hold any lock. This routine
11833  * issues PCI function reset mailbox command to reset the FCoE function.
11834  * At the end of the function, it calls lpfc_hba_down_post function to
11835  * free any pending commands.
11836  **/
11837 static void
11838 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
11839 {
11840         int wait_cnt = 0;
11841         LPFC_MBOXQ_t *mboxq;
11842         struct pci_dev *pdev = phba->pcidev;
11843
11844         lpfc_stop_hba_timers(phba);
11845         if (phba->pport)
11846                 phba->sli4_hba.intr_enable = 0;
11847
11848         /*
11849          * Gracefully wait out the potential current outstanding asynchronous
11850          * mailbox command.
11851          */
11852
11853         /* First, block any pending async mailbox command from being posted */
11854         spin_lock_irq(&phba->hbalock);
11855         phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11856         spin_unlock_irq(&phba->hbalock);
11857         /* Now, try to wait it out if we can */
11858         while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11859                 msleep(10);
11860                 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
11861                         break;
11862         }
11863         /* Forcefully release the outstanding mailbox command if timed out */
11864         if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11865                 spin_lock_irq(&phba->hbalock);
11866                 mboxq = phba->sli.mbox_active;
11867                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
11868                 __lpfc_mbox_cmpl_put(phba, mboxq);
11869                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11870                 phba->sli.mbox_active = NULL;
11871                 spin_unlock_irq(&phba->hbalock);
11872         }
11873
11874         /* Abort all iocbs associated with the hba */
11875         lpfc_sli_hba_iocb_abort(phba);
11876
11877         /* Wait for completion of device XRI exchange busy */
11878         lpfc_sli4_xri_exchange_busy_wait(phba);
11879
11880         /* per-phba callback de-registration for hotplug event */
11881         if (phba->pport)
11882                 lpfc_cpuhp_remove(phba);
11883
11884         /* Disable PCI subsystem interrupt */
11885         lpfc_sli4_disable_intr(phba);
11886
11887         /* Disable SR-IOV if enabled */
11888         if (phba->cfg_sriov_nr_virtfn)
11889                 pci_disable_sriov(pdev);
11890
11891         /* Stopping the kthread will trigger work_done one more time */
11892         kthread_stop(phba->worker_thread);
11893
11894         /* Disable FW logging to host memory */
11895         lpfc_ras_stop_fwlog(phba);
11896
11897         /* Unset the queues shared with the hardware then release all
11898          * allocated resources.
11899          */
11900         lpfc_sli4_queue_unset(phba);
11901         lpfc_sli4_queue_destroy(phba);
11902
11903         /* Reset SLI4 HBA FCoE function */
11904         lpfc_pci_function_reset(phba);
11905
11906         /* Free RAS DMA memory */
11907         if (phba->ras_fwlog.ras_enabled)
11908                 lpfc_sli4_ras_dma_free(phba);
11909
11910         /* Stop the SLI4 device port */
11911         if (phba->pport)
11912                 phba->pport->work_port_events = 0;
11913 }
11914
11915 /**
11916  * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
11917  * @phba: Pointer to HBA context object.
11918  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
11919  *
11920  * This function is called in the SLI4 code path to read the port's
11921  * sli4 capabilities.
11922  *
11923  * This function may be called from any context that can block-wait
11924  * for the completion.  The expectation is that this routine is called
11925  * typically from probe_one or from the online routine.
11926  **/
11927 int
11928 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11929 {
11930         int rc;
11931         struct lpfc_mqe *mqe;
11932         struct lpfc_pc_sli4_params *sli4_params;
11933         uint32_t mbox_tmo;
11934
11935         rc = 0;
11936         mqe = &mboxq->u.mqe;
11937
11938         /* Read the port's SLI4 Parameters port capabilities */
11939         lpfc_pc_sli4_params(mboxq);
11940         if (!phba->sli4_hba.intr_enable)
11941                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11942         else {
11943                 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
11944                 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11945         }
11946
11947         if (unlikely(rc))
11948                 return 1;
11949
11950         sli4_params = &phba->sli4_hba.pc_sli4_params;
11951         sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
11952         sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
11953         sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
11954         sli4_params->featurelevel_1 = bf_get(featurelevel_1,
11955                                              &mqe->un.sli4_params);
11956         sli4_params->featurelevel_2 = bf_get(featurelevel_2,
11957                                              &mqe->un.sli4_params);
11958         sli4_params->proto_types = mqe->un.sli4_params.word3;
11959         sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
11960         sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
11961         sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
11962         sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
11963         sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
11964         sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
11965         sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
11966         sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
11967         sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
11968         sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
11969         sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
11970         sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
11971         sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
11972         sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
11973         sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
11974         sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
11975         sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
11976         sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
11977         sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
11978         sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
11979
11980         /* Make sure that sge_supp_len can be handled by the driver */
11981         if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
11982                 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
11983
11984         return rc;
11985 }
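
/*
 * Both SLI4 parameter readers in this file issue their mailbox with the
 * same early-probe pattern, condensed here as a sketch (hypothetical
 * toy_* wrapper): poll for completion while interrupts are not yet
 * enabled, otherwise block-wait using the command's own timeout.
 */
static int __maybe_unused toy_issue_mbox(struct lpfc_hba *phba,
					 LPFC_MBOXQ_t *mboxq)
{
	uint32_t mbox_tmo;

	if (!phba->sli4_hba.intr_enable)
		return lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	return lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
}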
11986
11987 /**
11988  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
11989  * @phba: Pointer to HBA context object.
11990  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
11991  *
11992  * This function is called in the SLI4 code path to read the port's
11993  * sli4 capabilities.
11994  *
11995  * This function may be called from any context that can block-wait
11996  * for the completion.  The expectation is that this routine is called
11997  * typically from probe_one or from the online routine.
11998  **/
11999 int
12000 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
12001 {
12002         int rc;
12003         struct lpfc_mqe *mqe = &mboxq->u.mqe;
12004         struct lpfc_pc_sli4_params *sli4_params;
12005         uint32_t mbox_tmo;
12006         int length;
12007         bool exp_wqcq_pages = true;
12008         struct lpfc_sli4_parameters *mbx_sli4_parameters;
12009
12010         /*
12011          * By default, the driver assumes the SLI4 port requires RPI
12012          * header postings.  The SLI4_PARAM response will correct this
12013          * assumption.
12014          */
12015         phba->sli4_hba.rpi_hdrs_in_use = 1;
12016
12017         /* Read the port's SLI4 Config Parameters */
12018         length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
12019                   sizeof(struct lpfc_sli4_cfg_mhdr));
12020         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
12021                          LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
12022                          length, LPFC_SLI4_MBX_EMBED);
12023         if (!phba->sli4_hba.intr_enable)
12024                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
12025         else {
12026                 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
12027                 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12028         }
12029         if (unlikely(rc))
12030                 return rc;
12031         sli4_params = &phba->sli4_hba.pc_sli4_params;
12032         mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
12033         sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
12034         sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
12035         sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
12036         sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
12037                                              mbx_sli4_parameters);
12038         sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
12039                                              mbx_sli4_parameters);
12040         if (bf_get(cfg_phwq, mbx_sli4_parameters))
12041                 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
12042         else
12043                 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
12044         sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
12045         sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
12046         sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
12047         sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
12048         sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
12049         sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
12050         sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
12051         sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
12052         sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
12053         sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
12054         sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
12055         sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
12056         sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
12057                                             mbx_sli4_parameters);
12058         sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
12059         sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
12060                                            mbx_sli4_parameters);
12061         phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
12062         phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
12063
12064         /* Check for Extended Pre-Registered SGL support */
12065         phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
12066
12067         /* Check for firmware nvme support */
12068         rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
12069                      bf_get(cfg_xib, mbx_sli4_parameters));
12070
12071         if (rc) {
12072                 /* Save this to indicate the Firmware supports NVME */
12073                 sli4_params->nvme = 1;
12074
12075                 /* Firmware NVME support, check driver FC4 NVME support */
12076                 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
12077                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12078                                         "6133 Disabling NVME support: "
12079                                         "FC4 type not supported: x%x\n",
12080                                         phba->cfg_enable_fc4_type);
12081                         goto fcponly;
12082                 }
12083         } else {
12084                 /* No firmware NVME support, check driver FC4 NVME support */
12085                 sli4_params->nvme = 0;
12086                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12087                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
12088                                         "6101 Disabling NVME support: Not "
12089                                         "supported by firmware (%d %d) x%x\n",
12090                                         bf_get(cfg_nvme, mbx_sli4_parameters),
12091                                         bf_get(cfg_xib, mbx_sli4_parameters),
12092                                         phba->cfg_enable_fc4_type);
12093 fcponly:
12094                         phba->nvme_support = 0;
12095                         phba->nvmet_support = 0;
12096                         phba->cfg_nvmet_mrq = 0;
12097                         phba->cfg_nvme_seg_cnt = 0;
12098
12099                         /* If no FC4 type support, move to just SCSI support */
12100                         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
12101                                 return -ENODEV;
12102                         phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
12103                 }
12104         }
12105
12106         /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
12107          * accommodate 512K and 1M IOs in a single nvme buf.
12108          */
12109         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
12110                 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
12111
12112         /* Only embed PBDE for if_type 6; PBDE support requires xib to be set */
12113         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
12114             LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
12115                 phba->cfg_enable_pbde = 0;
12116
12117         /*
12118          * To support Suppress Response feature we must satisfy 3 conditions.
12119          * lpfc_suppress_rsp module parameter must be set (default).
12120          * In SLI4-Parameters Descriptor:
12121          * Extended Inline Buffers (XIB) must be supported.
12122          * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
12123          * (double negative).
12124          */
12125         if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
12126             !(bf_get(cfg_nosr, mbx_sli4_parameters)))
12127                 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
12128         else
12129                 phba->cfg_suppress_rsp = 0;
12130
12131         if (bf_get(cfg_eqdr, mbx_sli4_parameters))
12132                 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
12133
12134         /* Make sure that sge_supp_len can be handled by the driver */
12135         if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
12136                 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
12137
12138         /*
12139          * Check whether the adapter supports an embedded copy of the
12140          * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
12141          * to use this option, 128-byte WQEs must be used.
12142          */
12143         if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
12144                 phba->fcp_embed_io = 1;
12145         else
12146                 phba->fcp_embed_io = 0;
12147
12148         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12149                         "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
12150                         bf_get(cfg_xib, mbx_sli4_parameters),
12151                         phba->cfg_enable_pbde,
12152                         phba->fcp_embed_io, phba->nvme_support,
12153                         phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
12154
12155         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
12156             LPFC_SLI_INTF_IF_TYPE_2) &&
12157             (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
12158                  LPFC_SLI_INTF_FAMILY_LNCR_A0))
12159                 exp_wqcq_pages = false;
12160
12161         if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
12162             (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
12163             exp_wqcq_pages &&
12164             (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
12165                 phba->enab_exp_wqcq_pages = 1;
12166         else
12167                 phba->enab_exp_wqcq_pages = 0;
12168         /*
12169          * Check if the SLI port supports MDS Diagnostics
12170          */
12171         if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
12172                 phba->mds_diags_support = 1;
12173         else
12174                 phba->mds_diags_support = 0;
12175
12176         /*
12177          * Check if the SLI port supports NSLER
12178          */
12179         if (bf_get(cfg_nsler, mbx_sli4_parameters))
12180                 phba->nsler = 1;
12181         else
12182                 phba->nsler = 0;
12183
12184         return 0;
12185 }
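
/*
 * A note on the bf_get() accessors used heavily in the two routines
 * above: they are plain shift-and-mask readers over mailbox words. The
 * real definition lives in lpfc_hw4.h; paraphrased, it is roughly
 *
 *	#define bf_get(name, ptr) \
 *		((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
 *
 * so each cfg_* symbol above expands to a word/shift/mask triple that
 * selects one field of the SLI4_PARAMETERS response.
 */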
12186
12187 /**
12188  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
12189  * @pdev: pointer to PCI device
12190  * @pid: pointer to PCI device identifier
12191  *
12192  * This routine is to be called to attach a device with SLI-3 interface spec
12193  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
12194  * presented on the PCI bus, the kernel PCI subsystem matches the device's
12195  * PCI-specific information against the driver's ID table to see if the driver
12196  * can support this kind of device. If the match is successful, the driver core
12197  * invokes this routine. If this routine determines it can claim the HBA, it
12198  * does all the initialization that it needs to do to handle the HBA properly.
12199  *
12200  * Return code
12201  *      0 - driver can claim the device
12202  *      negative value - driver cannot claim the device
12203  **/
12204 static int
12205 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
12206 {
12207         struct lpfc_hba   *phba;
12208         struct lpfc_vport *vport = NULL;
12209         struct Scsi_Host  *shost = NULL;
12210         int error;
12211         uint32_t cfg_mode, intr_mode;
12212
12213         /* Allocate memory for HBA structure */
12214         phba = lpfc_hba_alloc(pdev);
12215         if (!phba)
12216                 return -ENOMEM;
12217
12218         /* Perform generic PCI device enabling operation */
12219         error = lpfc_enable_pci_dev(phba);
12220         if (error)
12221                 goto out_free_phba;
12222
12223         /* Set up SLI API function jump table for PCI-device group-0 HBAs */
12224         error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
12225         if (error)
12226                 goto out_disable_pci_dev;
12227
12228         /* Set up SLI-3 specific device PCI memory space */
12229         error = lpfc_sli_pci_mem_setup(phba);
12230         if (error) {
12231                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12232                                 "1402 Failed to set up pci memory space.\n");
12233                 goto out_disable_pci_dev;
12234         }
12235
12236         /* Set up SLI-3 specific device driver resources */
12237         error = lpfc_sli_driver_resource_setup(phba);
12238         if (error) {
12239                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12240                                 "1404 Failed to set up driver resource.\n");
12241                 goto out_unset_pci_mem_s3;
12242         }
12243
12244         /* Initialize and populate the iocb list per host */
12245
12246         error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
12247         if (error) {
12248                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12249                                 "1405 Failed to initialize iocb list.\n");
12250                 goto out_unset_driver_resource_s3;
12251         }
12252
12253         /* Set up common device driver resources */
12254         error = lpfc_setup_driver_resource_phase2(phba);
12255         if (error) {
12256                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12257                                 "1406 Failed to set up driver resource.\n");
12258                 goto out_free_iocb_list;
12259         }
12260
12261         /* Get the default values for Model Name and Description */
12262         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
12263
12264         /* Create SCSI host to the physical port */
12265         error = lpfc_create_shost(phba);
12266         if (error) {
12267                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12268                                 "1407 Failed to create scsi host.\n");
12269                 goto out_unset_driver_resource;
12270         }
12271
12272         /* Configure sysfs attributes */
12273         vport = phba->pport;
12274         error = lpfc_alloc_sysfs_attr(vport);
12275         if (error) {
12276                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12277                                 "1476 Failed to allocate sysfs attr\n");
12278                 goto out_destroy_shost;
12279         }
12280
12281         shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
12282         /* Now, trying to enable interrupt and bring up the device */
12283         cfg_mode = phba->cfg_use_msi;
12284         while (true) {
12285                 /* Put device to a known state before enabling interrupt */
12286                 lpfc_stop_port(phba);
12287                 /* Configure and enable interrupt */
12288                 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
12289                 if (intr_mode == LPFC_INTR_ERROR) {
12290                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12291                                         "0431 Failed to enable interrupt.\n");
12292                         error = -ENODEV;
12293                         goto out_free_sysfs_attr;
12294                 }
12295                 /* SLI-3 HBA setup */
12296                 if (lpfc_sli_hba_setup(phba)) {
12297                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12298                                         "1477 Failed to set up hba\n");
12299                         error = -ENODEV;
12300                         goto out_remove_device;
12301                 }
12302
12303                 /* Wait 50ms for the interrupts of previous mailbox commands */
12304                 msleep(50);
12305                 /* Check active interrupts on message signaled interrupts */
12306                 if (intr_mode == 0 ||
12307                     phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
12308                         /* Log the current active interrupt mode */
12309                         phba->intr_mode = intr_mode;
12310                         lpfc_log_intr_mode(phba, intr_mode);
12311                         break;
12312                 } else {
12313                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12314                                         "0447 Configure interrupt mode (%d) "
12315                                         "failed active interrupt test.\n",
12316                                         intr_mode);
12317                         /* Disable the current interrupt mode */
12318                         lpfc_sli_disable_intr(phba);
12319                         /* Try next level of interrupt mode */
12320                         cfg_mode = --intr_mode;
12321                 }
12322         }
12323
12324         /* Perform post initialization setup */
12325         lpfc_post_init_setup(phba);
12326
12327         /* Check if there are static vports to be created. */
12328         lpfc_create_static_vport(phba);
12329
12330         return 0;
12331
12332 out_remove_device:
12333         lpfc_unset_hba(phba);
12334 out_free_sysfs_attr:
12335         lpfc_free_sysfs_attr(vport);
12336 out_destroy_shost:
12337         lpfc_destroy_shost(phba);
12338 out_unset_driver_resource:
12339         lpfc_unset_driver_resource_phase2(phba);
12340 out_free_iocb_list:
12341         lpfc_free_iocb_list(phba);
12342 out_unset_driver_resource_s3:
12343         lpfc_sli_driver_resource_unset(phba);
12344 out_unset_pci_mem_s3:
12345         lpfc_sli_pci_mem_unset(phba);
12346 out_disable_pci_dev:
12347         lpfc_disable_pci_dev(phba);
12348         if (shost)
12349                 scsi_host_put(shost);
12350 out_free_phba:
12351         lpfc_hba_free(phba);
12352         return error;
12353 }
12354
12355 /**
12356  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
12357  * @pdev: pointer to PCI device
12358  *
12359  * This routine is to be called to detach a device with SLI-3 interface
12360  * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
12361  * removed from PCI bus, it performs all the necessary cleanup for the HBA
12362  * device to be removed from the PCI subsystem properly.
12363  **/
12364 static void
12365 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
12366 {
12367         struct Scsi_Host  *shost = pci_get_drvdata(pdev);
12368         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12369         struct lpfc_vport **vports;
12370         struct lpfc_hba   *phba = vport->phba;
12371         int i;
12372
12373         spin_lock_irq(&phba->hbalock);
12374         vport->load_flag |= FC_UNLOADING;
12375         spin_unlock_irq(&phba->hbalock);
12376
12377         lpfc_free_sysfs_attr(vport);
12378
12379         /* Release all the vports against this physical port */
12380         vports = lpfc_create_vport_work_array(phba);
12381         if (vports != NULL)
12382                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12383                         if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12384                                 continue;
12385                         fc_vport_terminate(vports[i]->fc_vport);
12386                 }
12387         lpfc_destroy_vport_work_array(phba, vports);
12388
12389         /* Remove FC host and then SCSI host with the physical port */
12390         fc_remove_host(shost);
12391         scsi_remove_host(shost);
12392
12393         lpfc_cleanup(vport);
12394
12395         /*
12396          * Bring down the SLI Layer. This step disables all interrupts,
12397          * clears the rings, discards all mailbox commands, and resets
12398          * the HBA.
12399          */
12400
12401         /* HBA interrupt will be disabled after this call */
12402         lpfc_sli_hba_down(phba);
12403         /* Stopping the kthread will trigger work_done one more time */
12404         kthread_stop(phba->worker_thread);
12405         /* Final cleanup of txcmplq and reset the HBA */
12406         lpfc_sli_brdrestart(phba);
12407
12408         kfree(phba->vpi_bmask);
12409         kfree(phba->vpi_ids);
12410
12411         lpfc_stop_hba_timers(phba);
12412         spin_lock_irq(&phba->port_list_lock);
12413         list_del_init(&vport->listentry);
12414         spin_unlock_irq(&phba->port_list_lock);
12415
12416         lpfc_debugfs_terminate(vport);
12417
12418         /* Disable SR-IOV if enabled */
12419         if (phba->cfg_sriov_nr_virtfn)
12420                 pci_disable_sriov(pdev);
12421
12422         /* Disable interrupt */
12423         lpfc_sli_disable_intr(phba);
12424
12425         scsi_host_put(shost);
12426
12427         /*
12428          * Call scsi_free before mem_free since scsi bufs are released to their
12429          * corresponding pools here.
12430          */
12431         lpfc_scsi_free(phba);
12432         lpfc_free_iocb_list(phba);
12433
12434         lpfc_mem_free_all(phba);
12435
12436         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
12437                           phba->hbqslimp.virt, phba->hbqslimp.phys);
12438
12439         /* Free resources associated with SLI2 interface */
12440         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
12441                           phba->slim2p.virt, phba->slim2p.phys);
12442
12443         /* unmap adapter SLIM and Control Registers */
12444         iounmap(phba->ctrl_regs_memmap_p);
12445         iounmap(phba->slim_memmap_p);
12446
12447         lpfc_hba_free(phba);
12448
12449         pci_release_mem_regions(pdev);
12450         pci_disable_device(pdev);
12451 }
12452
12453 /**
12454  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
12455  * @pdev: pointer to PCI device
12456  * @msg: power management message
12457  *
12458  * This routine is to be called from the kernel's PCI subsystem to support
12459  * system Power Management (PM) to device with SLI-3 interface spec. When
12460  * PM invokes this method, it quiesces the device by stopping the driver's
12461  * worker thread for the device, turning off the device's interrupt and DMA,
12462  * and bringing the device offline. Note that the driver implements only the
12463  * minimum PM requirements for a power-aware driver: all possible PM messages
12464  * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated as
12465  * SUSPEND, and the driver fully reinitializes its device during the resume()
12466  * method call. The driver therefore sets the device to the PCI_D3hot state
12467  * in PCI config space instead of choosing a state according to the @msg
12468  * provided by the PM core.
12469  *
12470  * Return code
12471  *      0 - driver suspended the device
12472  *      Error otherwise
12473  **/
12474 static int
12475 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
12476 {
12477         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12478         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12479
12480         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12481                         "0473 PCI device Power Management suspend.\n");
12482
12483         /* Bring down the device */
12484         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12485         lpfc_offline(phba);
12486         kthread_stop(phba->worker_thread);
12487
12488         /* Disable interrupt from device */
12489         lpfc_sli_disable_intr(phba);
12490
12491         /* Save device state to PCI config space */
12492         pci_save_state(pdev);
12493         pci_set_power_state(pdev, PCI_D3hot);
12494
12495         return 0;
12496 }
12497
12498 /**
12499  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
12500  * @pdev: pointer to PCI device
12501  *
12502  * This routine is to be called from the kernel's PCI subsystem to support
12503  * system Power Management (PM) to device with SLI-3 interface spec. When PM
12504  * invokes this method, it restores the device's PCI config space state and
12505  * fully reinitializes the device and brings it online. Note that the driver
12506  * implements only the minimum PM requirements for a power-aware driver: all
12507  * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
12508  * method are treated as SUSPEND, and the driver fully reinitializes its
12509  * device during the resume() method call. The device is therefore set to
12510  * PCI_D0 directly in PCI config space before its state is restored.
12512  *
12513  * Return code
12514  *      0 - driver resumed the device
12515  *      Error otherwise
12516  **/
12517 static int
12518 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
12519 {
12520         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12521         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12522         uint32_t intr_mode;
12523         int error;
12524
12525         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12526                         "0452 PCI device Power Management resume.\n");
12527
12528         /* Restore device state from PCI config space */
12529         pci_set_power_state(pdev, PCI_D0);
12530         pci_restore_state(pdev);
12531
12532         /*
12533          * Because pci_restore_state() clears the device's saved_state flag,
12534          * the restored state must be saved again.
12535          */
12536         pci_save_state(pdev);
12537
12538         if (pdev->is_busmaster)
12539                 pci_set_master(pdev);
12540
12541         /* Startup the kernel thread for this host adapter. */
12542         phba->worker_thread = kthread_run(lpfc_do_work, phba,
12543                                         "lpfc_worker_%d", phba->brd_no);
12544         if (IS_ERR(phba->worker_thread)) {
12545                 error = PTR_ERR(phba->worker_thread);
12546                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12547                                 "0434 PM resume failed to start worker "
12548                                 "thread: error=x%x.\n", error);
12549                 return error;
12550         }
12551
12552         /* Configure and enable interrupt */
12553         intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12554         if (intr_mode == LPFC_INTR_ERROR) {
12555                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12556                                 "0430 PM resume Failed to enable interrupt\n");
12557                 return -EIO;
12558         } else
12559                 phba->intr_mode = intr_mode;
12560
12561         /* Restart HBA and bring it online */
12562         lpfc_sli_brdrestart(phba);
12563         lpfc_online(phba);
12564
12565         /* Log the current active interrupt mode */
12566         lpfc_log_intr_mode(phba, phba->intr_mode);
12567
12568         return 0;
12569 }
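
/*
 * The suspend/resume pair above boils down to the legacy PCI PM
 * contract; a minimal sketch with hypothetical toy_* hooks (note the
 * extra pci_save_state() on resume, needed because pci_restore_state()
 * clears the saved_state flag):
 */
static int __maybe_unused toy_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	/* Quiesce the device first, then park it in D3hot */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int __maybe_unused toy_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);		/* restore clears saved_state */

	if (pdev->is_busmaster)
		pci_set_master(pdev);
	return 0;
}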
12570
12571 /**
12572  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
12573  * @phba: pointer to lpfc hba data structure.
12574  *
12575  * This routine is called to prepare the SLI3 device for PCI slot recover. It
12576  * aborts all the outstanding SCSI I/Os to the pci device.
12577  **/
12578 static void
12579 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
12580 {
12581         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12582                         "2723 PCI channel I/O abort preparing for recovery\n");
12583
12584         /*
12585          * There may be errored I/Os in flight through the HBA; abort all I/Os
12586          * on the txcmplq and let the SCSI mid-layer retry them to recover.
12587          */
12588         lpfc_sli_abort_fcp_rings(phba);
12589 }
12590
12591 /**
12592  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
12593  * @phba: pointer to lpfc hba data structure.
12594  *
12595  * This routine is called to prepare the SLI3 device for PCI slot reset. It
12596  * disables the device interrupt and the PCI device, and flushes the
12597  * internal pending FCP I/Os.
12598  **/
12599 static void
12600 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
12601 {
12602         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12603                         "2710 PCI channel disable preparing for reset\n");
12604
12605         /* Block any management I/Os to the device */
12606         lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
12607
12608         /* Block all SCSI devices' I/Os on the host */
12609         lpfc_scsi_dev_block(phba);
12610
12611         /* Flush all driver's outstanding SCSI I/Os as we are to reset */
12612         lpfc_sli_flush_io_rings(phba);
12613
12614         /* stop all timers */
12615         lpfc_stop_hba_timers(phba);
12616
12617         /* Disable interrupt and pci device */
12618         lpfc_sli_disable_intr(phba);
12619         pci_disable_device(phba->pcidev);
12620 }
12621
12622 /**
12623  * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
12624  * @phba: pointer to lpfc hba data structure.
12625  *
12626  * This routine is called to prepare the SLI3 device for the PCI slot being
12627  * permanently disabled. It blocks the SCSI transport layer traffic and
12628  * flushes the pending FCP I/Os.
12629  **/
12630 static void
12631 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
12632 {
12633         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12634                         "2711 PCI channel permanent disable for failure\n");
12635         /* Block all SCSI devices' I/Os on the host */
12636         lpfc_scsi_dev_block(phba);
12637
12638         /* stop all timers */
12639         lpfc_stop_hba_timers(phba);
12640
12641         /* Clean up all driver's outstanding SCSI I/Os */
12642         lpfc_sli_flush_io_rings(phba);
12643 }
12644
12645 /**
12646  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
12647  * @pdev: pointer to PCI device.
12648  * @state: the current PCI connection state.
12649  *
12650  * This routine is called from the PCI subsystem for I/O error handling on a
12651  * device with the SLI-3 interface spec. The PCI subsystem calls this
12652  * function after a PCI bus error affecting this device has been detected.
12653  * When this function is invoked, it needs to stop all the I/Os and
12654  * interrupt(s) to the device. Once that is done, it returns
12655  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
12656  * as desired.
12657  *
12658  * Return codes
12659  *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
12660  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
12661  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12662  **/
12663 static pci_ers_result_t
12664 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
12665 {
12666         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12667         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12668
12669         switch (state) {
12670         case pci_channel_io_normal:
12671                 /* Non-fatal error, prepare for recovery */
12672                 lpfc_sli_prep_dev_for_recover(phba);
12673                 return PCI_ERS_RESULT_CAN_RECOVER;
12674         case pci_channel_io_frozen:
12675                 /* Fatal error, prepare for slot reset */
12676                 lpfc_sli_prep_dev_for_reset(phba);
12677                 return PCI_ERS_RESULT_NEED_RESET;
12678         case pci_channel_io_perm_failure:
12679                 /* Permanent failure, prepare for device down */
12680                 lpfc_sli_prep_dev_for_perm_failure(phba);
12681                 return PCI_ERS_RESULT_DISCONNECT;
12682         default:
12683                 /* Unknown state, prepare and request slot reset */
12684                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12685                                 "0472 Unknown PCI error state: x%x\n", state);
12686                 lpfc_sli_prep_dev_for_reset(phba);
12687                 return PCI_ERS_RESULT_NEED_RESET;
12688         }
12689 }
12690
12691 /**
12692  * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
12693  * @pdev: pointer to PCI device.
12694  *
12695  * This routine is called from the PCI subsystem for error handling on a
12696  * device with the SLI-3 interface spec. It is called after the PCI bus has
12697  * been reset to restart the PCI card from scratch, as if from a cold-boot.
12698  * During the PCI subsystem error recovery, after the driver returns
12699  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs the proper error
12700  * recovery and then calls this routine before calling the .resume method
12701  * to recover the device. This function initializes the HBA device and
12702  * enables its interrupt, but it just puts the HBA into an offline state
12703  * without passing any I/O traffic.
12704  *
12705  * Return codes
12706  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
12707  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12708  */
12709 static pci_ers_result_t
12710 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
12711 {
12712         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12713         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12714         struct lpfc_sli *psli = &phba->sli;
12715         uint32_t intr_mode;
12716
12717         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
12718         if (pci_enable_device_mem(pdev)) {
12719                 printk(KERN_ERR "lpfc: Cannot re-enable "
12720                         "PCI device after reset.\n");
12721                 return PCI_ERS_RESULT_DISCONNECT;
12722         }
12723
12724         pci_restore_state(pdev);
12725
12726         /*
12727          * As the newer kernel behavior of pci_restore_state() clears the
12728          * device's saved_state flag, the restored state must be saved again.
12729          */
12730         pci_save_state(pdev);
12731
12732         if (pdev->is_busmaster)
12733                 pci_set_master(pdev);
12734
12735         spin_lock_irq(&phba->hbalock);
12736         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
12737         spin_unlock_irq(&phba->hbalock);
12738
12739         /* Configure and enable interrupt */
12740         intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12741         if (intr_mode == LPFC_INTR_ERROR) {
12742                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12743                                 "0427 Cannot re-enable interrupt after "
12744                                 "slot reset.\n");
12745                 return PCI_ERS_RESULT_DISCONNECT;
12746         }
12747         phba->intr_mode = intr_mode;
12748
12749         /* Take device offline, it will perform cleanup */
12750         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12751         lpfc_offline(phba);
12752         lpfc_sli_brdrestart(phba);
12753
12754         /* Log the current active interrupt mode */
12755         lpfc_log_intr_mode(phba, phba->intr_mode);
12756
12757         return PCI_ERS_RESULT_RECOVERED;
12758 }
12759
12760 /**
12761  * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
12762  * @pdev: pointer to PCI device
12763  *
12764  * This routine is called from the PCI subsystem for error handling on a
12765  * device with the SLI-3 interface spec. It is called when kernel error
12766  * recovery tells the lpfc driver that it is OK to resume normal PCI
12767  * operation after PCI bus error recovery. After this call, traffic can
12768  * start to flow from this device again.
12769  */
12770 static void
12771 lpfc_io_resume_s3(struct pci_dev *pdev)
12772 {
12773         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12774         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12775
12776         /* Bring the device online; this is a no-op for non-fatal error resume */
12777         lpfc_online(phba);
12778 }
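
/*
 * Illustrative sketch of the three-stage PCI error recovery flow that the
 * three SLI-3 handlers above implement. The PCI AER core, not the driver,
 * makes these calls in this order; the wrapper below is hypothetical and
 * exists only to show the sequence.
 */
#if 0
static void lpfc_sketch_aer_recovery_s3(struct pci_dev *pdev)
{
	/* 1. A fatal bus error froze the channel: quiesce and ask for reset */
	if (lpfc_io_error_detected_s3(pdev, pci_channel_io_frozen) !=
	    PCI_ERS_RESULT_NEED_RESET)
		return;

	/* 2. After the bus reset: re-enable the device and its interrupt,
	 * leaving the HBA offline with no I/O flowing yet.
	 */
	if (lpfc_io_slot_reset_s3(pdev) != PCI_ERS_RESULT_RECOVERED)
		return;

	/* 3. Recovery is complete: bring the HBA back online */
	lpfc_io_resume_s3(pdev);
}
#endif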
12779
12780 /**
12781  * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
12782  * @phba: pointer to lpfc hba data structure.
12783  *
12784  * Returns the number of ELS/CT IOCBs to reserve.
12785  **/
12786 int
12787 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12788 {
12789         int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12790
12791         if (phba->sli_rev == LPFC_SLI_REV4) {
12792                 if (max_xri <= 100)
12793                         return 10;
12794                 else if (max_xri <= 256)
12795                         return 25;
12796                 else if (max_xri <= 512)
12797                         return 50;
12798                 else if (max_xri <= 1024)
12799                         return 100;
12800                 else if (max_xri <= 1536)
12801                         return 150;
12802                 else if (max_xri <= 2048)
12803                         return 200;
12804                 else
12805                         return 250;
12806         }
12807         return 0;
12808 }
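
/*
 * For reference, the reservation tiers implemented above (an editor's
 * summary of the code, not an additional table from the SLI-4 spec):
 *
 *	max_xri		reserved ELS/CT IOCBs
 *	<= 100		 10
 *	<= 256		 25
 *	<= 512		 50
 *	<= 1024		100
 *	<= 1536		150
 *	<= 2048		200
 *	>  2048		250
 *
 * Non-SLI4 HBAs reserve nothing through this path.
 */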
12809
12810 /**
12811  * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
12812  * @phba: pointer to lpfc hba data structure.
12813  *
12814  * Returns the number of ELS/CT + NVMET IOCBs to reserve.
12815  **/
12816 int
12817 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12818 {
12819         int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12820
12821         if (phba->nvmet_support)
12822                 max_xri += LPFC_NVMET_BUF_POST;
12823         return max_xri;
12824 }
12825
12826
12827 static int
12828 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12829         uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12830         const struct firmware *fw)
12831 {
12832         int rc;
12833
12834         /* Three cases:  (1) FW was not supported on the detected adapter.
12835          * (2) FW update has been locked out administratively.
12836          * (3) Some other error during FW update.
12837          * In each case, an unmaskable message is written to the console
12838          * for admin diagnosis.
12839          */
12840         if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
12841             (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
12842              magic_number != MAGIC_NUMBER_G6) ||
12843             (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
12844              magic_number != MAGIC_NUMBER_G7)) {
12845                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12846                                 "3030 This firmware version is not supported on"
12847                                 " this HBA model. Device:%x Magic:%x Type:%x "
12848                                 "ID:%x Size %d %zd\n",
12849                                 phba->pcidev->device, magic_number, ftype, fid,
12850                                 fsize, fw->size);
12851                 rc = -EINVAL;
12852         } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
12853                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12854                                 "3021 Firmware downloads have been prohibited "
12855                                 "by a system configuration setting on "
12856                                 "Device:%x Magic:%x Type:%x ID:%x Size %d "
12857                                 "%zd\n",
12858                                 phba->pcidev->device, magic_number, ftype, fid,
12859                                 fsize, fw->size);
12860                 rc = -EACCES;
12861         } else {
12862                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12863                                 "3022 FW Download failed. Add Status x%x "
12864                                 "Device:%x Magic:%x Type:%x ID:%x Size %d "
12865                                 "%zd\n",
12866                                 offset, phba->pcidev->device, magic_number,
12867                                 ftype, fid, fsize, fw->size);
12868                 rc = -EIO;
12869         }
12870         return rc;
12871 }
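
/*
 * Summary of the mapping above (editor's note): an image that is not
 * supported on the adapter, or whose magic number does not match the
 * adapter generation, yields -EINVAL; an administratively locked-out
 * download yields -EACCES; any other download failure yields -EIO.
 */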
12872
12873 /**
12874  * lpfc_write_firmware - attempt to write a firmware image to the port
12875  * @fw: pointer to firmware image returned from request_firmware.
12876  * @context: pointer to the lpfc hba data structure that requested the
12877  *           firmware update (supplied through request_firmware_nowait()).
12878  *
12879  **/
12880 static void
12881 lpfc_write_firmware(const struct firmware *fw, void *context)
12882 {
12883         struct lpfc_hba *phba = (struct lpfc_hba *)context;
12884         char fwrev[FW_REV_STR_SIZE];
12885         struct lpfc_grp_hdr *image;
12886         struct list_head dma_buffer_list;
12887         int i, rc = 0;
12888         struct lpfc_dmabuf *dmabuf, *next;
12889         uint32_t offset = 0, temp_offset = 0;
12890         uint32_t magic_number, ftype, fid, fsize;
12891
12892         /* fw can be NULL in no-wait mode; sanity check */
12893         if (!fw) {
12894                 rc = -ENXIO;
12895                 goto out;
12896         }
12897         image = (struct lpfc_grp_hdr *)fw->data;
12898
12899         magic_number = be32_to_cpu(image->magic_number);
12900         ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
12901         fid = bf_get_be32(lpfc_grp_hdr_id, image);
12902         fsize = be32_to_cpu(image->size);
12903
12904         INIT_LIST_HEAD(&dma_buffer_list);
12905         lpfc_decode_firmware_rev(phba, fwrev, 1);
12906         if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
12907                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12908                                 "3023 Updating Firmware, Current Version:%s "
12909                                 "New Version:%s\n",
12910                                 fwrev, image->revision);
12911                 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
12912                         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
12913                                          GFP_KERNEL);
12914                         if (!dmabuf) {
12915                                 rc = -ENOMEM;
12916                                 goto release_out;
12917                         }
12918                         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
12919                                                           SLI4_PAGE_SIZE,
12920                                                           &dmabuf->phys,
12921                                                           GFP_KERNEL);
12922                         if (!dmabuf->virt) {
12923                                 kfree(dmabuf);
12924                                 rc = -ENOMEM;
12925                                 goto release_out;
12926                         }
12927                         list_add_tail(&dmabuf->list, &dma_buffer_list);
12928                 }
12929                 while (offset < fw->size) {
12930                         temp_offset = offset;
12931                         list_for_each_entry(dmabuf, &dma_buffer_list, list) {
12932                                 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
12933                                         memcpy(dmabuf->virt,
12934                                                fw->data + temp_offset,
12935                                                fw->size - temp_offset);
12936                                         temp_offset = fw->size;
12937                                         break;
12938                                 }
12939                                 memcpy(dmabuf->virt, fw->data + temp_offset,
12940                                        SLI4_PAGE_SIZE);
12941                                 temp_offset += SLI4_PAGE_SIZE;
12942                         }
12943                         rc = lpfc_wr_object(phba, &dma_buffer_list,
12944                                     (fw->size - offset), &offset);
12945                         if (rc) {
12946                                 rc = lpfc_log_write_firmware_error(phba, offset,
12947                                                                    magic_number,
12948                                                                    ftype,
12949                                                                    fid,
12950                                                                    fsize,
12951                                                                    fw);
12952                                 goto release_out;
12953                         }
12954                 }
12955                 rc = offset;
12956         } else
12957                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12958                                 "3029 Skipped Firmware update, Current "
12959                                 "Version:%s New Version:%s\n",
12960                                 fwrev, image->revision);
12961
12962 release_out:
12963         list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
12964                 list_del(&dmabuf->list);
12965                 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
12966                                   dmabuf->virt, dmabuf->phys);
12967                 kfree(dmabuf);
12968         }
12969         release_firmware(fw);
12970 out:
12971         if (rc < 0)
12972                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12973                                 "3062 Firmware update error, status %d.\n", rc);
12974         else
12975                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12976                                 "3024 Firmware update success: size %d.\n", rc);
12977 }
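
/*
 * Worked example of the chunking above (an editor's sketch, assuming the
 * 4 KiB SLI4_PAGE_SIZE from lpfc_hw4.h): each pass of the outer while
 * loop copies up to LPFC_MBX_WR_CONFIG_MAX_BDE * 4 KiB of the image into
 * the DMA buffer list, and lpfc_wr_object() advances @offset by however
 * much the port accepted, so a 1 MiB image completes in roughly
 * 1 MiB / (LPFC_MBX_WR_CONFIG_MAX_BDE * 4 KiB) mailbox passes.
 */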
12978
12979 /**
12980  * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
12981  * @phba: pointer to lpfc hba data structure.
12982  * @fw_upgrade: INT_FW_UPGRADE (no-wait) or RUN_FW_UPGRADE (blocking).
12983  *
12984  * Perform a Linux generic firmware upgrade on a device that supports it.
12985  **/
12986 int
12987 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
12988 {
12989         char file_name[ELX_MODEL_NAME_SIZE];
12990         int ret;
12991         const struct firmware *fw;
12992
12993         /* Only supported on SLI4 interface type 2 for now */
12994         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
12995             LPFC_SLI_INTF_IF_TYPE_2)
12996                 return -EPERM;
12997
12998         snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
12999
13000         if (fw_upgrade == INT_FW_UPGRADE) {
13001                 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
13002                                         file_name, &phba->pcidev->dev,
13003                                         GFP_KERNEL, (void *)phba,
13004                                         lpfc_write_firmware);
13005         } else if (fw_upgrade == RUN_FW_UPGRADE) {
13006                 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
13007                 if (!ret)
13008                         lpfc_write_firmware(fw, (void *)phba);
13009         } else {
13010                 ret = -EINVAL;
13011         }
13012
13013         return ret;
13014 }
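
/*
 * Usage sketch for the two upgrade modes above (editor's illustration;
 * the calls are shown bare, outside any real call site):
 */
#if 0
/* Asynchronous: returns at once; lpfc_write_firmware() runs from the
 * firmware loader's context when <ModelName>.grp becomes available.
 */
lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

/* Synchronous: request_firmware() blocks, and the image is written
 * before this call returns.
 */
ret = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
#endif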
13015
13016 /**
13017  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
13018  * @pdev: pointer to PCI device
13019  * @pid: pointer to PCI device identifier
13020  *
13021  * This routine is called from the kernel's PCI subsystem to register a
13022  * device with the SLI-4 interface spec. When an Emulex HBA with the SLI-4
13023  * interface spec is presented on the PCI bus, the kernel PCI subsystem looks
13024  * at the PCI device-specific information of the device to see whether the
13025  * driver states that it can support this kind of device. If the match is
13026  * successful, the driver core invokes this routine. If this routine
13027  * determines it can claim the HBA, it does all the initialization that it
13028  * needs to do to handle the HBA properly.
13029  *
13030  * Return code
13031  *      0 - driver can claim the device
13032  *      negative value - driver cannot claim the device
13033  **/
13034 static int
13035 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
13036 {
13037         struct lpfc_hba   *phba;
13038         struct lpfc_vport *vport = NULL;
13039         struct Scsi_Host  *shost = NULL;
13040         int error;
13041         uint32_t cfg_mode, intr_mode;
13042
13043         /* Allocate memory for HBA structure */
13044         phba = lpfc_hba_alloc(pdev);
13045         if (!phba)
13046                 return -ENOMEM;
13047
13048         /* Perform generic PCI device enabling operation */
13049         error = lpfc_enable_pci_dev(phba);
13050         if (error)
13051                 goto out_free_phba;
13052
13053         /* Set up SLI API function jump table for PCI-device group-1 HBAs */
13054         error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
13055         if (error)
13056                 goto out_disable_pci_dev;
13057
13058         /* Set up SLI-4 specific device PCI memory space */
13059         error = lpfc_sli4_pci_mem_setup(phba);
13060         if (error) {
13061                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13062                                 "1410 Failed to set up pci memory space.\n");
13063                 goto out_disable_pci_dev;
13064         }
13065
13066         /* Set up SLI-4 Specific device driver resources */
13067         error = lpfc_sli4_driver_resource_setup(phba);
13068         if (error) {
13069                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13070                                 "1412 Failed to set up driver resource.\n");
13071                 goto out_unset_pci_mem_s4;
13072         }
13073
13074         INIT_LIST_HEAD(&phba->active_rrq_list);
13075         INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
13076
13077         /* Set up common device driver resources */
13078         error = lpfc_setup_driver_resource_phase2(phba);
13079         if (error) {
13080                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13081                                 "1414 Failed to set up driver resource.\n");
13082                 goto out_unset_driver_resource_s4;
13083         }
13084
13085         /* Get the default values for Model Name and Description */
13086         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13087
13088         /* Now try to enable the interrupt and bring up the device */
13089         cfg_mode = phba->cfg_use_msi;
13090
13091         /* Put device to a known state before enabling interrupt */
13092         phba->pport = NULL;
13093         lpfc_stop_port(phba);
13094
13095         /* Init cpu_map array */
13096         lpfc_cpu_map_array_init(phba);
13097
13098         /* Init hba_eq_hdl array */
13099         lpfc_hba_eq_hdl_array_init(phba);
13100
13101         /* Configure and enable interrupt */
13102         intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
13103         if (intr_mode == LPFC_INTR_ERROR) {
13104                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13105                                 "0426 Failed to enable interrupt.\n");
13106                 error = -ENODEV;
13107                 goto out_unset_driver_resource;
13108         }
13109         /* Default to single EQ for non-MSI-X */
13110         if (phba->intr_type != MSIX) {
13111                 phba->cfg_irq_chann = 1;
13112                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13113                         if (phba->nvmet_support)
13114                                 phba->cfg_nvmet_mrq = 1;
13115                 }
13116         }
13117         lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
13118
13119         /* Create SCSI host to the physical port */
13120         error = lpfc_create_shost(phba);
13121         if (error) {
13122                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13123                                 "1415 Failed to create scsi host.\n");
13124                 goto out_disable_intr;
13125         }
13126         vport = phba->pport;
13127         shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
13128
13129         /* Configure sysfs attributes */
13130         error = lpfc_alloc_sysfs_attr(vport);
13131         if (error) {
13132                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13133                                 "1416 Failed to allocate sysfs attr\n");
13134                 goto out_destroy_shost;
13135         }
13136
13137         /* Set up SLI-4 HBA */
13138         if (lpfc_sli4_hba_setup(phba)) {
13139                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13140                                 "1421 Failed to set up hba\n");
13141                 error = -ENODEV;
13142                 goto out_free_sysfs_attr;
13143         }
13144
13145         /* Log the current active interrupt mode */
13146         phba->intr_mode = intr_mode;
13147         lpfc_log_intr_mode(phba, intr_mode);
13148
13149         /* Perform post initialization setup */
13150         lpfc_post_init_setup(phba);
13151
13152         /* Checking FW NVME support earlier in the driver load already
13153          * corrected the FC4 type, so no nvme_support check is needed here.
13154          */
13155         if (phba->nvmet_support == 0) {
13156                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13157                         /* Create NVME binding with nvme_fc_transport. This
13158                          * ensures the vport is initialized.  If the localport
13159                          * create fails, it should not unload the driver to
13160                          * support field issues.
13161                          */
13162                         error = lpfc_nvme_create_localport(vport);
13163                         if (error) {
13164                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13165                                                 "6004 NVME registration "
13166                                                 "failed, error x%x\n",
13167                                                 error);
13168                         }
13169                 }
13170         }
13171
13172         /* check for firmware upgrade or downgrade */
13173         if (phba->cfg_request_firmware_upgrade)
13174                 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
13175
13176         /* Check if there are static vports to be created. */
13177         lpfc_create_static_vport(phba);
13178
13179         /* Enable RAS FW log support */
13180         lpfc_sli4_ras_setup(phba);
13181
13182         INIT_LIST_HEAD(&phba->poll_list);
13183         timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
13184         cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
13185
13186         return 0;
13187
13188 out_free_sysfs_attr:
13189         lpfc_free_sysfs_attr(vport);
13190 out_destroy_shost:
13191         lpfc_destroy_shost(phba);
13192 out_disable_intr:
13193         lpfc_sli4_disable_intr(phba);
13194 out_unset_driver_resource:
13195         lpfc_unset_driver_resource_phase2(phba);
13196 out_unset_driver_resource_s4:
13197         lpfc_sli4_driver_resource_unset(phba);
13198 out_unset_pci_mem_s4:
13199         lpfc_sli4_pci_mem_unset(phba);
13200 out_disable_pci_dev:
13201         lpfc_disable_pci_dev(phba);
13202         if (shost)
13203                 scsi_host_put(shost);
13204 out_free_phba:
13205         lpfc_hba_free(phba);
13206         return error;
13207 }
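
/*
 * For orientation, the SLI-4 probe sequence above in brief (an editor's
 * summary of the code, in call order): lpfc_hba_alloc() ->
 * lpfc_enable_pci_dev() -> lpfc_api_table_setup() ->
 * lpfc_sli4_pci_mem_setup() -> lpfc_sli4_driver_resource_setup() ->
 * lpfc_setup_driver_resource_phase2() -> lpfc_sli4_enable_intr() ->
 * lpfc_create_shost() -> lpfc_alloc_sysfs_attr() -> lpfc_sli4_hba_setup()
 * -> lpfc_post_init_setup(), with the error path unwinding in reverse.
 */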
13208
13209 /**
13210  * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
13211  * @pdev: pointer to PCI device
13212  *
13213  * This routine is called from the kernel's PCI subsystem for a device with
13214  * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface spec
13215  * is removed from the PCI bus, it performs all the necessary cleanup for the
13216  * HBA device to be removed from the PCI subsystem properly.
13217  **/
13218 static void
13219 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
13220 {
13221         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13222         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
13223         struct lpfc_vport **vports;
13224         struct lpfc_hba *phba = vport->phba;
13225         int i;
13226
13227         /* Mark the device unloading flag */
13228         spin_lock_irq(&phba->hbalock);
13229         vport->load_flag |= FC_UNLOADING;
13230         spin_unlock_irq(&phba->hbalock);
13231
13232         /* Free the HBA sysfs attributes */
13233         lpfc_free_sysfs_attr(vport);
13234
13235         /* Release all the vports against this physical port */
13236         vports = lpfc_create_vport_work_array(phba);
13237         if (vports != NULL)
13238                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13239                         if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13240                                 continue;
13241                         fc_vport_terminate(vports[i]->fc_vport);
13242                 }
13243         lpfc_destroy_vport_work_array(phba, vports);
13244
13245         /* Remove FC host and then SCSI host with the physical port */
13246         fc_remove_host(shost);
13247         scsi_remove_host(shost);
13248
13249         /* Perform ndlp cleanup on the physical port.  The nvme and nvmet
13250          * localports are destroyed afterwards to clean up all transport memory.
13251          */
13252         lpfc_cleanup(vport);
13253         lpfc_nvmet_destroy_targetport(phba);
13254         lpfc_nvme_destroy_localport(vport);
13255
13256         /* De-allocate multi-XRI pools */
13257         if (phba->cfg_xri_rebalancing)
13258                 lpfc_destroy_multixri_pools(phba);
13259
13260         /*
13261          * Bring down the SLI Layer. This step disables all interrupts,
13262          * clears the rings, discards all mailbox commands, and resets
13263          * the HBA FCoE function.
13264          */
13265         lpfc_debugfs_terminate(vport);
13266
13267         lpfc_stop_hba_timers(phba);
13268         spin_lock_irq(&phba->port_list_lock);
13269         list_del_init(&vport->listentry);
13270         spin_unlock_irq(&phba->port_list_lock);
13271
13272         /* Perform scsi free before driver resource_unset since scsi
13273          * buffers are released to their corresponding pools here.
13274          */
13275         lpfc_io_free(phba);
13276         lpfc_free_iocb_list(phba);
13277         lpfc_sli4_hba_unset(phba);
13278
13279         lpfc_unset_driver_resource_phase2(phba);
13280         lpfc_sli4_driver_resource_unset(phba);
13281
13282         /* Unmap adapter Control and Doorbell registers */
13283         lpfc_sli4_pci_mem_unset(phba);
13284
13285         /* Release PCI resources and disable device's PCI function */
13286         scsi_host_put(shost);
13287         lpfc_disable_pci_dev(phba);
13288
13289         /* Finally, free the driver's device data structure */
13290         lpfc_hba_free(phba);
13291
13292         return;
13293 }
13294
13295 /**
13296  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
13297  * @pdev: pointer to PCI device
13298  * @msg: power management message
13299  *
13300  * This routine is called from the kernel's PCI subsystem to support system
13301  * Power Management (PM) for a device with the SLI-4 interface spec. When PM
13302  * invokes this method, it quiesces the device by stopping the driver's
13303  * worker thread for the device, turning off the device's interrupt and
13304  * DMA, and bringing the device offline. Note that the driver implements
13305  * only the minimum PM requirements of a power-aware driver for
13306  * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
13307  * passed to the suspend() method call are treated as SUSPEND, and the
13308  * driver fully reinitializes its device during the resume() method call.
13309  * Accordingly, the driver sets the device to the PCI_D3hot state in PCI
13310  * config space instead of setting it according to the @msg provided by PM.
13311  *
13312  * Return code
13313  *      0 - driver suspended the device
13314  *      Error otherwise
13315  **/
13316 static int
13317 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
13318 {
13319         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13320         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13321
13322         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13323                         "2843 PCI device Power Management suspend.\n");
13324
13325         /* Bring down the device */
13326         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13327         lpfc_offline(phba);
13328         kthread_stop(phba->worker_thread);
13329
13330         /* Disable interrupt from device */
13331         lpfc_sli4_disable_intr(phba);
13332         lpfc_sli4_queue_destroy(phba);
13333
13334         /* Save device state to PCI config space */
13335         pci_save_state(pdev);
13336         pci_set_power_state(pdev, PCI_D3hot);
13337
13338         return 0;
13339 }
13340
13341 /**
13342  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
13343  * @pdev: pointer to PCI device
13344  *
13345  * This routine is called from the kernel's PCI subsystem to support system
13346  * Power Management (PM) for a device with the SLI-4 interface spec. When PM
13347  * invokes this method, it restores the device's PCI config space state and
13348  * fully reinitializes the device and brings it online. Note that the driver
13349  * implements only the minimum PM requirements of a power-aware driver for
13350  * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
13351  * passed to the suspend() method call are treated as SUSPEND, and the
13352  * driver fully reinitializes its device during the resume() method call.
13353  * Accordingly, the device is set to PCI_D0 directly in PCI config space
13354  * before its state is restored.
13355  *
13356  * Return code
13357  *      0 - driver resumed the device
13358  *      Error otherwise
13359  **/
13360 static int
13361 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
13362 {
13363         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13364         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13365         uint32_t intr_mode;
13366         int error;
13367
13368         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13369                         "0292 PCI device Power Management resume.\n");
13370
13371         /* Restore device state from PCI config space */
13372         pci_set_power_state(pdev, PCI_D0);
13373         pci_restore_state(pdev);
13374
13375         /*
13376          * As the newer kernel behavior of pci_restore_state() clears the
13377          * device's saved_state flag, the restored state must be saved again.
13378          */
13379         pci_save_state(pdev);
13380
13381         if (pdev->is_busmaster)
13382                 pci_set_master(pdev);
13383
13384         /* Startup the kernel thread for this host adapter. */
13385         phba->worker_thread = kthread_run(lpfc_do_work, phba,
13386                                         "lpfc_worker_%d", phba->brd_no);
13387         if (IS_ERR(phba->worker_thread)) {
13388                 error = PTR_ERR(phba->worker_thread);
13389                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13390                                 "0293 PM resume failed to start worker "
13391                                 "thread: error=x%x.\n", error);
13392                 return error;
13393         }
13394
13395         /* Configure and enable interrupt */
13396         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13397         if (intr_mode == LPFC_INTR_ERROR) {
13398                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13399                                 "0294 PM resume Failed to enable interrupt\n");
13400                 return -EIO;
13401         }
13402         phba->intr_mode = intr_mode;
13403
13404         /* Restart HBA and bring it online */
13405         lpfc_sli_brdrestart(phba);
13406         lpfc_online(phba);
13407
13408         /* Log the current active interrupt mode */
13409         lpfc_log_intr_mode(phba, phba->intr_mode);
13410
13411         return 0;
13412 }
13413
13414 /**
13415  * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
13416  * @phba: pointer to lpfc hba data structure.
13417  *
13418  * This routine is called to prepare the SLI4 device for PCI slot recovery.
13419  * It aborts all outstanding SCSI I/Os to the PCI device.
13420  **/
13421 static void
13422 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
13423 {
13424         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13425                         "2828 PCI channel I/O abort preparing for recovery\n");
13426         /*
13427          * There may be errored I/Os through the HBA; abort all I/Os on the
13428          * txcmplq and let the SCSI mid-layer retry them to recover.
13429          */
13430         lpfc_sli_abort_fcp_rings(phba);
13431 }
13432
13433 /**
13434  * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
13435  * @phba: pointer to lpfc hba data structure.
13436  *
13437  * This routine is called to prepare the SLI4 device for PCI slot reset. It
13438  * disables the device interrupt and the PCI device, and flushes the
13439  * internal pending FCP I/Os.
13440  **/
13441 static void
13442 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
13443 {
13444         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13445                         "2826 PCI channel disable preparing for reset\n");
13446
13447         /* Block any management I/Os to the device */
13448         lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
13449
13450         /* Block all SCSI devices' I/Os on the host */
13451         lpfc_scsi_dev_block(phba);
13452
13453         /* Flush all driver's outstanding I/Os as we are to reset */
13454         lpfc_sli_flush_io_rings(phba);
13455
13456         /* stop all timers */
13457         lpfc_stop_hba_timers(phba);
13458
13459         /* Disable interrupt and pci device */
13460         lpfc_sli4_disable_intr(phba);
13461         lpfc_sli4_queue_destroy(phba);
13462         pci_disable_device(phba->pcidev);
13463 }
13464
13465 /**
13466  * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
13467  * @phba: pointer to lpfc hba data structure.
13468  *
13469  * This routine is called to prepare the SLI4 device for the PCI slot being
13470  * permanently disabled. It blocks the SCSI transport layer traffic and
13471  * flushes the pending FCP I/Os.
13472  **/
13473 static void
13474 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
13475 {
13476         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13477                         "2827 PCI channel permanent disable for failure\n");
13478
13479         /* Block all SCSI devices' I/Os on the host */
13480         lpfc_scsi_dev_block(phba);
13481
13482         /* stop all timers */
13483         lpfc_stop_hba_timers(phba);
13484
13485         /* Clean up all driver's outstanding I/Os */
13486         lpfc_sli_flush_io_rings(phba);
13487 }
13488
13489 /**
13490  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
13491  * @pdev: pointer to PCI device.
13492  * @state: the current PCI connection state.
13493  *
13494  * This routine is called from the PCI subsystem for error handling on a
13495  * device with the SLI-4 interface spec. The PCI subsystem calls this
13496  * function after a PCI bus error affecting this device has been detected.
13497  * When this function is invoked, it needs to stop all the I/Os and
13498  * interrupt(s) to the device. Once that is done, it returns
13499  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery.
13500  *
13501  * Return codes
13502  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13503  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13504  **/
13505 static pci_ers_result_t
13506 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
13507 {
13508         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13509         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13510
13511         switch (state) {
13512         case pci_channel_io_normal:
13513                 /* Non-fatal error, prepare for recovery */
13514                 lpfc_sli4_prep_dev_for_recover(phba);
13515                 return PCI_ERS_RESULT_CAN_RECOVER;
13516         case pci_channel_io_frozen:
13517                 /* Fatal error, prepare for slot reset */
13518                 lpfc_sli4_prep_dev_for_reset(phba);
13519                 return PCI_ERS_RESULT_NEED_RESET;
13520         case pci_channel_io_perm_failure:
13521                 /* Permanent failure, prepare for device down */
13522                 lpfc_sli4_prep_dev_for_perm_failure(phba);
13523                 return PCI_ERS_RESULT_DISCONNECT;
13524         default:
13525                 /* Unknown state, prepare and request slot reset */
13526                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13527                                 "2825 Unknown PCI error state: x%x\n", state);
13528                 lpfc_sli4_prep_dev_for_reset(phba);
13529                 return PCI_ERS_RESULT_NEED_RESET;
13530         }
13531 }
13532
13533 /**
13534  * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
13535  * @pdev: pointer to PCI device.
13536  *
13537  * This routine is called from the PCI subsystem for error handling on a
13538  * device with the SLI-4 interface spec. It is called after the PCI bus has
13539  * been reset to restart the PCI card from scratch, as if from a cold-boot.
13540  * During the PCI subsystem error recovery, after the driver returns
13541  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs the proper error
13542  * recovery and then calls this routine before calling the .resume method to
13543  * recover the device. This function initializes the HBA device and enables
13544  * its interrupt, but it just puts the HBA into an offline state without
13545  * passing any I/O traffic.
13546  *
13547  * Return codes
13548  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
13549  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13550  */
13551 static pci_ers_result_t
13552 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
13553 {
13554         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13555         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13556         struct lpfc_sli *psli = &phba->sli;
13557         uint32_t intr_mode;
13558
13559         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
13560         if (pci_enable_device_mem(pdev)) {
13561                 printk(KERN_ERR "lpfc: Cannot re-enable "
13562                         "PCI device after reset.\n");
13563                 return PCI_ERS_RESULT_DISCONNECT;
13564         }
13565
13566         pci_restore_state(pdev);
13567
13568         /*
13569          * As the newer kernel behavior of pci_restore_state() clears the
13570          * device's saved_state flag, the restored state must be saved again.
13571          */
13572         pci_save_state(pdev);
13573
13574         if (pdev->is_busmaster)
13575                 pci_set_master(pdev);
13576
13577         spin_lock_irq(&phba->hbalock);
13578         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
13579         spin_unlock_irq(&phba->hbalock);
13580
13581         /* Configure and enable interrupt */
13582         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13583         if (intr_mode == LPFC_INTR_ERROR) {
13584                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13585                                 "2824 Cannot re-enable interrupt after "
13586                                 "slot reset.\n");
13587                 return PCI_ERS_RESULT_DISCONNECT;
13588         }
13589         phba->intr_mode = intr_mode;
13590
13591         /* Log the current active interrupt mode */
13592         lpfc_log_intr_mode(phba, phba->intr_mode);
13593
13594         return PCI_ERS_RESULT_RECOVERED;
13595 }
13596
13597 /**
13598  * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
13599  * @pdev: pointer to PCI device
13600  *
13601  * This routine is called from the PCI subsystem for error handling on a
13602  * device with the SLI-4 interface spec. It is called when kernel error
13603  * recovery tells the lpfc driver that it is OK to resume normal PCI
13604  * operation after PCI bus error recovery. After this call, traffic can
13605  * start to flow from this device again.
13606  **/
13607 static void
13608 lpfc_io_resume_s4(struct pci_dev *pdev)
13609 {
13610         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13611         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13612
13613         /*
13614          * In case of slot reset, the function reset is performed through
13615          * a mailbox command, which needs DMA to be enabled, so this
13616          * operation has to be done in the io resume phase. Taking the
13617          * device offline will perform the necessary cleanup.
13618          */
13619         if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
13620                 /* Perform device reset */
13621                 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13622                 lpfc_offline(phba);
13623                 lpfc_sli_brdrestart(phba);
13624                 /* Bring the device back online */
13625                 lpfc_online(phba);
13626         }
13627 }
13628
13629 /**
13630  * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
13631  * @pdev: pointer to PCI device
13632  * @pid: pointer to PCI device identifier
13633  *
13634  * This routine is to be registered to the kernel's PCI subsystem. When an
13635  * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
13636  * looks at the PCI device-specific information to see whether the driver
13637  * states that it can support this kind of device. If the match is
13638  * successful, the driver core invokes this routine. This routine dispatches
13639  * the action to the proper SLI-3 or SLI-4 device probing routine, which will
13640  * do all the initialization that it needs to do to handle the HBA device
13641  * properly.
13642  *
13643  * Return code
13644  *      0 - driver can claim the device
13645  *      negative value - driver cannot claim the device
13646  **/
13647 static int
13648 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13649 {
13650         int rc;
13651         struct lpfc_sli_intf intf;
13652
13653         if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
13654                 return -ENODEV;
13655
13656         if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
13657             (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
13658                 rc = lpfc_pci_probe_one_s4(pdev, pid);
13659         else
13660                 rc = lpfc_pci_probe_one_s3(pdev, pid);
13661
13662         return rc;
13663 }
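
/*
 * Dispatch rule above in brief (editor's note): SLI-4 ports expose a
 * valid LPFC_SLI_INTF word in PCI config space whose SLI revision field
 * reads SLI4; any other value falls through to the SLI-3 probe path.
 */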
13664
13665 /**
13666  * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
13667  * @pdev: pointer to PCI device
13668  *
13669  * This routine is to be registered to the kernel's PCI subsystem. When an
13670  * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
13671  * This routine dispatches the action to the proper SLI-3 or SLI-4 device
13672  * remove routine, which will perform all the necessary cleanup for the
13673  * device to be removed from the PCI subsystem properly.
13674  **/
13675 static void
13676 lpfc_pci_remove_one(struct pci_dev *pdev)
13677 {
13678         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13679         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13680
13681         switch (phba->pci_dev_grp) {
13682         case LPFC_PCI_DEV_LP:
13683                 lpfc_pci_remove_one_s3(pdev);
13684                 break;
13685         case LPFC_PCI_DEV_OC:
13686                 lpfc_pci_remove_one_s4(pdev);
13687                 break;
13688         default:
13689                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13690                                 "1424 Invalid PCI device group: 0x%x\n",
13691                                 phba->pci_dev_grp);
13692                 break;
13693         }
13694         return;
13695 }
13696
13697 /**
13698  * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
13699  * @pdev: pointer to PCI device
13700  * @msg: power management message
13701  *
13702  * This routine is to be registered to the kernel's PCI subsystem to support
13703  * system Power Management (PM). When PM invokes this method, it dispatches
13704  * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
13705  * suspend the device.
13706  *
13707  * Return code
13708  *      0 - driver suspended the device
13709  *      Error otherwise
13710  **/
13711 static int
13712 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
13713 {
13714         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13715         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13716         int rc = -ENODEV;
13717
13718         switch (phba->pci_dev_grp) {
13719         case LPFC_PCI_DEV_LP:
13720                 rc = lpfc_pci_suspend_one_s3(pdev, msg);
13721                 break;
13722         case LPFC_PCI_DEV_OC:
13723                 rc = lpfc_pci_suspend_one_s4(pdev, msg);
13724                 break;
13725         default:
13726                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13727                                 "1425 Invalid PCI device group: 0x%x\n",
13728                                 phba->pci_dev_grp);
13729                 break;
13730         }
13731         return rc;
13732 }
13733
13734 /**
13735  * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
13736  * @pdev: pointer to PCI device
13737  *
13738  * This routine is to be registered to the kernel's PCI subsystem to support
13739  * system Power Management (PM). When PM invokes this method, it dispatches
13740  * the action to the proper SLI-3 or SLI-4 device resume routine, which will
13741  * resume the device.
13742  *
13743  * Return code
13744  *      0 - driver resumed the device
13745  *      Error otherwise
13746  **/
13747 static int
13748 lpfc_pci_resume_one(struct pci_dev *pdev)
13749 {
13750         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13751         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13752         int rc = -ENODEV;
13753
13754         switch (phba->pci_dev_grp) {
13755         case LPFC_PCI_DEV_LP:
13756                 rc = lpfc_pci_resume_one_s3(pdev);
13757                 break;
13758         case LPFC_PCI_DEV_OC:
13759                 rc = lpfc_pci_resume_one_s4(pdev);
13760                 break;
13761         default:
13762                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13763                                 "1426 Invalid PCI device group: 0x%x\n",
13764                                 phba->pci_dev_grp);
13765                 break;
13766         }
13767         return rc;
13768 }
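
/*
 * Sketch of the legacy PCI PM flow these dispatchers serve (editor's
 * illustration; the PCI core, not the driver, makes these calls through
 * the .suspend/.resume members of lpfc_driver below):
 */
#if 0
/* System suspend: quiesce the HBA, then drop to PCI_D3hot */
lpfc_pci_suspend_one(pdev, PMSG_SUSPEND);

/* System resume: return to PCI_D0, restore config space, re-init the HBA */
lpfc_pci_resume_one(pdev);
#endif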
13769
13770 /**
13771  * lpfc_io_error_detected - lpfc method for handling PCI I/O error
13772  * @pdev: pointer to PCI device.
13773  * @state: the current PCI connection state.
13774  *
13775  * This routine is registered to the PCI subsystem for error handling. This
13776  * function is called by the PCI subsystem after a PCI bus error affecting
13777  * this device has been detected. When this routine is invoked, it dispatches
13778  * the action to the proper SLI-3 or SLI-4 device error detected handling
13779  * routine, which will perform the proper error detected operation.
13780  *
13781  * Return codes
13782  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13783  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13784  **/
13785 static pci_ers_result_t
13786 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13787 {
13788         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13789         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13790         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13791
13792         switch (phba->pci_dev_grp) {
13793         case LPFC_PCI_DEV_LP:
13794                 rc = lpfc_io_error_detected_s3(pdev, state);
13795                 break;
13796         case LPFC_PCI_DEV_OC:
13797                 rc = lpfc_io_error_detected_s4(pdev, state);
13798                 break;
13799         default:
13800                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13801                                 "1427 Invalid PCI device group: 0x%x\n",
13802                                 phba->pci_dev_grp);
13803                 break;
13804         }
13805         return rc;
13806 }
13807
13808 /**
13809  * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
13810  * @pdev: pointer to PCI device.
13811  *
13812  * This routine is registered to the PCI subsystem for error handling. This
13813  * function is called after PCI bus has been reset to restart the PCI card
13814  * from scratch, as if from a cold-boot. When this routine is invoked, it
13815  * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
13816  * routine, which will perform the proper device reset.
13817  *
13818  * Return codes
13819  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
13820  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13821  **/
13822 static pci_ers_result_t
13823 lpfc_io_slot_reset(struct pci_dev *pdev)
13824 {
13825         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13826         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13827         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13828
13829         switch (phba->pci_dev_grp) {
13830         case LPFC_PCI_DEV_LP:
13831                 rc = lpfc_io_slot_reset_s3(pdev);
13832                 break;
13833         case LPFC_PCI_DEV_OC:
13834                 rc = lpfc_io_slot_reset_s4(pdev);
13835                 break;
13836         default:
13837                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13838                                 "1428 Invalid PCI device group: 0x%x\n",
13839                                 phba->pci_dev_grp);
13840                 break;
13841         }
13842         return rc;
13843 }
13844
13845 /**
13846  * lpfc_io_resume - lpfc method for resuming PCI I/O operation
13847  * @pdev: pointer to PCI device
13848  *
13849  * This routine is registered to the PCI subsystem for error handling. It
13850  * is called when kernel error recovery tells the lpfc driver that it is
13851  * OK to resume normal PCI operation after PCI bus error recovery. When
13852  * this routine is invoked, it dispatches the action to the proper SLI-3
13853  * or SLI-4 device io_resume routine, which will resume the device operation.
13854  **/
13855 static void
13856 lpfc_io_resume(struct pci_dev *pdev)
13857 {
13858         struct Scsi_Host *shost = pci_get_drvdata(pdev);
13859         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13860
13861         switch (phba->pci_dev_grp) {
13862         case LPFC_PCI_DEV_LP:
13863                 lpfc_io_resume_s3(pdev);
13864                 break;
13865         case LPFC_PCI_DEV_OC:
13866                 lpfc_io_resume_s4(pdev);
13867                 break;
13868         default:
13869                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13870                                 "1429 Invalid PCI device group: 0x%x\n",
13871                                 phba->pci_dev_grp);
13872                 break;
13873         }
13874         return;
13875 }
13876
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether Optimized Access Storage (OAS) is supported
 * by this adapter. If it is, the Flash Optimized Fabric (FOF) configuration
 * flag is set. Otherwise, the enable oas flag is cleared and the pool
 * created for OAS device data is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
        if (!phba->cfg_EnableXLane)
                return;

        if (phba->sli4_hba.pc_sli4_params.oas_supported) {
                phba->cfg_fof = 1;
        } else {
                phba->cfg_fof = 0;
                mempool_destroy(phba->device_data_mem_pool);
                phba->device_data_mem_pool = NULL;
        }
}
/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether RAS firmware logging is supported by the
 * adapter and, if so, whether it is enabled for this PCI function with a
 * configured log buffer size.
 **/
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
        switch (phba->pcidev->device) {
        case PCI_DEVICE_ID_LANCER_G6_FC:
        case PCI_DEVICE_ID_LANCER_G7_FC:
                phba->ras_fwlog.ras_hwsupport = true;
                if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
                    phba->cfg_ras_fwlog_buffsize)
                        phba->ras_fwlog.ras_enabled = true;
                else
                        phba->ras_fwlog.ras_enabled = false;
                break;
        default:
                phba->ras_fwlog.ras_hwsupport = false;
        }
}
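
/*
 * Note: a consumer of the flags set above might gate firmware-log setup on
 * ras_enabled, along these lines (illustrative sketch only; the helper name
 * lpfc_setup_fwlog() is hypothetical, not part of this driver):
 *
 *      lpfc_sli4_ras_init(phba);
 *      if (phba->ras_fwlog.ras_enabled)
 *              lpfc_setup_fwlog(phba);
 *
 * ras_hwsupport records whether the hardware can log at all, while
 * ras_enabled additionally requires a matching PCI function and a non-zero
 * buffer size from the module configuration.
 */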

MODULE_DEVICE_TABLE(pci, lpfc_id_table);
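
/*
 * MODULE_DEVICE_TABLE() exports lpfc_id_table in the module's alias
 * information, so userspace (udev/modprobe) can autoload lpfc when a
 * matching PCI device appears. A hypothetical check from the shell:
 *
 *      modinfo lpfc | grep '^alias:.*pci'
 */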

static const struct pci_error_handlers lpfc_err_handler = {
        .error_detected = lpfc_io_error_detected,
        .slot_reset = lpfc_io_slot_reset,
        .resume = lpfc_io_resume,
};
static struct pci_driver lpfc_driver = {
        .name           = LPFC_DRIVER_NAME,
        .id_table       = lpfc_id_table,
        .probe          = lpfc_pci_probe_one,
        .remove         = lpfc_pci_remove_one,
        .shutdown       = lpfc_pci_remove_one,
        .suspend        = lpfc_pci_suspend_one,
        .resume         = lpfc_pci_resume_one,
        .err_handler    = &lpfc_err_handler,
};
static const struct file_operations lpfc_mgmt_fop = {
        .owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "lpfcmgmt",
        .fops = &lpfc_mgmt_fop,
};
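
/*
 * Note: MISC_DYNAMIC_MINOR asks the misc core to pick a free minor number
 * at registration time, and udev surfaces the node as /dev/lpfcmgmt. A
 * minimal (hypothetical) userspace probe for the node:
 *
 *      int fd = open("/dev/lpfcmgmt", O_RDONLY);
 *      if (fd < 0)
 *              perror("lpfcmgmt not available");
 *
 * The fops here set only .owner, so holding the node open pins the module;
 * that appears to be the node's main purpose for management tools.
 */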

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 */
static int __init
lpfc_init(void)
{
        int error = 0;

        printk(LPFC_MODULE_DESC "\n");
        printk(LPFC_COPYRIGHT "\n");

        error = misc_register(&lpfc_mgmt_dev);
        if (error)
                printk(KERN_ERR "Could not register lpfcmgmt device, "
                        "misc_register returned with status %d\n", error);

        lpfc_transport_functions.vport_create = lpfc_vport_create;
        lpfc_transport_functions.vport_delete = lpfc_vport_delete;
        lpfc_transport_template =
                                fc_attach_transport(&lpfc_transport_functions);
        if (lpfc_transport_template == NULL)
                return -ENOMEM;
        lpfc_vport_transport_template =
                fc_attach_transport(&lpfc_vport_transport_functions);
        if (lpfc_vport_transport_template == NULL) {
                fc_release_transport(lpfc_transport_template);
                return -ENOMEM;
        }
        lpfc_nvme_cmd_template();
        lpfc_nvmet_cmd_template();

        /* Initialize in case vector mapping is needed */
        lpfc_present_cpu = num_present_cpus();

        error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                        "lpfc/sli4:online",
                                        lpfc_cpu_online, lpfc_cpu_offline);
        if (error < 0)
                goto cpuhp_failure;
        lpfc_cpuhp_state = error;

        error = pci_register_driver(&lpfc_driver);
        if (error)
                goto unwind;

        return error;

unwind:
        cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
        fc_release_transport(lpfc_transport_template);
        fc_release_transport(lpfc_vport_transport_template);

        return error;
}
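
/*
 * Note: with CPUHP_AP_ONLINE_DYN, cpuhp_setup_state_multi() allocates a
 * dynamic hotplug state and returns its (positive) identifier, which is why
 * a non-negative return is saved in lpfc_cpuhp_state rather than treated as
 * an error. Each HBA later attaches an instance to that state so
 * lpfc_cpu_online() and lpfc_cpu_offline() run per CPU, in outline:
 *
 *      cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
 *
 * The add/remove call sites live in lpfc_cpuhp_add()/lpfc_cpuhp_remove()
 * elsewhere in this file.
 */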

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
        misc_deregister(&lpfc_mgmt_dev);
        pci_unregister_driver(&lpfc_driver);
        cpuhp_remove_multi_state(lpfc_cpuhp_state);
        fc_release_transport(lpfc_transport_template);
        fc_release_transport(lpfc_vport_transport_template);
        idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);